unique_id
int64
200
186k
target
int64
0
1
code
stringlengths
76
52.1k
__index_level_0__
int64
0
199
8,052
0
/*
 * CVE-2006-0039 (patched): arp_tables do_add_counters().
 * Copies an xt_counters_info header plus tmp.num_counters xt_counters
 * records from user space and folds them into the counters of the named
 * arptable (per-CPU copy of the current processor).
 * Returns 0 on success or a negative errno: -EFAULT (bad user copy),
 * -EINVAL (length/counter-count mismatch), -ENOMEM (vmalloc failure),
 * -ENOENT / PTR_ERR (table lookup failure).
 * NOTE(review): the len check multiplies tmp.num_counters by
 * sizeof(struct xt_counters) before comparing; presumably overflow is
 * benign because a wrapped value cannot equal the user-supplied len that
 * also bounds the vmalloc — confirm against the upstream fix.
 */
static int CVE_2006_0039_PATCHED_do_add_counters(void __user *user, unsigned int len) { unsigned int i; struct xt_counters_info tmp, *paddc; struct arpt_table *t; struct xt_table_info *private; int ret = 0; void *loc_cpu_entry; if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) return -EFAULT; if (len != sizeof(tmp) + tmp.num_counters*sizeof(struct xt_counters)) return -EINVAL; paddc = vmalloc(len); if (!paddc) return -ENOMEM; if (copy_from_user(paddc, user, len) != 0) { ret = -EFAULT; goto free; } t = xt_find_table_lock(NF_ARP, tmp.name); if (!t || IS_ERR(t)) { ret = t ? PTR_ERR(t) : -ENOENT; goto free; } write_lock_bh(&t->lock); private = t->private; if (private->number != tmp.num_counters) { ret = -EINVAL; goto unlock_up_free; } i = 0; /* Choose the copy that is on our node */ loc_cpu_entry = private->entries[smp_processor_id()]; ARPT_ENTRY_ITERATE(loc_cpu_entry, private->size, add_counter_to_entry, paddc->counters, &i); unlock_up_free: write_unlock_bh(&t->lock); xt_table_unlock(t); module_put(t->me); free: vfree(paddc); return ret; }
100
149,182
0
// CVE-2006-2778 (patched): nsCrypto::SignText — implements window.crypto.signText().
// Flow visible below: validate the JS call context and caOption ("auto"/"ask"),
// collect the user's email-signing certificates (optionally filtered by CA names
// passed as extra JS arguments), show a confirmation dialog on the UI thread,
// check the token password, then produce a PKCS#7 signed-data blob over the
// SHA-1 hash of the (charset-converted) text and return it base64-encoded in
// aResult.  By design, failures are reported via the "error:..." strings in
// aResult with NS_OK returned, for NS4.x backwards compatibility.
// NOTE(review): numCAs = argc - 2 is a PRUint32; the "numCAs > 0" guard assumes
// argc >= 2 — confirm XPConnect guarantees the two fixed arguments here.
// NOTE(review): PORT_Free(result) is reached even when result is null —
// presumably PORT_Free tolerates NULL; verify against the NSPR/NSS docs.
NS_IMETHODIMP CVE_2006_2778_PATCHED_nsCrypto::SignText(const nsAString& aStringToSign, const nsAString& aCaOption, nsAString& aResult) { // XXX This code should return error codes, but we're keeping this // backwards compatible with NS4.x and so we can't throw exceptions. NS_NAMED_LITERAL_STRING(internalError, "error:internalError"); aResult.Truncate(); nsCOMPtr<nsIXPCNativeCallContext> ncc; nsCOMPtr<nsIXPConnect> xpc(do_GetService(nsIXPConnect::GetCID())); if (xpc) { xpc->GetCurrentNativeCallContext(getter_AddRefs(ncc)); } if (!ncc) { aResult.Append(internalError); return NS_OK; } PRUint32 argc; ncc->GetArgc(&argc); JSContext *cx; ncc->GetJSContext(&cx); if (!cx) { aResult.Append(internalError); return NS_OK; } if (!aCaOption.Equals(NS_LITERAL_STRING("auto")) && !aCaOption.Equals(NS_LITERAL_STRING("ask"))) { JS_ReportError(cx, "%s%s\n", JS_ERROR, "caOption argument must be ask or auto"); aResult.Append(internalError); return NS_OK; } // It was decided to always behave as if "ask" were specified. // XXX Should we warn in the JS Console for auto? 
nsCOMPtr<nsIInterfaceRequestor> uiContext = new PipUIContext; if (!uiContext) { aResult.Append(internalError); return NS_OK; } PRBool bestOnly = PR_TRUE; PRBool validOnly = PR_TRUE; CERTCertList* certList = CERT_FindUserCertsByUsage(CERT_GetDefaultCertDB(), certUsageEmailSigner, bestOnly, validOnly, uiContext); PRUint32 numCAs = argc - 2; if (numCAs > 0) { nsAutoArrayPtr<char*> caNames(new char*[numCAs]); if (!caNames) { aResult.Append(internalError); return NS_OK; } jsval *argv = nsnull; ncc->GetArgvPtr(&argv); PRUint32 i; for (i = 2; i < argc; ++i) { JSString *caName = JS_ValueToString(cx, argv[i]); if (!caName) { aResult.Append(internalError); return NS_OK; } caNames[i - 2] = JS_GetStringBytes(caName); } if (certList && CERT_FilterCertListByCANames(certList, numCAs, caNames, certUsageEmailSigner) != SECSuccess) { aResult.Append(internalError); return NS_OK; } } if (!certList || CERT_LIST_EMPTY(certList)) { aResult.Append(NS_LITERAL_STRING("error:noMatchingCert")); return NS_OK; } nsCOMPtr<nsIFormSigningDialog> fsd = do_CreateInstance(NS_FORMSIGNINGDIALOG_CONTRACTID); if (!fsd) { aResult.Append(internalError); return NS_OK; } nsCOMPtr<nsIProxyObjectManager> proxyman = do_GetService(NS_XPCOMPROXY_CONTRACTID); if (!proxyman) { aResult.Append(internalError); return NS_OK; } nsCOMPtr<nsIFormSigningDialog> proxied_fsd; nsresult rv = proxyman->GetProxyForObject(NS_UI_THREAD_EVENTQ, NS_GET_IID(nsIFormSigningDialog), fsd, PROXY_SYNC, getter_AddRefs(proxied_fsd)); if (NS_FAILED(rv)) { aResult.Append(internalError); return NS_OK; } nsCOMPtr<nsIDocument> document; GetDocumentFromContext(cx, getter_AddRefs(document)); if (!document) { aResult.Append(internalError); return NS_OK; } // Get the hostname from the URL of the document. 
nsIURI* uri = document->GetDocumentURI(); if (!uri) { aResult.Append(internalError); return NS_OK; } nsCString host; rv = uri->GetHost(host); if (NS_FAILED(rv)) { aResult.Append(internalError); return NS_OK; } PRInt32 numberOfCerts = 0; CERTCertListNode* node; for (node = CERT_LIST_HEAD(certList); !CERT_LIST_END(node, certList); node = CERT_LIST_NEXT(node)) { ++numberOfCerts; } CERTCertNicknames* nicknames = CERT_NicknameStringsFromCertList(certList, NICKNAME_EXPIRED_STRING, NICKNAME_NOT_YET_VALID_STRING); if (!nicknames) { aResult.Append(internalError); return NS_OK; } CERTCertNicknamesCleaner cnc(nicknames); NS_ASSERTION(nicknames->numnicknames == numberOfCerts, "nicknames->numnicknames != numberOfCerts"); nsAutoArrayPtr<PRUnichar*> certNicknameList(new PRUnichar*[nicknames->numnicknames * 2]); if (!certNicknameList) { aResult.Append(internalError); return NS_OK; } PRUnichar** certDetailsList = certNicknameList.get() + nicknames->numnicknames; PRInt32 certsToUse; for (node = CERT_LIST_HEAD(certList), certsToUse = 0; !CERT_LIST_END(node, certList) && certsToUse < nicknames->numnicknames; node = CERT_LIST_NEXT(node)) { nsRefPtr<nsNSSCertificate> tempCert = new nsNSSCertificate(node->cert); if (tempCert) { nsAutoString nickWithSerial, details; rv = tempCert->FormatUIStrings(NS_ConvertUTF8toUTF16(nicknames->nicknames[certsToUse]), nickWithSerial, details); if (NS_SUCCEEDED(rv)) { certNicknameList[certsToUse] = ToNewUnicode(nickWithSerial); if (certNicknameList[certsToUse]) { certDetailsList[certsToUse] = ToNewUnicode(details); if (!certDetailsList[certsToUse]) { nsMemory::Free(certNicknameList[certsToUse]); continue; } ++certsToUse; } } } } if (certsToUse == 0) { aResult.Append(internalError); return NS_OK; } NS_ConvertUTF8toUTF16 utf16Host(host); CERTCertificate *signingCert = nsnull; PRBool tryAgain, canceled; nsAutoString password; do { // Throw up the form signing confirmation dialog and get back the index // of the selected cert. 
PRInt32 selectedIndex = -1; rv = proxied_fsd->ConfirmSignText(uiContext, utf16Host, aStringToSign, NS_CONST_CAST(const PRUnichar**, certNicknameList.get()), NS_CONST_CAST(const PRUnichar**, certDetailsList), certsToUse, &selectedIndex, password, &canceled); if (NS_FAILED(rv) || canceled) { break; // out of tryAgain loop } PRInt32 j = 0; for (node = CERT_LIST_HEAD(certList); !CERT_LIST_END(node, certList); node = CERT_LIST_NEXT(node)) { if (j == selectedIndex) { signingCert = CERT_DupCertificate(node->cert); break; // out of cert list iteration loop } ++j; } if (!signingCert) { rv = NS_ERROR_FAILURE; break; // out of tryAgain loop } NS_ConvertUTF16toUTF8 pwUtf8(password); tryAgain = PK11_CheckUserPassword(signingCert->slot, NS_CONST_CAST(char *, pwUtf8.get())) != SECSuccess; // XXX we should show an error dialog before retrying } while (tryAgain); PRInt32 k; for (k = 0; k < certsToUse; ++k) { nsMemory::Free(certNicknameList[k]); nsMemory::Free(certDetailsList[k]); } if (NS_FAILED(rv)) { // something went wrong inside the tryAgain loop aResult.Append(internalError); return NS_OK; } if (canceled) { aResult.Append(NS_LITERAL_STRING("error:userCancel")); return NS_OK; } SECKEYPrivateKey* privKey = PK11_FindKeyByAnyCert(signingCert, uiContext); if (!privKey) { aResult.Append(internalError); return NS_OK; } nsCAutoString charset(document->GetDocumentCharacterSet()); // XXX Doing what nsFormSubmission::GetEncoder does (see // http://bugzilla.mozilla.org/show_bug.cgi?id=81203). 
if (charset.Equals(NS_LITERAL_CSTRING("ISO-8859-1"))) { charset.Assign(NS_LITERAL_CSTRING("windows-1252")); } nsCOMPtr<nsISaveAsCharset> encoder = do_CreateInstance(NS_SAVEASCHARSET_CONTRACTID); if (encoder) { rv = encoder->Init(charset.get(), (nsISaveAsCharset::attr_EntityAfterCharsetConv + nsISaveAsCharset::attr_FallbackDecimalNCR), 0); } nsXPIDLCString buffer; if (aStringToSign.Length() > 0) { if (encoder && NS_SUCCEEDED(rv)) { rv = encoder->Convert(PromiseFlatString(aStringToSign).get(), getter_Copies(buffer)); if (NS_FAILED(rv)) { aResult.Append(internalError); return NS_OK; } } else { AppendUTF16toUTF8(aStringToSign, buffer); } } HASHContext *hc = HASH_Create(HASH_AlgSHA1); if (!hc) { aResult.Append(internalError); return NS_OK; } unsigned char hash[SHA1_LENGTH]; SECItem digest; digest.data = hash; HASH_Begin(hc); HASH_Update(hc, NS_REINTERPRET_CAST(const unsigned char*, buffer.get()), buffer.Length()); HASH_End(hc, digest.data, &digest.len, SHA1_LENGTH); HASH_Destroy(hc); nsCString p7; SECStatus srv = SECFailure; SEC_PKCS7ContentInfo *ci = SEC_PKCS7CreateSignedData(signingCert, certUsageEmailSigner, nsnull, SEC_OID_SHA1, &digest, nsnull, uiContext); if (ci) { srv = SEC_PKCS7IncludeCertChain(ci, nsnull); if (srv == SECSuccess) { srv = SEC_PKCS7AddSigningTime(ci); if (srv == SECSuccess) { srv = SEC_PKCS7Encode(ci, signTextOutputCallback, &p7, nsnull, nsnull, uiContext); } } SEC_PKCS7DestroyContentInfo(ci); } if (srv != SECSuccess) { aResult.Append(internalError); return NS_OK; } SECItem binary_item; binary_item.data = NS_REINTERPRET_CAST(unsigned char*, NS_CONST_CAST(char*, p7.get())); binary_item.len = p7.Length(); char *result = NSSBase64_EncodeItem(nsnull, nsnull, 0, &binary_item); if (result) { AppendASCIItoUTF16(result, aResult); } else { aResult.Append(internalError); } PORT_Free(result); return NS_OK; }
101
61,530
0
/*
 * CVE-2006-5462 (patched): RSA_CheckSign().
 * Verifies an RSA PKCS#1 v1.5 signature: applies the public-key
 * operation to recover the encryption block, then checks the complete
 * padding layout EB = 00 || 01 || FF..FF || 00 before comparing the
 * trailing hash_len bytes against the caller's hash.  Requiring the
 * 0xFF run to extend all the way to modulus_len - hash_len - 1 (and the
 * separator 0x00 to sit exactly there) is the fix for Bleichenbacher
 * style forgeries against small-exponent keys, where extra garbage
 * after a short padding run would previously be ignored.
 * Returns SECSuccess only when every byte matches; SECFailure otherwise.
 * Note the two exit labels: "loser" frees the work buffer, "failure" is
 * used before the buffer exists.
 */
SECStatus CVE_2006_5462_PATCHED_RSA_CheckSign(NSSLOWKEYPublicKey *key, unsigned char * sign, unsigned int sign_len, unsigned char * hash, unsigned int hash_len) { SECStatus rv; unsigned int modulus_len = nsslowkey_PublicModulusLen(key); unsigned int i; unsigned char * buffer; modulus_len = nsslowkey_PublicModulusLen(key); if (sign_len != modulus_len) goto failure; if (hash_len > modulus_len - 8) goto failure; PORT_Assert(key->keyType == NSSLOWKEYRSAKey); if (key->keyType != NSSLOWKEYRSAKey) goto failure; buffer = (unsigned char *)PORT_Alloc(modulus_len + 1); if (!buffer) goto failure; rv = RSA_PublicKeyOp(&key->u.rsa, buffer, sign); if (rv != SECSuccess) goto loser; /* * check the padding that was used */ if (buffer[0] != 0 || buffer[1] != 1) goto loser; for (i = 2; i < modulus_len - hash_len - 1; i++) { if (buffer[i] != 0xff) goto loser; } if (buffer[i] != 0) goto loser; /* * make sure we get the same results */ if (PORT_Memcmp(buffer + modulus_len - hash_len, hash, hash_len) != 0) goto loser; PORT_Free(buffer); return SECSuccess; loser: PORT_Free(buffer); failure: return SECFailure; }
102
179,598
0
/*
 * CVE-2006-6106 (patched): Bluetooth CMTP interoperability-message parser.
 * Dispatches on the CAPI subcommand of the received skb.  For CAPI_CONF it
 * handles REGISTER/RELEASE application bookkeeping and copies controller
 * profile, manufacturer, version and serial-number data into the capi_ctr;
 * for CAPI_IND it answers LOOPBACK requests.  The skb->len lower-bound
 * checks before every CAPIMSG_U16/U32 read and the min_t() clamps on the
 * length bytes taken from the packet are the CVE fix: they stop a remote
 * peer from driving reads/copies past the skb data (the strncpy targets
 * are fixed CAPI_MANUFACTURER_LEN / CAPI_SERIAL_LEN buffers).
 * Consumes the skb (kfree_skb) in all cases.
 */
static void CVE_2006_6106_PATCHED_cmtp_recv_interopmsg(struct cmtp_session *session, struct sk_buff *skb) { struct capi_ctr *ctrl = &session->ctrl; struct cmtp_application *application; __u16 appl, msgnum, func, info; __u32 controller; BT_DBG("session %p skb %p len %d", session, skb, skb->len); switch (CAPIMSG_SUBCOMMAND(skb->data)) { case CAPI_CONF: if (skb->len < CAPI_MSG_BASELEN + 10) break; func = CAPIMSG_U16(skb->data, CAPI_MSG_BASELEN + 5); info = CAPIMSG_U16(skb->data, CAPI_MSG_BASELEN + 8); switch (func) { case CAPI_FUNCTION_REGISTER: msgnum = CAPIMSG_MSGID(skb->data); application = cmtp_application_get(session, CMTP_MSGNUM, msgnum); if (application) { application->state = BT_CONNECTED; application->msgnum = 0; application->mapping = CAPIMSG_APPID(skb->data); wake_up_interruptible(&session->wait); } break; case CAPI_FUNCTION_RELEASE: appl = CAPIMSG_APPID(skb->data); application = cmtp_application_get(session, CMTP_MAPPING, appl); if (application) { application->state = BT_CLOSED; application->msgnum = 0; wake_up_interruptible(&session->wait); } break; case CAPI_FUNCTION_GET_PROFILE: if (skb->len < CAPI_MSG_BASELEN + 11 + sizeof(capi_profile)) break; controller = CAPIMSG_U16(skb->data, CAPI_MSG_BASELEN + 11); msgnum = CAPIMSG_MSGID(skb->data); if (!info && (msgnum == CMTP_INITIAL_MSGNUM)) { session->ncontroller = controller; wake_up_interruptible(&session->wait); break; } if (!info && ctrl) { memcpy(&ctrl->profile, skb->data + CAPI_MSG_BASELEN + 11, sizeof(capi_profile)); session->state = BT_CONNECTED; capi_ctr_ready(ctrl); } break; case CAPI_FUNCTION_GET_MANUFACTURER: if (skb->len < CAPI_MSG_BASELEN + 15) break; controller = CAPIMSG_U32(skb->data, CAPI_MSG_BASELEN + 10); if (!info && ctrl) { int len = min_t(uint, CAPI_MANUFACTURER_LEN, skb->data[CAPI_MSG_BASELEN + 14]); memset(ctrl->manu, 0, CAPI_MANUFACTURER_LEN); strncpy(ctrl->manu, skb->data + CAPI_MSG_BASELEN + 15, len); } break; case CAPI_FUNCTION_GET_VERSION: if (skb->len < CAPI_MSG_BASELEN + 32) 
break; controller = CAPIMSG_U32(skb->data, CAPI_MSG_BASELEN + 12); if (!info && ctrl) { ctrl->version.majorversion = CAPIMSG_U32(skb->data, CAPI_MSG_BASELEN + 16); ctrl->version.minorversion = CAPIMSG_U32(skb->data, CAPI_MSG_BASELEN + 20); ctrl->version.majormanuversion = CAPIMSG_U32(skb->data, CAPI_MSG_BASELEN + 24); ctrl->version.minormanuversion = CAPIMSG_U32(skb->data, CAPI_MSG_BASELEN + 28); } break; case CAPI_FUNCTION_GET_SERIAL_NUMBER: if (skb->len < CAPI_MSG_BASELEN + 17) break; controller = CAPIMSG_U32(skb->data, CAPI_MSG_BASELEN + 12); if (!info && ctrl) { int len = min_t(uint, CAPI_SERIAL_LEN, skb->data[CAPI_MSG_BASELEN + 16]); memset(ctrl->serial, 0, CAPI_SERIAL_LEN); strncpy(ctrl->serial, skb->data + CAPI_MSG_BASELEN + 17, len); } break; } break; case CAPI_IND: if (skb->len < CAPI_MSG_BASELEN + 6) break; func = CAPIMSG_U16(skb->data, CAPI_MSG_BASELEN + 3); if (func == CAPI_FUNCTION_LOOPBACK) { int len = min_t(uint, skb->len - CAPI_MSG_BASELEN - 6, skb->data[CAPI_MSG_BASELEN + 5]); appl = CAPIMSG_APPID(skb->data); msgnum = CAPIMSG_MSGID(skb->data); cmtp_send_interopmsg(session, CAPI_RESP, appl, msgnum, func, skb->data + CAPI_MSG_BASELEN + 6, len); } break; } kfree_skb(skb); }
103
98,708
0
/*
 * CVE-2007-1000 (patched): copy the sticky hop-by-hop options of a
 * socket out to user space, truncated to the caller's buffer length.
 * Returns the number of bytes copied, 0 when no sticky options exist
 * (the null checks are the CVE fix), or -EFAULT on a faulting copy.
 */
static int CVE_2007_1000_PATCHED_ipv6_getsockopt_sticky(struct sock *sk, struct ipv6_txoptions *opt, char __user *optval, int len)
{
	struct ipv6_opt_hdr *hopopt;

	/* No options attached to the socket: nothing to copy. */
	if (opt == NULL || opt->hopopt == NULL)
		return 0;

	hopopt = opt->hopopt;

	/* Never hand back more than the option header actually holds. */
	if (len > ipv6_optlen(hopopt))
		len = ipv6_optlen(hopopt);

	return copy_to_user(optval, hopopt, len) ? -EFAULT : len;
}
104
145,106
0
/*
 * CVE-2007-6151 (patched): top-level ioctl handler for the ISDN character
 * devices.  Behaviour depends on the minor number:
 *  - ISDN_MINOR_STATUS: version query, per-channel byte counters, peer
 *    phone number lookup;
 *  - B-channel minors (<= ISDN_MINOR_BMAX): existence/running check only;
 *  - control minors (<= ISDN_MINOR_CTRLMAX): the big switch below —
 *    network-interface add/delete/slave/config, phone-number list
 *    manipulation (serialized via dev->sem because the list walks can
 *    sleep), modem-profile get/set, MSN->EAZ mapping, and pass-through
 *    of driver-private IIOCDRVCTL commands;
 *  - PPP minors: forwarded to isdn_ppp_ioctl().
 * User pointers are validated with access_ok/copy_{from,to}_user before
 * use; the iocts.drvid buffer is NUL-terminated after each copy (part of
 * the CVE fix against unterminated driver-id strings).
 * Returns 0 or a positive byte count on success, negative errno on error.
 * The #define name/bname/iocts/phone/cfg aliases below overlay the iocpar
 * union so the stack is shared by the differently-typed argument copies.
 */
static int CVE_2007_6151_PATCHED_isdn_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg) { uint minor = iminor(inode); isdn_ctrl c; int drvidx; int chidx; int ret; int i; char __user *p; char *s; union iocpar { char name[10]; char bname[22]; isdn_ioctl_struct iocts; isdn_net_ioctl_phone phone; isdn_net_ioctl_cfg cfg; } iocpar; void __user *argp = (void __user *)arg; #define name iocpar.name #define bname iocpar.bname #define iocts iocpar.iocts #define phone iocpar.phone #define cfg iocpar.cfg if (minor == ISDN_MINOR_STATUS) { switch (cmd) { case IIOCGETDVR: return (TTY_DV + (NET_DV << 8) + (INF_DV << 16)); case IIOCGETCPS: if (arg) { ulong __user *p = argp; int i; if (!access_ok(VERIFY_WRITE, p, sizeof(ulong) * ISDN_MAX_CHANNELS * 2)) return -EFAULT; for (i = 0; i < ISDN_MAX_CHANNELS; i++) { put_user(dev->ibytes[i], p++); put_user(dev->obytes[i], p++); } return 0; } else return -EINVAL; break; #ifdef CONFIG_NETDEVICES case IIOCNETGPN: /* Get peer phone number of a connected * isdn network interface */ if (arg) { if (copy_from_user(&phone, argp, sizeof(phone))) return -EFAULT; return isdn_net_getpeer(&phone, argp); } else return -EINVAL; #endif default: return -EINVAL; } } if (!dev->drivers) return -ENODEV; if (minor <= ISDN_MINOR_BMAX) { drvidx = isdn_minor2drv(minor); if (drvidx < 0) return -ENODEV; chidx = isdn_minor2chan(minor); if (!(dev->drv[drvidx]->flags & DRV_FLAG_RUNNING)) return -ENODEV; return 0; } if (minor <= ISDN_MINOR_CTRLMAX) { /* * isdn net devices manage lots of configuration variables as linked lists. * Those lists must only be manipulated from user space. Some of the ioctl's * service routines access user space and are not atomic. Therefor, ioctl's * manipulating the lists and ioctl's sleeping while accessing the lists * are serialized by means of a semaphore. 
*/ switch (cmd) { case IIOCNETDWRSET: printk(KERN_INFO "INFO: ISDN_DW_ABC_EXTENSION not enabled\n"); return(-EINVAL); case IIOCNETLCR: printk(KERN_INFO "INFO: ISDN_ABC_LCR_SUPPORT not enabled\n"); return -ENODEV; #ifdef CONFIG_NETDEVICES case IIOCNETAIF: /* Add a network-interface */ if (arg) { if (copy_from_user(name, argp, sizeof(name))) return -EFAULT; s = name; } else { s = NULL; } ret = down_interruptible(&dev->sem); if( ret ) return ret; if ((s = isdn_net_new(s, NULL))) { if (copy_to_user(argp, s, strlen(s) + 1)){ ret = -EFAULT; } else { ret = 0; } } else ret = -ENODEV; up(&dev->sem); return ret; case IIOCNETASL: /* Add a slave to a network-interface */ if (arg) { if (copy_from_user(bname, argp, sizeof(bname) - 1)) return -EFAULT; } else return -EINVAL; ret = down_interruptible(&dev->sem); if( ret ) return ret; if ((s = isdn_net_newslave(bname))) { if (copy_to_user(argp, s, strlen(s) + 1)){ ret = -EFAULT; } else { ret = 0; } } else ret = -ENODEV; up(&dev->sem); return ret; case IIOCNETDIF: /* Delete a network-interface */ if (arg) { if (copy_from_user(name, argp, sizeof(name))) return -EFAULT; ret = down_interruptible(&dev->sem); if( ret ) return ret; ret = isdn_net_rm(name); up(&dev->sem); return ret; } else return -EINVAL; case IIOCNETSCF: /* Set configurable parameters of a network-interface */ if (arg) { if (copy_from_user(&cfg, argp, sizeof(cfg))) return -EFAULT; return isdn_net_setcfg(&cfg); } else return -EINVAL; case IIOCNETGCF: /* Get configurable parameters of a network-interface */ if (arg) { if (copy_from_user(&cfg, argp, sizeof(cfg))) return -EFAULT; if (!(ret = isdn_net_getcfg(&cfg))) { if (copy_to_user(argp, &cfg, sizeof(cfg))) return -EFAULT; } return ret; } else return -EINVAL; case IIOCNETANM: /* Add a phone-number to a network-interface */ if (arg) { if (copy_from_user(&phone, argp, sizeof(phone))) return -EFAULT; ret = down_interruptible(&dev->sem); if( ret ) return ret; ret = isdn_net_addphone(&phone); up(&dev->sem); return ret; } else 
return -EINVAL; case IIOCNETGNM: /* Get list of phone-numbers of a network-interface */ if (arg) { if (copy_from_user(&phone, argp, sizeof(phone))) return -EFAULT; ret = down_interruptible(&dev->sem); if( ret ) return ret; ret = isdn_net_getphones(&phone, argp); up(&dev->sem); return ret; } else return -EINVAL; case IIOCNETDNM: /* Delete a phone-number of a network-interface */ if (arg) { if (copy_from_user(&phone, argp, sizeof(phone))) return -EFAULT; ret = down_interruptible(&dev->sem); if( ret ) return ret; ret = isdn_net_delphone(&phone); up(&dev->sem); return ret; } else return -EINVAL; case IIOCNETDIL: /* Force dialing of a network-interface */ if (arg) { if (copy_from_user(name, argp, sizeof(name))) return -EFAULT; return isdn_net_force_dial(name); } else return -EINVAL; #ifdef CONFIG_ISDN_PPP case IIOCNETALN: if (!arg) return -EINVAL; if (copy_from_user(name, argp, sizeof(name))) return -EFAULT; return isdn_ppp_dial_slave(name); case IIOCNETDLN: if (!arg) return -EINVAL; if (copy_from_user(name, argp, sizeof(name))) return -EFAULT; return isdn_ppp_hangup_slave(name); #endif case IIOCNETHUP: /* Force hangup of a network-interface */ if (!arg) return -EINVAL; if (copy_from_user(name, argp, sizeof(name))) return -EFAULT; return isdn_net_force_hangup(name); break; #endif /* CONFIG_NETDEVICES */ case IIOCSETVER: dev->net_verbose = arg; printk(KERN_INFO "isdn: Verbose-Level is %d\n", dev->net_verbose); return 0; case IIOCSETGST: if (arg) dev->global_flags |= ISDN_GLOBAL_STOPPED; else dev->global_flags &= ~ISDN_GLOBAL_STOPPED; printk(KERN_INFO "isdn: Global Mode %s\n", (dev->global_flags & ISDN_GLOBAL_STOPPED) ? 
"stopped" : "running"); return 0; case IIOCSETBRJ: drvidx = -1; if (arg) { int i; char *p; if (copy_from_user(&iocts, argp, sizeof(isdn_ioctl_struct))) return -EFAULT; iocts.drvid[sizeof(iocts.drvid)-1] = 0; if (strlen(iocts.drvid)) { if ((p = strchr(iocts.drvid, ','))) *p = 0; drvidx = -1; for (i = 0; i < ISDN_MAX_DRIVERS; i++) if (!(strcmp(dev->drvid[i], iocts.drvid))) { drvidx = i; break; } } } if (drvidx == -1) return -ENODEV; if (iocts.arg) dev->drv[drvidx]->flags |= DRV_FLAG_REJBUS; else dev->drv[drvidx]->flags &= ~DRV_FLAG_REJBUS; return 0; case IIOCSIGPRF: dev->profd = current; return 0; break; case IIOCGETPRF: /* Get all Modem-Profiles */ if (arg) { char __user *p = argp; int i; if (!access_ok(VERIFY_WRITE, argp, (ISDN_MODEM_NUMREG + ISDN_MSNLEN + ISDN_LMSNLEN) * ISDN_MAX_CHANNELS)) return -EFAULT; for (i = 0; i < ISDN_MAX_CHANNELS; i++) { if (copy_to_user(p, dev->mdm.info[i].emu.profile, ISDN_MODEM_NUMREG)) return -EFAULT; p += ISDN_MODEM_NUMREG; if (copy_to_user(p, dev->mdm.info[i].emu.pmsn, ISDN_MSNLEN)) return -EFAULT; p += ISDN_MSNLEN; if (copy_to_user(p, dev->mdm.info[i].emu.plmsn, ISDN_LMSNLEN)) return -EFAULT; p += ISDN_LMSNLEN; } return (ISDN_MODEM_NUMREG + ISDN_MSNLEN + ISDN_LMSNLEN) * ISDN_MAX_CHANNELS; } else return -EINVAL; break; case IIOCSETPRF: /* Set all Modem-Profiles */ if (arg) { char __user *p = argp; int i; if (!access_ok(VERIFY_READ, argp, (ISDN_MODEM_NUMREG + ISDN_MSNLEN + ISDN_LMSNLEN) * ISDN_MAX_CHANNELS)) return -EFAULT; for (i = 0; i < ISDN_MAX_CHANNELS; i++) { if (copy_from_user(dev->mdm.info[i].emu.profile, p, ISDN_MODEM_NUMREG)) return -EFAULT; p += ISDN_MODEM_NUMREG; if (copy_from_user(dev->mdm.info[i].emu.plmsn, p, ISDN_LMSNLEN)) return -EFAULT; p += ISDN_LMSNLEN; if (copy_from_user(dev->mdm.info[i].emu.pmsn, p, ISDN_MSNLEN)) return -EFAULT; p += ISDN_MSNLEN; } return 0; } else return -EINVAL; break; case IIOCSETMAP: case IIOCGETMAP: /* Set/Get MSN->EAZ-Mapping for a driver */ if (arg) { if (copy_from_user(&iocts, argp, 
sizeof(isdn_ioctl_struct))) return -EFAULT; iocts.drvid[sizeof(iocts.drvid)-1] = 0; if (strlen(iocts.drvid)) { drvidx = -1; for (i = 0; i < ISDN_MAX_DRIVERS; i++) if (!(strcmp(dev->drvid[i], iocts.drvid))) { drvidx = i; break; } } else drvidx = 0; if (drvidx == -1) return -ENODEV; if (cmd == IIOCSETMAP) { int loop = 1; p = (char __user *) iocts.arg; i = 0; while (loop) { int j = 0; while (1) { if (!access_ok(VERIFY_READ, p, 1)) return -EFAULT; get_user(bname[j], p++); switch (bname[j]) { case '\0': loop = 0; /* Fall through */ case ',': bname[j] = '\0'; strcpy(dev->drv[drvidx]->msn2eaz[i], bname); j = ISDN_MSNLEN; break; default: j++; } if (j >= ISDN_MSNLEN) break; } if (++i > 9) break; } } else { p = (char __user *) iocts.arg; for (i = 0; i < 10; i++) { snprintf(bname, sizeof(bname), "%s%s", strlen(dev->drv[drvidx]->msn2eaz[i]) ? dev->drv[drvidx]->msn2eaz[i] : "_", (i < 9) ? "," : "\0"); if (copy_to_user(p, bname, strlen(bname) + 1)) return -EFAULT; p += strlen(bname); } } return 0; } else return -EINVAL; case IIOCDBGVAR: if (arg) { if (copy_to_user(argp, &dev, sizeof(ulong))) return -EFAULT; return 0; } else return -EINVAL; break; default: if ((cmd & IIOCDRVCTL) == IIOCDRVCTL) cmd = ((cmd >> _IOC_NRSHIFT) & _IOC_NRMASK) & ISDN_DRVIOCTL_MASK; else return -EINVAL; if (arg) { int i; char *p; if (copy_from_user(&iocts, argp, sizeof(isdn_ioctl_struct))) return -EFAULT; iocts.drvid[sizeof(iocts.drvid)-1] = 0; if (strlen(iocts.drvid)) { if ((p = strchr(iocts.drvid, ','))) *p = 0; drvidx = -1; for (i = 0; i < ISDN_MAX_DRIVERS; i++) if (!(strcmp(dev->drvid[i], iocts.drvid))) { drvidx = i; break; } } else drvidx = 0; if (drvidx == -1) return -ENODEV; if (!access_ok(VERIFY_WRITE, argp, sizeof(isdn_ioctl_struct))) return -EFAULT; c.driver = drvidx; c.command = ISDN_CMD_IOCTL; c.arg = cmd; memcpy(c.parm.num, &iocts.arg, sizeof(ulong)); ret = isdn_command(&c); memcpy(&iocts.arg, c.parm.num, sizeof(ulong)); if (copy_to_user(argp, &iocts, sizeof(isdn_ioctl_struct))) return 
-EFAULT; return ret; } else return -EINVAL; } } #ifdef CONFIG_ISDN_PPP if (minor <= ISDN_MINOR_PPPMAX) return (isdn_ppp_ioctl(minor - ISDN_MINOR_PPP, file, cmd, arg)); #endif return -ENODEV; #undef name #undef bname #undef iocts #undef phone #undef cfg }
105
19,217
0
/*
 * CVE-2008-5713 (patched): drain a device's qdisc, but bound the work.
 * Keeps calling qdisc_restart() until the queue stalls, the hardware
 * queue stops, another task needs the CPU, or a jiffy has elapsed — the
 * latter two reschedule the queue via netif_schedule() instead of
 * monopolizing the CPU.  Clears __LINK_STATE_QDISC_RUNNING on exit.
 */
void CVE_2008_5713_PATCHED___qdisc_run(struct net_device *dev)
{
	unsigned long start_time = jiffies;

	for (;;) {
		if (!qdisc_restart(dev))
			break;
		if (netif_queue_stopped(dev))
			break;
		/*
		 * Postpone processing if
		 * 1. another process needs the CPU;
		 * 2. we've been doing it for too long.
		 */
		if (need_resched() || jiffies != start_time) {
			netif_schedule(dev);
			break;
		}
	}

	clear_bit(__LINK_STATE_QDISC_RUNNING, &dev->state);
}
106
131,840
0
/*
 * CVE-2009-0028 (patched): copy_process() — create a new task as a copy
 * of the current one, for fork()/clone()/vfork()/kernel threads.
 * Visible flow: validate the clone_flags combinations, duplicate the
 * task_struct, enforce RLIMIT_NPROC, initialize accounting/locking/
 * scheduler state, copy each subsystem (semundo, files, fs, sighand,
 * signal, mm, keys, namespaces, io, thread), allocate a pid if needed,
 * then link the task into parent/child, thread-group, pid and cgroup
 * structures under tasklist_lock with a pending-signal recheck.
 * Returns the new task (not yet woken) or ERR_PTR(-errno); the long
 * bad_fork_* label ladder unwinds exactly the subsystems initialized so
 * far, in reverse order — statement order here is load-bearing.
 * It is the caller's job to wake the child and call free_uid/etc. on the
 * paths this function does not own.
 */
static struct task_struct *CVE_2009_0028_PATCHED_copy_process(unsigned long clone_flags, unsigned long stack_start, struct pt_regs *regs, unsigned long stack_size, int __user *child_tidptr, struct pid *pid, int trace) { int retval; struct task_struct *p; int cgroup_callbacks_done = 0; if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS)) return ERR_PTR(-EINVAL); /* * Thread groups must share signals as well, and detached threads * can only be started up within the thread group. */ if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND)) return ERR_PTR(-EINVAL); /* * Shared signal handlers imply shared VM. By way of the above, * thread groups also imply shared VM. Blocking this case allows * for various simplifications in other code. */ if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM)) return ERR_PTR(-EINVAL); retval = security_task_create(clone_flags); if (retval) goto fork_out; retval = -ENOMEM; p = dup_task_struct(current); if (!p) goto fork_out; rt_mutex_init_task(p); #ifdef CONFIG_PROVE_LOCKING DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled); DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled); #endif retval = -EAGAIN; if (atomic_read(&p->user->processes) >= p->signal->rlim[RLIMIT_NPROC].rlim_cur) { if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) && p->user != current->nsproxy->user_ns->root_user) goto bad_fork_free; } atomic_inc(&p->user->__count); atomic_inc(&p->user->processes); get_group_info(p->group_info); /* * If multiple threads are within CVE_2009_0028_PATCHED_copy_process(), then this check * triggers too late. This doesn't hurt, the check is only there * to stop root fork bombs. 
*/ if (nr_threads >= max_threads) goto bad_fork_cleanup_count; if (!try_module_get(task_thread_info(p)->exec_domain->module)) goto bad_fork_cleanup_count; if (p->binfmt && !try_module_get(p->binfmt->module)) goto bad_fork_cleanup_put_domain; p->did_exec = 0; delayacct_tsk_init(p); /* Must remain after dup_task_struct() */ copy_flags(clone_flags, p); INIT_LIST_HEAD(&p->children); INIT_LIST_HEAD(&p->sibling); #ifdef CONFIG_PREEMPT_RCU p->rcu_read_lock_nesting = 0; p->rcu_flipctr_idx = 0; #endif /* #ifdef CONFIG_PREEMPT_RCU */ p->vfork_done = NULL; spin_lock_init(&p->alloc_lock); clear_tsk_thread_flag(p, TIF_SIGPENDING); init_sigpending(&p->pending); p->utime = cputime_zero; p->stime = cputime_zero; p->gtime = cputime_zero; p->utimescaled = cputime_zero; p->stimescaled = cputime_zero; p->prev_utime = cputime_zero; p->prev_stime = cputime_zero; p->default_timer_slack_ns = current->timer_slack_ns; #ifdef CONFIG_DETECT_SOFTLOCKUP p->last_switch_count = 0; p->last_switch_timestamp = 0; #endif task_io_accounting_init(&p->ioac); acct_clear_integrals(p); posix_cpu_timers_init(p); p->lock_depth = -1; /* -1 = no lock */ do_posix_clock_monotonic_gettime(&p->start_time); p->real_start_time = p->start_time; monotonic_to_bootbased(&p->real_start_time); #ifdef CONFIG_SECURITY p->security = NULL; #endif p->cap_bset = current->cap_bset; p->io_context = NULL; p->audit_context = NULL; cgroup_fork(p); #ifdef CONFIG_NUMA p->mempolicy = mpol_dup(p->mempolicy); if (IS_ERR(p->mempolicy)) { retval = PTR_ERR(p->mempolicy); p->mempolicy = NULL; goto bad_fork_cleanup_cgroup; } mpol_fix_fork_child_flag(p); #endif #ifdef CONFIG_TRACE_IRQFLAGS p->irq_events = 0; #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW p->hardirqs_enabled = 1; #else p->hardirqs_enabled = 0; #endif p->hardirq_enable_ip = 0; p->hardirq_enable_event = 0; p->hardirq_disable_ip = _THIS_IP_; p->hardirq_disable_event = 0; p->softirqs_enabled = 1; p->softirq_enable_ip = _THIS_IP_; p->softirq_enable_event = 0; p->softirq_disable_ip = 0; 
p->softirq_disable_event = 0; p->hardirq_context = 0; p->softirq_context = 0; #endif #ifdef CONFIG_LOCKDEP p->lockdep_depth = 0; /* no locks held yet */ p->curr_chain_key = 0; p->lockdep_recursion = 0; #endif #ifdef CONFIG_DEBUG_MUTEXES p->blocked_on = NULL; /* not blocked yet */ #endif /* Perform scheduler related setup. Assign this task to a CPU. */ sched_fork(p, clone_flags); if ((retval = security_task_alloc(p))) goto bad_fork_cleanup_policy; if ((retval = audit_alloc(p))) goto bad_fork_cleanup_security; /* copy all the process information */ if ((retval = copy_semundo(clone_flags, p))) goto bad_fork_cleanup_audit; if ((retval = copy_files(clone_flags, p))) goto bad_fork_cleanup_semundo; if ((retval = copy_fs(clone_flags, p))) goto bad_fork_cleanup_files; if ((retval = copy_sighand(clone_flags, p))) goto bad_fork_cleanup_fs; if ((retval = copy_signal(clone_flags, p))) goto bad_fork_cleanup_sighand; if ((retval = copy_mm(clone_flags, p))) goto bad_fork_cleanup_signal; if ((retval = copy_keys(clone_flags, p))) goto bad_fork_cleanup_mm; if ((retval = copy_namespaces(clone_flags, p))) goto bad_fork_cleanup_keys; if ((retval = copy_io(clone_flags, p))) goto bad_fork_cleanup_namespaces; retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs); if (retval) goto bad_fork_cleanup_io; if (pid != &init_struct_pid) { retval = -ENOMEM; pid = alloc_pid(task_active_pid_ns(p)); if (!pid) goto bad_fork_cleanup_io; if (clone_flags & CLONE_NEWPID) { retval = pid_ns_prepare_proc(task_active_pid_ns(p)); if (retval < 0) goto bad_fork_free_pid; } } p->pid = pid_nr(pid); p->tgid = p->pid; if (clone_flags & CLONE_THREAD) p->tgid = current->tgid; if (current->nsproxy != p->nsproxy) { retval = ns_cgroup_clone(p, pid); if (retval) goto bad_fork_free_pid; } p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL; /* * Clear TID on mm_release()? */ p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? 
child_tidptr: NULL; #ifdef CONFIG_FUTEX p->robust_list = NULL; #ifdef CONFIG_COMPAT p->compat_robust_list = NULL; #endif INIT_LIST_HEAD(&p->pi_state_list); p->pi_state_cache = NULL; #endif /* * sigaltstack should be cleared when sharing the same VM */ if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM) p->sas_ss_sp = p->sas_ss_size = 0; /* * Syscall tracing should be turned off in the child regardless * of CLONE_PTRACE. */ clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE); #ifdef TIF_SYSCALL_EMU clear_tsk_thread_flag(p, TIF_SYSCALL_EMU); #endif clear_all_latency_tracing(p); /* Our parent execution domain becomes current domain These must match for thread signalling to apply */ p->parent_exec_id = p->self_exec_id; /* ok, now we should be set up.. */ p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 : (clone_flags & CLONE_PARENT) ? current->group_leader->exit_signal : (clone_flags & CSIGNAL); p->pdeath_signal = 0; p->exit_state = 0; /* * Ok, make it visible to the rest of the system. * We dont wake it up yet. */ p->group_leader = p; INIT_LIST_HEAD(&p->thread_group); /* Now that the task is set up, run cgroup callbacks if * necessary. We need to run them before the task is visible * on the tasklist. */ cgroup_fork_callbacks(p); cgroup_callbacks_done = 1; /* Need tasklist lock for parent etc handling! */ write_lock_irq(&tasklist_lock); /* * The task hasn't been attached yet, so its cpus_allowed mask will * not be changed, nor will its assigned CPU. * * The cpus_allowed mask of the parent may have changed after it was * copied first time - so re-copy it here, then check the child's CPU * to ensure it is on a valid CPU (and if not, just force it back to * parent's CPU). This avoids alot of nasty races. 
*/ p->cpus_allowed = current->cpus_allowed; p->rt.nr_cpus_allowed = current->rt.nr_cpus_allowed; if (unlikely(!cpu_isset(task_cpu(p), p->cpus_allowed) || !cpu_online(task_cpu(p)))) set_task_cpu(p, smp_processor_id()); /* CLONE_PARENT re-uses the old parent */ if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) p->real_parent = current->real_parent; else p->real_parent = current; spin_lock(&current->sighand->siglock); /* * Process group and session signals need to be delivered to just the * parent before the fork or both the parent and the child after the * fork. Restart if a signal comes in before we add the new process to * it's process group. * A fatal signal pending means that current will exit, so the new * thread can't slip out of an OOM kill (or normal SIGKILL). */ recalc_sigpending(); if (signal_pending(current)) { spin_unlock(&current->sighand->siglock); write_unlock_irq(&tasklist_lock); retval = -ERESTARTNOINTR; goto bad_fork_free_pid; } if (clone_flags & CLONE_THREAD) { p->group_leader = current->group_leader; list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group); } if (likely(p->pid)) { list_add_tail(&p->sibling, &p->real_parent->children); tracehook_finish_clone(p, clone_flags, trace); if (thread_group_leader(p)) { if (clone_flags & CLONE_NEWPID) p->nsproxy->pid_ns->child_reaper = p; p->signal->leader_pid = pid; tty_kref_put(p->signal->tty); p->signal->tty = tty_kref_get(current->signal->tty); set_task_pgrp(p, task_pgrp_nr(current)); set_task_session(p, task_session_nr(current)); attach_pid(p, PIDTYPE_PGID, task_pgrp(current)); attach_pid(p, PIDTYPE_SID, task_session(current)); list_add_tail_rcu(&p->tasks, &init_task.tasks); __get_cpu_var(process_counts)++; } attach_pid(p, PIDTYPE_PID, pid); nr_threads++; } total_forks++; spin_unlock(&current->sighand->siglock); write_unlock_irq(&tasklist_lock); proc_fork_connector(p); cgroup_post_fork(p); return p; bad_fork_free_pid: if (pid != &init_struct_pid) free_pid(pid); bad_fork_cleanup_io: 
put_io_context(p->io_context); bad_fork_cleanup_namespaces: exit_task_namespaces(p); bad_fork_cleanup_keys: exit_keys(p); bad_fork_cleanup_mm: if (p->mm) mmput(p->mm); bad_fork_cleanup_signal: cleanup_signal(p); bad_fork_cleanup_sighand: __cleanup_sighand(p->sighand); bad_fork_cleanup_fs: exit_fs(p); /* blocking */ bad_fork_cleanup_files: exit_files(p); /* blocking */ bad_fork_cleanup_semundo: exit_sem(p); bad_fork_cleanup_audit: audit_free(p); bad_fork_cleanup_security: security_task_free(p); bad_fork_cleanup_policy: #ifdef CONFIG_NUMA mpol_put(p->mempolicy); bad_fork_cleanup_cgroup: #endif cgroup_exit(p, cgroup_callbacks_done); delayacct_tsk_free(p); if (p->binfmt) module_put(p->binfmt->module); bad_fork_cleanup_put_domain: module_put(task_thread_info(p)->exec_domain->module); bad_fork_cleanup_count: put_group_info(p->group_info); atomic_dec(&p->user->processes); free_uid(p->user); bad_fork_free: free_task(p); fork_out: return ERR_PTR(retval); }
107
183,848
0
struct nfs_server *CVE_2009_1336_PATCHED_nfs4_create_server(const struct nfs4_mount_data *data, const char *hostname, const struct sockaddr_in *addr, const char *mntpath, const char *ip_addr, rpc_authflavor_t authflavour, struct nfs_fh *mntfh) { struct nfs_fattr fattr; struct nfs_server *server; int error; dprintk("--> CVE_2009_1336_PATCHED_nfs4_create_server()\n"); server = nfs_alloc_server(); if (!server) return ERR_PTR(-ENOMEM); /* Get a client record */ error = nfs4_set_client(server, hostname, addr, ip_addr, authflavour, data->proto, data->timeo, data->retrans); if (error < 0) goto error; /* set up the general RPC client */ error = nfs4_init_server(server, data, authflavour); if (error < 0) goto error; if (server->namelen == 0 || server->namelen > NFS4_MAXNAMLEN) server->namelen = NFS4_MAXNAMLEN; BUG_ON(!server->nfs_client); BUG_ON(!server->nfs_client->rpc_ops); BUG_ON(!server->nfs_client->rpc_ops->file_inode_ops); /* Probe the root fh to retrieve its FSID */ error = nfs4_path_walk(server, mntfh, mntpath); if (error < 0) goto error; dprintk("Server FSID: %llx:%llx\n", (unsigned long long) server->fsid.major, (unsigned long long) server->fsid.minor); dprintk("Mount FH: %d\n", mntfh->size); error = nfs_probe_fsinfo(server, mntfh, &fattr); if (error < 0) goto error; BUG_ON(!server->nfs_client); BUG_ON(!server->nfs_client->rpc_ops); BUG_ON(!server->nfs_client->rpc_ops->file_inode_ops); spin_lock(&nfs_client_lock); list_add_tail(&server->client_link, &server->nfs_client->cl_superblocks); list_add_tail(&server->master_link, &nfs_volume_list); spin_unlock(&nfs_client_lock); server->mount_time = jiffies; dprintk("<-- CVE_2009_1336_PATCHED_nfs4_create_server() = %p\n", server); return server; error: nfs_free_server(server); dprintk("<-- CVE_2009_1336_PATCHED_nfs4_create_server() = error %d\n", error); return ERR_PTR(error); }
108
95,501
0
static unsigned int CVE_2009_1897_PATCHED_tun_chr_poll(struct file *file, poll_table * wait) { struct tun_file *tfile = file->private_data; struct tun_struct *tun = __tun_get(tfile); struct sock *sk; unsigned int mask = 0; if (!tun) return POLLERR; sk = tun->sk; DBG(KERN_INFO "%s: CVE_2009_1897_PATCHED_tun_chr_poll\n", tun->dev->name); poll_wait(file, &tun->socket.wait, wait); if (!skb_queue_empty(&tun->readq)) mask |= POLLIN | POLLRDNORM; if (sock_writeable(sk) || (!test_and_set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags) && sock_writeable(sk))) mask |= POLLOUT | POLLWRNORM; if (tun->dev->reg_state != NETREG_REGISTERED) mask = POLLERR; tun_put(tun); return mask; }
109
55,021
0
int CVE_2009_2287_PATCHED_kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { int mmu_reset_needed = 0; int i, pending_vec, max_bits; struct descriptor_table dt; vcpu_load(vcpu); dt.limit = sregs->idt.limit; dt.base = sregs->idt.base; kvm_x86_ops->set_idt(vcpu, &dt); dt.limit = sregs->gdt.limit; dt.base = sregs->gdt.base; kvm_x86_ops->set_gdt(vcpu, &dt); vcpu->arch.cr2 = sregs->cr2; mmu_reset_needed |= vcpu->arch.cr3 != sregs->cr3; down_read(&vcpu->kvm->slots_lock); if (gfn_to_memslot(vcpu->kvm, sregs->cr3 >> PAGE_SHIFT)) vcpu->arch.cr3 = sregs->cr3; else set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests); up_read(&vcpu->kvm->slots_lock); kvm_set_cr8(vcpu, sregs->cr8); mmu_reset_needed |= vcpu->arch.shadow_efer != sregs->efer; kvm_x86_ops->set_efer(vcpu, sregs->efer); kvm_set_apic_base(vcpu, sregs->apic_base); kvm_x86_ops->decache_cr4_guest_bits(vcpu); mmu_reset_needed |= vcpu->arch.cr0 != sregs->cr0; kvm_x86_ops->set_cr0(vcpu, sregs->cr0); vcpu->arch.cr0 = sregs->cr0; mmu_reset_needed |= vcpu->arch.cr4 != sregs->cr4; kvm_x86_ops->set_cr4(vcpu, sregs->cr4); if (!is_long_mode(vcpu) && is_pae(vcpu)) load_pdptrs(vcpu, vcpu->arch.cr3); if (mmu_reset_needed) kvm_mmu_reset_context(vcpu); if (!irqchip_in_kernel(vcpu->kvm)) { memcpy(vcpu->arch.irq_pending, sregs->interrupt_bitmap, sizeof vcpu->arch.irq_pending); vcpu->arch.irq_summary = 0; for (i = 0; i < ARRAY_SIZE(vcpu->arch.irq_pending); ++i) if (vcpu->arch.irq_pending[i]) __set_bit(i, &vcpu->arch.irq_summary); } else { max_bits = (sizeof sregs->interrupt_bitmap) << 3; pending_vec = find_first_bit( (const unsigned long *)sregs->interrupt_bitmap, max_bits); /* Only pending external irq is handled here */ if (pending_vec < max_bits) { kvm_x86_ops->set_irq(vcpu, pending_vec); pr_debug("Set back pending irq %d\n", pending_vec); } kvm_pic_clear_isr_ack(vcpu->kvm); } kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS); kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS); kvm_set_segment(vcpu, &sregs->es, 
VCPU_SREG_ES); kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS); kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS); kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS); kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR); kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR); /* Older userspace won't unhalt the vcpu on reset. */ if (vcpu->vcpu_id == 0 && kvm_rip_read(vcpu) == 0xfff0 && sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 && !(vcpu->arch.cr0 & X86_CR0_PE)) vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; vcpu_put(vcpu); return 0; }
110
90,832
0
int CVE_2009_2847_PATCHED_do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp) { stack_t oss; int error; oss.ss_sp = (void __user *) current->sas_ss_sp; oss.ss_size = current->sas_ss_size; oss.ss_flags = sas_ss_flags(sp); if (uss) { void __user *ss_sp; size_t ss_size; int ss_flags; error = -EFAULT; if (!access_ok(VERIFY_READ, uss, sizeof(*uss)) || __get_user(ss_sp, &uss->ss_sp) || __get_user(ss_flags, &uss->ss_flags) || __get_user(ss_size, &uss->ss_size)) goto out; error = -EPERM; if (on_sig_stack(sp)) goto out; error = -EINVAL; /* * * Note - this code used to test ss_flags incorrectly * old code may have been written using ss_flags==0 * to mean ss_flags==SS_ONSTACK (as this was the only * way that worked) - this fix preserves that older * mechanism */ if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0) goto out; if (ss_flags == SS_DISABLE) { ss_size = 0; ss_sp = NULL; } else { error = -ENOMEM; if (ss_size < MINSIGSTKSZ) goto out; } current->sas_ss_sp = (unsigned long) ss_sp; current->sas_ss_size = ss_size; } error = 0; if (uoss) { error = -EFAULT; if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss))) goto out; error = __put_user(oss.ss_sp, &uoss->ss_sp) | __put_user(oss.ss_size, &uoss->ss_size) | __put_user(oss.ss_flags, &uoss->ss_flags); } out: return error; }
111
17,978
0
static int CVE_2009_3080_PATCHED_gdth_read_event(gdth_ha_str *ha, int handle, gdth_evt_str *estr) { gdth_evt_str *e; int eindex; ulong flags; TRACE2(("CVE_2009_3080_PATCHED_gdth_read_event() handle %d\n", handle)); spin_lock_irqsave(&ha->smp_lock, flags); if (handle == -1) eindex = eoldidx; else eindex = handle; estr->event_source = 0; if (eindex < 0 || eindex >= MAX_EVENTS) { spin_unlock_irqrestore(&ha->smp_lock, flags); return eindex; } e = &ebuffer[eindex]; if (e->event_source != 0) { if (eindex != elastidx) { if (++eindex == MAX_EVENTS) eindex = 0; } else { eindex = -1; } memcpy(estr, e, sizeof(gdth_evt_str)); } spin_unlock_irqrestore(&ha->smp_lock, flags); return eindex; }
112
175,094
0
* CVE_2009_4141_PATCHED_fasync_helper() is used by almost all character device drivers * to set up the fasync queue, and for regular files by the file * lease code. It returns negative on error, 0 if it did no changes * and positive if it added/deleted the entry. */ int CVE_2009_4141_PATCHED_fasync_helper(int fd, struct file * filp, int on, struct fasync_struct **fapp) { if (!on) return fasync_remove_entry(filp, fapp); return fasync_add_entry(fd, filp, fapp); }
113
11,427
0
static int CVE_2010_0006_PATCHED_ipv6_hop_jumbo(struct sk_buff *skb, int optoff) { const unsigned char *nh = skb_network_header(skb); struct net *net = ipv6_skb_net(skb); u32 pkt_len; if (nh[optoff + 1] != 4 || (optoff & 3) != 2) { LIMIT_NETDEBUG(KERN_DEBUG "CVE_2010_0006_PATCHED_ipv6_hop_jumbo: wrong jumbo opt length/alignment %d\n", nh[optoff+1]); IP6_INC_STATS_BH(net, ipv6_skb_idev(skb), IPSTATS_MIB_INHDRERRORS); goto drop; } pkt_len = ntohl(*(__be32 *)(nh + optoff + 2)); if (pkt_len <= IPV6_MAXPLEN) { IP6_INC_STATS_BH(net, ipv6_skb_idev(skb), IPSTATS_MIB_INHDRERRORS); icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff+2); return 0; } if (ipv6_hdr(skb)->payload_len) { IP6_INC_STATS_BH(net, ipv6_skb_idev(skb), IPSTATS_MIB_INHDRERRORS); icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff); return 0; } if (pkt_len > skb->len - sizeof(struct ipv6hdr)) { IP6_INC_STATS_BH(net, ipv6_skb_idev(skb), IPSTATS_MIB_INTRUNCATEDPKTS); goto drop; } if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr))) goto drop; return 1; drop: kfree_skb(skb); return 0; }
114
65,295
0
static int CVE_2010_0307_PATCHED_load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs) { struct file *interpreter = NULL; /* to shut gcc up */ unsigned long load_addr = 0, load_bias = 0; int load_addr_set = 0; char * elf_interpreter = NULL; unsigned long error; struct elf_phdr *elf_ppnt, *elf_phdata; unsigned long elf_bss, elf_brk; int retval, i; unsigned int size; unsigned long elf_entry; unsigned long interp_load_addr = 0; unsigned long start_code, end_code, start_data, end_data; unsigned long reloc_func_desc = 0; int executable_stack = EXSTACK_DEFAULT; unsigned long def_flags = 0; struct { struct elfhdr elf_ex; struct elfhdr interp_elf_ex; } *loc; loc = kmalloc(sizeof(*loc), GFP_KERNEL); if (!loc) { retval = -ENOMEM; goto out_ret; } /* Get the exec-header */ loc->elf_ex = *((struct elfhdr *)bprm->buf); retval = -ENOEXEC; /* First of all, some simple consistency checks */ if (memcmp(loc->elf_ex.e_ident, ELFMAG, SELFMAG) != 0) goto out; if (loc->elf_ex.e_type != ET_EXEC && loc->elf_ex.e_type != ET_DYN) goto out; if (!elf_check_arch(&loc->elf_ex)) goto out; if (!bprm->file->f_op||!bprm->file->f_op->mmap) goto out; /* Now read in all of the header information */ if (loc->elf_ex.e_phentsize != sizeof(struct elf_phdr)) goto out; if (loc->elf_ex.e_phnum < 1 || loc->elf_ex.e_phnum > 65536U / sizeof(struct elf_phdr)) goto out; size = loc->elf_ex.e_phnum * sizeof(struct elf_phdr); retval = -ENOMEM; elf_phdata = kmalloc(size, GFP_KERNEL); if (!elf_phdata) goto out; retval = kernel_read(bprm->file, loc->elf_ex.e_phoff, (char *)elf_phdata, size); if (retval != size) { if (retval >= 0) retval = -EIO; goto out_free_ph; } elf_ppnt = elf_phdata; elf_bss = 0; elf_brk = 0; start_code = ~0UL; end_code = 0; start_data = 0; end_data = 0; for (i = 0; i < loc->elf_ex.e_phnum; i++) { if (elf_ppnt->p_type == PT_INTERP) { /* This is the program interpreter used for * shared libraries - for now assume that this * is an a.out format binary */ retval = -ENOEXEC; if 
(elf_ppnt->p_filesz > PATH_MAX || elf_ppnt->p_filesz < 2) goto out_free_ph; retval = -ENOMEM; elf_interpreter = kmalloc(elf_ppnt->p_filesz, GFP_KERNEL); if (!elf_interpreter) goto out_free_ph; retval = kernel_read(bprm->file, elf_ppnt->p_offset, elf_interpreter, elf_ppnt->p_filesz); if (retval != elf_ppnt->p_filesz) { if (retval >= 0) retval = -EIO; goto out_free_interp; } /* make sure path is NULL terminated */ retval = -ENOEXEC; if (elf_interpreter[elf_ppnt->p_filesz - 1] != '\0') goto out_free_interp; interpreter = open_exec(elf_interpreter); retval = PTR_ERR(interpreter); if (IS_ERR(interpreter)) goto out_free_interp; /* * If the binary is not readable then enforce * mm->dumpable = 0 regardless of the interpreter's * permissions. */ if (file_permission(interpreter, MAY_READ) < 0) bprm->interp_flags |= BINPRM_FLAGS_ENFORCE_NONDUMP; retval = kernel_read(interpreter, 0, bprm->buf, BINPRM_BUF_SIZE); if (retval != BINPRM_BUF_SIZE) { if (retval >= 0) retval = -EIO; goto out_free_dentry; } /* Get the exec headers */ loc->interp_elf_ex = *((struct elfhdr *)bprm->buf); break; } elf_ppnt++; } elf_ppnt = elf_phdata; for (i = 0; i < loc->elf_ex.e_phnum; i++, elf_ppnt++) if (elf_ppnt->p_type == PT_GNU_STACK) { if (elf_ppnt->p_flags & PF_X) executable_stack = EXSTACK_ENABLE_X; else executable_stack = EXSTACK_DISABLE_X; break; } /* Some simple consistency checks for the interpreter */ if (elf_interpreter) { retval = -ELIBBAD; /* Not an ELF interpreter */ if (memcmp(loc->interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0) goto out_free_dentry; /* Verify the interpreter has a valid arch */ if (!elf_check_arch(&loc->interp_elf_ex)) goto out_free_dentry; } /* Flush all traces of the currently running executable */ retval = flush_old_exec(bprm); if (retval) goto out_free_dentry; /* OK, This is the point of no return */ current->flags &= ~PF_FORKNOEXEC; current->mm->def_flags = def_flags; /* Do this immediately, since STACK_TOP as used in setup_arg_pages may depend on the personality. 
*/ SET_PERSONALITY(loc->elf_ex); if (elf_read_implies_exec(loc->elf_ex, executable_stack)) current->personality |= READ_IMPLIES_EXEC; if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) current->flags |= PF_RANDOMIZE; setup_new_exec(bprm); /* Do this so that we can load the interpreter, if need be. We will change some of these later */ current->mm->free_area_cache = current->mm->mmap_base; current->mm->cached_hole_size = 0; retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP), executable_stack); if (retval < 0) { send_sig(SIGKILL, current, 0); goto out_free_dentry; } current->mm->start_stack = bprm->p; /* Now we do a little grungy work by mmaping the ELF image into the correct location in memory. */ for(i = 0, elf_ppnt = elf_phdata; i < loc->elf_ex.e_phnum; i++, elf_ppnt++) { int elf_prot = 0, elf_flags; unsigned long k, vaddr; if (elf_ppnt->p_type != PT_LOAD) continue; if (unlikely (elf_brk > elf_bss)) { unsigned long nbyte; /* There was a PT_LOAD segment with p_memsz > p_filesz before this one. Map anonymous pages, if needed, and clear the area. */ retval = set_brk (elf_bss + load_bias, elf_brk + load_bias); if (retval) { send_sig(SIGKILL, current, 0); goto out_free_dentry; } nbyte = ELF_PAGEOFFSET(elf_bss); if (nbyte) { nbyte = ELF_MIN_ALIGN - nbyte; if (nbyte > elf_brk - elf_bss) nbyte = elf_brk - elf_bss; if (clear_user((void __user *)elf_bss + load_bias, nbyte)) { /* * This bss-zeroing can fail if the ELF * file specifies odd protections. 
So * we don't check the return value */ } } } if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ; if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE; if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC; elf_flags = MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE; vaddr = elf_ppnt->p_vaddr; if (loc->elf_ex.e_type == ET_EXEC || load_addr_set) { elf_flags |= MAP_FIXED; } else if (loc->elf_ex.e_type == ET_DYN) { /* Try and get dynamic programs out of the way of the * default mmap base, as well as whatever program they * might try to exec. This is because the brk will * follow the loader, and is not movable. */ #ifdef CONFIG_X86 load_bias = 0; #else load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr); #endif } error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt, elf_prot, elf_flags, 0); if (BAD_ADDR(error)) { send_sig(SIGKILL, current, 0); retval = IS_ERR((void *)error) ? PTR_ERR((void*)error) : -EINVAL; goto out_free_dentry; } if (!load_addr_set) { load_addr_set = 1; load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset); if (loc->elf_ex.e_type == ET_DYN) { load_bias += error - ELF_PAGESTART(load_bias + vaddr); load_addr += load_bias; reloc_func_desc = load_bias; } } k = elf_ppnt->p_vaddr; if (k < start_code) start_code = k; if (start_data < k) start_data = k; /* * Check to see if the section's size will overflow the * allowed task size. Note that p_filesz must always be * <= p_memsz so it is only necessary to check p_memsz. */ if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz || elf_ppnt->p_memsz > TASK_SIZE || TASK_SIZE - elf_ppnt->p_memsz < k) { /* set_brk can never work. Avoid overflows. 
*/ send_sig(SIGKILL, current, 0); retval = -EINVAL; goto out_free_dentry; } k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz; if (k > elf_bss) elf_bss = k; if ((elf_ppnt->p_flags & PF_X) && end_code < k) end_code = k; if (end_data < k) end_data = k; k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz; if (k > elf_brk) elf_brk = k; } loc->elf_ex.e_entry += load_bias; elf_bss += load_bias; elf_brk += load_bias; start_code += load_bias; end_code += load_bias; start_data += load_bias; end_data += load_bias; /* Calling set_brk effectively mmaps the pages that we need * for the bss and break sections. We must do this before * mapping in the interpreter, to make sure it doesn't wind * up getting placed where the bss needs to go. */ retval = set_brk(elf_bss, elf_brk); if (retval) { send_sig(SIGKILL, current, 0); goto out_free_dentry; } if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) { send_sig(SIGSEGV, current, 0); retval = -EFAULT; /* Nobody gets to see this, but.. */ goto out_free_dentry; } if (elf_interpreter) { unsigned long uninitialized_var(interp_map_addr); elf_entry = load_elf_interp(&loc->interp_elf_ex, interpreter, &interp_map_addr, load_bias); if (!IS_ERR((void *)elf_entry)) { /* * load_elf_interp() returns relocation * adjustment */ interp_load_addr = elf_entry; elf_entry += loc->interp_elf_ex.e_entry; } if (BAD_ADDR(elf_entry)) { force_sig(SIGSEGV, current); retval = IS_ERR((void *)elf_entry) ? 
(int)elf_entry : -EINVAL; goto out_free_dentry; } reloc_func_desc = interp_load_addr; allow_write_access(interpreter); fput(interpreter); kfree(elf_interpreter); } else { elf_entry = loc->elf_ex.e_entry; if (BAD_ADDR(elf_entry)) { force_sig(SIGSEGV, current); retval = -EINVAL; goto out_free_dentry; } } kfree(elf_phdata); set_binfmt(&elf_format); #ifdef ARCH_HAS_SETUP_ADDITIONAL_PAGES retval = arch_setup_additional_pages(bprm, !!elf_interpreter); if (retval < 0) { send_sig(SIGKILL, current, 0); goto out; } #endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */ install_exec_creds(bprm); current->flags &= ~PF_FORKNOEXEC; retval = create_elf_tables(bprm, &loc->elf_ex, load_addr, interp_load_addr); if (retval < 0) { send_sig(SIGKILL, current, 0); goto out; } /* N.B. passed_fileno might not be initialized? */ current->mm->end_code = end_code; current->mm->start_code = start_code; current->mm->start_data = start_data; current->mm->end_data = end_data; current->mm->start_stack = bprm->p; #ifdef arch_randomize_brk if ((current->flags & PF_RANDOMIZE) && (randomize_va_space > 1)) current->mm->brk = current->mm->start_brk = arch_randomize_brk(current->mm); #endif if (current->personality & MMAP_PAGE_ZERO) { /* Why this, you ask??? Well SVr4 maps page 0 as read-only, and some applications "depend" upon this behavior. Since we do not have the power to recompile these, we emulate the SVr4 behavior. Sigh. */ down_write(&current->mm->mmap_sem); error = do_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC, MAP_FIXED | MAP_PRIVATE, 0); up_write(&current->mm->mmap_sem); } #ifdef ELF_PLAT_INIT /* * The ABI may specify that certain registers be set up in special * ways (on i386 %edx is the address of a DT_FINI function, for * example. In addition, it may also specify (eg, PowerPC64 ELF) * that the e_entry field is the address of the function descriptor * for the startup routine, rather than the address of the startup * routine itself. 
This macro performs whatever initialization to * the regs structure is required as well as any relocations to the * function descriptor entries when executing dynamically links apps. */ ELF_PLAT_INIT(regs, reloc_func_desc); #endif start_thread(regs, elf_entry, bprm->p); retval = 0; out: kfree(loc); out_ret: return retval; /* error cleanup */ out_free_dentry: allow_write_access(interpreter); if (interpreter) fput(interpreter); out_free_interp: kfree(elf_interpreter); out_free_ph: kfree(elf_phdata); goto out; }
115
139,445
0
static int CVE_2010_1083_PATCHED_processcompl_compat(struct async *as, void __user * __user *arg) { struct urb *urb = as->urb; struct usbdevfs_urb32 __user *userurb = as->userurb; void __user *addr = as->userurb; unsigned int i; if (as->userbuffer) if (copy_to_user(as->userbuffer, urb->transfer_buffer, urb->transfer_buffer_length)) return -EFAULT; if (put_user(as->status, &userurb->status)) return -EFAULT; if (put_user(urb->actual_length, &userurb->actual_length)) return -EFAULT; if (put_user(urb->error_count, &userurb->error_count)) return -EFAULT; if (usb_endpoint_xfer_isoc(&urb->ep->desc)) { for (i = 0; i < urb->number_of_packets; i++) { if (put_user(urb->iso_frame_desc[i].actual_length, &userurb->iso_frame_desc[i].actual_length)) return -EFAULT; if (put_user(urb->iso_frame_desc[i].status, &userurb->iso_frame_desc[i].status)) return -EFAULT; } } if (put_user(ptr_to_compat(addr), (u32 __user *)arg)) return -EFAULT; return 0; }
116
27,479
0
CVE_2010_2805_PATCHED_FT_Stream_EnterFrame( FT_Stream stream, FT_ULong count ) { FT_Error error = FT_Err_Ok; FT_ULong read_bytes; /* check for nested frame access */ FT_ASSERT( stream && stream->cursor == 0 ); if ( stream->read ) { /* allocate the frame in memory */ FT_Memory memory = stream->memory; /* simple sanity check */ if ( count > stream->size ) { FT_ERROR(( "CVE_2010_2805_PATCHED_FT_Stream_EnterFrame:" " frame size (%lu) larger than stream size (%lu)\n", count, stream->size )); error = FT_Err_Invalid_Stream_Operation; goto Exit; } #ifdef FT_DEBUG_MEMORY /* assume _ft_debug_file and _ft_debug_lineno are already set */ stream->base = (unsigned char*)ft_mem_qalloc( memory, count, &error ); if ( error ) goto Exit; #else if ( FT_QALLOC( stream->base, count ) ) goto Exit; #endif /* read it */ read_bytes = stream->read( stream, stream->pos, stream->base, count ); if ( read_bytes < count ) { FT_ERROR(( "CVE_2010_2805_PATCHED_FT_Stream_EnterFrame:" " invalid read; expected %lu bytes, got %lu\n", count, read_bytes )); FT_FREE( stream->base ); error = FT_Err_Invalid_Stream_Operation; } stream->cursor = stream->base; stream->limit = stream->cursor + count; stream->pos += read_bytes; } else { /* check current and new position */ if ( stream->pos >= stream->size || stream->size - stream->pos < count ) { FT_ERROR(( "CVE_2010_2805_PATCHED_FT_Stream_EnterFrame:" " invalid i/o; pos = 0x%lx, count = %lu, size = 0x%lx\n", stream->pos, count, stream->size )); error = FT_Err_Invalid_Stream_Operation; goto Exit; } /* set cursor */ stream->cursor = stream->base + stream->pos; stream->limit = stream->cursor + count; stream->pos += count; } Exit: return error; }
117
117,440
0
static int CVE_2010_2962_PATCHED_i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, struct drm_i915_gem_pwrite *args, struct drm_file *file_priv) { struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); drm_i915_private_t *dev_priv = dev->dev_private; ssize_t remain; loff_t offset, page_base; char __user *user_data; int page_offset, page_length; int ret; user_data = (char __user *) (uintptr_t) args->data_ptr; remain = args->size; mutex_lock(&dev->struct_mutex); ret = i915_gem_object_pin(obj, 0); if (ret) { mutex_unlock(&dev->struct_mutex); return ret; } ret = i915_gem_object_set_to_gtt_domain(obj, 1); if (ret) goto fail; obj_priv = to_intel_bo(obj); offset = obj_priv->gtt_offset + args->offset; while (remain > 0) { /* Operation in this page * * page_base = page offset within aperture * page_offset = offset within page * page_length = bytes to copy for this page */ page_base = (offset & ~(PAGE_SIZE-1)); page_offset = offset & (PAGE_SIZE-1); page_length = remain; if ((page_offset + remain) > PAGE_SIZE) page_length = PAGE_SIZE - page_offset; ret = fast_user_write (dev_priv->mm.gtt_mapping, page_base, page_offset, user_data, page_length); /* If we get a fault while copying data, then (presumably) our * source page isn't available. Return the error and we'll * retry in the slow path. */ if (ret) goto fail; remain -= page_length; user_data += page_length; offset += page_length; } fail: i915_gem_object_unpin(obj); mutex_unlock(&dev->struct_mutex); return ret; }
118
161,491
0
int CVE_2010_3015_PATCHED_ext4_ext_get_blocks(handle_t *handle, struct inode *inode, ext4_lblk_t iblock, unsigned int max_blocks, struct buffer_head *bh_result, int flags) { struct ext4_ext_path *path = NULL; struct ext4_extent_header *eh; struct ext4_extent newex, *ex; ext4_fsblk_t newblock; int err = 0, depth, ret, cache_type; unsigned int allocated = 0; struct ext4_allocation_request ar; ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio; __clear_bit(BH_New, &bh_result->b_state); ext_debug("blocks %u/%u requested for inode %lu\n", iblock, max_blocks, inode->i_ino); /* check in cache */ cache_type = ext4_ext_in_cache(inode, iblock, &newex); if (cache_type) { if (cache_type == EXT4_EXT_CACHE_GAP) { if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) { /* * block isn't allocated yet and * user doesn't want to allocate it */ goto out2; } /* we should allocate requested block */ } else if (cache_type == EXT4_EXT_CACHE_EXTENT) { /* block is already allocated */ newblock = iblock - le32_to_cpu(newex.ee_block) + ext_pblock(&newex); /* number of remaining blocks in the extent */ allocated = ext4_ext_get_actual_len(&newex) - (iblock - le32_to_cpu(newex.ee_block)); goto out; } else { BUG(); } } /* find extent for this block */ path = ext4_ext_find_extent(inode, iblock, NULL); if (IS_ERR(path)) { err = PTR_ERR(path); path = NULL; goto out2; } depth = ext_depth(inode); /* * consistent leaf must not be empty; * this situation is possible, though, _during_ tree modification; * this is why assert can't be put in ext4_ext_find_extent() */ if (path[depth].p_ext == NULL && depth != 0) { ext4_error(inode->i_sb, __func__, "bad extent address " "inode: %lu, iblock: %d, depth: %d", inode->i_ino, iblock, depth); err = -EIO; goto out2; } eh = path[depth].p_hdr; ex = path[depth].p_ext; if (ex) { ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block); ext4_fsblk_t ee_start = ext_pblock(ex); unsigned short ee_len; /* * Uninitialized extents are treated as holes, except that * we split out initialized 
portions during a write. */ ee_len = ext4_ext_get_actual_len(ex); /* if found extent covers block, simply return it */ if (in_range(iblock, ee_block, ee_len)) { newblock = iblock - ee_block + ee_start; /* number of remaining blocks in the extent */ allocated = ee_len - (iblock - ee_block); ext_debug("%u fit into %u:%d -> %llu\n", iblock, ee_block, ee_len, newblock); /* Do not put uninitialized extent in the cache */ if (!ext4_ext_is_uninitialized(ex)) { ext4_ext_put_in_cache(inode, ee_block, ee_len, ee_start, EXT4_EXT_CACHE_EXTENT); goto out; } ret = ext4_ext_handle_uninitialized_extents(handle, inode, iblock, max_blocks, path, flags, allocated, bh_result, newblock); return ret; } } /* * requested block isn't allocated yet; * we couldn't try to create block if create flag is zero */ if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) { /* * put just found gap into cache to speed up * subsequent requests */ ext4_ext_put_gap_in_cache(inode, path, iblock); goto out2; } /* * Okay, we need to do block allocation. */ /* find neighbour allocated blocks */ ar.lleft = iblock; err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft); if (err) goto out2; ar.lright = iblock; err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright); if (err) goto out2; /* * See if request is beyond maximum number of blocks we can have in * a single extent. For an initialized extent this limit is * EXT_INIT_MAX_LEN and for an uninitialized extent this limit is * EXT_UNINIT_MAX_LEN. 
*/ if (max_blocks > EXT_INIT_MAX_LEN && !(flags & EXT4_GET_BLOCKS_UNINIT_EXT)) max_blocks = EXT_INIT_MAX_LEN; else if (max_blocks > EXT_UNINIT_MAX_LEN && (flags & EXT4_GET_BLOCKS_UNINIT_EXT)) max_blocks = EXT_UNINIT_MAX_LEN; /* Check if we can really insert (iblock)::(iblock+max_blocks) extent */ newex.ee_block = cpu_to_le32(iblock); newex.ee_len = cpu_to_le16(max_blocks); err = ext4_ext_check_overlap(inode, &newex, path); if (err) allocated = ext4_ext_get_actual_len(&newex); else allocated = max_blocks; /* allocate new block */ ar.inode = inode; ar.goal = ext4_ext_find_goal(inode, path, iblock); ar.logical = iblock; ar.len = allocated; if (S_ISREG(inode->i_mode)) ar.flags = EXT4_MB_HINT_DATA; else /* disable in-core preallocation for non-regular files */ ar.flags = 0; newblock = ext4_mb_new_blocks(handle, &ar, &err); if (!newblock) goto out2; ext_debug("allocate new block: goal %llu, found %llu/%u\n", ar.goal, newblock, allocated); /* try to insert new extent into found leaf and return */ ext4_ext_store_pblock(&newex, newblock); newex.ee_len = cpu_to_le16(ar.len); /* Mark uninitialized */ if (flags & EXT4_GET_BLOCKS_UNINIT_EXT){ ext4_ext_mark_uninitialized(&newex); /* * io_end structure was created for every async * direct IO write to the middle of the file. * To avoid unecessary convertion for every aio dio rewrite * to the mid of file, here we flag the IO that is really * need the convertion. * For non asycn direct IO case, flag the inode state * that we need to perform convertion when IO is done. 
*/ if (flags == EXT4_GET_BLOCKS_DIO_CREATE_EXT) { if (io) io->flag = DIO_AIO_UNWRITTEN; else EXT4_I(inode)->i_state |= EXT4_STATE_DIO_UNWRITTEN;; } } err = ext4_ext_insert_extent(handle, inode, path, &newex, flags); if (err) { /* free data blocks we just allocated */ /* not a good idea to call discard here directly, * but otherwise we'd need to call it every free() */ ext4_discard_preallocations(inode); ext4_free_blocks(handle, inode, 0, ext_pblock(&newex), ext4_ext_get_actual_len(&newex), 0); goto out2; } /* previous routine could use block we allocated */ newblock = ext_pblock(&newex); allocated = ext4_ext_get_actual_len(&newex); if (allocated > max_blocks) allocated = max_blocks; set_buffer_new(bh_result); /* * Update reserved blocks/metadata blocks after successful * block allocation which had been deferred till now. */ if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) ext4_da_update_reserve_space(inode, allocated, 1); /* * Cache the extent and update transaction to commit on fdatasync only * when it is _not_ an uninitialized extent. */ if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0) { ext4_ext_put_in_cache(inode, iblock, allocated, newblock, EXT4_EXT_CACHE_EXTENT); ext4_update_inode_fsync_trans(handle, inode, 1); } else ext4_update_inode_fsync_trans(handle, inode, 0); out: if (allocated > max_blocks) allocated = max_blocks; ext4_ext_show_leaf(inode, path); set_buffer_mapped(bh_result); bh_result->b_bdev = inode->i_sb->s_bdev; bh_result->b_blocknr = newblock; out2: if (path) { ext4_ext_drop_refs(path); kfree(path); } return err ? err : allocated; }
119
180,915
0
static int CVE_2010_3876_PATCHED_packet_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer) { struct net_device *dev; struct sock *sk = sock->sk; struct packet_sock *po = pkt_sk(sk); DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr); if (peer) return -EOPNOTSUPP; sll->sll_family = AF_PACKET; sll->sll_ifindex = po->ifindex; sll->sll_protocol = po->num; sll->sll_pkttype = 0; rcu_read_lock(); dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex); if (dev) { sll->sll_hatype = dev->type; sll->sll_halen = dev->addr_len; memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len); } else { sll->sll_hatype = 0; /* Bad: we have no ARPHRD_UNSPEC */ sll->sll_halen = 0; } rcu_read_unlock(); *uaddr_len = offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen; return 0; }
120
120,384
0
/***************************************************************************** * CVE_2010_3907_PATCHED_Close *****************************************************************************/ static void CVE_2010_3907_PATCHED_Close( vlc_object_t *p_this ) { demux_t *p_demux = (demux_t*)p_this; demux_sys_t *p_sys = p_demux->p_sys; for( int i = 0; i < p_sys->i_track; i++ ) { real_track_t *tk = p_sys->track[i]; es_format_Clean( &tk->fmt ); if( tk->p_frame ) block_Release( tk->p_frame ); for( int j = 0; j < tk->i_subpackets; j++ ) { if( tk->p_subpackets[ j ] ) block_Release( tk->p_subpackets[ j ] ); } free( tk->p_subpackets ); free( tk->p_subpackets_timecode ); if( tk->p_sipr_packet ) block_Release( tk->p_sipr_packet ); free( tk ); } if( p_sys->i_track > 0 ) free( p_sys->track ); free( p_sys->psz_title ); free( p_sys->psz_artist ); free( p_sys->psz_copyright ); free( p_sys->psz_description ); free( p_sys->p_index ); free( p_sys ); }
121
168,086
0
long CVE_2010_4256_PATCHED_pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg) { struct pipe_inode_info *pipe; long ret; pipe = get_pipe_info(file); if (!pipe) return -EBADF; mutex_lock(&pipe->inode->i_mutex); switch (cmd) { case F_SETPIPE_SZ: { unsigned int size, nr_pages; size = round_pipe_size(arg); nr_pages = size >> PAGE_SHIFT; ret = -EINVAL; if (!nr_pages) goto out; if (!capable(CAP_SYS_RESOURCE) && size > pipe_max_size) { ret = -EPERM; goto out; } ret = pipe_set_size(pipe, nr_pages); break; } case F_GETPIPE_SZ: ret = pipe->buffers * PAGE_SIZE; break; default: ret = -EINVAL; break; } out: mutex_unlock(&pipe->inode->i_mutex); return ret; }
122
152,976
0
int __init CVE_2010_4347_PATCHED_acpi_debugfs_init(void) { struct dentry *acpi_dir, *cm_dentry; acpi_dir = debugfs_create_dir("acpi", NULL); if (!acpi_dir) goto err; cm_dentry = debugfs_create_file("custom_method", S_IWUSR, acpi_dir, NULL, &cm_fops); if (!cm_dentry) goto err; return 0; err: if (acpi_dir) debugfs_remove(acpi_dir); return -EINVAL; }
123
40,966
0
int CVE_2011_0014_PATCHED_ssl_parse_clienthello_tlsext(SSL *s, unsigned char **p, unsigned char *d, int n, int *al) { unsigned short type; unsigned short size; unsigned short len; unsigned char *data = *p; int renegotiate_seen = 0; s->servername_done = 0; s->tlsext_status_type = -1; if (data >= (d+n-2)) goto ri_check; n2s(data,len); if (data > (d+n-len)) goto ri_check; while (data <= (d+n-4)) { n2s(data,type); n2s(data,size); if (data+size > (d+n)) goto ri_check; #if 0 fprintf(stderr,"Received extension type %d size %d\n",type,size); #endif if (s->tlsext_debug_cb) s->tlsext_debug_cb(s, 0, type, data, size, s->tlsext_debug_arg); /* The servername extension is treated as follows: - Only the hostname type is supported with a maximum length of 255. - The servername is rejected if too long or if it contains zeros, in which case an fatal alert is generated. - The servername field is maintained together with the session cache. - When a session is resumed, the servername call back invoked in order to allow the application to position itself to the right context. - The servername is acknowledged if it is new for a session or when it is identical to a previously used for the same session. Applications can control the behaviour. They can at any time set a 'desirable' servername for a new SSL object. This can be the case for example with HTTPS when a Host: header field is received and a renegotiation is requested. In this case, a possible servername presented in the new client hello is only acknowledged if it matches the value of the Host: field. - Applications must use SSL_OP_NO_SESSION_RESUMPTION_ON_RENEGOTIATION if they provide for changing an explicit servername context for the session, i.e. when the session has been established with a servername extension. - On session reconnect, the servername extension may be absent. 
*/ if (type == TLSEXT_TYPE_server_name) { unsigned char *sdata; int servname_type; int dsize; if (size < 2) { *al = SSL_AD_DECODE_ERROR; return 0; } n2s(data,dsize); size -= 2; if (dsize > size ) { *al = SSL_AD_DECODE_ERROR; return 0; } sdata = data; while (dsize > 3) { servname_type = *(sdata++); n2s(sdata,len); dsize -= 3; if (len > dsize) { *al = SSL_AD_DECODE_ERROR; return 0; } if (s->servername_done == 0) switch (servname_type) { case TLSEXT_NAMETYPE_host_name: if (s->session->tlsext_hostname == NULL) { if (len > TLSEXT_MAXLEN_host_name || ((s->session->tlsext_hostname = OPENSSL_malloc(len+1)) == NULL)) { *al = TLS1_AD_UNRECOGNIZED_NAME; return 0; } memcpy(s->session->tlsext_hostname, sdata, len); s->session->tlsext_hostname[len]='\0'; if (strlen(s->session->tlsext_hostname) != len) { OPENSSL_free(s->session->tlsext_hostname); s->session->tlsext_hostname = NULL; *al = TLS1_AD_UNRECOGNIZED_NAME; return 0; } s->servername_done = 1; } else s->servername_done = strlen(s->session->tlsext_hostname) == len && strncmp(s->session->tlsext_hostname, (char *)sdata, len) == 0; break; default: break; } dsize -= len; } if (dsize != 0) { *al = SSL_AD_DECODE_ERROR; return 0; } } #ifndef OPENSSL_NO_EC else if (type == TLSEXT_TYPE_ec_point_formats && s->version != DTLS1_VERSION) { unsigned char *sdata = data; int ecpointformatlist_length = *(sdata++); if (ecpointformatlist_length != size - 1) { *al = TLS1_AD_DECODE_ERROR; return 0; } s->session->tlsext_ecpointformatlist_length = 0; if (s->session->tlsext_ecpointformatlist != NULL) OPENSSL_free(s->session->tlsext_ecpointformatlist); if ((s->session->tlsext_ecpointformatlist = OPENSSL_malloc(ecpointformatlist_length)) == NULL) { *al = TLS1_AD_INTERNAL_ERROR; return 0; } s->session->tlsext_ecpointformatlist_length = ecpointformatlist_length; memcpy(s->session->tlsext_ecpointformatlist, sdata, ecpointformatlist_length); #if 0 fprintf(stderr,"CVE_2011_0014_PATCHED_ssl_parse_clienthello_tlsext s->session->tlsext_ecpointformatlist 
(length=%i) ", s->session->tlsext_ecpointformatlist_length); sdata = s->session->tlsext_ecpointformatlist; for (i = 0; i < s->session->tlsext_ecpointformatlist_length; i++) fprintf(stderr,"%i ",*(sdata++)); fprintf(stderr,"\n"); #endif } else if (type == TLSEXT_TYPE_elliptic_curves && s->version != DTLS1_VERSION) { unsigned char *sdata = data; int ellipticcurvelist_length = (*(sdata++) << 8); ellipticcurvelist_length += (*(sdata++)); if (ellipticcurvelist_length != size - 2) { *al = TLS1_AD_DECODE_ERROR; return 0; } s->session->tlsext_ellipticcurvelist_length = 0; if (s->session->tlsext_ellipticcurvelist != NULL) OPENSSL_free(s->session->tlsext_ellipticcurvelist); if ((s->session->tlsext_ellipticcurvelist = OPENSSL_malloc(ellipticcurvelist_length)) == NULL) { *al = TLS1_AD_INTERNAL_ERROR; return 0; } s->session->tlsext_ellipticcurvelist_length = ellipticcurvelist_length; memcpy(s->session->tlsext_ellipticcurvelist, sdata, ellipticcurvelist_length); #if 0 fprintf(stderr,"CVE_2011_0014_PATCHED_ssl_parse_clienthello_tlsext s->session->tlsext_ellipticcurvelist (length=%i) ", s->session->tlsext_ellipticcurvelist_length); sdata = s->session->tlsext_ellipticcurvelist; for (i = 0; i < s->session->tlsext_ellipticcurvelist_length; i++) fprintf(stderr,"%i ",*(sdata++)); fprintf(stderr,"\n"); #endif } #endif /* OPENSSL_NO_EC */ #ifdef TLSEXT_TYPE_opaque_prf_input else if (type == TLSEXT_TYPE_opaque_prf_input && s->version != DTLS1_VERSION) { unsigned char *sdata = data; if (size < 2) { *al = SSL_AD_DECODE_ERROR; return 0; } n2s(sdata, s->s3->client_opaque_prf_input_len); if (s->s3->client_opaque_prf_input_len != size - 2) { *al = SSL_AD_DECODE_ERROR; return 0; } if (s->s3->client_opaque_prf_input != NULL) /* shouldn't really happen */ OPENSSL_free(s->s3->client_opaque_prf_input); if (s->s3->client_opaque_prf_input_len == 0) s->s3->client_opaque_prf_input = OPENSSL_malloc(1); /* dummy byte just to get non-NULL */ else s->s3->client_opaque_prf_input = BUF_memdup(sdata, 
s->s3->client_opaque_prf_input_len); if (s->s3->client_opaque_prf_input == NULL) { *al = TLS1_AD_INTERNAL_ERROR; return 0; } } #endif else if (type == TLSEXT_TYPE_session_ticket) { if (s->tls_session_ticket_ext_cb && !s->tls_session_ticket_ext_cb(s, data, size, s->tls_session_ticket_ext_cb_arg)) { *al = TLS1_AD_INTERNAL_ERROR; return 0; } } else if (type == TLSEXT_TYPE_renegotiate) { if(!ssl_parse_clienthello_renegotiate_ext(s, data, size, al)) return 0; renegotiate_seen = 1; } else if (type == TLSEXT_TYPE_status_request && s->version != DTLS1_VERSION && s->ctx->tlsext_status_cb) { if (size < 5) { *al = SSL_AD_DECODE_ERROR; return 0; } s->tlsext_status_type = *data++; size--; if (s->tlsext_status_type == TLSEXT_STATUSTYPE_ocsp) { const unsigned char *sdata; int dsize; /* Read in responder_id_list */ n2s(data,dsize); size -= 2; if (dsize > size ) { *al = SSL_AD_DECODE_ERROR; return 0; } while (dsize > 0) { OCSP_RESPID *id; int idsize; if (dsize < 4) { *al = SSL_AD_DECODE_ERROR; return 0; } n2s(data, idsize); dsize -= 2 + idsize; size -= 2 + idsize; if (dsize < 0) { *al = SSL_AD_DECODE_ERROR; return 0; } sdata = data; data += idsize; id = d2i_OCSP_RESPID(NULL, &sdata, idsize); if (!id) { *al = SSL_AD_DECODE_ERROR; return 0; } if (data != sdata) { OCSP_RESPID_free(id); *al = SSL_AD_DECODE_ERROR; return 0; } if (!s->tlsext_ocsp_ids && !(s->tlsext_ocsp_ids = sk_OCSP_RESPID_new_null())) { OCSP_RESPID_free(id); *al = SSL_AD_INTERNAL_ERROR; return 0; } if (!sk_OCSP_RESPID_push( s->tlsext_ocsp_ids, id)) { OCSP_RESPID_free(id); *al = SSL_AD_INTERNAL_ERROR; return 0; } } /* Read in request_extensions */ if (size < 2) { *al = SSL_AD_DECODE_ERROR; return 0; } n2s(data,dsize); size -= 2; if (dsize != size) { *al = SSL_AD_DECODE_ERROR; return 0; } sdata = data; if (dsize > 0) { s->tlsext_ocsp_exts = d2i_X509_EXTENSIONS(NULL, &sdata, dsize); if (!s->tlsext_ocsp_exts || (data + dsize != sdata)) { *al = SSL_AD_DECODE_ERROR; return 0; } } } /* We don't know what to do with any other 
type * so ignore it. */ else s->tlsext_status_type = -1; } /* session ticket processed earlier */ data+=size; } *p = data; ri_check: /* Need RI if renegotiating */ if (!renegotiate_seen && s->new_session && !(s->options & SSL_OP_ALLOW_UNSAFE_LEGACY_RENEGOTIATION)) { *al = SSL_AD_HANDSHAKE_FAILURE; SSLerr(SSL_F_SSL_PARSE_CLIENTHELLO_TLSEXT, SSL_R_UNSAFE_LEGACY_RENEGOTIATION_DISABLED); return 0; } return 1; }
124
7,613
0
nsresult CVE_2011_0059_PATCHED_nsPluginHost::NewPluginURLStream(const nsString& aURL, nsIPluginInstance *aInstance, nsIPluginStreamListener* aListener, const char *aPostData, PRBool aIsFile, PRUint32 aPostDataLen, const char *aHeadersData, PRUint32 aHeadersDataLen) { nsCOMPtr<nsIURI> url; nsAutoString absUrl; nsresult rv; if (aURL.Length() <= 0) return NS_OK; // get the full URL of the document that the plugin is embedded // in to create an absolute url in case aURL is relative nsCOMPtr<nsIDocument> doc; nsCOMPtr<nsIPluginInstanceOwner> owner; aInstance->GetOwner(getter_AddRefs(owner)); if (owner) { rv = owner->GetDocument(getter_AddRefs(doc)); if (NS_SUCCEEDED(rv) && doc) { // Create an absolute URL rv = NS_MakeAbsoluteURI(absUrl, aURL, doc->GetBaseURI()); } } if (absUrl.IsEmpty()) absUrl.Assign(aURL); rv = NS_NewURI(getter_AddRefs(url), absUrl); if (NS_SUCCEEDED(rv)) { nsCOMPtr<nsIPluginTagInfo> pti = do_QueryInterface(owner); nsCOMPtr<nsIDOMElement> element; if (pti) pti->GetDOMElement(getter_AddRefs(element)); PRInt16 shouldLoad = nsIContentPolicy::ACCEPT; rv = NS_CheckContentLoadPolicy(nsIContentPolicy::TYPE_OBJECT_SUBREQUEST, url, (doc ? 
doc->NodePrincipal() : nsnull), element, EmptyCString(), //mime guess nsnull, //extra &shouldLoad); if (NS_FAILED(rv)) return rv; if (NS_CP_REJECTED(shouldLoad)) { // Disallowed by content policy return NS_ERROR_CONTENT_BLOCKED; } nsRefPtr<nsPluginStreamListenerPeer> listenerPeer = new nsPluginStreamListenerPeer(); if (listenerPeer == NULL) return NS_ERROR_OUT_OF_MEMORY; rv = listenerPeer->Initialize(url, aInstance, aListener); if (NS_SUCCEEDED(rv)) { nsCOMPtr<nsIContentUtils> contentUtils = do_GetService("@mozilla.org/content/contentutils;1"); NS_ENSURE_TRUE(contentUtils, NS_ERROR_OUT_OF_MEMORY); nsCOMPtr<nsIInterfaceRequestor> sameOriginChecker = contentUtils->GetSameOriginChecker(); NS_ENSURE_TRUE(sameOriginChecker, NS_ERROR_OUT_OF_MEMORY); nsCOMPtr<nsIChannel> channel; rv = NS_NewChannel(getter_AddRefs(channel), url, nsnull, nsnull, /* do not add this internal plugin's channel on the load group otherwise this channel could be canceled form |nsDocShell::OnLinkClickSync| bug 166613 */ sameOriginChecker); if (NS_FAILED(rv)) return rv; if (doc) { // Set the owner of channel to the document principal... channel->SetOwner(doc->NodePrincipal()); // And if it's a script allow it to execute against the // document's script context. 
nsCOMPtr<nsIScriptChannel> scriptChannel(do_QueryInterface(channel)); if (scriptChannel) { scriptChannel->SetExecutionPolicy(nsIScriptChannel::EXECUTE_NORMAL); // Plug-ins seem to depend on javascript: URIs running synchronously scriptChannel->SetExecuteAsync(PR_FALSE); } } // deal with headers and post data nsCOMPtr<nsIHttpChannel> httpChannel(do_QueryInterface(channel)); if (httpChannel) { if (aPostData) { nsCOMPtr<nsIInputStream> postDataStream; rv = NS_NewPluginPostDataStream(getter_AddRefs(postDataStream), (const char*)aPostData, aPostDataLen, aIsFile); if (!postDataStream) { NS_RELEASE(aInstance); return NS_ERROR_UNEXPECTED; } // XXX it's a bit of a hack to rewind the postdata stream // here but it has to be done in case the post data is // being reused multiple times. nsCOMPtr<nsISeekableStream> postDataSeekable(do_QueryInterface(postDataStream)); if (postDataSeekable) postDataSeekable->Seek(nsISeekableStream::NS_SEEK_SET, 0); nsCOMPtr<nsIUploadChannel> uploadChannel(do_QueryInterface(httpChannel)); NS_ASSERTION(uploadChannel, "http must support nsIUploadChannel"); uploadChannel->SetUploadStream(postDataStream, EmptyCString(), -1); } if (aHeadersData) rv = AddHeadersToChannel(aHeadersData, aHeadersDataLen, httpChannel); } rv = channel->AsyncOpen(listenerPeer, nsnull); } } return rv; }
125
169,485
0
nsAccessControlLRUCache::CacheEntry* CVE_2011_0069_PATCHED_nsAccessControlLRUCache::GetEntry(nsIURI* aURI, nsIPrincipal* aPrincipal, PRBool aWithCredentials, PRBool aCreate) { nsCString key; if (!GetCacheKey(aURI, aPrincipal, aWithCredentials, key)) { NS_WARNING("Invalid cache key!"); return nsnull; } CacheEntry* entry; if (mTable.Get(key, &entry)) { // Entry already existed so just return it. Also update the LRU list. // Move to the head of the list. PR_REMOVE_LINK(entry); PR_INSERT_LINK(entry, &mList); return entry; } if (!aCreate) { return nsnull; } // This is a new entry, allocate and insert into the table now so that any // failures don't cause items to be removed from a full cache. entry = new CacheEntry(key); if (!entry) { NS_WARNING("Failed to allocate new cache entry!"); return nsnull; } NS_ASSERTION(mTable.Count() <= ACCESS_CONTROL_CACHE_SIZE, "Something is borked, too many entries in the cache!"); // Now enforce the max count. if (mTable.Count() == ACCESS_CONTROL_CACHE_SIZE) { // Try to kick out all the expired entries. PRTime now = PR_Now(); mTable.Enumerate(RemoveExpiredEntries, &now); // If that didn't remove anything then kick out the least recently used // entry. if (mTable.Count() == ACCESS_CONTROL_CACHE_SIZE) { CacheEntry* lruEntry = static_cast<CacheEntry*>(PR_LIST_TAIL(&mList)); PR_REMOVE_LINK(lruEntry); // This will delete 'lruEntry'. mTable.Remove(lruEntry->mKey); NS_ASSERTION(mTable.Count() == ACCESS_CONTROL_CACHE_SIZE - 1, "Somehow tried to remove an entry that was never added!"); } } if (!mTable.Put(key, entry)) { // Failed, clean up the new entry. delete entry; NS_WARNING("Failed to add entry to the access control cache!"); return nsnull; } PR_INSERT_LINK(entry, &mList); return entry; }
126
109,569
0
void CVE_2011_0073_PATCHED_Invalidate() { nsTArray<PRInt32> ranges; CollectRanges(this, ranges); InvalidateRanges(mSelection->mTree, ranges); }
127
20,660
0
static int CVE_2011_0521_PATCHED_dvb_ca_ioctl(struct file *file, unsigned int cmd, void *parg) { struct dvb_device *dvbdev = file->private_data; struct av7110 *av7110 = dvbdev->priv; unsigned long arg = (unsigned long) parg; dprintk(8, "av7110:%p\n",av7110); switch (cmd) { case CA_RESET: return ci_ll_reset(&av7110->ci_wbuffer, file, arg, &av7110->ci_slot[0]); break; case CA_GET_CAP: { ca_caps_t cap; cap.slot_num = 2; cap.slot_type = (FW_CI_LL_SUPPORT(av7110->arm_app) ? CA_CI_LINK : CA_CI) | CA_DESCR; cap.descr_num = 16; cap.descr_type = CA_ECD; memcpy(parg, &cap, sizeof(cap)); break; } case CA_GET_SLOT_INFO: { ca_slot_info_t *info=(ca_slot_info_t *)parg; if (info->num < 0 || info->num > 1) return -EINVAL; av7110->ci_slot[info->num].num = info->num; av7110->ci_slot[info->num].type = FW_CI_LL_SUPPORT(av7110->arm_app) ? CA_CI_LINK : CA_CI; memcpy(info, &av7110->ci_slot[info->num], sizeof(ca_slot_info_t)); break; } case CA_GET_MSG: break; case CA_SEND_MSG: break; case CA_GET_DESCR_INFO: { ca_descr_info_t info; info.num = 16; info.type = CA_ECD; memcpy(parg, &info, sizeof (info)); break; } case CA_SET_DESCR: { ca_descr_t *descr = (ca_descr_t*) parg; if (descr->index >= 16) return -EINVAL; if (descr->parity > 1) return -EINVAL; av7110_fw_cmd(av7110, COMTYPE_PIDFILTER, SetDescr, 5, (descr->index<<8)|descr->parity, (descr->cw[0]<<8)|descr->cw[1], (descr->cw[2]<<8)|descr->cw[3], (descr->cw[4]<<8)|descr->cw[5], (descr->cw[6]<<8)|descr->cw[7]); break; } default: return -EINVAL; } return 0; }
128
180,750
0
int CVE_2011_0710_PATCHED_proc_pid_status(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task) { struct mm_struct *mm = get_task_mm(task); task_name(m, task); task_state(m, ns, pid, task); if (mm) { task_mem(m, mm); mmput(mm); } task_sig(m, task); task_cap(m, task); task_cpus_allowed(m, task); cpuset_task_status_allowed(m, task); task_context_switch_counts(m, task); return 0; }
129
178,176
0
void CVE_2011_1019_PATCHED_dev_load(struct net *net, const char *name) { struct net_device *dev; int no_module; rcu_read_lock(); dev = dev_get_by_name_rcu(net, name); rcu_read_unlock(); no_module = !dev; if (no_module && capable(CAP_NET_ADMIN)) no_module = request_module("netdev-%s", name); if (no_module && capable(CAP_SYS_MODULE)) { if (!request_module("%s", name)) pr_err("Loading kernel module for a network device " "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s " "instead\n", name); } }
130
64,602
0
int CVE_2011_1160_PATCHED_tpm_open(struct inode *inode, struct file *file) { int minor = iminor(inode); struct tpm_chip *chip = NULL, *pos; rcu_read_lock(); list_for_each_entry_rcu(pos, &tpm_chip_list, list) { if (pos->vendor.miscdev.minor == minor) { chip = pos; get_device(chip->dev); break; } } rcu_read_unlock(); if (!chip) return -ENODEV; if (test_and_set_bit(0, &chip->is_open)) { dev_dbg(chip->dev, "Another process owns this TPM\n"); put_device(chip->dev); return -EBUSY; } chip->data_buffer = kzalloc(TPM_BUFSIZE, GFP_KERNEL); if (chip->data_buffer == NULL) { clear_bit(0, &chip->is_open); put_device(chip->dev); return -ENOMEM; } atomic_set(&chip->data_pending, 0); file->private_data = chip; return 0; }
131
139,028
0
struct agp_memory *CVE_2011_1747_PATCHED_agp_allocate_memory(struct agp_bridge_data *bridge, size_t page_count, u32 type) { int scratch_pages; struct agp_memory *new; size_t i; int cur_memory; if (!bridge) return NULL; cur_memory = atomic_read(&bridge->current_memory_agp); if ((cur_memory + page_count > bridge->max_memory_agp) || (cur_memory + page_count < page_count)) return NULL; if (type >= AGP_USER_TYPES) { new = agp_generic_alloc_user(page_count, type); if (new) new->bridge = bridge; return new; } if (type != 0) { new = bridge->driver->alloc_by_type(page_count, type); if (new) new->bridge = bridge; return new; } scratch_pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE; new = agp_create_memory(scratch_pages); if (new == NULL) return NULL; if (bridge->driver->agp_alloc_pages) { if (bridge->driver->agp_alloc_pages(bridge, new, page_count)) { agp_free_memory(new); return NULL; } new->bridge = bridge; return new; } for (i = 0; i < page_count; i++) { struct page *page = bridge->driver->agp_alloc_page(bridge); if (page == NULL) { agp_free_memory(new); return NULL; } new->pages[i] = page; new->page_count++; } new->bridge = bridge; return new; }
132
113,319
0
static int CVE_2011_2529_PATCHED_sipsock_read(int *id, int fd, short events, void *ignore) { struct sip_request req; struct ast_sockaddr addr; int res; static char readbuf[65535]; memset(&req, 0, sizeof(req)); res = ast_recvfrom(fd, readbuf, sizeof(readbuf) - 1, 0, &addr); if (res < 0) { #if !defined(__FreeBSD__) if (errno == EAGAIN) ast_log(LOG_NOTICE, "SIP: Received packet with bad UDP checksum\n"); else #endif if (errno != ECONNREFUSED) ast_log(LOG_WARNING, "Recv error: %s\n", strerror(errno)); return 1; } readbuf[res] = '\0'; if (!(req.data = ast_str_create(SIP_MIN_PACKET))) { return 1; } if (ast_str_set(&req.data, 0, "%s", readbuf) == AST_DYNSTR_BUILD_FAILED) { return -1; } /* req.data will have the correct length in case of nulls */ req.len = ast_str_strlen(req.data); req.socket.fd = sipsock; set_socket_transport(&req.socket, SIP_TRANSPORT_UDP); req.socket.tcptls_session = NULL; req.socket.port = htons(ast_sockaddr_port(&bindaddr)); handle_request_do(&req, &addr); deinit_req(&req); return 1; }
133
112,086
0
*/ static enum check_auth_result CVE_2011_2536_PATCHED_register_verify(struct sip_pvt *p, struct ast_sockaddr *addr, struct sip_request *req, const char *uri) { enum check_auth_result res = AUTH_NOT_FOUND; struct sip_peer *peer; char tmp[256]; char *name = NULL, *c, *domain = NULL, *dummy = NULL; char *uri2 = ast_strdupa(uri); terminate_uri(uri2); ast_copy_string(tmp, get_header(req, "To"), sizeof(tmp)); c = get_in_brackets(tmp); c = remove_uri_parameters(c); if (parse_uri(c, "sip:,sips:", &name, &dummy, &domain, NULL)) { ast_log(LOG_NOTICE, "Invalid to address: '%s' from %s (missing sip:) trying to use anyway...\n", c, ast_sockaddr_stringify_addr(addr)); return -1; } SIP_PEDANTIC_DECODE(name); SIP_PEDANTIC_DECODE(domain); /*! \todo XXX here too we interpret a missing @domain as a name-only * URI, whereas the RFC says this is a domain-only uri. */ if (!ast_strlen_zero(domain) && !AST_LIST_EMPTY(&domain_list)) { if (!check_sip_domain(domain, NULL, 0)) { transmit_response(p, "404 Not found (unknown domain)", &p->initreq); return AUTH_UNKNOWN_DOMAIN; } } ast_string_field_set(p, exten, name); build_contact(p); if (req->ignore) { /* Expires is a special case, where we only want to load the peer if this isn't a deregistration attempt */ const char *expires = get_header(req, "Expires"); int expire = atoi(expires); if (ast_strlen_zero(expires)) { /* No expires header; look in Contact */ if ((expires = strcasestr(get_header(req, "Contact"), ";expires="))) { expire = atoi(expires + 9); } } if (!ast_strlen_zero(expires) && expire == 0) { transmit_response_with_date(p, "200 OK", req); return 0; } } peer = find_peer(name, NULL, TRUE, FINDPEERS, FALSE, 0); if (!(peer && ast_apply_ha(peer->ha, addr))) { /* Peer fails ACL check */ if (peer) { unref_peer(peer, "CVE_2011_2536_PATCHED_register_verify: unref_peer: from find_peer operation"); peer = NULL; res = AUTH_ACL_FAILED; } else { res = AUTH_NOT_FOUND; } } if (peer) { ao2_lock(peer); if (!peer->host_dynamic) { ast_log(LOG_ERROR, 
"Peer '%s' is trying to register, but not configured as host=dynamic\n", peer->name); res = AUTH_PEER_NOT_DYNAMIC; } else { ast_copy_flags(&p->flags[0], &peer->flags[0], SIP_NAT_FORCE_RPORT); if (ast_test_flag(&p->flags[1], SIP_PAGE2_REGISTERTRYING)) transmit_response(p, "100 Trying", req); if (!(res = check_auth(p, req, peer->name, peer->secret, peer->md5secret, SIP_REGISTER, uri2, XMIT_UNRELIABLE, req->ignore))) { if (sip_cancel_destroy(p)) ast_log(LOG_WARNING, "Unable to cancel SIP destruction. Expect bad things.\n"); if (check_request_transport(peer, req)) { ast_set_flag(&p->flags[0], SIP_PENDINGBYE); transmit_response_with_date(p, "403 Forbidden", req); res = AUTH_BAD_TRANSPORT; } else { /* We have a successful registration attempt with proper authentication, now, update the peer */ switch (parse_register_contact(p, peer, req)) { case PARSE_REGISTER_DENIED: ast_log(LOG_WARNING, "Registration denied because of contact ACL\n"); transmit_response_with_date(p, "603 Denied", req); peer->lastmsgssent = -1; res = 0; break; case PARSE_REGISTER_FAILED: ast_log(LOG_WARNING, "Failed to parse contact info\n"); transmit_response_with_date(p, "400 Bad Request", req); peer->lastmsgssent = -1; res = 0; break; case PARSE_REGISTER_QUERY: ast_string_field_set(p, fullcontact, peer->fullcontact); transmit_response_with_date(p, "200 OK", req); peer->lastmsgssent = -1; res = 0; break; case PARSE_REGISTER_UPDATE: ast_string_field_set(p, fullcontact, peer->fullcontact); update_peer(peer, p->expiry); /* Say OK and ask subsystem to retransmit msg counter */ transmit_response_with_date(p, "200 OK", req); if (!ast_test_flag((&peer->flags[1]), SIP_PAGE2_SUBSCRIBEMWIONLY)) peer->lastmsgssent = -1; res = 0; break; } } } } ao2_unlock(peer); } if (!peer && sip_cfg.autocreatepeer) { /* Create peer if we have autocreate mode enabled */ peer = temp_peer(name); if (peer) { ao2_t_link(peers, peer, "link peer into peer table"); if (!ast_sockaddr_isnull(&peer->addr)) { ao2_t_link(peers_by_ip, peer, 
"link peer into peers-by-ip table"); } ao2_lock(peer); if (sip_cancel_destroy(p)) ast_log(LOG_WARNING, "Unable to cancel SIP destruction. Expect bad things.\n"); switch (parse_register_contact(p, peer, req)) { case PARSE_REGISTER_DENIED: ast_log(LOG_WARNING, "Registration denied because of contact ACL\n"); transmit_response_with_date(p, "403 Forbidden (ACL)", req); peer->lastmsgssent = -1; res = 0; break; case PARSE_REGISTER_FAILED: ast_log(LOG_WARNING, "Failed to parse contact info\n"); transmit_response_with_date(p, "400 Bad Request", req); peer->lastmsgssent = -1; res = 0; break; case PARSE_REGISTER_QUERY: ast_string_field_set(p, fullcontact, peer->fullcontact); transmit_response_with_date(p, "200 OK", req); peer->lastmsgssent = -1; res = 0; break; case PARSE_REGISTER_UPDATE: ast_string_field_set(p, fullcontact, peer->fullcontact); /* Say OK and ask subsystem to retransmit msg counter */ transmit_response_with_date(p, "200 OK", req); manager_event(EVENT_FLAG_SYSTEM, "PeerStatus", "ChannelType: SIP\r\nPeer: SIP/%s\r\nPeerStatus: Registered\r\nAddress: %s\r\n", peer->name, ast_sockaddr_stringify(addr)); peer->lastmsgssent = -1; res = 0; break; } ao2_unlock(peer); } } if (!peer && sip_cfg.alwaysauthreject && ast_test_flag(&p->flags[1], SIP_PAGE2_REGISTERTRYING)) { /* If we found a peer, we transmit a 100 Trying. Therefore, if we're * trying to avoid leaking information, we MUST also transmit the same * response when we DON'T find a peer. */ transmit_response(p, "100 Trying", req); /* Insert a fake delay between the 100 and the subsequent failure. */ sched_yield(); } if (!res) { ast_devstate_changed(AST_DEVICE_UNKNOWN, "SIP/%s", peer->name); } if (res < 0) { switch (res) { case AUTH_SECRET_FAILED: /* Wrong password in authentication. 
Go away, don't try again until you fixed it */ transmit_response(p, "403 Forbidden (Bad auth)", &p->initreq); if (global_authfailureevents) { const char *peer_addr = ast_strdupa(ast_sockaddr_stringify_addr(addr)); const char *peer_port = ast_strdupa(ast_sockaddr_stringify_port(addr)); manager_event(EVENT_FLAG_SYSTEM, "PeerStatus", "ChannelType: SIP\r\n" "Peer: SIP/%s\r\n" "PeerStatus: Rejected\r\n" "Cause: AUTH_SECRET_FAILED\r\n" "Address: %s\r\n" "Port: %s\r\n", name, peer_addr, peer_port); } break; case AUTH_USERNAME_MISMATCH: /* Username and digest username does not match. Asterisk uses the From: username for authentication. We need the devices to use the same authentication user name until we support proper authentication by digest auth name */ case AUTH_NOT_FOUND: case AUTH_PEER_NOT_DYNAMIC: case AUTH_ACL_FAILED: if (sip_cfg.alwaysauthreject) { transmit_fake_auth_response(p, SIP_REGISTER, &p->initreq, XMIT_UNRELIABLE); if (global_authfailureevents) { const char *peer_addr = ast_strdupa(ast_sockaddr_stringify_addr(addr)); const char *peer_port = ast_strdupa(ast_sockaddr_stringify_port(addr)); manager_event(EVENT_FLAG_SYSTEM, "PeerStatus", "ChannelType: SIP\r\n" "Peer: SIP/%s\r\n" "PeerStatus: Rejected\r\n" "Cause: %s\r\n" "Address: %s\r\n" "Port: %s\r\n", name, res == AUTH_PEER_NOT_DYNAMIC ? 
"AUTH_PEER_NOT_DYNAMIC" : "URI_NOT_FOUND", peer_addr, peer_port); } } else { /* URI not found */ if (res == AUTH_PEER_NOT_DYNAMIC) { transmit_response(p, "403 Forbidden", &p->initreq); if (global_authfailureevents) { const char *peer_addr = ast_strdupa(ast_sockaddr_stringify_addr(addr)); const char *peer_port = ast_strdupa(ast_sockaddr_stringify_port(addr)); manager_event(EVENT_FLAG_SYSTEM, "PeerStatus", "ChannelType: SIP\r\n" "Peer: SIP/%s\r\n" "PeerStatus: Rejected\r\n" "Cause: AUTH_PEER_NOT_DYNAMIC\r\n" "Address: %s\r\n" "Port: %s\r\n", name, peer_addr, peer_port); } } else { transmit_response(p, "404 Not found", &p->initreq); if (global_authfailureevents) { const char *peer_addr = ast_strdupa(ast_sockaddr_stringify_addr(addr)); const char *peer_port = ast_strdupa(ast_sockaddr_stringify_port(addr)); manager_event(EVENT_FLAG_SYSTEM, "PeerStatus", "ChannelType: SIP\r\n" "Peer: SIP/%s\r\n" "PeerStatus: Rejected\r\n" "Cause: %s\r\n" "Address: %s\r\n" "Port: %s\r\n", name, (res == AUTH_USERNAME_MISMATCH) ? "AUTH_USERNAME_MISMATCH" : "URI_NOT_FOUND", peer_addr, peer_port); } } } break; case AUTH_BAD_TRANSPORT: default: break; } } if (peer) { unref_peer(peer, "CVE_2011_2536_PATCHED_register_verify: unref_peer: tossing stack peer pointer at end of func"); } return res; }
134
73,383
0
void CVE_2011_2605_PATCHED_nsCookieService::SetCookieStringInternal(nsIURI *aHostURI, bool aIsForeign, const nsCString &aCookieHeader, const nsCString &aServerTime, PRBool aFromHttp) { NS_ASSERTION(aHostURI, "null host!"); if (!mDBState) { NS_WARNING("No DBState! Profile already closed?"); return; } // get the base domain for the host URI. // e.g. for "www.bbc.co.uk", this would be "bbc.co.uk". // file:// URI's (i.e. with an empty host) are allowed, but any other // scheme must have a non-empty host. A trailing dot in the host // is acceptable. PRBool requireHostMatch; nsCAutoString baseDomain; nsresult rv = GetBaseDomain(aHostURI, baseDomain, requireHostMatch); if (NS_FAILED(rv)) { COOKIE_LOGFAILURE(SET_COOKIE, aHostURI, aCookieHeader, "couldn't get base domain from URI"); return; } // check default prefs CookieStatus cookieStatus = CheckPrefs(aHostURI, aIsForeign, baseDomain, requireHostMatch, aCookieHeader.get()); // fire a notification if cookie was rejected (but not if there was an error) switch (cookieStatus) { case STATUS_REJECTED: NotifyRejected(aHostURI); case STATUS_REJECTED_WITH_ERROR: return; default: break; } // parse server local time. this is not just done here for efficiency // reasons - if there's an error parsing it, and we need to default it // to the current time, we must do it here since the current time in // SetCookieInternal() will change for each cookie processed (e.g. if the // user is prompted). PRTime tempServerTime; PRInt64 serverTime; PRStatus result = PR_ParseTimeString(aServerTime.get(), PR_TRUE, &tempServerTime); if (result == PR_SUCCESS) { serverTime = tempServerTime / PR_USEC_PER_SEC; } else { serverTime = PR_Now() / PR_USEC_PER_SEC; } // process each cookie in the header nsDependentCString cookieHeader(aCookieHeader); while (SetCookieInternal(aHostURI, baseDomain, requireHostMatch, cookieStatus, cookieHeader, serverTime, aFromHttp)) { // document.cookie can only set one cookie at a time if (!aFromHttp) break; } }
135
2,202
0
static void * CVE_2011_2928_PATCHED_befs_follow_link(struct dentry *dentry, struct nameidata *nd) { befs_inode_info *befs_ino = BEFS_I(dentry->d_inode); char *link; if (befs_ino->i_flags & BEFS_LONG_SYMLINK) { struct super_block *sb = dentry->d_sb; befs_data_stream *data = &befs_ino->i_data.ds; befs_off_t len = data->size; if (len == 0) { befs_error(sb, "Long symlink with illegal length"); link = ERR_PTR(-EIO); } else { befs_debug(sb, "Follow long symlink"); link = kmalloc(len, GFP_NOFS); if (!link) { link = ERR_PTR(-ENOMEM); } else if (befs_read_lsymlink(sb, data, link, len) != len) { kfree(link); befs_error(sb, "Failed to read entire long symlink"); link = ERR_PTR(-EIO); } else { link[len - 1] = '\0'; } } } else { link = befs_ino->i_data.symlink; } nd_set_link(nd, link); return NULL; }
136
14,533
0
void CVE_2011_3000_PATCHED_nsHttpHeaderArray::ParseHeaderLine(const char *line, nsHttpAtom *hdr, char **val) { // // BNF from section 4.2 of RFC 2616: // // message-header = field-name ":" [ field-value ] // field-name = token // field-value = *( field-content | LWS ) // field-content = <the OCTETs making up the field-value // and consisting of either *TEXT or combinations // of token, separators, and quoted-string> // // We skip over mal-formed headers in the hope that we'll still be able to // do something useful with the response. char *p = (char *) strchr(line, ':'); if (!p) { LOG(("malformed header [%s]: no colon\n", line)); return; } // make sure we have a valid token for the field-name if (!nsHttp::IsValidToken(line, p)) { LOG(("malformed header [%s]: field-name not a token\n", line)); return; } *p = 0; // null terminate field-name nsHttpAtom atom = nsHttp::ResolveAtom(line); if (!atom) { LOG(("failed to resolve atom [%s]\n", line)); return; } // skip over whitespace p = net_FindCharNotInSet(++p, HTTP_LWS); // trim trailing whitespace - bug 86608 char *p2 = net_RFindCharNotInSet(p, HTTP_LWS); *++p2 = 0; // null terminate header value; if all chars starting at |p| // consisted of LWS, then p2 would have pointed at |p-1|, so // the prefix increment is always valid. // assign return values if (hdr) *hdr = atom; if (val) *val = p; // assign response header SetHeader(atom, nsDependentCString(p, p2 - p), PR_TRUE, PR_FALSE); }
137
45,276
0
PRBool CVE_2011_3003_PATCHED_ZeroDataIfElementArray() { if (mTarget == LOCAL_GL_ELEMENT_ARRAY_BUFFER) { mData = realloc(mData, mByteLength); if (!mData) { mByteLength = 0; return PR_FALSE; } memset(mData, 0, mByteLength); } return PR_TRUE; }
138
37,947
0
static int CVE_2011_3362_PATCHED_decode_residual_block(AVSContext *h, GetBitContext *gb, const struct dec_2dvlc *r, int esc_golomb_order, int qp, uint8_t *dst, int stride) { int i, esc_code, level, mask; unsigned int level_code, run; DCTELEM level_buf[65]; uint8_t run_buf[65]; DCTELEM *block = h->block; for(i=0;i<65;i++) { level_code = get_ue_code(gb,r->golomb_order); if(level_code >= ESCAPE_CODE) { run = ((level_code - ESCAPE_CODE) >> 1) + 1; esc_code = get_ue_code(gb,esc_golomb_order); level = esc_code + (run > r->max_run ? 1 : r->level_add[run]); while(level > r->inc_limit) r++; mask = -(level_code & 1); level = (level^mask) - mask; } else { level = r->rltab[level_code][0]; if(!level) //end of block signal break; run = r->rltab[level_code][1]; r += r->rltab[level_code][2]; } level_buf[i] = level; run_buf[i] = run; } if(dequant(h,level_buf, run_buf, block, ff_cavs_dequant_mul[qp], ff_cavs_dequant_shift[qp], i)) return -1; h->cdsp.cavs_idct8_add(dst,block,stride); h->s.dsp.clear_block(block); return 0; }
139
53,653
0
static void CVE_2011_3637_PATCHED_m_stop(struct seq_file *m, void *v) { struct proc_maps_private *priv = m->private; struct vm_area_struct *vma = v; if (!IS_ERR(vma)) vma_stop(priv, vma); if (priv->task) put_task_struct(priv->task); }
140
55,846
0
void CVE_2011_3658_PATCHED_nsSVGValue::NotifyObservers(SVGObserverNotifyFunction f, modificationType aModType) { // Since notification can cause untold changes to the observers list, copy it // first before iterating. nsAutoTArray<nsWeakPtr, 1> observersCopy(mObservers); PRInt32 count = observersCopy.Length(); for (PRInt32 i = count - 1; i >= 0; i--) { nsIWeakReference* wr = observersCopy.ElementAt(i); nsCOMPtr<nsISVGValueObserver> observer = do_QueryReferent(wr); if (observer) (static_cast<nsISVGValueObserver*>(observer)->*f)(this, aModType); } }
141
182,834
0
static inline void CVE_2011_4324_PATCHED_encode_openhdr(struct xdr_stream *xdr, const struct nfs_openargs *arg) { __be32 *p; /* * opcode 4, seqid 4, share_access 4, share_deny 4, clientid 8, ownerlen 4, * owner 4 = 32 */ RESERVE_SPACE(8); WRITE32(OP_OPEN); WRITE32(arg->seqid->sequence->counter); encode_share_access(xdr, arg->fmode); RESERVE_SPACE(28); WRITE64(arg->clientid); WRITE32(16); WRITEMEM("open id:", 8); WRITE64(arg->id); }
142
111,307
0
nsresult CVE_2012_0451_PATCHED_nsDocument::InitCSP() { if (CSPService::sCSPEnabled) { nsAutoString cspHeaderValue; nsAutoString cspROHeaderValue; this->GetHeaderData(nsGkAtoms::headerCSP, cspHeaderValue); this->GetHeaderData(nsGkAtoms::headerCSPReportOnly, cspROHeaderValue); bool system = false; nsIScriptSecurityManager *ssm = nsContentUtils::GetSecurityManager(); if (NS_SUCCEEDED(ssm->IsSystemPrincipal(NodePrincipal(), &system)) && system) { // only makes sense to register new CSP if this document is not priviliged return NS_OK; } if (cspHeaderValue.IsEmpty() && cspROHeaderValue.IsEmpty()) { // no CSP header present, stop processing return NS_OK; } #ifdef PR_LOGGING PR_LOG(gCspPRLog, PR_LOG_DEBUG, ("CSP header specified for document %p", this)); #endif nsresult rv; nsCOMPtr<nsIContentSecurityPolicy> mCSP; mCSP = do_CreateInstance("@mozilla.org/contentsecuritypolicy;1", &rv); if (NS_FAILED(rv)) { #ifdef PR_LOGGING PR_LOG(gCspPRLog, PR_LOG_DEBUG, ("Failed to create CSP object: %x", rv)); #endif return rv; } // Store the request context for violation reports nsCOMPtr<nsIHttpChannel> httpChannel = do_QueryInterface(mChannel); mCSP->ScanRequestData(httpChannel); // Start parsing the policy nsCOMPtr<nsIURI> chanURI; mChannel->GetURI(getter_AddRefs(chanURI)); #ifdef PR_LOGGING PR_LOG(gCspPRLog, PR_LOG_DEBUG, ("CSP Loaded")); #endif // ReportOnly mode is enabled *only* if there are no regular-strength CSP // headers present. If there are, then we ignore the ReportOnly mode and // toss a warning into the error console, proceeding with enforcing the // regular-strength CSP. if (cspHeaderValue.IsEmpty()) { mCSP->SetReportOnlyMode(true); // Need to tokenize the header value since multiple headers could be // concatenated into one comma-separated list of policies. 
// See RFC2616 section 4.2 (last paragraph) nsCharSeparatedTokenizer tokenizer(cspROHeaderValue, ','); while (tokenizer.hasMoreTokens()) { const nsSubstring& policy = tokenizer.nextToken(); mCSP->RefinePolicy(policy, chanURI); #ifdef PR_LOGGING { PR_LOG(gCspPRLog, PR_LOG_DEBUG, ("CSP (report only) refined with policy: \"%s\"", NS_ConvertUTF16toUTF8(policy).get())); } #endif } } else { //XXX(sstamm): maybe we should post a warning when both read only and regular // CSP headers are present. // Need to tokenize the header value since multiple headers could be // concatenated into one comma-separated list of policies. // See RFC2616 section 4.2 (last paragraph) nsCharSeparatedTokenizer tokenizer(cspHeaderValue, ','); while (tokenizer.hasMoreTokens()) { const nsSubstring& policy = tokenizer.nextToken(); mCSP->RefinePolicy(policy, chanURI); #ifdef PR_LOGGING { PR_LOG(gCspPRLog, PR_LOG_DEBUG, ("CSP refined with policy: \"%s\"", NS_ConvertUTF16toUTF8(policy).get())); } #endif } } // Check for frame-ancestor violation nsCOMPtr<nsIDocShell> docShell = do_QueryReferent(mDocumentContainer); if (docShell) { bool safeAncestry = false; // PermitsAncestry sends violation reports when necessary rv = mCSP->PermitsAncestry(docShell, &safeAncestry); NS_ENSURE_SUCCESS(rv, rv); if (!safeAncestry) { #ifdef PR_LOGGING PR_LOG(gCspPRLog, PR_LOG_DEBUG, ("CSP doesn't like frame's ancestry, not loading.")); #endif // stop! ERROR page! mChannel->Cancel(NS_ERROR_CSP_FRAME_ANCESTOR_VIOLATION); } } //Copy into principal nsIPrincipal* principal = GetPrincipal(); if (principal) { principal->SetCsp(mCSP); #ifdef PR_LOGGING PR_LOG(gCspPRLog, PR_LOG_DEBUG, ("Inserted CSP into principal %p", principal)); } else { PR_LOG(gCspPRLog, PR_LOG_DEBUG, ("Couldn't copy CSP into absent principal %p", principal)); #endif } } #ifdef PR_LOGGING else { //CSP was not enabled! PR_LOG(gCspPRLog, PR_LOG_DEBUG, ("CSP is disabled, skipping CSP init for document %p", this)); } #endif return NS_OK; }
143
106,665
0
NS_IMETHODIMP CVE_2012_0477_PATCHED_nsISO2022CNToUnicode::Convert(const char * aSrc, PRInt32 * aSrcLen, PRUnichar * aDest, PRInt32 * aDestLen) { const unsigned char * srcEnd = (unsigned char *)aSrc + *aSrcLen; const unsigned char * src = (unsigned char *) aSrc; PRUnichar* destEnd = aDest + *aDestLen; PRUnichar* dest = aDest; nsresult rv; PRInt32 aLen; while ((src < srcEnd)) { switch (mState) { case eState_ASCII: if(ESC == *src) { mState = eState_ESC; } else { if (CHECK_OVERRUN(dest, destEnd, 1)) goto error1; *dest++ = (0x80 & *src) ? 0xFFFD : (PRUnichar) *src; mState = eState_ASCII; } break; case eState_ESC: // ESC if('$' == *src) { mState = eState_ESC_24; } else { if (CHECK_OVERRUN(dest, destEnd, 2)) goto error1; *dest++ = (PRUnichar) ESC; *dest++ = (0x80 & *src) ? 0xFFFD : (PRUnichar) *src; mState = eState_ASCII; } break; case eState_ESC_24: // ESC $ if(')' == *src) { mState = eState_ESC_24_29; } else if('*' == *src) { mState = eState_ESC_24_2A; } else if('+' == *src) { mState = eState_ESC_24_2B; } else { if (CHECK_OVERRUN(dest, destEnd, 3)) goto error1; *dest++ = (PRUnichar) ESC; *dest++ = (PRUnichar) '$'; *dest++ = (0x80 & *src) ? 0xFFFD : (PRUnichar) *src; mState = eState_ASCII; } break; case eState_ESC_24_29: // ESC $ ) if('A' == *src) { mState = eState_ESC_24_29_A; } else if('G' == *src) { mState = eState_ESC_24_29_G; } else { if (CHECK_OVERRUN(dest, destEnd, 4)) goto error1; *dest++ = (PRUnichar) ESC; *dest++ = (PRUnichar) '$'; *dest++ = (PRUnichar) ')'; *dest++ = (0x80 & *src) ? 0xFFFD : (PRUnichar) *src; mState = eState_ASCII; } break; case eState_ESC_24_29_A: // ESC $ ) A if(SO == *src) { mState = eState_GB2312_1980; mRunLength = 0; } else { if (CHECK_OVERRUN(dest, destEnd, 5)) goto error1; *dest++ = (PRUnichar) ESC; *dest++ = (PRUnichar) '$'; *dest++ = (PRUnichar) ')'; *dest++ = (PRUnichar) 'A'; *dest++ = (0x80 & *src) ? 
0xFFFD : (PRUnichar) *src; mState = eState_ASCII; } break; case eState_GB2312_1980: // ESC $ ) A SO if(SI == *src) { // Shift-In (SI) mState = eState_ESC_24_29_A_SO_SI; if (mRunLength == 0) { if (CHECK_OVERRUN(dest, destEnd, 1)) goto error1; *dest++ = 0xFFFD; } mRunLength = 0; } else if(ESC == *src) { mState = eState_ESC; } else { if(0x20 < *src && *src < 0x7f) { mData = *src; mState = eState_GB2312_1980_2ndbyte; } else { if (CHECK_OVERRUN(dest, destEnd, 1)) goto error1; *dest++ = (0x80 & *src) ? 0xFFFD : (PRUnichar) *src; } } break; case eState_GB2312_1980_2ndbyte: // ESC $ ) A SO if(0x20 < *src && *src < 0x7f) { unsigned char gb[2]; PRInt32 gbLen = 2; gb[0] = mData | 0x80; gb[1] = *src | 0x80; aLen = destEnd - dest; rv = GB2312_To_Unicode(gb, gbLen, dest, &aLen); ++mRunLength; if(rv == NS_OK_UDEC_MOREOUTPUT) { goto error1; } else if(NS_FAILED(rv)) { goto error2; } dest += aLen; } else { if (CHECK_OVERRUN(dest, destEnd, 2)) goto error1; *dest++ = (PRUnichar) mData; *dest++ = (0x80 & *src) ? 0xFFFD : (PRUnichar) *src; } mState = eState_GB2312_1980; break; case eState_ESC_24_29_A_SO_SI: // ESC $ ) A SO SI if(SO == *src) { mState = eState_GB2312_1980; mRunLength = 0; } else if(ESC == *src) { mState = eState_ESC; } else { if (CHECK_OVERRUN(dest, destEnd, 1)) goto error1; *dest++ = (0x80 & *src) ? 0xFFFD : (PRUnichar) *src; mState = eState_ESC_24_29_A_SO_SI; } break; case eState_ESC_24_29_G: // ESC $ ) G if(SO == *src) { mState = eState_CNS11643_1; mRunLength = 0; } else { if (CHECK_OVERRUN(dest, destEnd, 5)) goto error1; *dest++ = (PRUnichar) ESC; *dest++ = (PRUnichar) '$'; *dest++ = (PRUnichar) ')'; *dest++ = (PRUnichar) 'G'; *dest++ = (0x80 & *src) ? 
0xFFFD : (PRUnichar) *src; mState = eState_ASCII; } break; case eState_CNS11643_1: // ESC $ ) G SO if(SI == *src) { // Shift-In (SI) mState = eState_ESC_24_29_G_SO_SI; if (mRunLength == 0) { if (CHECK_OVERRUN(dest, destEnd, 1)) goto error1; *dest++ = 0xFFFD; } mRunLength = 0; } else if(ESC == *src) { mState = eState_ESC; } else { if(0x20 < *src && *src < 0x7f) { mData = *src; mState = eState_CNS11643_1_2ndbyte; } else { if (CHECK_OVERRUN(dest, destEnd, 1)) goto error1; *dest++ = (0x80 & *src) ? 0xFFFD : (PRUnichar) *src; } } break; case eState_CNS11643_1_2ndbyte: // ESC $ ) G SO if(0x20 < *src && *src < 0x7f) { unsigned char cns[4]; PRInt32 cnsLen = 2; cns[0] = mData | 0x80; cns[1] = *src | 0x80; aLen = destEnd - dest; rv = EUCTW_To_Unicode(cns, cnsLen, dest, &aLen); ++mRunLength; if(rv == NS_OK_UDEC_MOREOUTPUT) { goto error1; } else if(NS_FAILED(rv)) { goto error2; } dest += aLen; } else { if (CHECK_OVERRUN(dest, destEnd, 2)) goto error1; *dest++ = (PRUnichar) mData; *dest++ = (0x80 & *src) ? 0xFFFD : (PRUnichar) *src; } mState = eState_CNS11643_1; break; case eState_ESC_24_29_G_SO_SI: // ESC $ ) G SO SI if(SO == *src) { mState = eState_CNS11643_1; mRunLength = 0; } else if(ESC == *src) { mState = eState_ESC; } else { if (CHECK_OVERRUN(dest, destEnd, 1)) goto error1; *dest++ = (0x80 & *src) ? 0xFFFD : (PRUnichar) *src; mState = eState_ESC_24_29_G_SO_SI; } break; case eState_ESC_24_2A: // ESC $ * if('H' == *src) { mState = eState_ESC_24_2A_H; } else { if (CHECK_OVERRUN(dest, destEnd, 4)) goto error1; *dest++ = (PRUnichar) ESC; *dest++ = (PRUnichar) '$'; *dest++ = (PRUnichar) '*'; *dest++ = (0x80 & *src) ? 0xFFFD : (PRUnichar) *src; mState = eState_ASCII; } break; case eState_ESC_24_2A_H: // ESC $ * H if(ESC == *src) { mState = eState_ESC_24_2A_H_ESC; } else { if (CHECK_OVERRUN(dest, destEnd, 5)) goto error1; *dest++ = (PRUnichar) ESC; *dest++ = (PRUnichar) '$'; *dest++ = (PRUnichar) '*'; *dest++ = (PRUnichar) 'H'; *dest++ = (0x80 & *src) ? 
0xFFFD : (PRUnichar) *src; mState = eState_ASCII; } break; case eState_ESC_24_2A_H_ESC: // ESC $ * H ESC if(SS2 == *src) { mState = eState_CNS11643_2; mRunLength = 0; } else if('$' == *src) { mState = eState_ESC_24; } else { if (CHECK_OVERRUN(dest, destEnd, 6)) goto error1; *dest++ = (PRUnichar) ESC; *dest++ = (PRUnichar) '$'; *dest++ = (PRUnichar) '*'; *dest++ = (PRUnichar) 'H'; *dest++ = (PRUnichar) ESC; *dest++ = (0x80 & *src) ? 0xFFFD : (PRUnichar) *src; mState = eState_ASCII; } break; case eState_CNS11643_2: // ESC $ * H ESC SS2 if(SI == *src) { // Shift-In (SI) mState = eState_ESC_24_2A_H_ESC_SS2_SI; if (mRunLength == 0) { if (CHECK_OVERRUN(dest, destEnd, 1)) goto error1; *dest++ = 0xFFFD; } mRunLength = 0; } else if(ESC == *src) { mState = eState_ESC_24_2A_H_ESC; } else { if(0x20 < *src && *src < 0x7f) { mData = *src; mState = eState_CNS11643_2_2ndbyte; } else { if (CHECK_OVERRUN(dest, destEnd, 1)) goto error1; *dest++ = (0x80 & *src) ? 0xFFFD : (PRUnichar) *src; } } break; case eState_CNS11643_2_2ndbyte: // ESC $ * H ESC SS2 if(0x20 < *src && *src < 0x7f) { unsigned char cns[4]; PRInt32 cnsLen = 4; cns[0] = (unsigned char) MBYTE; cns[1] = (unsigned char) (PMASK + 2); cns[2] = mData | 0x80; cns[3] = *src | 0x80; aLen = destEnd - dest; rv = EUCTW_To_Unicode(cns, cnsLen, dest, &aLen); ++mRunLength; if(rv == NS_OK_UDEC_MOREOUTPUT) { goto error1; } else if(NS_FAILED(rv)) { goto error2; } dest += aLen; } else { if (CHECK_OVERRUN(dest, destEnd, 2)) goto error1; *dest++ = (PRUnichar) mData; *dest++ = (0x80 & *src) ? 0xFFFD : (PRUnichar) *src; } mState = eState_CNS11643_2; break; case eState_ESC_24_2A_H_ESC_SS2_SI: // ESC $ * H ESC SS2 SI if(ESC == *src) { mState = eState_ESC_24_2A_H_ESC_SS2_SI_ESC; } else { if (CHECK_OVERRUN(dest, destEnd, 1)) goto error1; *dest++ = (0x80 & *src) ? 
0xFFFD : (PRUnichar) *src; mState = eState_ESC_24_2A_H_ESC_SS2_SI; } break; case eState_ESC_24_2A_H_ESC_SS2_SI_ESC: // ESC $ * H ESC SS2 SI ESC if(SS2 == *src) { mState = eState_CNS11643_2; mRunLength = 0; } else if('$' == *src) { mState = eState_ESC_24; } else { if (CHECK_OVERRUN(dest, destEnd, 1)) goto error1; *dest++ = (0x80 & *src) ? 0xFFFD : (PRUnichar) *src; mState = eState_ESC_24_2A_H_ESC_SS2_SI; } break; case eState_ESC_24_2B: // ESC $ + if('I' <= *src && *src <= 'M') { mState = eState_ESC_24_2B_I; mPlaneID = *src - 'I' + 3; } else { if (CHECK_OVERRUN(dest, destEnd, 4)) goto error1; *dest++ = (PRUnichar) ESC; *dest++ = (PRUnichar) '$'; *dest++ = (PRUnichar) '+'; *dest++ = (0x80 & *src) ? 0xFFFD : (PRUnichar) *src; mState = eState_ASCII; } break; case eState_ESC_24_2B_I: // ESC $ + I if(ESC == *src) { mState = eState_ESC_24_2B_I_ESC; } else { if (CHECK_OVERRUN(dest, destEnd, 5)) goto error1; *dest++ = (PRUnichar) ESC; *dest++ = (PRUnichar) '$'; *dest++ = (PRUnichar) '+'; *dest++ = (PRUnichar) 'I' + mPlaneID - 3; *dest++ = (0x80 & *src) ? 0xFFFD : (PRUnichar) *src; mState = eState_ASCII; } break; case eState_ESC_24_2B_I_ESC: // ESC $ + I ESC if(SS3 == *src) { mState = eState_CNS11643_3; mRunLength = 0; } else if('$' == *src) { mState = eState_ESC_24; } else { if (CHECK_OVERRUN(dest, destEnd, 6)) goto error1; *dest++ = (PRUnichar) ESC; *dest++ = (PRUnichar) '$'; *dest++ = (PRUnichar) '+'; *dest++ = (PRUnichar) 'I' + mPlaneID - 3; *dest++ = (PRUnichar) ESC; *dest++ = (0x80 & *src) ? 
0xFFFD : (PRUnichar) *src; mState = eState_ASCII; } break; case eState_CNS11643_3: // ESC $ + I ESC SS3 if(SI == *src) { // Shift-In (SI) mState = eState_ESC_24_2B_I_ESC_SS3_SI; if (mRunLength == 0) { if (CHECK_OVERRUN(dest, destEnd, 1)) goto error1; *dest++ = 0xFFFD; } mRunLength = 0; } else if(ESC == *src) { mState = eState_ESC_24_2B_I_ESC; } else { if(0x20 < *src && *src < 0x7f) { mData = *src; mState = eState_CNS11643_3_2ndbyte; } else { if (CHECK_OVERRUN(dest, destEnd, 1)) goto error1; *dest++ = (0x80 & *src) ? 0xFFFD : (PRUnichar) *src; } } break; case eState_CNS11643_3_2ndbyte: // ESC $ + I ESC SS3 if(0x20 < *src && *src < 0x7f) { unsigned char cns[4]; PRInt32 cnsLen = 4; cns[0] = (unsigned char) MBYTE; cns[1] = (unsigned char) (PMASK + mPlaneID); cns[2] = mData | 0x80; cns[3] = *src | 0x80; aLen = destEnd - dest; rv = EUCTW_To_Unicode(cns, cnsLen, dest, &aLen); ++mRunLength; if(rv == NS_OK_UDEC_MOREOUTPUT) { goto error1; } else if(NS_FAILED(rv)) { goto error2; } dest += aLen; } else { if (CHECK_OVERRUN(dest, destEnd, 2)) goto error1; *dest++ = (PRUnichar) mData; *dest++ = (0x80 & *src) ? 0xFFFD : (PRUnichar) *src; } mState = eState_CNS11643_3; break; case eState_ESC_24_2B_I_ESC_SS3_SI: // ESC $ + I ESC SS3 SI if(ESC == *src) { mState = eState_ESC_24_2B_I_ESC_SS3_SI_ESC; } else { if (CHECK_OVERRUN(dest, destEnd, 1)) goto error1; *dest++ = (0x80 & *src) ? 0xFFFD : (PRUnichar) *src; mState = eState_ESC_24_2B_I_ESC_SS3_SI; } break; case eState_ESC_24_2B_I_ESC_SS3_SI_ESC: // ESC $ + I ESC SS3 SI ESC if(SS3 == *src) { mState = eState_CNS11643_3; mRunLength = 0; } else if('$' == *src) { mState = eState_ESC_24; } else { if (CHECK_OVERRUN(dest, destEnd, 1)) goto error1; *dest++ = (0x80 & *src) ? 
0xFFFD : (PRUnichar) *src; mState = eState_ESC_24_2B_I_ESC_SS3_SI; } break; case eState_ERROR: NS_NOTREACHED("unhandled case"); goto error2; } // switch src++; } *aDestLen = dest- aDest; return NS_OK; error1: *aDestLen = dest-aDest; *aSrcLen = src - (const unsigned char*)aSrc; return NS_OK_UDEC_MOREOUTPUT; error2: *aSrcLen = src - (const unsigned char*)aSrc; *aDestLen = dest-aDest; mState = eState_ASCII; return NS_ERROR_UNEXPECTED; }
144
104,242
0
NS_IMETHODIMP CVE_2012_0477_PATCHED_nsISO2022JPToUnicodeV2::Convert( const char * aSrc, PRInt32 * aSrcLen, PRUnichar * aDest, PRInt32 * aDestLen) { static const PRUint16 fbIdx[128] = { /* 0x8X */ 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, /* 0x9X */ 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, /* 0xAX */ 0xFFFD, 0, 94, 94* 2, 94* 3, 94* 4, 94* 5, 94* 6, 94* 7, 94* 8 , 94* 9, 94*10, 94*11, 94*12, 94*13, 94*14, /* 0xBX */ 94*15, 94*16, 94*17, 94*18, 94*19, 94*20, 94*21, 94*22, 94*23, 94*24, 94*25, 94*26, 94*27, 94*28, 94*29, 94*30, /* 0xCX */ 94*31, 94*32, 94*33, 94*34, 94*35, 94*36, 94*37, 94*38, 94*39, 94*40, 94*41, 94*42, 94*43, 94*44, 94*45, 94*46, /* 0xDX */ 94*47, 94*48, 94*49, 94*50, 94*51, 94*52, 94*53, 94*54, 94*55, 94*56, 94*57, 94*58, 94*59, 94*60, 94*61, 94*62, /* 0xEX */ 94*63, 94*64, 94*65, 94*66, 94*67, 94*68, 94*69, 94*70, 94*71, 94*72, 94*73, 94*74, 94*75, 94*76, 94*77, 94*78, /* 0xFX */ 94*79, 94*80, 94*81, 94*82, 94*83, 94*84, 94*85, 94*86, 94*87, 94*88, 94*89, 94*90, 94*91, 94*92, 94*93, 0xFFFD, }; static const PRUint8 sbIdx[256] = { /* 0x0X */ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, /* 0x1X */ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, /* 0x2X */ 0xFF, 0, 1, 2, 3, 4, 5, 6, 7, 8 , 9, 10, 11, 12, 13, 14, /* 0x3X */ 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, /* 0x4X */ 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, /* 0x5X */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, /* 0x6X */ 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, /* 0x7X */ 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 0xFF, /* 0x8X */ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 
0xFF, /* 0x9X */ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, /* 0xAX */ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, /* 0xBX */ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, /* 0xCX */ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, /* 0xDX */ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, /* 0xEX */ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, /* 0xFX */ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, }; const unsigned char* srcEnd = (unsigned char*)aSrc + *aSrcLen; const unsigned char* src =(unsigned char*) aSrc; PRUnichar* destEnd = aDest + *aDestLen; PRUnichar* dest = aDest; while((src < srcEnd)) { switch(mState) { case mState_ASCII: if(0x1b == *src) { mLastLegalState = mState; mState = mState_ESC; } else if(*src & 0x80) { goto error2; } else { if (CHECK_OVERRUN(dest, destEnd, 1)) goto error1; *dest++ = (PRUnichar) *src; } break; case mState_ESC: if( '(' == *src) { mState = mState_ESC_28; } else if ('$' == *src) { mState = mState_ESC_24; } else if ('.' 
== *src) { // for ISO-2022-JP-2 mState = mState_ESC_2e; } else if ('N' == *src) { // for ISO-2022-JP-2 mState = mState_ESC_4e; } else { if (CHECK_OVERRUN(dest, destEnd, 2)) goto error1; *dest++ = (PRUnichar) 0x1b; if(0x80 & *src) goto error2; *dest++ = (PRUnichar) *src; mState = mLastLegalState; } break; case mState_ESC_28: // ESC ( if( 'B' == *src) { mState = mState_ASCII; if (mRunLength == 0) { if (CHECK_OVERRUN(dest, destEnd, 1)) goto error1; *dest++ = 0xFFFD; } mRunLength = 0; } else if ('J' == *src) { mState = mState_JISX0201_1976Roman; if (mRunLength == 0 && mLastLegalState != mState_ASCII) { if (CHECK_OVERRUN(dest, destEnd, 1)) goto error1; if (mErrBehavior == kOnError_Signal) goto error2; *dest++ = 0xFFFD; } mRunLength = 0; } else if ('I' == *src) { mState = mState_JISX0201_1976Kana; mRunLength = 0; } else { if (CHECK_OVERRUN(dest, destEnd, 3)) goto error1; *dest++ = (PRUnichar) 0x1b; *dest++ = (PRUnichar) '('; if(0x80 & *src) goto error2; *dest++ = (PRUnichar) *src; mState = mLastLegalState; } break; case mState_ESC_24: // ESC $ if( '@' == *src) { mState = mState_JISX0208_1978; mRunLength = 0; } else if ('A' == *src) { mState = mState_GB2312_1980; mRunLength = 0; } else if ('B' == *src) { mState = mState_JISX0208_1983; mRunLength = 0; } else if ('(' == *src) { mState = mState_ESC_24_28; } else { if (CHECK_OVERRUN(dest, destEnd, 3)) goto error1; *dest++ = (PRUnichar) 0x1b; *dest++ = (PRUnichar) '$'; if(0x80 & *src) goto error2; *dest++ = (PRUnichar) *src; mState = mLastLegalState; } break; case mState_ESC_24_28: // ESC $ ( if( 'C' == *src) { mState = mState_KSC5601_1987; mRunLength = 0; } else if ('D' == *src) { mState = mState_JISX0212_1990; mRunLength = 0; } else { if (CHECK_OVERRUN(dest, destEnd, 4)) goto error1; *dest++ = (PRUnichar) 0x1b; *dest++ = (PRUnichar) '$'; *dest++ = (PRUnichar) '('; if(0x80 & *src) goto error2; *dest++ = (PRUnichar) *src; mState = mLastLegalState; } break; case mState_JISX0201_1976Roman: if(0x1b == *src) { mLastLegalState = 
mState; mState = mState_ESC; } else if(*src & 0x80) { goto error2; } else { // XXX We need to decide how to handle \ and ~ here // we may need a if statement here for '\' and '~' // to map them to Yen and Overbar if (CHECK_OVERRUN(dest, destEnd, 1)) goto error1; *dest++ = (PRUnichar) *src; ++mRunLength; } break; case mState_JISX0201_1976Kana: if(0x1b == *src) { mLastLegalState = mState; mState = mState_ESC; } else { if((0x21 <= *src) && (*src <= 0x5F)) { if (CHECK_OVERRUN(dest, destEnd, 1)) goto error1; *dest++ = (0xFF61-0x0021) + *src; ++mRunLength; } else { goto error2; } } break; case mState_JISX0208_1978: if(0x1b == *src) { mLastLegalState = mState; mState = mState_ESC; } else if(*src & 0x80) { mLastLegalState = mState; mState = mState_ERROR; } else { mData = JIS0208_INDEX[*src & 0x7F]; if(0xFFFD == mData) goto error2; mState = mState_JISX0208_1978_2ndbyte; } break; case mState_GB2312_1980: if(0x1b == *src) { mLastLegalState = mState; mState = mState_ESC; } else if(*src & 0x80) { mLastLegalState = mState; mState = mState_ERROR; } else { mData = fbIdx[*src & 0x7F]; if(0xFFFD == mData) goto error2; mState = mState_GB2312_1980_2ndbyte; } break; case mState_JISX0208_1983: if(0x1b == *src) { mLastLegalState = mState; mState = mState_ESC; } else if(*src & 0x80) { mLastLegalState = mState; mState = mState_ERROR; } else { mData = JIS0208_INDEX[*src & 0x7F]; if(0xFFFD == mData) goto error2; mState = mState_JISX0208_1983_2ndbyte; } break; case mState_KSC5601_1987: if(0x1b == *src) { mLastLegalState = mState; mState = mState_ESC; } else if(*src & 0x80) { mLastLegalState = mState; mState = mState_ERROR; } else { mData = fbIdx[*src & 0x7F]; if(0xFFFD == mData) goto error2; mState = mState_KSC5601_1987_2ndbyte; } break; case mState_JISX0212_1990: if(0x1b == *src) { mLastLegalState = mState; mState = mState_ESC; } else if(*src & 0x80) { mLastLegalState = mState; mState = mState_ERROR; } else { mData = JIS0212_INDEX[*src & 0x7F]; if(0xFFFD == mData) goto error2; mState = 
mState_JISX0212_1990_2ndbyte; } break; case mState_JISX0208_1978_2ndbyte: { PRUint8 off = sbIdx[*src]; if(0xFF == off) { goto error2; } else { // XXX We need to map from JIS X 0208 1983 to 1987 // in the next line before pass to *dest++ if (CHECK_OVERRUN(dest, destEnd, 1)) goto error1; *dest++ = gJapaneseMap[mData+off]; ++mRunLength; } mState = mState_JISX0208_1978; } break; case mState_GB2312_1980_2ndbyte: { PRUint8 off = sbIdx[*src]; if(0xFF == off) { goto error2; } else { if (!mGB2312Decoder) { // creating a delegate converter (GB2312) nsresult rv; nsCOMPtr<nsICharsetConverterManager> ccm = do_GetService(kCharsetConverterManagerCID, &rv); if (NS_SUCCEEDED(rv)) { rv = ccm->GetUnicodeDecoderRaw("GB2312", &mGB2312Decoder); } } if (!mGB2312Decoder) {// failed creating a delegate converter goto error2; } else { if (CHECK_OVERRUN(dest, destEnd, 1)) goto error1; unsigned char gb[2]; PRUnichar uni; PRInt32 gbLen = 2, uniLen = 1; // ((mData/94)+0x21) is the original 1st byte. // *src is the present 2nd byte. // Put 2 bytes (one character) to gb[] with GB2312 encoding. gb[0] = ((mData / 94) + 0x21) | 0x80; gb[1] = *src | 0x80; // Convert GB2312 to unicode. 
mGB2312Decoder->Convert((const char *)gb, &gbLen, &uni, &uniLen); *dest++ = uni; ++mRunLength; } } mState = mState_GB2312_1980; } break; case mState_JISX0208_1983_2ndbyte: { PRUint8 off = sbIdx[*src]; if(0xFF == off) { goto error2; } else { if (CHECK_OVERRUN(dest, destEnd, 1)) goto error1; *dest++ = gJapaneseMap[mData+off]; ++mRunLength; } mState = mState_JISX0208_1983; } break; case mState_KSC5601_1987_2ndbyte: { PRUint8 off = sbIdx[*src]; if(0xFF == off) { goto error2; } else { if (!mEUCKRDecoder) { // creating a delegate converter (EUC-KR) nsresult rv; nsCOMPtr<nsICharsetConverterManager> ccm = do_GetService(kCharsetConverterManagerCID, &rv); if (NS_SUCCEEDED(rv)) { rv = ccm->GetUnicodeDecoderRaw("EUC-KR", &mEUCKRDecoder); } } if (!mEUCKRDecoder) {// failed creating a delegate converter goto error2; } else { if (CHECK_OVERRUN(dest, destEnd, 1)) goto error1; unsigned char ksc[2]; PRUnichar uni; PRInt32 kscLen = 2, uniLen = 1; // ((mData/94)+0x21) is the original 1st byte. // *src is the present 2nd byte. // Put 2 bytes (one character) to ksc[] with EUC-KR encoding. ksc[0] = ((mData / 94) + 0x21) | 0x80; ksc[1] = *src | 0x80; // Convert EUC-KR to unicode. mEUCKRDecoder->Convert((const char *)ksc, &kscLen, &uni, &uniLen); *dest++ = uni; ++mRunLength; } } mState = mState_KSC5601_1987; } break; case mState_JISX0212_1990_2ndbyte: { PRUint8 off = sbIdx[*src]; if(0xFF == off) { goto error2; } else { if (CHECK_OVERRUN(dest, destEnd, 1)) goto error1; *dest++ = gJapaneseMap[mData+off]; ++mRunLength; } mState = mState_JISX0212_1990; } break; case mState_ESC_2e: // ESC . // "ESC ." will designate 96 character set to G2. 
mState = mLastLegalState; if( 'A' == *src) { G2charset = G2_ISO88591; } else if ('F' == *src) { G2charset = G2_ISO88597; } else { if (CHECK_OVERRUN(dest, destEnd, 3)) goto error1; *dest++ = (PRUnichar) 0x1b; *dest++ = (PRUnichar) '.'; if(0x80 & *src) goto error2; *dest++ = (PRUnichar) *src; } break; case mState_ESC_4e: // ESC N // "ESC N" is the SS2 sequence, that invoke a G2 designated // character set. Since SS2 is effective only for next one // character, mState should be returned to the last status. mState = mLastLegalState; if((0x20 <= *src) && (*src <= 0x7F)) { if (G2_ISO88591 == G2charset) { if (CHECK_OVERRUN(dest, destEnd, 1)) goto error1; *dest++ = *src | 0x80; ++mRunLength; } else if (G2_ISO88597 == G2charset) { if (!mISO88597Decoder) { // creating a delegate converter (ISO-8859-7) nsresult rv; nsCOMPtr<nsICharsetConverterManager> ccm = do_GetService(kCharsetConverterManagerCID, &rv); if (NS_SUCCEEDED(rv)) { rv = ccm->GetUnicodeDecoderRaw("ISO-8859-7", &mISO88597Decoder); } } if (!mISO88597Decoder) {// failed creating a delegate converter goto error2; } else { if (CHECK_OVERRUN(dest, destEnd, 1)) goto error1; // Put one character with ISO-8859-7 encoding. unsigned char gr = *src | 0x80; PRUnichar uni; PRInt32 grLen = 1, uniLen = 1; // Convert ISO-8859-7 to unicode. 
mISO88597Decoder->Convert((const char *)&gr, &grLen, &uni, &uniLen); *dest++ = uni; ++mRunLength; } } else {// G2charset is G2_unknown (not designated yet) goto error2; } } else { if (CHECK_OVERRUN(dest, destEnd, 3)) goto error1; *dest++ = (PRUnichar) 0x1b; *dest++ = (PRUnichar) 'N'; if(0x80 & *src) goto error2; *dest++ = (PRUnichar) *src; } break; case mState_ERROR: mState = mLastLegalState; mRunLength = 0; goto error2; break; } // switch src++; } *aDestLen = dest - aDest; return NS_OK; error1: *aDestLen = dest - aDest; *aSrcLen = src - (const unsigned char*)aSrc; return NS_OK_UDEC_MOREOUTPUT; error2: *aSrcLen = src - (const unsigned char*)aSrc; *aDestLen = dest - aDest; return NS_ERROR_UNEXPECTED; }
145
60,145
0
/*
 * Build the per-thread ELF core-dump notes for one thread.
 *
 * Emits the NT_PRSTATUS note (regset 0, with the general registers copied
 * into prstatus.pr_reg) into t->notes[0], then one note per additional
 * active regset into t->notes[i].  The size of every emitted note is added
 * to *total.
 *
 * Returns 1 on success, 0 if a note buffer allocation fails.
 */
static int CVE_2012_1097_PATCHED_fill_thread_core_info(struct elf_thread_core_info *t,
				       const struct user_regset_view *view,
				       long signr, size_t *total)
{
	unsigned int i;

	/*
	 * NT_PRSTATUS is the one special case, because the regset data
	 * goes into the pr_reg field inside the note contents, rather
	 * than being the whole note contents.  We fill the rest in here.
	 * We assume that regset 0 is NT_PRSTATUS.
	 */
	fill_prstatus(&t->prstatus, t->task, signr);
	/* Result deliberately ignored: a partially-filled pr_reg is still
	 * emitted rather than aborting the dump. */
	(void) view->regsets[0].get(t->task, &view->regsets[0],
				    0, sizeof(t->prstatus.pr_reg),
				    &t->prstatus.pr_reg, NULL);

	fill_note(&t->notes[0], "CORE", NT_PRSTATUS,
		  sizeof(t->prstatus), &t->prstatus);
	*total += notesize(&t->notes[0]);

	do_thread_regset_writeback(t->task, &view->regsets[0]);

	/*
	 * Each other regset might generate a note too.  For each regset
	 * that has no core_note_type or is inactive, we leave t->notes[i]
	 * all zero and we'll know to skip writing it later.
	 */
	for (i = 1; i < view->n; ++i) {
		const struct user_regset *regset = &view->regsets[i];
		do_thread_regset_writeback(t->task, regset);
		if (regset->core_note_type && regset->get &&
		    (!regset->active || regset->active(t->task, regset))) {
			int ret;
			size_t size = regset->n * regset->size;
			void *data = kmalloc(size, GFP_KERNEL);
			if (unlikely(!data))
				return 0;
			ret = regset->get(t->task, regset,
					  0, size, data, NULL);
			if (unlikely(ret))
				/* Copy-out failed: drop the buffer and skip
				 * this note (t->notes[i] stays zeroed). */
				kfree(data);
			else {
				if (regset->core_note_type != NT_PRFPREG)
					fill_note(&t->notes[i], "LINUX",
						  regset->core_note_type,
						  size, data);
				else {
					/* FP registers get the legacy "CORE"
					 * note name, and pr_fpvalid records
					 * that FP state is present. */
					t->prstatus.pr_fpvalid = 1;
					fill_note(&t->notes[i], "CORE",
						  NT_PRFPREG, size, data);
				}
				*total += notesize(&t->notes[i]);
			}
		}
	}

	return 1;
}
146
160,408
0
/*
 * Receive hook for IPv6-in-IPv6 tunnelled packets: look up the SPI
 * registered for the outer source address and hand the packet to
 * xfrm6_rcv_spi().  A positive result from xfrm6_rcv_spi() is reported
 * as 1, anything else as 0.
 */
static int CVE_2012_1583_PATCHED_xfrm6_tunnel_rcv(struct sk_buff *skb)
{
	const struct ipv6hdr *outer_hdr = skb->nh.ipv6h;
	__be32 spi = xfrm6_tunnel_spi_lookup((xfrm_address_t *)&outer_hdr->saddr);
	int rcv_ret = xfrm6_rcv_spi(skb, spi);

	return (rcv_ret > 0) ? 1 : 0;
}
147
88,590
0
NS_IMETHODIMP
CVE_2012_1945_PATCHED_nsLocalFile::IsSymlink(bool *_retval)
{
    // Guard against use of an uninitialized file object.
    CHECK_mWorkingPath();

    NS_ENSURE_ARG(_retval);

    // Only a valid shortcut (.lnk) path can ever count as a symlink here;
    // everything else is immediately reported as "not a symlink".
    if (!IsShortcutPath(mWorkingPath)) {
        *_retval = false;
        return NS_OK;
    }

    // Stat the file so that an unresolvable shortcut surfaces as an error
    // to the caller instead of a bogus answer.
    nsresult statRv = ResolveAndStat();
    if (NS_FAILED(statRv)) {
        return statRv;
    }

    // Deliberately do NOT test mFileInfo64.type for PR_FILE_FILE: lnk files
    // may target either files or directories, and mFileInfo64 describes the
    // *target* of the lnk, not the lnk itself.  Security checks rely on the
    // shortcut being identified as a symlink regardless of its target kind.
    *_retval = true;
    return NS_OK;
}
148
119,789
0
/*
 * Core entry point for loading a URI into this docshell.
 *
 * Performs, in order: argument validation, content-policy vetting, owner
 * (principal) inheritance, unsafe-channel checks, window-target retargeting,
 * short-circuited same-document (anchor/hash) navigation, unload permission
 * and timing notifications, presentation save/restore for session history,
 * and finally the actual network load via DoURILoad().
 *
 * The external contract (parameters, aDocShell/aRequest out-params, error
 * codes) is relied on by many callers; statement order below is significant.
 */
NS_IMETHODIMP
CVE_2012_1955_PATCHED_nsDocShell::InternalLoad(nsIURI * aURI,
                         nsIURI * aReferrer,
                         nsISupports * aOwner,
                         PRUint32 aFlags,
                         const PRUnichar *aWindowTarget,
                         const char* aTypeHint,
                         nsIInputStream * aPostData,
                         nsIInputStream * aHeadersData,
                         PRUint32 aLoadType,
                         nsISHEntry * aSHEntry,
                         bool aFirstParty,
                         nsIDocShell** aDocShell,
                         nsIRequest** aRequest)
{
    nsresult rv = NS_OK;

#ifdef PR_LOGGING
    if (gDocShellLeakLog && PR_LOG_TEST(gDocShellLeakLog, PR_LOG_DEBUG)) {
        nsCAutoString spec;
        if (aURI)
            aURI->GetSpec(spec);
        PR_LogPrint("DOCSHELL %p InternalLoad %s\n", this, spec.get());
    }
#endif

    // Initialize aDocShell/aRequest
    if (aDocShell) {
        *aDocShell = nsnull;
    }
    if (aRequest) {
        *aRequest = nsnull;
    }

    if (!aURI) {
        return NS_ERROR_NULL_POINTER;
    }

    NS_ENSURE_TRUE(IsValidLoadType(aLoadType), NS_ERROR_INVALID_ARG);

    NS_ENSURE_TRUE(!mIsBeingDestroyed, NS_ERROR_NOT_AVAILABLE);

    // wyciwyg urls can only be loaded through history. Any normal load of
    // wyciwyg through docshell is illegal. Disallow such loads.
    if (aLoadType & LOAD_CMD_NORMAL) {
        bool isWyciwyg = false;
        rv = aURI->SchemeIs("wyciwyg", &isWyciwyg);
        if ((isWyciwyg && NS_SUCCEEDED(rv)) || NS_FAILED(rv))
            return NS_ERROR_FAILURE;
    }

    bool bIsJavascript = false;
    if (NS_FAILED(aURI->SchemeIs("javascript", &bIsJavascript))) {
        bIsJavascript = false;
    }

    //
    // First, notify any nsIContentPolicy listeners about the document load.
    // Only abort the load if a content policy listener explicitly vetos it!
    //
    nsCOMPtr<nsIDOMElement> requestingElement;
    // Use nsPIDOMWindow since we _want_ to cross the chrome boundary if needed
    nsCOMPtr<nsPIDOMWindow> privateWin(do_QueryInterface(mScriptGlobal));
    if (privateWin)
        requestingElement = privateWin->GetFrameElementInternal();

    PRInt16 shouldLoad = nsIContentPolicy::ACCEPT;
    PRUint32 contentType;
    if (IsFrame()) {
        NS_ASSERTION(requestingElement, "A frame but no DOM element!?");
        contentType = nsIContentPolicy::TYPE_SUBDOCUMENT;
    } else {
        contentType = nsIContentPolicy::TYPE_DOCUMENT;
    }

    nsISupports* context = requestingElement;
    if (!context) {
        context = mScriptGlobal;
    }

    // XXXbz would be nice to know the loading principal here... but we don't
    nsCOMPtr<nsIPrincipal> loadingPrincipal;
    if (aReferrer) {
        nsCOMPtr<nsIScriptSecurityManager> secMan =
            do_GetService(NS_SCRIPTSECURITYMANAGER_CONTRACTID, &rv);
        NS_ENSURE_SUCCESS(rv, rv);
        rv = secMan->GetCodebasePrincipal(aReferrer,
                                          getter_AddRefs(loadingPrincipal));
    }

    rv = NS_CheckContentLoadPolicy(contentType,
                                   aURI,
                                   loadingPrincipal,
                                   context,
                                   EmptyCString(), //mime guess
                                   nsnull,         //extra
                                   &shouldLoad);

    if (NS_FAILED(rv) || NS_CP_REJECTED(shouldLoad)) {
        if (NS_SUCCEEDED(rv) && shouldLoad == nsIContentPolicy::REJECT_TYPE) {
            return NS_ERROR_CONTENT_BLOCKED_SHOW_ALT;
        }
        return NS_ERROR_CONTENT_BLOCKED;
    }

    nsCOMPtr<nsISupports> owner(aOwner);
    //
    // Get an owner from the current document if necessary.  Note that we only
    // do this for URIs that inherit a security context and local file URIs;
    // in particular we do NOT do this for about:blank.  This way, random
    // about:blank loads that have no owner (which basically means they were
    // done by someone from chrome manually messing with our nsIWebNavigation
    // or by C++ setting document.location) don't get a funky principal.  If
    // callers want something interesting to happen with the about:blank
    // principal in this case, they should pass an owner in.
    //
    {
        bool inherits;
        // One more twist: Don't inherit the owner for external loads.
        if (aLoadType != LOAD_NORMAL_EXTERNAL && !owner &&
            (aFlags & INTERNAL_LOAD_FLAGS_INHERIT_OWNER) &&
            NS_SUCCEEDED(nsContentUtils::URIInheritsSecurityContext(aURI,
                                                                    &inherits)) &&
            inherits) {
            owner = GetInheritedPrincipal(true);
        }
    }

    // Don't allow loads that would inherit our security context
    // if this document came from an unsafe channel.
    {
        bool willInherit;
        // This condition needs to match the one in
        // nsContentUtils::SetUpChannelOwner.
        // Except we reverse the rv check to be safe in case
        // nsContentUtils::URIInheritsSecurityContext fails here and
        // succeeds there.
        rv = nsContentUtils::URIInheritsSecurityContext(aURI, &willInherit);
        if (NS_FAILED(rv) || willInherit || NS_IsAboutBlank(aURI)) {
            // Walk up the same-type docshell chain; any unsafe channel in an
            // ancestor blocks the load.
            nsCOMPtr<nsIDocShellTreeItem> treeItem = this;
            do {
                nsCOMPtr<nsIDocShell> itemDocShell =
                    do_QueryInterface(treeItem);
                bool isUnsafe;
                if (itemDocShell &&
                    NS_SUCCEEDED(itemDocShell->GetChannelIsUnsafe(&isUnsafe)) &&
                    isUnsafe) {
                    return NS_ERROR_DOM_SECURITY_ERR;
                }

                nsCOMPtr<nsIDocShellTreeItem> parent;
                treeItem->GetSameTypeParent(getter_AddRefs(parent));
                parent.swap(treeItem);
            } while (treeItem);
        }
    }

    //
    // Resolve the window target before going any further...
    // If the load has been targeted to another DocShell, then transfer the
    // load to it...
    //
    if (aWindowTarget && *aWindowTarget) {
        // We've already done our owner-inheriting.  Mask out that bit, so we
        // don't try inheriting an owner from the target window if we came up
        // with a null owner above.
        aFlags = aFlags & ~INTERNAL_LOAD_FLAGS_INHERIT_OWNER;

        // Locate the target DocShell.
        // This may involve creating a new toplevel window - if necessary.
        //
        nsCOMPtr<nsIDocShellTreeItem> targetItem;
        FindItemWithName(aWindowTarget, nsnull, this,
                         getter_AddRefs(targetItem));

        nsCOMPtr<nsIDocShell> targetDocShell = do_QueryInterface(targetItem);

        bool isNewWindow = false;
        if (!targetDocShell) {
            nsCOMPtr<nsIDOMWindow> win =
                do_GetInterface(GetAsSupports(this));
            NS_ENSURE_TRUE(win, NS_ERROR_NOT_AVAILABLE);

            nsDependentString name(aWindowTarget);
            nsCOMPtr<nsIDOMWindow> newWin;
            rv = win->Open(EmptyString(), // URL to load
                           name,          // window name
                           EmptyString(), // Features
                           getter_AddRefs(newWin));

            // In some cases the Open call doesn't actually result in a new
            // window being opened.  We can detect these cases by examining the
            // document in |newWin|, if any.
            nsCOMPtr<nsPIDOMWindow> piNewWin = do_QueryInterface(newWin);
            if (piNewWin) {
                nsCOMPtr<nsIDocument> newDoc =
                    do_QueryInterface(piNewWin->GetExtantDocument());
                if (!newDoc || newDoc->IsInitialDocument()) {
                    isNewWindow = true;
                    aFlags |= INTERNAL_LOAD_FLAGS_FIRST_LOAD;
                }
            }

            nsCOMPtr<nsIWebNavigation> webNav = do_GetInterface(newWin);
            targetDocShell = do_QueryInterface(webNav);
        }

        //
        // Transfer the load to the target DocShell...  Pass nsnull as the
        // window target name from to prevent recursive retargeting!
        //
        if (NS_SUCCEEDED(rv) && targetDocShell) {
            rv = targetDocShell->InternalLoad(aURI,
                                              aReferrer,
                                              owner,
                                              aFlags,
                                              nsnull,     // No window target
                                              aTypeHint,
                                              aPostData,
                                              aHeadersData,
                                              aLoadType,
                                              aSHEntry,
                                              aFirstParty,
                                              aDocShell,
                                              aRequest);
            if (rv == NS_ERROR_NO_CONTENT) {
                // XXXbz except we never reach this code!
                if (isNewWindow) {
                    //
                    // At this point, a new window has been created, but the
                    // URI did not have any data associated with it...
                    //
                    // So, the best we can do, is to tear down the new window
                    // that was just created!
                    //
                    nsCOMPtr<nsIDOMWindow> domWin =
                        do_GetInterface(targetDocShell);
                    if (domWin) {
                        domWin->Close();
                    }
                }
                //
                // NS_ERROR_NO_CONTENT should not be returned to the
                // caller... This is an internal error code indicating that
                // the URI had no data associated with it - probably a
                // helper-app style protocol (ie. mailto://)
                //
                rv = NS_OK;
            }
            else if (isNewWindow) {
                // XXX: Once new windows are created hidden, the new
                //      window will need to be made visible...  For now,
                //      do nothing.
            }
        }

        // Else we ran out of memory, or were a popup and got blocked,
        // or something.

        return rv;
    }

    //
    // Load is being targetted at this docshell so return an error if the
    // docshell is in the process of being destroyed.
    //
    if (mIsBeingDestroyed) {
        return NS_ERROR_FAILURE;
    }

    rv = CheckLoadingPermissions();
    if (NS_FAILED(rv)) {
        return rv;
    }

    // If this docshell is owned by a frameloader, make sure to cancel
    // possible frameloader initialization before loading a new page.
    nsCOMPtr<nsIDocShellTreeItem> parent;
    GetParent(getter_AddRefs(parent));
    if (parent) {
        nsCOMPtr<nsIDocument> doc = do_GetInterface(parent);
        if (doc) {
            doc->TryCancelFrameLoaderInitialization(this);
        }
    }

    if (mFiredUnloadEvent) {
        if (IsOKToLoadURI(aURI)) {
            NS_PRECONDITION(!aWindowTarget || !*aWindowTarget,
                            "Shouldn't have a window target here!");

            // If this is a replace load, make whatever load triggered
            // the unload event also a replace load, so we don't
            // create extra history entries.
            if (LOAD_TYPE_HAS_FLAGS(aLoadType, LOAD_FLAGS_REPLACE_HISTORY)) {
                mLoadType = LOAD_NORMAL_REPLACE;
            }

            // Do this asynchronously
            nsCOMPtr<nsIRunnable> ev =
                new InternalLoadEvent(this, aURI, aReferrer, aOwner, aFlags,
                                      aTypeHint, aPostData, aHeadersData,
                                      aLoadType, aSHEntry, aFirstParty);
            return NS_DispatchToCurrentThread(ev);
        }

        // Just ignore this load attempt
        return NS_OK;
    }

    // Before going any further vet loads initiated by external programs.
    if (aLoadType == LOAD_NORMAL_EXTERNAL) {
        // Disallow external chrome: loads targetted at content windows
        bool isChrome = false;
        if (NS_SUCCEEDED(aURI->SchemeIs("chrome", &isChrome)) && isChrome) {
            NS_WARNING("blocked external chrome: url -- use '-chrome' option");
            return NS_ERROR_FAILURE;
        }

        // clear the decks to prevent context bleed-through (bug 298255)
        rv = CreateAboutBlankContentViewer(nsnull, nsnull);
        if (NS_FAILED(rv))
            return NS_ERROR_FAILURE;

        // reset loadType so we don't have to add lots of tests for
        // LOAD_NORMAL_EXTERNAL after this point
        aLoadType = LOAD_NORMAL;
    }

    mAllowKeywordFixup =
        (aFlags & INTERNAL_LOAD_FLAGS_ALLOW_THIRD_PARTY_FIXUP) != 0;
    mURIResultedInDocument = false;  // reset the clock...

    // ---- Short-circuited (same-document) navigation -------------------
    if (aLoadType == LOAD_NORMAL ||
        aLoadType == LOAD_STOP_CONTENT ||
        LOAD_TYPE_HAS_FLAGS(aLoadType, LOAD_FLAGS_REPLACE_HISTORY) ||
        aLoadType == LOAD_HISTORY ||
        aLoadType == LOAD_LINK) {

        // Split mCurrentURI and aURI on the '#' character.  Make sure we read
        // the return values of SplitURIAtHash; if it fails, we don't want to
        // allow a short-circuited navigation.
        nsCAutoString curBeforeHash, curHash, newBeforeHash, newHash;
        nsresult splitRv1, splitRv2;
        splitRv1 = mCurrentURI ?
            nsContentUtils::SplitURIAtHash(mCurrentURI,
                                           curBeforeHash, curHash) :
            NS_ERROR_FAILURE;
        splitRv2 = nsContentUtils::SplitURIAtHash(aURI, newBeforeHash, newHash);

        bool sameExceptHashes = NS_SUCCEEDED(splitRv1) &&
                                NS_SUCCEEDED(splitRv2) &&
                                curBeforeHash.Equals(newBeforeHash);

        bool historyNavBetweenSameDoc = false;
        if (mOSHE && aSHEntry) {
            // We're doing a history load.

            mOSHE->SharesDocumentWith(aSHEntry, &historyNavBetweenSameDoc);

#ifdef DEBUG
            if (historyNavBetweenSameDoc) {
                nsCOMPtr<nsIInputStream> currentPostData;
                mOSHE->GetPostData(getter_AddRefs(currentPostData));
                NS_ASSERTION(currentPostData == aPostData,
                             "Different POST data for entries for the same page?");
            }
#endif
        }

        // A short-circuited load happens when we navigate between two SHEntries
        // for the same document.  We do a short-circuited load under two
        // circumstances.  Either
        //
        //  a) we're navigating between two different SHEntries which share a
        //     document, or
        //
        //  b) we're navigating to a new shentry whose URI differs from the
        //     current URI only in its hash, the new hash is non-empty, and
        //     we're not doing a POST.
        //
        // The restriction tha the SHEntries in (a) must be different ensures
        // that history.go(0) and the like trigger full refreshes, rather than
        // short-circuited loads.
        bool doShortCircuitedLoad =
            (historyNavBetweenSameDoc && mOSHE != aSHEntry) ||
            (!aSHEntry && aPostData == nsnull &&
             sameExceptHashes && !newHash.IsEmpty());

        if (doShortCircuitedLoad) {
            // Save the current URI; we need it if we fire a hashchange later.
            nsCOMPtr<nsIURI> oldURI = mCurrentURI;

            // Save the position of the scrollers.
            nscoord cx = 0, cy = 0;
            GetCurScrollPos(ScrollOrientation_X, &cx);
            GetCurScrollPos(ScrollOrientation_Y, &cy);

            // ScrollToAnchor doesn't necessarily cause us to scroll the window;
            // the function decides whether a scroll is appropriate based on the
            // arguments it receives.  But even if we don't end up scrolling,
            // ScrollToAnchor performs other important tasks, such as informing
            // the presShell that we have a new hash.  See bug 680257.
            rv = ScrollToAnchor(curHash, newHash, aLoadType);
            NS_ENSURE_SUCCESS(rv, rv);

            // Reset mLoadType to its original value once we exit this block,
            // because this short-circuited load might have started after a
            // normal, network load, and we don't want to clobber its load type.
            // See bug 737307.
            AutoRestore<PRUint32> loadTypeResetter(mLoadType);

            // If a non-short-circuit load (i.e., a network load) is pending,
            // make this a replacement load, so that we don't add a SHEntry here
            // and the network load goes into the SHEntry it expects to.
            if (JustStartedNetworkLoad() && (aLoadType & LOAD_CMD_NORMAL)) {
                mLoadType = LOAD_NORMAL_REPLACE;
            } else {
                mLoadType = aLoadType;
            }

            mURIResultedInDocument = true;

            /* we need to assign mLSHE to aSHEntry right here, so that on History loads,
             * SetCurrentURI() called from OnNewURI() will send proper
             * onLocationChange() notifications to the browser to update
             * back/forward buttons.
             */
            SetHistoryEntry(&mLSHE, aSHEntry);

            /* This is a anchor traversal with in the same page.
             * call OnNewURI() so that, this traversal will be
             * recorded in session and global history.
             */
            nsCOMPtr<nsISupports> owner;
            if (mOSHE) {
                mOSHE->GetOwner(getter_AddRefs(owner));
            }
            // Pass true for aCloneSHChildren, since we're not
            // changing documents here, so all of our subframes are
            // still relevant to the new session history entry.
            //
            // It also makes OnNewURI(...) set LOCATION_CHANGE_SAME_DOCUMENT
            // flag on firing onLocationChange(...).
            // Anyway, aCloneSHChildren param is simply reflecting
            // doShortCircuitedLoad in this scope.
            OnNewURI(aURI, nsnull, owner, mLoadType, true, true, true);

            nsCOMPtr<nsIInputStream> postData;
            nsCOMPtr<nsISupports> cacheKey;

            if (mOSHE) {
                /* save current position of scroller(s) (bug 59774) */
                mOSHE->SetScrollPosition(cx, cy);
                // Get the postdata and page ident from the current page, if
                // the new load is being done via normal means.  Note that
                // "normal means" can be checked for just by checking for
                // LOAD_CMD_NORMAL, given the loadType and allowScroll check
                // above -- it filters out some LOAD_CMD_NORMAL cases that we
                // wouldn't want here.
                if (aLoadType & LOAD_CMD_NORMAL) {
                    mOSHE->GetPostData(getter_AddRefs(postData));
                    mOSHE->GetCacheKey(getter_AddRefs(cacheKey));

                    // Link our new SHEntry to the old SHEntry's back/forward
                    // cache data, since the two SHEntries correspond to the
                    // same document.
                    if (mLSHE)
                        mLSHE->AdoptBFCacheEntry(mOSHE);
                }
            }

            /* Assign mOSHE to mLSHE. This will either be a new entry created
             * by OnNewURI() for normal loads or aSHEntry for history loads.
             */
            if (mLSHE) {
                SetHistoryEntry(&mOSHE, mLSHE);
                // Save the postData obtained from the previous page
                // in to the session history entry created for the
                // anchor page, so that any history load of the anchor
                // page will restore the appropriate postData.
                if (postData)
                    mOSHE->SetPostData(postData);

                // Make sure we won't just repost without hitting the
                // cache first
                if (cacheKey)
                    mOSHE->SetCacheKey(cacheKey);
            }

            /* restore previous position of scroller(s), if we're moving
             * back in history (bug 59774)
             */
            if (mOSHE && (aLoadType == LOAD_HISTORY ||
                          aLoadType == LOAD_RELOAD_NORMAL)) {
                nscoord bx, by;
                mOSHE->GetScrollPosition(&bx, &by);
                SetCurScrollPosEx(bx, by);
            }

            /* Clear out mLSHE so that further anchor visits get
             * recorded in SH and SH won't misbehave.
             */
            SetHistoryEntry(&mLSHE, nsnull);
            /* Set the title for the SH entry for this target url. so that
             * SH menus in go/back/forward buttons won't be empty for this.
             */
            if (mSessionHistory) {
                PRInt32 index = -1;
                mSessionHistory->GetIndex(&index);
                nsCOMPtr<nsIHistoryEntry> hEntry;
                mSessionHistory->GetEntryAtIndex(index, false,
                                                 getter_AddRefs(hEntry));
                NS_ENSURE_TRUE(hEntry, NS_ERROR_FAILURE);
                nsCOMPtr<nsISHEntry> shEntry(do_QueryInterface(hEntry));
                if (shEntry)
                    shEntry->SetTitle(mTitle);
            }

            /* Set the title for the Global History entry for this anchor url.
             */
            if (mUseGlobalHistory) {
                nsCOMPtr<IHistory> history = services::GetHistoryService();
                if (history) {
                    history->SetURITitle(aURI, mTitle);
                }
                else if (mGlobalHistory) {
                    mGlobalHistory->SetPageTitle(aURI, mTitle);
                }
            }

            // Set the doc's URI according to the new history entry's URI.
            nsCOMPtr<nsIDocument> doc =
                do_GetInterface(GetAsSupports(this));
            NS_ENSURE_TRUE(doc, NS_ERROR_FAILURE);
            doc->SetDocumentURI(aURI);

            SetDocCurrentStateObj(mOSHE);

            // Dispatch the popstate and hashchange events, as appropriate.
            nsCOMPtr<nsPIDOMWindow> window = do_QueryInterface(mScriptGlobal);
            if (window) {
                // Fire a hashchange event URIs differ, and only in their hashes.
                bool doHashchange = sameExceptHashes && !curHash.Equals(newHash);

                if (historyNavBetweenSameDoc || doHashchange) {
                    window->DispatchSyncPopState();
                }

                if (doHashchange) {
                    // Make sure to use oldURI here, not mCurrentURI, because by
                    // now, mCurrentURI has changed!
                    window->DispatchAsyncHashchange(oldURI, aURI);
                }
            }

            // Inform the favicon service that the favicon for oldURI also
            // applies to aURI.
            CopyFavicon(oldURI, aURI);

            return NS_OK;
        }
    }

    // ---- Full (network) load path --------------------------------------

    // mContentViewer->PermitUnload can destroy |this| docShell, which
    // causes the next call of CanSavePresentation to crash.
    // Hold onto |this| until we return, to prevent a crash from happening.
    // (bug#331040)
    nsCOMPtr<nsIDocShell> kungFuDeathGrip(this);

    rv = MaybeInitTiming();
    if (mTiming) {
        mTiming->NotifyBeforeUnload();
    }
    // Check if the page doesn't want to be unloaded. The javascript:
    // protocol handler deals with this for javascript: URLs.
    if (!bIsJavascript && mContentViewer) {
        bool okToUnload;
        rv = mContentViewer->PermitUnload(false, &okToUnload);

        if (NS_SUCCEEDED(rv) && !okToUnload) {
            // The user chose not to unload the page, interrupt the
            // load.
            return NS_OK;
        }
    }

    if (mTiming) {
        mTiming->NotifyUnloadAccepted(mCurrentURI);
    }

    // Check for saving the presentation here, before calling Stop().
    // This is necessary so that we can catch any pending requests.
    // Since the new request has not been created yet, we pass null for the
    // new request parameter.
    // Also pass nsnull for the document, since it doesn't affect the return
    // value for our purposes here.
    bool savePresentation = CanSavePresentation(aLoadType, nsnull, nsnull);

    // Don't stop current network activity for javascript: URL's since
    // they might not result in any data, and thus nothing should be
    // stopped in those cases. In the case where they do result in
    // data, the javascript: URL channel takes care of stopping
    // current network activity.
    if (!bIsJavascript) {
        // Stop any current network activity.
        // Also stop content if this is a zombie doc. otherwise
        // the onload will be delayed by other loads initiated in the
        // background by the first document that
        // didn't fully load before the next load was initiated.
        // If not a zombie, don't stop content until data
        // starts arriving from the new URI...

        nsCOMPtr<nsIContentViewer> zombieViewer;
        if (mContentViewer) {
            mContentViewer->GetPreviousViewer(getter_AddRefs(zombieViewer));
        }

        if (zombieViewer ||
            LOAD_TYPE_HAS_FLAGS(aLoadType, LOAD_FLAGS_STOP_CONTENT)) {
            rv = Stop(nsIWebNavigation::STOP_ALL);
        } else {
            rv = Stop(nsIWebNavigation::STOP_NETWORK);
        }

        if (NS_FAILED(rv))
            return rv;
    }

    mLoadType = aLoadType;

    // mLSHE should be assigned to aSHEntry, only after Stop() has
    // been called. But when loading an error page, do not clear the
    // mLSHE for the real page.
    if (mLoadType != LOAD_ERROR_PAGE)
        SetHistoryEntry(&mLSHE, aSHEntry);

    mSavingOldViewer = savePresentation;

    // If we have a saved content viewer in history, restore and show it now.
    if (aSHEntry && (mLoadType & LOAD_CMD_HISTORY)) {
        // Make sure our history ID points to the same ID as
        // SHEntry's docshell ID.
        aSHEntry->GetDocshellID(&mHistoryID);

        // It's possible that the previous viewer of mContentViewer is the
        // viewer that will end up in aSHEntry when it gets closed.  If that's
        // the case, we need to go ahead and force it into its shentry so we
        // can restore it.
        if (mContentViewer) {
            nsCOMPtr<nsIContentViewer> prevViewer;
            mContentViewer->GetPreviousViewer(getter_AddRefs(prevViewer));
            if (prevViewer) {
#ifdef DEBUG
                nsCOMPtr<nsIContentViewer> prevPrevViewer;
                prevViewer->GetPreviousViewer(getter_AddRefs(prevPrevViewer));
                NS_ASSERTION(!prevPrevViewer, "Should never have viewer chain here");
#endif
                nsCOMPtr<nsISHEntry> viewerEntry;
                prevViewer->GetHistoryEntry(getter_AddRefs(viewerEntry));
                if (viewerEntry == aSHEntry) {
                    // Make sure this viewer ends up in the right place
                    mContentViewer->SetPreviousViewer(nsnull);
                    prevViewer->Destroy();
                }
            }
        }
        nsCOMPtr<nsISHEntry> oldEntry = mOSHE;
        bool restoring;
        rv = RestorePresentation(aSHEntry, &restoring);
        if (restoring)
            return rv;

        // We failed to restore the presentation, so clean up.
        // Both the old and new history entries could potentially be in
        // an inconsistent state.
        if (NS_FAILED(rv)) {
            if (oldEntry)
                oldEntry->SyncPresentationState();

            aSHEntry->SyncPresentationState();
        }
    }

    nsCOMPtr<nsIRequest> req;
    rv = DoURILoad(aURI, aReferrer,
                   !(aFlags & INTERNAL_LOAD_FLAGS_DONT_SEND_REFERRER),
                   owner, aTypeHint, aPostData, aHeadersData, aFirstParty,
                   aDocShell, getter_AddRefs(req),
                   (aFlags & INTERNAL_LOAD_FLAGS_FIRST_LOAD) != 0,
                   (aFlags & INTERNAL_LOAD_FLAGS_BYPASS_CLASSIFIER) != 0,
                   (aFlags & INTERNAL_LOAD_FLAGS_FORCE_ALLOW_COOKIES) != 0);

    if (req && aRequest)
        NS_ADDREF(*aRequest = req);

    if (NS_FAILED(rv)) {
        nsCOMPtr<nsIChannel> chan(do_QueryInterface(req));
        DisplayLoadError(rv, aURI, nsnull, chan);
    }

    return rv;
}
149
75,959
0
/*
 * Allocate a socket send buffer with header_len bytes of linear space and
 * data_len bytes spread across page fragments, blocking (up to the socket
 * send timeout) until enough socket write memory is available.
 *
 * The fragment-count bound is validated up front: a data_len needing more
 * than MAX_SKB_FRAGS pages is rejected with -EMSGSIZE before any
 * allocation is attempted.
 *
 * Returns the skb (charged to sk via skb_set_owner_w) on success, or NULL
 * with the error stored in *errcode.
 */
struct sk_buff *CVE_2012_2136_PATCHED_sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
				     unsigned long data_len, int noblock,
				     int *errcode)
{
	struct sk_buff *skb;
	gfp_t gfp_mask;
	long timeo;
	int err;
	/* Number of whole pages needed to hold data_len bytes (round up). */
	int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;

	err = -EMSGSIZE;
	if (npages > MAX_SKB_FRAGS)
		goto failure;

	gfp_mask = sk->sk_allocation;
	if (gfp_mask & __GFP_WAIT)
		gfp_mask |= __GFP_REPEAT;

	timeo = sock_sndtimeo(sk, noblock);
	while (1) {
		/* Abort on a pending socket error or a closed send side. */
		err = sock_error(sk);
		if (err != 0)
			goto failure;

		err = -EPIPE;
		if (sk->sk_shutdown & SEND_SHUTDOWN)
			goto failure;

		if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
			skb = alloc_skb(header_len, gfp_mask);
			if (skb) {
				int i;

				/* No pages, we're done... */
				if (!data_len)
					break;

				skb->truesize += data_len;
				skb_shinfo(skb)->nr_frags = npages;
				for (i = 0; i < npages; i++) {
					struct page *page;
					skb_frag_t *frag;

					page = alloc_pages(sk->sk_allocation, 0);
					if (!page) {
						err = -ENOBUFS;
						/* Trim nr_frags to the pages
						 * actually allocated so
						 * kfree_skb() releases only
						 * those. */
						skb_shinfo(skb)->nr_frags = i;
						kfree_skb(skb);
						goto failure;
					}

					frag = &skb_shinfo(skb)->frags[i];
					frag->page = page;
					frag->page_offset = 0;
					frag->size = (data_len >= PAGE_SIZE ?
						      PAGE_SIZE :
						      data_len);
					data_len -= PAGE_SIZE;
				}

				/* Full success... */
				break;
			}
			err = -ENOBUFS;
			goto failure;
		}
		/* No write memory yet: flag the socket and wait (or bail if
		 * non-blocking / timed out / interrupted). */
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		err = -EAGAIN;
		if (!timeo)
			goto failure;
		if (signal_pending(current))
			goto interrupted;
		timeo = sock_wait_for_wmem(sk, timeo);
	}

	skb_set_owner_w(skb, sk);
	return skb;

interrupted:
	err = sock_intr_errno(timeo);
failure:
	*errcode = err;
	return NULL;
}
150
44,056
0
/*
 * Load and index the Diameter dictionary (dictionary.xml) from the data
 * directory.
 *
 * Populates the module-level tables: build_dict (types/avps hash tables,
 * hf/ett registration arrays), dictionary (vendor and AVP red-black
 * trees), the per-vendor command/AVP value_string arrays, all_cmds and
 * vnd_short_vs.  Entries with missing names/vendors are reported to
 * stderr and skipped rather than aborting the whole load.
 *
 * Returns 1 on success, 0 if the dictionary file could not be scanned.
 */
static int
CVE_2012_2393_PATCHED_dictionary_load(void)
{
	ddict_t* d;
	ddict_application_t* p;
	ddict_vendor_t* v;
	ddict_cmd_t* c;
	ddict_typedefn_t* t;
	ddict_avp_t* a;
	/* Debug behavior is driven by environment variables. */
	gboolean do_debug_parser = getenv("WIRESHARK_DEBUG_DIAM_DICT_PARSER") ? TRUE : FALSE;
	gboolean do_dump_dict = getenv("WIRESHARK_DUMP_DIAM_DICT") ? TRUE : FALSE;
	char* dir = ep_strdup_printf("%s" G_DIR_SEPARATOR_S "diameter" G_DIR_SEPARATOR_S, get_datafile_dir());
	const avp_type_t* type;
	/* basic_types[0] is the fallback type for anything unresolved. */
	const avp_type_t* octetstring = &basic_types[0];
	diam_avp_t* avp;
	GHashTable* vendors = g_hash_table_new(strcase_hash,strcase_equal);
	diam_vnd_t* vnd;
	GArray* vnd_shrt_arr = g_array_new(TRUE,TRUE,sizeof(value_string));

	build_dict.hf = g_array_new(FALSE,TRUE,sizeof(hf_register_info));
	build_dict.ett = g_ptr_array_new();
	build_dict.types = g_hash_table_new(strcase_hash,strcase_equal);
	build_dict.avps = g_hash_table_new(strcase_hash,strcase_equal);

	dictionary.vnds = pe_tree_create(EMEM_TREE_TYPE_RED_BLACK,"diameter_vnds");
	dictionary.avps = pe_tree_create(EMEM_TREE_TYPE_RED_BLACK,"diameter_avps");

	unknown_vendor.vs_cmds = g_array_new(TRUE,TRUE,sizeof(value_string));
	unknown_vendor.vs_avps = g_array_new(TRUE,TRUE,sizeof(value_string));
	no_vnd.vs_cmds = g_array_new(TRUE,TRUE,sizeof(value_string));
	no_vnd.vs_avps = g_array_new(TRUE,TRUE,sizeof(value_string));

	all_cmds = g_array_new(TRUE,TRUE,sizeof(value_string));

	/* Vendor 0 / "None" is pre-registered. */
	pe_tree_insert32(dictionary.vnds,0,&no_vnd);
	g_hash_table_insert(vendors,(gchar *)"None",&no_vnd);

	/* initialize the types hash with the known basic types */
	for (type = basic_types; type->name; type++) {
		g_hash_table_insert(build_dict.types,(gchar *)type->name,(void*)type);
	}

	/* load the dictionary */
	d = ddict_scan(dir,"dictionary.xml",do_debug_parser);
	if (d == NULL) {
		return 0;
	}

	if (do_dump_dict) ddict_print(stdout, d);

	/* populate the types */
	for (t = d->typedefns; t; t = t->next) {
		const avp_type_t* parent = NULL;
		/* try to get the parent type */

		/* NULL names would otherwise crash the hash lookups below. */
		if (t->name == NULL) {
			fprintf(stderr,"Diameter Dictionary: Invalid Type (empty name): parent==%s\n",
				t->parent ? t->parent : "(null)");
			continue;
		}

		if (g_hash_table_lookup(build_dict.types,t->name))
			continue;

		if (t->parent) {
			parent = g_hash_table_lookup(build_dict.types,t->parent);
		}

		if (!parent) parent = octetstring;

		/* insert the parent type for this type */
		g_hash_table_insert(build_dict.types,t->name,(void*)parent);
	}

	/* populate the applications */
	if ((p = d->applications)) {
		GArray* arr = g_array_new(TRUE,TRUE,sizeof(value_string));

		for (; p; p = p->next) {
			value_string item = {p->code,p->name};
			g_array_append_val(arr,item);
		}

		/* Keep the element data, free only the GArray wrapper. */
		dictionary.applications = (void*)arr->data;
		g_array_free(arr,FALSE);
	}

	if ((v = d->vendors)) {
		for ( ; v; v = v->next) {
			value_string item = {v->code,v->name};

			/* Skip (don't dereference) vendors with no name. */
			if (v->name == NULL) {
				fprintf(stderr,"Diameter Dictionary: Invalid Vendor (empty name): code==%d\n",v->code);
				continue;
			}

			if (g_hash_table_lookup(vendors,v->name))
				continue;

			g_array_append_val(vnd_shrt_arr,item);

			vnd = g_malloc(sizeof(diam_vnd_t));
			vnd->code = v->code;
			vnd->vs_cmds = g_array_new(TRUE,TRUE,sizeof(value_string));
			vnd->vs_avps = g_array_new(TRUE,TRUE,sizeof(value_string));
			vnd->vs_avps_ext = NULL;
			pe_tree_insert32(dictionary.vnds,vnd->code,vnd);
			g_hash_table_insert(vendors,v->name,vnd);
		}
	}

	vnd_short_vs = (void*)vnd_shrt_arr->data;
	g_array_free(vnd_shrt_arr,FALSE);

	if ((c = d->cmds)) {
		for (; c; c = c->next) {
			/* A NULL vendor would crash the hash lookup below. */
			if (c->vendor == NULL) {
				fprintf(stderr,"Diameter Dictionary: Invalid Vendor (empty name) for command %s\n",
					c->name ? c->name : "(null)");
				continue;
			}

			if ((vnd = g_hash_table_lookup(vendors,c->vendor))) {
				value_string item = {c->code,c->name};
				g_array_append_val(vnd->vs_cmds,item);
				/* Also add to all_cmds as used by RFC version */
				g_array_append_val(all_cmds,item);
			} else {
				fprintf(stderr,"Diameter Dictionary: No Vendor: %s\n",c->vendor);
			}
		}
	}

	for (a = d->avps; a; a = a->next) {
		ddict_enum_t* e;
		value_string* vs = NULL;
		const char* vend = a->vendor ? a->vendor : "None";
		ddict_xmlpi_t* x;
		void* avp_data = NULL;

		/* Nameless AVPs cannot be indexed; report and skip. */
		if (a->name == NULL) {
			fprintf(stderr,"Diameter Dictionary: Invalid AVP (empty name)\n");
			continue;
		}

		if ((vnd = g_hash_table_lookup(vendors,vend))) {
			value_string vndvs = {a->code,a->name};
			g_array_append_val(vnd->vs_avps,vndvs);
		} else {
			fprintf(stderr,"Diameter Dictionary: No Vendor: %s\n",vend);
			vnd = &unknown_vendor;
		}

		/* Collect the AVP's enumerated values, sorted by code. */
		if ((e = a->enums)) {
			GArray* arr = g_array_new(TRUE,TRUE,sizeof(value_string));

			for (; e; e = e->next) {
				value_string item = {e->code,e->name};
				g_array_append_val(arr,item);
			}
			g_array_sort(arr, compare_avps);
			vs = (void*)arr->data;
		}

		type = NULL;

		/* An avp-proto/type-proto XML PI overrides the declared type
		 * and routes the AVP payload to a sub-dissector. */
		for( x = d->xmlpis; x; x = x->next ) {
			if ( (strcase_equal(x->name,"avp-proto") && strcase_equal(x->key,a->name))
				 || (a->type && strcase_equal(x->name,"type-proto") && strcase_equal(x->key,a->type))
				 ) {
				static avp_type_t proto_type = {"proto", proto_avp, proto_avp, FT_UINT32, BASE_NONE, build_proto_avp};
				type = &proto_type;

				avp_data = x->value;
				break;
			}
		}

		if ( (!type) && a->type )
			type = g_hash_table_lookup(build_dict.types,a->type);

		if (!type) type = octetstring;

		avp = type->build( type, a->code, vnd, a->name, vs, avp_data);
		if (avp != NULL) {
			g_hash_table_insert(build_dict.avps, a->name, avp);

			{
				/* Index by (AVP code, vendor code) pair. */
				emem_tree_key_t k[] = {
					{ 1, &(a->code) },
					{ 1, &(vnd->code) },
					{ 0 , NULL }
				};
				pe_tree_insert32_array(dictionary.avps,k,avp);
			}
		}
	}

	g_hash_table_destroy(build_dict.types);
	g_hash_table_destroy(build_dict.avps);
	g_hash_table_destroy(vendors);

	return 1;
}
151
120,749
0
/*
 * Decode an Indeo5 picture header from the bitstream in ctx->gb.
 *
 * Validates the 5-bit picture start code (0x1F), reads the frame type and
 * frame number, decodes the GOP header for intra frames, and for non-NULL
 * frames reads the frame flags, optional extended-header size and checksum,
 * skips any unknown header extension, and (re)loads the macroblock Huffman
 * codebook descriptor.
 *
 * @param ctx    decoder context holding the GetBitContext and frame state
 * @param avctx  codec context, used only for logging
 * @return 0 on success, -1 on any bitstream/semantic error
 */
static int CVE_2012_2779_PATCHED_decode_pic_hdr(IVI5DecContext *ctx, AVCodecContext *avctx)
{
    /* every picture starts with the 5-bit code 0x1F */
    if (get_bits(&ctx->gb, 5) != 0x1F) {
        av_log(avctx, AV_LOG_ERROR, "Invalid picture start code!\n");
        return -1;
    }

    ctx->prev_frame_type = ctx->frame_type;
    ctx->frame_type      = get_bits(&ctx->gb, 3);
    if (ctx->frame_type >= 5) {
        av_log(avctx, AV_LOG_ERROR, "Invalid frame type: %d \n", ctx->frame_type);
        return -1;
    }

    ctx->frame_num = get_bits(&ctx->gb, 8);

    if (ctx->frame_type == FRAMETYPE_INTRA) {
        /* mark GOP state invalid until the GOP header parses cleanly, so
           nothing can rely on a half-initialized GOP if we bail out here */
        ctx->gop_invalid = 1;
        if (decode_gop_header(ctx, avctx))
            return -1;
        ctx->gop_invalid = 0;
    }

    if (ctx->frame_type != FRAMETYPE_NULL) {
        ctx->frame_flags = get_bits(&ctx->gb, 8);

        /* flag bit 0: a 24-bit picture-header size field follows */
        ctx->pic_hdr_size = (ctx->frame_flags & 1) ? get_bits_long(&ctx->gb, 24) : 0;

        /* flag bit 4: a 16-bit checksum field follows */
        ctx->checksum = (ctx->frame_flags & 0x10) ? get_bits(&ctx->gb, 16) : 0;

        /* skip unknown extension if any */
        if (ctx->frame_flags & 0x20)
            skip_hdr_extension(&ctx->gb); /* XXX: untested */

        /* decode macroblock huffman codebook; bit 0x40 of the frame flags is
           forwarded to the descriptor decoder */
        if (ff_ivi_dec_huff_desc(&ctx->gb, ctx->frame_flags & 0x40, IVI_MB_HUFF,
                                 &ctx->mb_vlc, avctx))
            return -1;

        skip_bits(&ctx->gb, 3); /* FIXME: unknown meaning! */
    }

    align_get_bits(&ctx->gb);

    return 0;
}
152
182,307
0
/*
 * Decode one VP5/VP6 frame from the packet.
 *
 * When the stream carries an alpha plane (s->has_alpha) the packet holds a
 * 24-bit offset to the alpha data followed by two coded planes; the outer
 * loop then runs twice (colour plane, then alpha plane).  For each plane it
 * parses the header, (re)allocates the output buffer on the first pass,
 * initializes model/context state, runs the macroblock loop, and finally
 * rotates the golden/previous/current frame pointers.
 *
 * @param avctx      codec context
 * @param data       output AVFrame (written on success)
 * @param data_size  set to sizeof(AVFrame) on success
 * @param avpkt      input packet
 * @return avpkt->size on success, -1 on error
 */
int CVE_2012_2783_PATCHED_ff_vp56_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
                         AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    VP56Context *s = avctx->priv_data;
    AVFrame *const p = s->framep[VP56_FRAME_CURRENT];
    int remaining_buf_size = avpkt->size;
    int is_alpha, av_uninit(alpha_offset);

    if (s->has_alpha) {
        /* first 3 bytes: big-endian offset of the alpha plane data;
           reject packets too small to contain it or with an offset past
           the remaining payload */
        if (remaining_buf_size < 3)
            return -1;
        alpha_offset = bytestream_get_be24(&buf);
        remaining_buf_size -= 3;
        if (remaining_buf_size < alpha_offset)
            return -1;
    }

    for (is_alpha=0; is_alpha < 1+s->has_alpha; is_alpha++) {
        int mb_row, mb_col, mb_row_flip, mb_offset = 0;
        int block, y, uv, stride_y, stride_uv;
        int golden_frame = 0;
        int res;

        s->modelp = &s->models[is_alpha];

        /* res == 0: parse error; res == 2: frame dimensions changed */
        res = s->parse_header(s, buf, remaining_buf_size, &golden_frame);
        if (!res)
            return -1;

        if (res == 2) {
            /* size change: drop all previously allocated reference frames */
            int i;
            for (i = 0; i < 4; i++) {
                if (s->frames[i].data[0])
                    avctx->release_buffer(avctx, &s->frames[i]);
            }
            if (is_alpha) {
                avcodec_set_dimensions(avctx, 0, 0);
                return -1;
            }
        }

        if (!is_alpha) {
            p->reference = 1;
            if (avctx->get_buffer(avctx, p) < 0) {
                av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
                return -1;
            }

            if (res == 2)
                if (vp56_size_changed(avctx)) {
                    avctx->release_buffer(avctx, p);
                    return -1;
                }
        }

        if (p->key_frame) {
            /* intra frame: reset models and mark every MB intra */
            p->pict_type = AV_PICTURE_TYPE_I;
            s->default_models_init(s);
            for (block=0; block<s->mb_height*s->mb_width; block++)
                s->macroblocks[block].type = VP56_MB_INTRA;
        } else {
            p->pict_type = AV_PICTURE_TYPE_P;
            vp56_parse_mb_type_models(s);
            s->parse_vector_models(s);
            s->mb_type = VP56_MB_INTER_NOVEC_PF;
        }

        if (s->parse_coeff_models(s))
            goto next;

        /* reset DC prediction; chroma planes start from 128 */
        memset(s->prev_dc, 0, sizeof(s->prev_dc));
        s->prev_dc[1][VP56_FRAME_CURRENT] = 128;
        s->prev_dc[2][VP56_FRAME_CURRENT] = 128;

        for (block=0; block < 4*s->mb_width+6; block++) {
            s->above_blocks[block].ref_frame = VP56_FRAME_NONE;
            s->above_blocks[block].dc_coeff = 0;
            s->above_blocks[block].not_null_dc = 0;
        }
        s->above_blocks[2*s->mb_width + 2].ref_frame = VP56_FRAME_CURRENT;
        s->above_blocks[3*s->mb_width + 4].ref_frame = VP56_FRAME_CURRENT;

        stride_y  = p->linesize[0];
        stride_uv = p->linesize[1];

        /* negative flip: iterate rows bottom-up with a 7-line offset */
        if (s->flip < 0)
            mb_offset = 7;

        /* main macroblocks loop */
        for (mb_row=0; mb_row<s->mb_height; mb_row++) {
            if (s->flip < 0)
                mb_row_flip = s->mb_height - mb_row - 1;
            else
                mb_row_flip = mb_row;

            /* reset left-neighbour context at the start of each row */
            for (block=0; block<4; block++) {
                s->left_block[block].ref_frame = VP56_FRAME_NONE;
                s->left_block[block].dc_coeff = 0;
                s->left_block[block].not_null_dc = 0;
            }
            memset(s->coeff_ctx, 0, sizeof(s->coeff_ctx));
            memset(s->coeff_ctx_last, 24, sizeof(s->coeff_ctx_last));

            s->above_block_idx[0] = 1;
            s->above_block_idx[1] = 2;
            s->above_block_idx[2] = 1;
            s->above_block_idx[3] = 2;
            s->above_block_idx[4] = 2*s->mb_width + 2 + 1;
            s->above_block_idx[5] = 3*s->mb_width + 4 + 1;

            s->block_offset[s->frbi] = (mb_row_flip*16 + mb_offset) * stride_y;
            s->block_offset[s->srbi] = s->block_offset[s->frbi] + 8*stride_y;
            s->block_offset[1] = s->block_offset[0] + 8;
            s->block_offset[3] = s->block_offset[2] + 8;
            s->block_offset[4] = (mb_row_flip*8 + mb_offset) * stride_uv;
            s->block_offset[5] = s->block_offset[4];

            for (mb_col=0; mb_col<s->mb_width; mb_col++) {
                vp56_decode_mb(s, mb_row, mb_col, is_alpha);

                /* advance neighbour indices / pixel offsets: 4 luma blocks
                   (16 px wide), then 2 chroma blocks (8 px wide) */
                for (y=0; y<4; y++) {
                    s->above_block_idx[y] += 2;
                    s->block_offset[y] += 16;
                }
                for (uv=4; uv<6; uv++) {
                    s->above_block_idx[uv] += 1;
                    s->block_offset[uv] += 8;
                }
            }
        }

    next:
        /* promote the current frame to golden if required, releasing the old
           golden frame unless it is still referenced elsewhere */
        if (p->key_frame || golden_frame) {
            if (s->framep[VP56_FRAME_GOLDEN]->data[0] &&
                s->framep[VP56_FRAME_GOLDEN] != p &&
                s->framep[VP56_FRAME_GOLDEN] != s->framep[VP56_FRAME_GOLDEN2])
                avctx->release_buffer(avctx, s->framep[VP56_FRAME_GOLDEN]);
            s->framep[VP56_FRAME_GOLDEN] = p;
        }

        if (s->has_alpha) {
            /* swap golden pointers for the alpha pass and jump to its data */
            FFSWAP(AVFrame *, s->framep[VP56_FRAME_GOLDEN],
                              s->framep[VP56_FRAME_GOLDEN2]);
            buf += alpha_offset;
            remaining_buf_size -= alpha_offset;
        }
    }

    /* rotate previous/current, keeping any frame that doubles as golden
       alive by parking the previous pointer in an unused slot instead of
       releasing it */
    if (s->framep[VP56_FRAME_PREVIOUS] == s->framep[VP56_FRAME_GOLDEN] ||
        s->framep[VP56_FRAME_PREVIOUS] == s->framep[VP56_FRAME_GOLDEN2]) {
        if (s->framep[VP56_FRAME_UNUSED] != s->framep[VP56_FRAME_GOLDEN] &&
            s->framep[VP56_FRAME_UNUSED] != s->framep[VP56_FRAME_GOLDEN2])
            FFSWAP(AVFrame *, s->framep[VP56_FRAME_PREVIOUS],
                              s->framep[VP56_FRAME_UNUSED]);
        else
            FFSWAP(AVFrame *, s->framep[VP56_FRAME_PREVIOUS],
                              s->framep[VP56_FRAME_UNUSED2]);
    } else if (s->framep[VP56_FRAME_PREVIOUS]->data[0])
        avctx->release_buffer(avctx, s->framep[VP56_FRAME_PREVIOUS]);

    FFSWAP(AVFrame *, s->framep[VP56_FRAME_CURRENT],
                      s->framep[VP56_FRAME_PREVIOUS]);

    p->qstride = 0;
    p->qscale_table = s->qscale_table;
    p->qscale_type = FF_QSCALE_TYPE_VP56;
    *(AVFrame*)data = *p;
    *data_size = sizeof(AVFrame);

    return avpkt->size;
}
153
659
0
/*
 * Extract ISO-DEP activation parameters from a raw RF_INTF_ACTIVATED NTF
 * payload into the notification structure.
 *
 * The first payload byte is a length field; it is clamped with min_t()
 * before the memcpy so the copy can never exceed the destination field
 * (20 bytes for RATS_RES, 50 bytes for ATTRIB_RES).
 *
 * Returns NCI_STATUS_OK, or NCI_STATUS_RF_PROTOCOL_ERROR for an
 * unsupported RF technology/mode.
 */
static int CVE_2012_3364_PATCHED_nci_extract_activation_params_iso_dep(struct nci_dev *ndev,
			struct nci_rf_intf_activated_ntf *ntf, __u8 *data)
{
	if (ntf->activation_rf_tech_and_mode == NCI_NFC_A_PASSIVE_POLL_MODE) {
		struct activation_params_nfca_poll_iso_dep *params =
			&ntf->activation_params.nfca_poll_iso_dep;

		params->rats_res_len = min_t(__u8, *data++, 20);
		pr_debug("rats_res_len %d\n", params->rats_res_len);
		if (params->rats_res_len > 0)
			memcpy(params->rats_res, data, params->rats_res_len);
		return NCI_STATUS_OK;
	}

	if (ntf->activation_rf_tech_and_mode == NCI_NFC_B_PASSIVE_POLL_MODE) {
		struct activation_params_nfcb_poll_iso_dep *params =
			&ntf->activation_params.nfcb_poll_iso_dep;

		params->attrib_res_len = min_t(__u8, *data++, 50);
		pr_debug("attrib_res_len %d\n", params->attrib_res_len);
		if (params->attrib_res_len > 0)
			memcpy(params->attrib_res, data, params->attrib_res_len);
		return NCI_STATUS_OK;
	}

	pr_err("unsupported activation_rf_tech_and_mode 0x%x\n",
	       ntf->activation_rf_tech_and_mode);
	return NCI_STATUS_RF_PROTOCOL_ERROR;
}
154
56,595
0
/*
 * nsIWebProgressListener::OnStateChange implementation.
 *
 * Tracks per-request progress events and maintains the window's aggregated
 * security-indicator state:
 *  - TRANSFERRING + IS_REQUEST: record the request in mTransferringRequests.
 *  - START of a toplevel document load: save the previous document's
 *    sub-request security counters, reset state tracking, and increment
 *    mDocumentRequestsInProgress.
 *  - STOP of a toplevel document load: obtain/refresh the event sink,
 *    decrement the counter, and re-evaluate / notify the security state
 *    once data was actually transferred.
 *  - STOP of a relevant subrequest: fold its security info into the
 *    mSubRequests* counters and update the UI if the new toplevel state
 *    is already known.
 *
 * Design notes (condensed from the original discussion):
 *  - http meta-refresh reloads fully stop the old page before the new one
 *    reports progress, so they behave as separate transactions;
 *  - for HTTP-level redirects (e.g. 302) only the new toplevel load
 *    matters; the code assumes the redirected response body is not parsed,
 *    so no subrequests of the abandoned document are ever reported;
 *  - frames: the indicator follows the toplevel document only.  A request
 *    whose associated DOM window is not the toplevel window is always
 *    treated as a subdocument request, regardless of its load flags,
 *    because the toplevel frameset cannot be reliably distinguished by
 *    STATE bits alone after a redirect.
 */
NS_IMETHODIMP
CVE_2012_3976_PATCHED_nsSecureBrowserUIImpl::OnStateChange(nsIWebProgress* aWebProgress,
                                                           nsIRequest* aRequest,
                                                           PRUint32 aProgressStateFlags,
                                                           nsresult aStatus)
{
#ifdef DEBUG
  nsAutoAtomic atomic(mOnStateLocationChangeReentranceDetection);
  NS_ASSERTION(mOnStateLocationChangeReentranceDetection == 1,
               "unexpected parallel nsIWebProgress OnStateChange and/or OnLocationChange notification");
#endif

  // Window the progress belongs to, versus our own (toplevel) window.
  nsCOMPtr<nsIDOMWindow> windowForProgress;
  aWebProgress->GetDOMWindow(getter_AddRefs(windowForProgress));

  nsCOMPtr<nsIDOMWindow> window;
  bool isViewSource;
  nsCOMPtr<nsINetUtil> ioService;

  {
    ReentrantMonitorAutoEnter lock(mReentrantMonitor);
    window = do_QueryReferent(mWindow);
    NS_ASSERTION(window, "Window has gone away?!");
    isViewSource = mIsViewSource;
    ioService = mIOService;
  }

  if (!ioService) {
    ioService = do_GetService(NS_IOSERVICE_CONTRACTID);
    if (ioService) {
      ReentrantMonitorAutoEnter lock(mReentrantMonitor);
      mIOService = ioService;
    }
  }

  // 204/205 responses never replace the document, so they must not count
  // as toplevel progress.
  bool isNoContentResponse = false;
  nsCOMPtr<nsIHttpChannel> httpChannel = do_QueryInterface(aRequest);
  if (httpChannel) {
    PRUint32 response;
    isNoContentResponse = NS_SUCCEEDED(httpChannel->GetResponseStatus(&response)) &&
        (response == 204 || response == 205);
  }
  const bool isToplevelProgress = (windowForProgress.get() == window.get()) && !isNoContentResponse;

#ifdef PR_LOGGING
  if (windowForProgress)
  {
    if (isToplevelProgress)
    {
      PR_LOG(gSecureDocLog, PR_LOG_DEBUG,
             ("SecureUI:%p: OnStateChange: progress: for toplevel\n", this));
    }
    else
    {
      PR_LOG(gSecureDocLog, PR_LOG_DEBUG,
             ("SecureUI:%p: OnStateChange: progress: for something else\n", this));
    }
  }
  else
  {
      PR_LOG(gSecureDocLog, PR_LOG_DEBUG,
             ("SecureUI:%p: OnStateChange: progress: no window known\n", this));
  }
#endif

  PR_LOG(gSecureDocLog, PR_LOG_DEBUG,
         ("SecureUI:%p: OnStateChange\n", this));

  if (isViewSource)
    return NS_OK;

  if (!aRequest)
  {
    PR_LOG(gSecureDocLog, PR_LOG_DEBUG,
           ("SecureUI:%p: OnStateChange with null request\n", this));
    return NS_ERROR_NULL_POINTER;
  }

#ifdef PR_LOGGING
  if (PR_LOG_TEST(gSecureDocLog, PR_LOG_DEBUG)) {
    nsXPIDLCString reqname;
    aRequest->GetName(reqname);
    PR_LOG(gSecureDocLog, PR_LOG_DEBUG,
           ("SecureUI:%p: %p %p OnStateChange %x %s\n", this, aWebProgress,
            aRequest, aProgressStateFlags, reqname.get()));
  }
#endif

  nsCOMPtr<nsISupports> securityInfo(ExtractSecurityInfo(aRequest));

  // Determine the request URI: from the channel, or from the image request.
  nsCOMPtr<nsIURI> uri;
  nsCOMPtr<nsIChannel> channel(do_QueryInterface(aRequest));
  if (channel) {
    channel->GetURI(getter_AddRefs(uri));
  }

  nsCOMPtr<imgIRequest> imgRequest(do_QueryInterface(aRequest));
  if (imgRequest) {
    NS_ASSERTION(!channel, "How did that happen, exactly?");
    // for image requests, we get the URI from here
    imgRequest->GetURI(getter_AddRefs(uri));
  }

  if (uri) {
    bool vs;
    if (NS_SUCCEEDED(uri->SchemeIs("javascript", &vs)) && vs) {
      // We ignore the progress events for javascript URLs.
      // If a document loading gets triggered, we will see more events.
      return NS_OK;
    }
  }

  PRUint32 loadFlags = 0;
  aRequest->GetLoadFlags(&loadFlags);

#ifdef PR_LOGGING
  if (aProgressStateFlags & STATE_START
      && aProgressStateFlags & STATE_IS_REQUEST
      && isToplevelProgress
      && loadFlags & nsIChannel::LOAD_DOCUMENT_URI)
  {
    PR_LOG(gSecureDocLog, PR_LOG_DEBUG,
           ("SecureUI:%p: OnStateChange: SOMETHING STARTS FOR TOPMOST DOCUMENT\n", this));
  }

  if (aProgressStateFlags & STATE_STOP
      && aProgressStateFlags & STATE_IS_REQUEST
      && isToplevelProgress
      && loadFlags & nsIChannel::LOAD_DOCUMENT_URI)
  {
    PR_LOG(gSecureDocLog, PR_LOG_DEBUG,
           ("SecureUI:%p: OnStateChange: SOMETHING STOPS FOR TOPMOST DOCUMENT\n", this));
  }
#endif

  bool isSubDocumentRelevant = true;

  // We are only interested in requests that load in the browser window...
  if (!imgRequest) { // is not imgRequest
    nsCOMPtr<nsIHttpChannel> httpRequest(do_QueryInterface(aRequest));
    if (!httpRequest) {
      nsCOMPtr<nsIFileChannel> fileRequest(do_QueryInterface(aRequest));
      if (!fileRequest) {
        nsCOMPtr<nsIWyciwygChannel> wyciwygRequest(do_QueryInterface(aRequest));
        if (!wyciwygRequest) {
          nsCOMPtr<nsIFTPChannel> ftpRequest(do_QueryInterface(aRequest));
          if (!ftpRequest) {
            PR_LOG(gSecureDocLog, PR_LOG_DEBUG,
                   ("SecureUI:%p: OnStateChange: not relevant for sub content\n", this));
            isSubDocumentRelevant = false;
          }
        }
      }
    }
  }

  // This will ignore all resource, chrome, data, file, moz-icon, and anno
  // protocols. Local resources are treated as trusted.
  if (uri && ioService) {
    bool hasFlag;
    nsresult rv =
      ioService->URIChainHasFlags(uri,
                                  nsIProtocolHandler::URI_IS_LOCAL_RESOURCE,
                                  &hasFlag);
    if (NS_SUCCEEDED(rv) && hasFlag) {
      isSubDocumentRelevant = false;
    }
  }

#if defined(DEBUG)
  // Debug-only: decode the flag words into readable strings for the log.
  nsCString info2;
  PRUint32 testFlags = loadFlags;

  if (testFlags & nsIChannel::LOAD_DOCUMENT_URI)
  {
    testFlags -= nsIChannel::LOAD_DOCUMENT_URI;
    info2.Append("LOAD_DOCUMENT_URI ");
  }
  if (testFlags & nsIChannel::LOAD_RETARGETED_DOCUMENT_URI)
  {
    testFlags -= nsIChannel::LOAD_RETARGETED_DOCUMENT_URI;
    info2.Append("LOAD_RETARGETED_DOCUMENT_URI ");
  }
  if (testFlags & nsIChannel::LOAD_REPLACE)
  {
    testFlags -= nsIChannel::LOAD_REPLACE;
    info2.Append("LOAD_REPLACE ");
  }

  const char *_status = NS_SUCCEEDED(aStatus) ? "1" : "0";

  nsCString info;
  PRUint32 f = aProgressStateFlags;
  if (f & nsIWebProgressListener::STATE_START)
  {
    f -= nsIWebProgressListener::STATE_START;
    info.Append("START ");
  }
  if (f & nsIWebProgressListener::STATE_REDIRECTING)
  {
    f -= nsIWebProgressListener::STATE_REDIRECTING;
    info.Append("REDIRECTING ");
  }
  if (f & nsIWebProgressListener::STATE_TRANSFERRING)
  {
    f -= nsIWebProgressListener::STATE_TRANSFERRING;
    info.Append("TRANSFERRING ");
  }
  if (f & nsIWebProgressListener::STATE_NEGOTIATING)
  {
    f -= nsIWebProgressListener::STATE_NEGOTIATING;
    info.Append("NEGOTIATING ");
  }
  if (f & nsIWebProgressListener::STATE_STOP)
  {
    f -= nsIWebProgressListener::STATE_STOP;
    info.Append("STOP ");
  }
  if (f & nsIWebProgressListener::STATE_IS_REQUEST)
  {
    f -= nsIWebProgressListener::STATE_IS_REQUEST;
    info.Append("IS_REQUEST ");
  }
  if (f & nsIWebProgressListener::STATE_IS_DOCUMENT)
  {
    f -= nsIWebProgressListener::STATE_IS_DOCUMENT;
    info.Append("IS_DOCUMENT ");
  }
  if (f & nsIWebProgressListener::STATE_IS_NETWORK)
  {
    f -= nsIWebProgressListener::STATE_IS_NETWORK;
    info.Append("IS_NETWORK ");
  }
  if (f & nsIWebProgressListener::STATE_IS_WINDOW)
  {
    f -= nsIWebProgressListener::STATE_IS_WINDOW;
    info.Append("IS_WINDOW ");
  }
  if (f & nsIWebProgressListener::STATE_IS_INSECURE)
  {
    f -= nsIWebProgressListener::STATE_IS_INSECURE;
    info.Append("IS_INSECURE ");
  }
  if (f & nsIWebProgressListener::STATE_IS_BROKEN)
  {
    f -= nsIWebProgressListener::STATE_IS_BROKEN;
    info.Append("IS_BROKEN ");
  }
  if (f & nsIWebProgressListener::STATE_IS_SECURE)
  {
    f -= nsIWebProgressListener::STATE_IS_SECURE;
    info.Append("IS_SECURE ");
  }
  if (f & nsIWebProgressListener::STATE_SECURE_HIGH)
  {
    f -= nsIWebProgressListener::STATE_SECURE_HIGH;
    info.Append("SECURE_HIGH ");
  }
  if (f & nsIWebProgressListener::STATE_SECURE_MED)
  {
    f -= nsIWebProgressListener::STATE_SECURE_MED;
    info.Append("SECURE_MED ");
  }
  if (f & nsIWebProgressListener::STATE_SECURE_LOW)
  {
    f -= nsIWebProgressListener::STATE_SECURE_LOW;
    info.Append("SECURE_LOW ");
  }
  if (f & nsIWebProgressListener::STATE_RESTORING)
  {
    f -= nsIWebProgressListener::STATE_RESTORING;
    info.Append("STATE_RESTORING ");
  }

  if (f > 0)
  {
    info.Append("f contains unknown flag!");
  }

  PR_LOG(gSecureDocLog, PR_LOG_DEBUG,
         ("SecureUI:%p: OnStateChange: %s %s -- %s\n", this, _status,
          info.get(), info2.get()));

  if (aProgressStateFlags & STATE_STOP
      && channel)
  {
    PR_LOG(gSecureDocLog, PR_LOG_DEBUG,
           ("SecureUI:%p: OnStateChange: seeing STOP with security state: %d\n", this,
            GetSecurityStateFromSecurityInfo(securityInfo)
            ));
  }
#endif

  if (aProgressStateFlags & STATE_TRANSFERRING
      && aProgressStateFlags & STATE_IS_REQUEST)
  {
    // The listing of a request in mTransferringRequests
    // means, there has already been data transfered.
    ReentrantMonitorAutoEnter lock(mReentrantMonitor);
    PL_DHashTableOperate(&mTransferringRequests, aRequest, PL_DHASH_ADD);

    return NS_OK;
  }

  bool requestHasTransferedData = false;

  if (aProgressStateFlags & STATE_STOP
      && aProgressStateFlags & STATE_IS_REQUEST)
  {
    { /* scope for the ReentrantMonitorAutoEnter */
      ReentrantMonitorAutoEnter lock(mReentrantMonitor);
      PLDHashEntryHdr *entry = PL_DHashTableOperate(&mTransferringRequests, aRequest, PL_DHASH_LOOKUP);
      if (PL_DHASH_ENTRY_IS_BUSY(entry))
      {
        PL_DHashTableOperate(&mTransferringRequests, aRequest, PL_DHASH_REMOVE);

        requestHasTransferedData = true;
      }
    }

    if (!requestHasTransferedData) {
      // Because image loads doesn't support any TRANSFERRING notifications but
      // only START and STOP we must ask them directly whether content was
      // transferred.  See bug 432685 for details.
      nsCOMPtr<nsISecurityInfoProvider> securityInfoProvider =
        do_QueryInterface(aRequest);
      // Guess true in all failure cases to be safe.  But if we're not
      // an nsISecurityInfoProvider, then we just haven't transferred
      // any data.
      bool hasTransferred;
      requestHasTransferedData =
        securityInfoProvider &&
        (NS_FAILED(securityInfoProvider->GetHasTransferredData(&hasTransferred)) ||
         hasTransferred);
    }
  }

  bool allowSecurityStateChange = true;
  if (loadFlags & nsIChannel::LOAD_RETARGETED_DOCUMENT_URI)
  {
    // The original consumer (this) is no longer the target of the load.
    // Ignore any events with this flag, do not allow them to update
    // our secure UI state.
    allowSecurityStateChange = false;
  }

  // ----- START of a toplevel document load -----
  if (aProgressStateFlags & STATE_START
      && aProgressStateFlags & STATE_IS_REQUEST
      && isToplevelProgress
      && loadFlags & nsIChannel::LOAD_DOCUMENT_URI)
  {
    bool inProgress;

    PRInt32 saveSubHigh;
    PRInt32 saveSubLow;
    PRInt32 saveSubBroken;
    PRInt32 saveSubNo;
    nsCOMPtr<nsIAssociatedContentSecurity> prevContentSecurity;

    PRInt32 newSubHigh = 0;
    PRInt32 newSubLow = 0;
    PRInt32 newSubBroken = 0;
    PRInt32 newSubNo = 0;

    {
      ReentrantMonitorAutoEnter lock(mReentrantMonitor);
      inProgress = (mDocumentRequestsInProgress!=0);

      if (allowSecurityStateChange && !inProgress)
      {
        saveSubHigh = mSubRequestsHighSecurity;
        saveSubLow = mSubRequestsLowSecurity;
        saveSubBroken = mSubRequestsBrokenSecurity;
        saveSubNo = mSubRequestsNoSecurity;
        prevContentSecurity = do_QueryInterface(mCurrentToplevelSecurityInfo);
      }
    }

    if (allowSecurityStateChange && !inProgress)
    {
      PR_LOG(gSecureDocLog, PR_LOG_DEBUG,
             ("SecureUI:%p: OnStateChange: start for toplevel document\n", this
              ));

      if (prevContentSecurity)
      {
        PR_LOG(gSecureDocLog, PR_LOG_DEBUG,
               ("SecureUI:%p: OnStateChange: start, saving current sub state\n", this
                ));

        // before resetting our state, let's save information about
        // sub element loads, so we can restore it later
        prevContentSecurity->SetCountSubRequestsHighSecurity(saveSubHigh);
        prevContentSecurity->SetCountSubRequestsLowSecurity(saveSubLow);
        prevContentSecurity->SetCountSubRequestsBrokenSecurity(saveSubBroken);
        prevContentSecurity->SetCountSubRequestsNoSecurity(saveSubNo);
        prevContentSecurity->Flush();
        PR_LOG(gSecureDocLog, PR_LOG_DEBUG,
               ("SecureUI:%p: Saving subs in START to %p as %d,%d,%d,%d\n",
                this, prevContentSecurity.get(), saveSubHigh, saveSubLow,
                saveSubBroken, saveSubNo));
      }

      bool retrieveAssociatedState = false;

      if (securityInfo &&
          (aProgressStateFlags & nsIWebProgressListener::STATE_RESTORING) != 0) {
        retrieveAssociatedState = true;
      } else {
        nsCOMPtr<nsIWyciwygChannel> wyciwygRequest(do_QueryInterface(aRequest));
        if (wyciwygRequest) {
          retrieveAssociatedState = true;
        }
      }

      if (retrieveAssociatedState)
      {
        // When restoring from bfcache, we will not get events for the
        // page's sub elements, so let's load the state of sub elements
        // from the cache.
        nsCOMPtr<nsIAssociatedContentSecurity>
          newContentSecurity(do_QueryInterface(securityInfo));

        if (newContentSecurity)
        {
          PR_LOG(gSecureDocLog, PR_LOG_DEBUG,
                 ("SecureUI:%p: OnStateChange: start, loading old sub state\n", this
                  ));

          newContentSecurity->GetCountSubRequestsHighSecurity(&newSubHigh);
          newContentSecurity->GetCountSubRequestsLowSecurity(&newSubLow);
          newContentSecurity->GetCountSubRequestsBrokenSecurity(&newSubBroken);
          newContentSecurity->GetCountSubRequestsNoSecurity(&newSubNo);
          PR_LOG(gSecureDocLog, PR_LOG_DEBUG,
                 ("SecureUI:%p: Restoring subs in START from %p to %d,%d,%d,%d\n",
                  this, newContentSecurity.get(), newSubHigh, newSubLow,
                  newSubBroken, newSubNo));
        }
      }
      else
      {
        // If we don't get OnLocationChange for this top level load later,
        // it didn't get rendered.  But we reset the state to unknown and
        // mSubRequests* to zeros.  If we would have left these values after
        // this top level load stoped, we would override the original top level
        // load with all zeros and break mixed content state on back and forward.
        mRestoreSubrequests = true;
      }
    }

    {
      ReentrantMonitorAutoEnter lock(mReentrantMonitor);

      if (allowSecurityStateChange && !inProgress)
      {
        ResetStateTracking();
        mSubRequestsHighSecurity = newSubHigh;
        mSubRequestsLowSecurity = newSubLow;
        mSubRequestsBrokenSecurity = newSubBroken;
        mSubRequestsNoSecurity = newSubNo;
        mNewToplevelSecurityStateKnown = false;
      }

      // By using a counter, this code also works when the toplevel
      // document get's redirected, but the STOP request for the
      // previous toplevel document has not yet have been received.
      PR_LOG(gSecureDocLog, PR_LOG_DEBUG,
             ("SecureUI:%p: OnStateChange: ++mDocumentRequestsInProgress\n", this
              ));
      ++mDocumentRequestsInProgress;
    }

    return NS_OK;
  }

  // ----- STOP of a toplevel document load -----
  if (aProgressStateFlags & STATE_STOP
      && aProgressStateFlags & STATE_IS_REQUEST
      && isToplevelProgress
      && loadFlags & nsIChannel::LOAD_DOCUMENT_URI)
  {
    PRInt32 temp_DocumentRequestsInProgress;
    nsCOMPtr<nsISecurityEventSink> temp_ToplevelEventSink;

    {
      ReentrantMonitorAutoEnter lock(mReentrantMonitor);
      temp_DocumentRequestsInProgress = mDocumentRequestsInProgress;
      if (allowSecurityStateChange)
      {
        temp_ToplevelEventSink = mToplevelEventSink;
      }
    }

    if (temp_DocumentRequestsInProgress <= 0)
    {
      // Ignore stop requests unless a document load is in progress
      // Unfortunately on application start, see some stops without having seen any starts...
      return NS_OK;
    }

    PR_LOG(gSecureDocLog, PR_LOG_DEBUG,
           ("SecureUI:%p: OnStateChange: --mDocumentRequestsInProgress\n", this
            ));

    if (!temp_ToplevelEventSink && channel)
    {
      if (allowSecurityStateChange)
      {
        ObtainEventSink(channel, temp_ToplevelEventSink);
      }
    }

    bool sinkChanged = false;
    bool inProgress;
    {
      ReentrantMonitorAutoEnter lock(mReentrantMonitor);
      if (allowSecurityStateChange)
      {
        sinkChanged = (mToplevelEventSink != temp_ToplevelEventSink);
        mToplevelEventSink = temp_ToplevelEventSink;
      }
      --mDocumentRequestsInProgress;
      inProgress = mDocumentRequestsInProgress > 0;
    }

    if (allowSecurityStateChange && requestHasTransferedData)
    {
      // Data has been transferred for the single toplevel
      // request.  Evaluate the security state.

      // Do this only when the sink has changed.  We update and notify
      // the state from OnLacationChange, this is actually redundant.
      // But when the target sink changes between OnLocationChange and
      // OnStateChange, we have to fire the notification here (again).

      if (sinkChanged || mOnLocationChangeSeen)
        return EvaluateAndUpdateSecurityState(aRequest, securityInfo, false);
    }
    mOnLocationChangeSeen = false;

    if (mRestoreSubrequests && !inProgress)
    {
      // We get here when there were no OnLocationChange between
      // OnStateChange(START) and OnStateChange(STOP).  Then the load has not
      // been rendered but has been retargeted in some other way then by external
      // app handler.  Restore mSubRequests* members to what the current security
      // state info holds (it was reset to all zero in OnStateChange(START)
      // before).
      nsCOMPtr<nsIAssociatedContentSecurity> currentContentSecurity;
      {
        ReentrantMonitorAutoEnter lock(mReentrantMonitor);
        currentContentSecurity = do_QueryInterface(mCurrentToplevelSecurityInfo);

        // Drop this indication flag, the restore opration is just being
        // done.
        mRestoreSubrequests = false;

        // We can do this since the state didn't actually change.
        mNewToplevelSecurityStateKnown = true;
      }

      PRInt32 subHigh = 0;
      PRInt32 subLow = 0;
      PRInt32 subBroken = 0;
      PRInt32 subNo = 0;

      if (currentContentSecurity)
      {
        currentContentSecurity->GetCountSubRequestsHighSecurity(&subHigh);
        currentContentSecurity->GetCountSubRequestsLowSecurity(&subLow);
        currentContentSecurity->GetCountSubRequestsBrokenSecurity(&subBroken);
        currentContentSecurity->GetCountSubRequestsNoSecurity(&subNo);
        PR_LOG(gSecureDocLog, PR_LOG_DEBUG,
               ("SecureUI:%p: Restoring subs in STOP from %p to %d,%d,%d,%d\n",
                this, currentContentSecurity.get(), subHigh, subLow, subBroken,
                subNo));
      }

      {
        ReentrantMonitorAutoEnter lock(mReentrantMonitor);
        mSubRequestsHighSecurity = subHigh;
        mSubRequestsLowSecurity = subLow;
        mSubRequestsBrokenSecurity = subBroken;
        mSubRequestsNoSecurity = subNo;
      }
    }

    return NS_OK;
  }

  // ----- STOP of a (non-toplevel) subrequest -----
  if (aProgressStateFlags & STATE_STOP
      && aProgressStateFlags & STATE_IS_REQUEST)
  {
    if (!isSubDocumentRelevant)
      return NS_OK;

    // if we arrive here, LOAD_DOCUMENT_URI is not set

    // We only care for the security state of sub requests which have actually transfered data.

    if (allowSecurityStateChange && requestHasTransferedData)
    {
      UpdateSubrequestMembers(securityInfo);

      // Care for the following scenario:
      // A new top level document load might have already started,
      // but the security state of the new top level document might not yet been known.
      //
      // At this point, we are learning about the security state of a sub-document.
      // We must not update the security state based on the sub content,
      // if the new top level state is not yet known.
      //
      // We skip updating the security state in this case.

      bool temp_NewToplevelSecurityStateKnown;
      {
        ReentrantMonitorAutoEnter lock(mReentrantMonitor);
        temp_NewToplevelSecurityStateKnown = mNewToplevelSecurityStateKnown;
      }

      if (temp_NewToplevelSecurityStateKnown)
        return UpdateSecurityState(aRequest, false, false, false);
    }

    return NS_OK;
  }

  return NS_OK;
}
155
73,576
0
/* Add one 16-byte UUID field to the tree and return the advanced offset. */
static gint
add_afp_uuid_field(proto_tree *tree, tvbuff_t *tvb, int hf, gint offset)
{
	proto_tree_add_item(tree, hf, tvb, offset, 16, ENC_BIG_ENDIAN);
	return offset + 16;
}

/*
 * Dissect the optional UUID / group-UUID / ACL portion of an AFP
 * access-control PDU, as selected by the bitmap.
 * Returns the offset just past the dissected data.
 */
static gint
CVE_2012_4289_PATCHED_decode_uuid_acl(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, gint offset, guint16 bitmap)
{
	/* re-align when we are at an odd offset */
	if (offset & 1)
		PAD(1);

	if (bitmap & kFileSec_UUID)
		offset = add_afp_uuid_field(tree, tvb, hf_afp_UUID, offset);

	if (bitmap & kFileSec_GRPUUID)
		offset = add_afp_uuid_field(tree, tvb, hf_afp_GRPUUID, offset);

	if (bitmap & kFileSec_ACL)
		offset = decode_kauth_acl(tvb, pinfo, tree, offset);

	return offset;
}
156
70,527
0
/*
 * Dissect an AFP FPSetACL request: pad byte, volume/directory id, ACL-list
 * bitmap, pathname, then the bitmap-selected UUID/ACL payload.
 * Returns the offset just past the dissected data.
 */
static gint
CVE_2012_4289_PATCHED_dissect_query_afp_set_acl(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, gint offset)
{
	guint16 acl_bitmap;

	PAD(1); /* skip the pad byte */

	offset = decode_vol_did(tree, tvb, offset);

	acl_bitmap = decode_acl_list_bitmap(tvb, tree, offset);
	offset += 2; /* the bitmap field is 16 bits wide */

	offset = decode_name(tree, pinfo, tvb, offset);

	return decode_uuid_acl(tvb, pinfo, tree, offset, acl_bitmap);
}
157
121,399
0
/*
 * Dissect an sFlow v2/4/5 network address: a 4-byte type tag followed by
 * the address bytes (4 for IPv4, 16 for IPv6).  Optionally records the
 * type and raw bytes into *addr_detail.  Returns the offset just past the
 * address; for an unknown type only the tag is consumed.
 */
static gint
CVE_2012_6054_PATCHED_dissect_sflow_245_address_type(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
                                gint offset,
                                struct sflow_address_type *hf_type,
                                struct sflow_address_details *addr_detail) {
    guint32 addr_type;
    int len;

    addr_type = tvb_get_ntohl(tvb, offset);
    offset += 4;

    if (addr_type == ADDR_TYPE_IPV4) {
        len = 4;
        proto_tree_add_item(tree, hf_type->hf_addr_v4, tvb, offset, 4, ENC_BIG_ENDIAN);
    } else if (addr_type == ADDR_TYPE_IPV6) {
        len = 16;
        proto_tree_add_item(tree, hf_type->hf_addr_v6, tvb, offset, 16, ENC_NA);
    } else {
        /* Unknown/invalid address type: the length cannot be known.
           Using 0 is safe -- the caller advances by our return value,
           so we cannot get stuck in an endless loop. */
        proto_item *pi;

        len = 0;
        pi = proto_tree_add_text(tree, tvb, offset - 4, 4,
                                 "Unknown address type (%u)", addr_type);
        expert_add_info_format(pinfo, pi, PI_MALFORMED, PI_ERROR,
                               "Unknown/invalid address type");
    }

    if (addr_detail) {
        addr_detail->addr_type = addr_type;
        if (len == 4)
            tvb_memcpy(tvb, addr_detail->agent_address.v4, offset, len);
        else if (len == 16)
            tvb_memcpy(tvb, addr_detail->agent_address.v6, offset, len);
    }

    return offset + len;
}
158
31,221
0
static int CVE_2012_6538_PATCHED_copy_to_user_auth(struct xfrm_algo_auth *auth, struct sk_buff *skb) { struct xfrm_algo *algo; struct nlattr *nla; nla = nla_reserve(skb, XFRMA_ALG_AUTH, sizeof(*algo) + (auth->alg_key_len + 7) / 8); if (!nla) return -EMSGSIZE; algo = nla_data(nla); strncpy(algo->alg_name, auth->alg_name, sizeof(algo->alg_name)); memcpy(algo->alg_key, auth->alg_key, (auth->alg_key_len + 7) / 8); algo->alg_key_len = auth->alg_key_len; return 0; }
159
58,741
0
/*
 * Translate the guest-physical range [addr, addr + len) into the
 * caller-supplied iovec array using the device's memory region table
 * (read under RCU).
 *
 * Returns the number of iovecs filled on success, -ENOBUFS if more than
 * iov_size entries would be needed, or -EFAULT if some part of the range
 * is not covered by any region.
 */
static int CVE_2013_0311_PATCHED_translate_desc(struct vhost_dev *dev, u64 addr, u32 len,
						struct iovec iov[], int iov_size)
{
	const struct vhost_memory_region *reg;
	struct vhost_memory *mem;
	struct iovec *_iov;
	u64 s = 0;	/* bytes of the range translated so far */
	int ret = 0;

	rcu_read_lock();

	mem = rcu_dereference(dev->memory);
	while ((u64)len > s) {
		u64 size;
		/* Capacity check done BEFORE iov[ret] is written below, so we
		   never index past the end of the caller's array. */
		if (unlikely(ret >= iov_size)) {
			ret = -ENOBUFS;
			break;
		}
		reg = find_region(mem, addr, len);
		if (unlikely(!reg)) {
			ret = -EFAULT;
			break;
		}
		_iov = iov + ret;
		/* bytes remaining in this region starting at addr */
		size = reg->memory_size - addr + reg->guest_phys_addr;
		_iov->iov_len = min((u64)len - s, size);
		_iov->iov_base = (void __user *)(unsigned long)
			(reg->userspace_addr + addr - reg->guest_phys_addr);
		s += size;
		addr += size;
		++ret;
	}

	rcu_read_unlock();
	return ret;
}
160
33,056
0
/**
 * Implements navigator.vibrate(): validates aPattern (a single duration or
 * an array of durations), then asks HAL to vibrate and installs a
 * visibility listener that cancels the vibration if the document hides.
 *
 * Returns NS_OK when vibration is started, suppressed (hidden document,
 * vibrator disabled), or has no owning window; returns
 * NS_ERROR_DOM_NOT_SUPPORTED_ERR for an invalid pattern and
 * NS_ERROR_FAILURE when the window has no document.
 */
NS_IMETHODIMP CVE_2013_0755_PATCHED_Navigator::Vibrate(const jsval& aPattern, JSContext* cx)
{
  nsCOMPtr<nsPIDOMWindow> win = do_QueryReferent(mWindow);
  NS_ENSURE_TRUE(win, NS_OK);

  nsCOMPtr<nsIDOMDocument> domDoc = win->GetExtantDocument();
  NS_ENSURE_TRUE(domDoc, NS_ERROR_FAILURE);

  bool hidden = true;
  domDoc->GetMozHidden(&hidden);
  if (hidden) {
    // Hidden documents cannot start or stop a vibration.
    return NS_OK;
  }

  nsAutoTArray<uint32_t, 8> pattern;

  // null or undefined pattern is an error.
  if (JSVAL_IS_NULL(aPattern) || JSVAL_IS_VOID(aPattern)) {
    return NS_ERROR_DOM_NOT_SUPPORTED_ERR;
  }

  if (JSVAL_IS_PRIMITIVE(aPattern)) {
    // Single-duration form: vibrate(n).
    int32_t p;
    if (GetVibrationDurationFromJsval(aPattern, cx, &p)) {
      pattern.AppendElement(p);
    } else {
      return NS_ERROR_DOM_NOT_SUPPORTED_ERR;
    }
  } else {
    // Array form: vibrate([on, off, on, ...]).  Reject over-long lists
    // and any element that is not a valid duration.
    JSObject *obj = JSVAL_TO_OBJECT(aPattern);
    PRUint32 length;
    if (!JS_GetArrayLength(cx, obj, &length) || length > sMaxVibrateListLen) {
      return NS_ERROR_DOM_NOT_SUPPORTED_ERR;
    }
    pattern.SetLength(length);
    for (PRUint32 i = 0; i < length; ++i) {
      jsval v;
      int32_t pv;
      if (JS_GetElement(cx, obj, i, &v) &&
          GetVibrationDurationFromJsval(v, cx, &pv)) {
        pattern[i] = pv;
      } else {
        return NS_ERROR_DOM_NOT_SUPPORTED_ERR;
      }
    }
  }

  // The spec says we check sVibratorEnabled after we've done the sanity
  // checking on the pattern.
  if (!sVibratorEnabled) {
    return NS_OK;
  }

  // Add a listener to cancel the vibration if the document becomes hidden,
  // and remove the old mozvisibility listener, if there was one.
  if (!gVibrateWindowListener) {
    // If gVibrateWindowListener is null, this is the first time we've vibrated,
    // and we need to register a listener to clear gVibrateWindowListener on
    // shutdown.
    ClearOnShutdown(&gVibrateWindowListener);
  } else {
    gVibrateWindowListener->RemoveListener();
  }
  gVibrateWindowListener = new VibrateWindowListener(win, domDoc);

  nsCOMPtr<nsIDOMWindow> domWindow =
    do_QueryInterface(static_cast<nsIDOMWindow*>(win));
  hal::Vibrate(pattern, domWindow);
  return NS_OK;
}
161
109,600
0
/**
 * Create an output track in this TrackUnionStream mirroring the input
 * track aTrack arriving via aPort, starting at graph time aFrom.
 *
 * Notifies all registered listeners of the new track, pads the output
 * track with null data up to its start time, and records the
 * input-to-output mapping.  Returns the index of the new entry in
 * mTrackMap.
 */
uint32_t CVE_2013_0761_PATCHED_AddTrack(MediaInputPort* aPort, StreamBuffer::Track* aTrack,
                                        GraphTime aFrom)
{
  // Use the ID of the source track if we can, otherwise allocate a new
  // unique ID
  TrackID id = NS_MAX(mMaxTrackID + 1, aTrack->GetID());
  mMaxTrackID = id;
  TrackRate rate = aTrack->GetRate();
  // Round up the track start time so the track, if anything, starts a
  // little later than the true time. This means we'll have enough
  // samples in our input stream to go just beyond the destination time.
  TrackTicks outputStart = TimeToTicksRoundUp(rate, GraphTimeToStreamTime(aFrom));

  nsAutoPtr<MediaSegment> segment;
  segment = aTrack->GetSegment()->CreateEmptyClone();
  // Tell listeners about the new track before any data is appended to it.
  for (uint32_t j = 0; j < mListeners.Length(); ++j) {
    MediaStreamListener* l = mListeners[j];
    l->NotifyQueuedTrackChanges(Graph(), id, rate, outputStart,
                                MediaStreamListener::TRACK_EVENT_CREATED,
                                *segment);
  }
  // Pad with silence/blank data up to the (rounded-up) start time.
  segment->AppendNullData(outputStart);
  StreamBuffer::Track* track =
    &mBuffer.CVE_2013_0761_PATCHED_AddTrack(id, rate, outputStart, segment.forget());
  LOG(PR_LOG_DEBUG, ("TrackUnionStream %p adding track %d for input stream %p track %d, start ticks %lld", this, id, aPort->GetSource(), aTrack->GetID(), (long long)outputStart));

  // Record the mapping so ProduceOutput can route future input data.
  TrackMapEntry* map = mTrackMap.AppendElement();
  map->mInputPort = aPort;
  map->mInputTrackID = aTrack->GetID();
  map->mOutputTrackID = track->GetID();
  map->mSegment = aTrack->GetSegment()->CreateEmptyClone();
  return mTrackMap.Length() - 1;
}
162
40,686
0
/**
 * Produce output for the interval [aFrom, aTo): copy data from every
 * input stream's tracks into the corresponding mapped output tracks,
 * creating new mappings for input tracks seen for the first time,
 * ending output tracks whose inputs finished, and pruning map entries
 * whose input track disappeared.  Finishes the whole stream when every
 * input has finished and mAutofinish is set.
 */
virtual void CVE_2013_0761_PATCHED_ProduceOutput(GraphTime aFrom, GraphTime aTo)
{
  // Per-map-entry flags, indexed in parallel with mTrackMap.
  nsAutoTArray<bool,8> mappedTracksFinished;
  nsAutoTArray<bool,8> mappedTracksWithMatchingInputTracks;
  for (uint32_t i = 0; i < mTrackMap.Length(); ++i) {
    mappedTracksFinished.AppendElement(true);
    mappedTracksWithMatchingInputTracks.AppendElement(false);
  }
  bool allFinished = true;
  for (uint32_t i = 0; i < mInputs.Length(); ++i) {
    MediaStream* stream = mInputs[i]->GetSource();
    if (!stream->IsFinishedOnGraphThread()) {
      allFinished = false;
    }
    for (StreamBuffer::TrackIter tracks(stream->GetStreamBuffer());
         !tracks.IsEnded(); tracks.Next()) {
      bool found = false;
      // Look for an existing mapping for this (port, input track) pair.
      for (uint32_t j = 0; j < mTrackMap.Length(); ++j) {
        TrackMapEntry* map = &mTrackMap[j];
        if (map->mInputPort == mInputs[i] && map->mInputTrackID == tracks->GetID()) {
          bool trackFinished;
          StreamBuffer::Track* outputTrack = mBuffer.FindTrack(map->mOutputTrackID);
          if (!outputTrack || outputTrack->IsEnded()) {
            trackFinished = true;
          } else {
            CopyTrackData(tracks.get(), j, aFrom, aTo, &trackFinished);
          }
          mappedTracksFinished[j] = trackFinished;
          mappedTracksWithMatchingInputTracks[j] = true;
          found = true;
          break;
        }
      }
      if (!found) {
        // First time we've seen this input track: create a mapping and
        // copy its data for this interval.
        bool trackFinished = false;
        uint32_t mapIndex = AddTrack(mInputs[i], tracks.get(), aFrom);
        CopyTrackData(tracks.get(), mapIndex, aFrom, aTo, &trackFinished);
        mappedTracksFinished.AppendElement(trackFinished);
        mappedTracksWithMatchingInputTracks.AppendElement(true);
      }
    }
  }
  // Iterate backwards so RemoveElementAt doesn't shift unvisited entries.
  for (int32_t i = mTrackMap.Length() - 1; i >= 0; --i) {
    if (mappedTracksFinished[i]) {
      EndTrack(i);
    } else {
      allFinished = false;
    }
    if (!mappedTracksWithMatchingInputTracks[i]) {
      mTrackMap.RemoveElementAt(i);
    }
  }
  if (allFinished && mAutofinish) {
    // All streams have finished and won't add any more tracks, and
    // all our tracks have actually finished and been removed from our map,
    // so we're finished now.
    FinishOnGraphThread();
  }
  mBuffer.AdvanceKnownTracksTime(GraphTimeToStreamTime(aTo));
}
163
86,822
0
/*
 * Feed a buffer of compressed GIF data (q, mGIFStruct.count bytes) into
 * the LZW decoder, expanding codes onto the decoder stack and emitting
 * decoded pixels into mImageData row by row via OutputRow().  All decoder
 * state (bit accumulator, code size, dictionary availability, stack and
 * row pointers) is copied into locals on entry and homed back into
 * mGIFStruct before returning, so exact statement order matters here.
 *
 * Returns false on malformed LZW data (codes out of range, dictionary
 * stack overflow, self-referential prefix chains); returns true otherwise,
 * including the early-out when no rows remain.  An explicit end-of-stream
 * code is only valid once all rows have been produced.
 */
bool CVE_2013_0772_PATCHED_nsGIFDecoder2::DoLzw(const uint8_t *q) { if (!mGIFStruct.rows_remaining) return true; /* Copy all the decoder state variables into locals so the compiler * won't worry about them being aliased. The locals will be homed * back into the GIF decoder structure when we exit. */ int avail = mGIFStruct.avail; int bits = mGIFStruct.bits; int codesize = mGIFStruct.codesize; int codemask = mGIFStruct.codemask; int count = mGIFStruct.count; int oldcode = mGIFStruct.oldcode; const int clear_code = ClearCode(); uint8_t firstchar = mGIFStruct.firstchar; int32_t datum = mGIFStruct.datum; uint16_t *prefix = mGIFStruct.prefix; uint8_t *stackp = mGIFStruct.stackp; uint8_t *suffix = mGIFStruct.suffix; uint8_t *stack = mGIFStruct.stack; uint8_t *rowp = mGIFStruct.rowp; uint32_t bpr = mGIFStruct.width; if (!mGIFStruct.images_decoded) bpr *= sizeof(uint32_t); uint8_t *rowend = mImageData + (bpr * mGIFStruct.irow) + mGIFStruct.width; #define OUTPUT_ROW() \ PR_BEGIN_MACRO \ if (!OutputRow()) \ goto END; \ rowp = mImageData + mGIFStruct.irow * bpr; \ rowend = rowp + mGIFStruct.width; \ PR_END_MACRO for (const uint8_t* ch = q; count-- > 0; ch++) { /* Feed the next byte into the decoder's 32-bit input buffer. */ datum += ((int32) *ch) << bits; bits += 8; /* Check for underflow of decoder's 32-bit input buffer. 
*/ while (bits >= codesize) { /* Get the leading variable-length symbol from the data stream */ int code = datum & codemask; datum >>= codesize; bits -= codesize; /* Reset the dictionary to its original state, if requested */ if (code == clear_code) { codesize = mGIFStruct.datasize + 1; codemask = (1 << codesize) - 1; avail = clear_code + 2; oldcode = -1; continue; } /* Check for explicit end-of-stream code */ if (code == (clear_code + 1)) { /* end-of-stream should only appear after all image data */ return (mGIFStruct.rows_remaining == 0); } if (oldcode == -1) { if (code >= MAX_BITS) return false; *rowp++ = suffix[code] & mColorMask; // ensure index is within colormap if (rowp == rowend) OUTPUT_ROW(); firstchar = oldcode = code; continue; } int incode = code; if (code >= avail) { *stackp++ = firstchar; code = oldcode; if (stackp >= stack + MAX_BITS) return false; } while (code >= clear_code) { if ((code >= MAX_BITS) || (code == prefix[code])) return false; *stackp++ = suffix[code]; code = prefix[code]; if (stackp == stack + MAX_BITS) return false; } *stackp++ = firstchar = suffix[code]; /* Define a new codeword in the dictionary. */ if (avail < 4096) { prefix[avail] = oldcode; suffix[avail] = firstchar; avail++; /* If we've used up all the codewords of a given length * increase the length of codewords by one bit, but don't * exceed the specified maximum codeword size of 12 bits. */ if (((avail & codemask) == 0) && (avail < 4096)) { codesize++; codemask += avail; } } oldcode = incode; /* Copy the decoded data out to the scanline buffer. 
*/ do { *rowp++ = *--stackp & mColorMask; // ensure index is within colormap if (rowp == rowend) OUTPUT_ROW(); } while (stackp > stack); } } END: /* Home the local copies of the GIF decoder state variables */ mGIFStruct.avail = avail; mGIFStruct.bits = bits; mGIFStruct.codesize = codesize; mGIFStruct.codemask = codemask; mGIFStruct.count = count; mGIFStruct.oldcode = oldcode; mGIFStruct.firstchar = firstchar; mGIFStruct.datum = datum; mGIFStruct.stackp = stackp; mGIFStruct.rowp = rowp; return true; }
164
40,262
0
/**
 * Handle the start of a byte-range response for a plugin stream.
 *
 * First tries to set up a multipart/byteranges stream converter that
 * forwards converted data to the tracked plugin stream listener peer.
 * If conversion cannot be set up (e.g. the server ignored the range
 * request), falls back to serving the whole response to the plugin as a
 * file — but only for a 200 response, or a non-200 response when the
 * plugin explicitly asks for all network streams.
 */
NS_IMETHODIMP
CVE_2013_0790_PATCHED_nsPluginByteRangeStreamListener::OnStartRequest(nsIRequest *request, nsISupports *ctxt)
{
  nsresult rv;

  nsCOMPtr<nsIStreamListener> finalStreamListener = do_QueryReferent(mWeakPtrPluginStreamListenerPeer);
  if (!finalStreamListener)
    return NS_ERROR_FAILURE;

  nsPluginStreamListenerPeer *pslp =
    static_cast<nsPluginStreamListenerPeer*>(finalStreamListener.get());

  NS_ASSERTION(pslp->mRequests.IndexOfObject(GetBaseRequest(request)) != -1,
               "Untracked byte-range request?");

  // Preferred path: convert multipart/byteranges into individual parts.
  nsCOMPtr<nsIStreamConverterService> serv = do_GetService(NS_STREAMCONVERTERSERVICE_CONTRACTID, &rv);
  if (NS_SUCCEEDED(rv)) {
    rv = serv->AsyncConvertData(MULTIPART_BYTERANGES,
                                "*/*",
                                finalStreamListener,
                                nullptr,
                                getter_AddRefs(mStreamConverter));
    if (NS_SUCCEEDED(rv)) {
      rv = mStreamConverter->OnStartRequest(request, ctxt);
      if (NS_SUCCEEDED(rv))
        return rv;
    }
  }

  // Converter setup failed; drop it before attempting the fallback.
  mStreamConverter = 0;

  nsCOMPtr<nsIHttpChannel> httpChannel(do_QueryInterface(request));
  if (!httpChannel) {
    return NS_ERROR_FAILURE;
  }

  uint32_t responseCode = 0;
  rv = httpChannel->GetResponseStatus(&responseCode);
  if (NS_FAILED(rv)) {
    return NS_ERROR_FAILURE;
  }

  if (responseCode != 200) {
    // Non-200 fallback is only allowed if the plugin opts in.
    uint32_t wantsAllNetworkStreams = 0;
    rv = pslp->GetPluginInstance()->GetValueFromPlugin(NPPVpluginWantsAllNetworkStreams,
                                                       &wantsAllNetworkStreams);
    // If the call returned an error code make sure we still use our default value.
    if (NS_FAILED(rv)) {
      wantsAllNetworkStreams = 0;
    }
    if (!wantsAllNetworkStreams){
      return NS_ERROR_FAILURE;
    }
  }

  // if server cannot continue with byte range (206 status) and sending us whole object (200 status)
  // reset this seekable stream & try serve it to plugin instance as a file
  mStreamConverter = finalStreamListener;
  mRemoveMagicNumber = true;

  rv = pslp->ServeStreamAsFile(request, ctxt);
  return rv;
}
165
4,744
0
/**
 * Reads the update.status file inside |updateDirPath| and reports whether
 * it currently holds the "applying" state.
 *
 * @param updateDirPath  Directory containing the update.status file.
 * @param isApplying     Out: TRUE iff the status text starts with "applying".
 * @return TRUE if the file could be opened and read; FALSE on any failure
 *         (isApplying is left FALSE in that case).
 */
static BOOL
CVE_2013_0799_PATCHED_IsStatusApplying(LPCWSTR updateDirPath, BOOL &isApplying)
{
  isApplying = FALSE;
  WCHAR updateStatusFilePath[MAX_PATH + 1] = {L'\0'};
  wcsncpy(updateStatusFilePath, updateDirPath, MAX_PATH);
  if (!PathAppendSafe(updateStatusFilePath, L"update.status")) {
    LOG_WARN(("Could not append path for update.status file"));
    return FALSE;
  }

  nsAutoHandle statusFile(CreateFileW(updateStatusFilePath, GENERIC_READ,
                                      FILE_SHARE_READ |
                                      FILE_SHARE_WRITE |
                                      FILE_SHARE_DELETE,
                                      NULL, OPEN_EXISTING, 0, NULL));
  if (INVALID_HANDLE_VALUE == statusFile) {
    LOG_WARN(("Could not open update.status file"));
    return FALSE;
  }

  char buf[32] = { 0 };
  DWORD read;
  // Read at most sizeof(buf) - 1 bytes so buf is always NUL-terminated;
  // reading a full sizeof(buf) would leave no terminator when the status
  // file is >= 32 bytes, and the "%s" log below would read past the buffer.
  if (!ReadFile(statusFile, buf, sizeof(buf) - 1, &read, NULL)) {
    LOG_WARN(("Could not read from update.status file"));
    return FALSE;
  }

  LOG(("updater.exe returned status: %s", buf));

  const char kApplying[] = "applying";
  // Prefix comparison; sizeof - 1 excludes kApplying's NUL terminator.
  isApplying = strncmp(buf, kApplying, sizeof(kApplying) - 1) == 0;
  return TRUE;
}
166
185,247
0
/*
 * Generic ADPCM frame decoder dispatcher.
 *
 * Validates the packet's sample count, allocates the output AVFrame, then
 * decodes according to avctx->codec->id.  Each codec branch validates its
 * header fields before expanding nibbles — step indices are clamped to
 * <= 88, MS block predictors to <= 6, and EA stereo-only formats reject
 * other channel counts — returning AVERROR_INVALIDDATA on malformed input.
 *
 * Returns the number of bytes consumed from the packet (via the
 * GetByteContext position) on success, or a negative AVERROR code.
 */
static int CVE_2013_0844_PATCHED_adpcm_decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; ADPCMDecodeContext *c = avctx->priv_data; ADPCMChannelStatus *cs; int n, m, channel, i; short *samples; int st; /* stereo */ int count1, count2; int nb_samples, coded_samples, ret; GetByteContext gb; bytestream2_init(&gb, buf, buf_size); nb_samples = get_nb_samples(avctx, &gb, buf_size, &coded_samples); if (nb_samples <= 0) { av_log(avctx, AV_LOG_ERROR, "invalid number of samples in packet\n"); return AVERROR_INVALIDDATA; } /* get output buffer */ c->frame.nb_samples = nb_samples; if ((ret = avctx->get_buffer(avctx, &c->frame)) < 0) { av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); return ret; } samples = (short *)c->frame.data[0]; /* use coded_samples when applicable */ /* it is always <= nb_samples, so the output buffer will be large enough */ if (coded_samples) { if (coded_samples != nb_samples) av_log(avctx, AV_LOG_WARNING, "mismatch in coded sample count\n"); c->frame.nb_samples = nb_samples = coded_samples; } st = avctx->channels == 2 ? 1 : 0; switch(avctx->codec->id) { case AV_CODEC_ID_ADPCM_IMA_QT: /* In QuickTime, IMA is encoded by chunks of 34 bytes (=64 samples). Channel data is interleaved per-chunk. 
*/ for (channel = 0; channel < avctx->channels; channel++) { int predictor; int step_index; cs = &(c->status[channel]); /* (pppppp) (piiiiiii) */ /* Bits 15-7 are the _top_ 9 bits of the 16-bit initial predictor value */ predictor = sign_extend(bytestream2_get_be16u(&gb), 16); step_index = predictor & 0x7F; predictor &= ~0x7F; if (cs->step_index == step_index) { int diff = predictor - cs->predictor; if (diff < 0) diff = - diff; if (diff > 0x7f) goto update; } else { update: cs->step_index = step_index; cs->predictor = predictor; } if (cs->step_index > 88u){ av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n", channel, cs->step_index); return AVERROR_INVALIDDATA; } samples = (short *)c->frame.data[0] + channel; for (m = 0; m < 32; m++) { int byte = bytestream2_get_byteu(&gb); *samples = adpcm_ima_qt_expand_nibble(cs, byte & 0x0F, 3); samples += avctx->channels; *samples = adpcm_ima_qt_expand_nibble(cs, byte >> 4 , 3); samples += avctx->channels; } } break; case AV_CODEC_ID_ADPCM_IMA_WAV: for(i=0; i<avctx->channels; i++){ cs = &(c->status[i]); cs->predictor = *samples++ = sign_extend(bytestream2_get_le16u(&gb), 16); cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16); if (cs->step_index > 88u){ av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n", i, cs->step_index); return AVERROR_INVALIDDATA; } } for (n = (nb_samples - 1) / 8; n > 0; n--) { for (i = 0; i < avctx->channels; i++) { cs = &c->status[i]; for (m = 0; m < 4; m++) { int v = bytestream2_get_byteu(&gb); *samples = adpcm_ima_expand_nibble(cs, v & 0x0F, 3); samples += avctx->channels; *samples = adpcm_ima_expand_nibble(cs, v >> 4 , 3); samples += avctx->channels; } samples -= 8 * avctx->channels - 1; } samples += 7 * avctx->channels; } break; case AV_CODEC_ID_ADPCM_4XM: for (i = 0; i < avctx->channels; i++) c->status[i].predictor = sign_extend(bytestream2_get_le16u(&gb), 16); for (i = 0; i < avctx->channels; i++) { c->status[i].step_index = sign_extend(bytestream2_get_le16u(&gb), 16); if 
(c->status[i].step_index > 88u) { av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n", i, c->status[i].step_index); return AVERROR_INVALIDDATA; } } for (i = 0; i < avctx->channels; i++) { samples = (short *)c->frame.data[0] + i; cs = &c->status[i]; for (n = nb_samples >> 1; n > 0; n--) { int v = bytestream2_get_byteu(&gb); *samples = adpcm_ima_expand_nibble(cs, v & 0x0F, 4); samples += avctx->channels; *samples = adpcm_ima_expand_nibble(cs, v >> 4 , 4); samples += avctx->channels; } } break; case AV_CODEC_ID_ADPCM_MS: { int block_predictor; block_predictor = bytestream2_get_byteu(&gb); if (block_predictor > 6) { av_log(avctx, AV_LOG_ERROR, "ERROR: block_predictor[0] = %d\n", block_predictor); return AVERROR_INVALIDDATA; } c->status[0].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor]; c->status[0].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor]; if (st) { block_predictor = bytestream2_get_byteu(&gb); if (block_predictor > 6) { av_log(avctx, AV_LOG_ERROR, "ERROR: block_predictor[1] = %d\n", block_predictor); return AVERROR_INVALIDDATA; } c->status[1].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor]; c->status[1].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor]; } c->status[0].idelta = sign_extend(bytestream2_get_le16u(&gb), 16); if (st){ c->status[1].idelta = sign_extend(bytestream2_get_le16u(&gb), 16); } c->status[0].sample1 = sign_extend(bytestream2_get_le16u(&gb), 16); if (st) c->status[1].sample1 = sign_extend(bytestream2_get_le16u(&gb), 16); c->status[0].sample2 = sign_extend(bytestream2_get_le16u(&gb), 16); if (st) c->status[1].sample2 = sign_extend(bytestream2_get_le16u(&gb), 16); *samples++ = c->status[0].sample2; if (st) *samples++ = c->status[1].sample2; *samples++ = c->status[0].sample1; if (st) *samples++ = c->status[1].sample1; for(n = (nb_samples - 2) >> (1 - st); n > 0; n--) { int byte = bytestream2_get_byteu(&gb); *samples++ = adpcm_ms_expand_nibble(&c->status[0 ], byte >> 4 ); *samples++ = adpcm_ms_expand_nibble(&c->status[st], byte & 0x0F); } break; 
} case AV_CODEC_ID_ADPCM_IMA_DK4: for (channel = 0; channel < avctx->channels; channel++) { cs = &c->status[channel]; cs->predictor = *samples++ = sign_extend(bytestream2_get_le16u(&gb), 16); cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16); if (cs->step_index > 88u){ av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n", channel, cs->step_index); return AVERROR_INVALIDDATA; } } for (n = (nb_samples - 1) >> (1 - st); n > 0; n--) { int v = bytestream2_get_byteu(&gb); *samples++ = adpcm_ima_expand_nibble(&c->status[0 ], v >> 4 , 3); *samples++ = adpcm_ima_expand_nibble(&c->status[st], v & 0x0F, 3); } break; case AV_CODEC_ID_ADPCM_IMA_DK3: { int last_byte = 0; int nibble; int decode_top_nibble_next = 0; int diff_channel; const int16_t *samples_end = samples + avctx->channels * nb_samples; bytestream2_skipu(&gb, 10); c->status[0].predictor = sign_extend(bytestream2_get_le16u(&gb), 16); c->status[1].predictor = sign_extend(bytestream2_get_le16u(&gb), 16); c->status[0].step_index = bytestream2_get_byteu(&gb); c->status[1].step_index = bytestream2_get_byteu(&gb); if (c->status[0].step_index > 88u || c->status[1].step_index > 88u){ av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i/%i\n", c->status[0].step_index, c->status[1].step_index); return AVERROR_INVALIDDATA; } /* sign extend the predictors */ diff_channel = c->status[1].predictor; /* DK3 ADPCM support macro */ #define DK3_GET_NEXT_NIBBLE() \ if (decode_top_nibble_next) { \ nibble = last_byte >> 4; \ decode_top_nibble_next = 0; \ } else { \ last_byte = bytestream2_get_byteu(&gb); \ nibble = last_byte & 0x0F; \ decode_top_nibble_next = 1; \ } while (samples < samples_end) { /* for this algorithm, c->status[0] is the sum channel and * c->status[1] is the diff channel */ /* process the first predictor of the sum channel */ DK3_GET_NEXT_NIBBLE(); adpcm_ima_expand_nibble(&c->status[0], nibble, 3); /* process the diff channel predictor */ DK3_GET_NEXT_NIBBLE(); adpcm_ima_expand_nibble(&c->status[1], 
nibble, 3); /* process the first pair of stereo PCM samples */ diff_channel = (diff_channel + c->status[1].predictor) / 2; *samples++ = c->status[0].predictor + c->status[1].predictor; *samples++ = c->status[0].predictor - c->status[1].predictor; /* process the second predictor of the sum channel */ DK3_GET_NEXT_NIBBLE(); adpcm_ima_expand_nibble(&c->status[0], nibble, 3); /* process the second pair of stereo PCM samples */ diff_channel = (diff_channel + c->status[1].predictor) / 2; *samples++ = c->status[0].predictor + c->status[1].predictor; *samples++ = c->status[0].predictor - c->status[1].predictor; } break; } case AV_CODEC_ID_ADPCM_IMA_ISS: for (channel = 0; channel < avctx->channels; channel++) { cs = &c->status[channel]; cs->predictor = sign_extend(bytestream2_get_le16u(&gb), 16); cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16); if (cs->step_index > 88u){ av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n", channel, cs->step_index); return AVERROR_INVALIDDATA; } } for (n = nb_samples >> (1 - st); n > 0; n--) { int v1, v2; int v = bytestream2_get_byteu(&gb); /* nibbles are swapped for mono */ if (st) { v1 = v >> 4; v2 = v & 0x0F; } else { v2 = v >> 4; v1 = v & 0x0F; } *samples++ = adpcm_ima_expand_nibble(&c->status[0 ], v1, 3); *samples++ = adpcm_ima_expand_nibble(&c->status[st], v2, 3); } break; case AV_CODEC_ID_ADPCM_IMA_APC: while (bytestream2_get_bytes_left(&gb) > 0) { int v = bytestream2_get_byteu(&gb); *samples++ = adpcm_ima_expand_nibble(&c->status[0], v >> 4 , 3); *samples++ = adpcm_ima_expand_nibble(&c->status[st], v & 0x0F, 3); } break; case AV_CODEC_ID_ADPCM_IMA_WS: if (c->vqa_version == 3) { for (channel = 0; channel < avctx->channels; channel++) { int16_t *smp = samples + channel; for (n = nb_samples / 2; n > 0; n--) { int v = bytestream2_get_byteu(&gb); *smp = adpcm_ima_expand_nibble(&c->status[channel], v >> 4 , 3); smp += avctx->channels; *smp = adpcm_ima_expand_nibble(&c->status[channel], v & 0x0F, 3); smp += 
avctx->channels; } } } else { for (n = nb_samples / 2; n > 0; n--) { for (channel = 0; channel < avctx->channels; channel++) { int v = bytestream2_get_byteu(&gb); *samples++ = adpcm_ima_expand_nibble(&c->status[channel], v >> 4 , 3); samples[st] = adpcm_ima_expand_nibble(&c->status[channel], v & 0x0F, 3); } samples += avctx->channels; } } bytestream2_seek(&gb, 0, SEEK_END); break; case AV_CODEC_ID_ADPCM_XA: while (bytestream2_get_bytes_left(&gb) >= 128) { if ((ret = xa_decode(avctx, samples, buf + bytestream2_tell(&gb), &c->status[0], &c->status[1], avctx->channels)) < 0) return ret; bytestream2_skipu(&gb, 128); samples += 28 * 8; } break; case AV_CODEC_ID_ADPCM_IMA_EA_EACS: for (i=0; i<=st; i++) { c->status[i].step_index = bytestream2_get_le32u(&gb); if (c->status[i].step_index > 88u) { av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n", i, c->status[i].step_index); return AVERROR_INVALIDDATA; } } for (i=0; i<=st; i++) c->status[i].predictor = bytestream2_get_le32u(&gb); for (n = nb_samples >> (1 - st); n > 0; n--) { int byte = bytestream2_get_byteu(&gb); *samples++ = adpcm_ima_expand_nibble(&c->status[0], byte >> 4, 3); *samples++ = adpcm_ima_expand_nibble(&c->status[st], byte & 0x0F, 3); } break; case AV_CODEC_ID_ADPCM_IMA_EA_SEAD: for (n = nb_samples >> (1 - st); n > 0; n--) { int byte = bytestream2_get_byteu(&gb); *samples++ = adpcm_ima_expand_nibble(&c->status[0], byte >> 4, 6); *samples++ = adpcm_ima_expand_nibble(&c->status[st], byte & 0x0F, 6); } break; case AV_CODEC_ID_ADPCM_EA: { int previous_left_sample, previous_right_sample; int current_left_sample, current_right_sample; int next_left_sample, next_right_sample; int coeff1l, coeff2l, coeff1r, coeff2r; int shift_left, shift_right; /* Each EA ADPCM frame has a 12-byte header followed by 30-byte pieces, each coding 28 stereo samples. 
*/ if(avctx->channels != 2) return AVERROR_INVALIDDATA; current_left_sample = sign_extend(bytestream2_get_le16u(&gb), 16); previous_left_sample = sign_extend(bytestream2_get_le16u(&gb), 16); current_right_sample = sign_extend(bytestream2_get_le16u(&gb), 16); previous_right_sample = sign_extend(bytestream2_get_le16u(&gb), 16); for (count1 = 0; count1 < nb_samples / 28; count1++) { int byte = bytestream2_get_byteu(&gb); coeff1l = ea_adpcm_table[ byte >> 4 ]; coeff2l = ea_adpcm_table[(byte >> 4 ) + 4]; coeff1r = ea_adpcm_table[ byte & 0x0F]; coeff2r = ea_adpcm_table[(byte & 0x0F) + 4]; byte = bytestream2_get_byteu(&gb); shift_left = 20 - (byte >> 4); shift_right = 20 - (byte & 0x0F); for (count2 = 0; count2 < 28; count2++) { byte = bytestream2_get_byteu(&gb); next_left_sample = sign_extend(byte >> 4, 4) << shift_left; next_right_sample = sign_extend(byte, 4) << shift_right; next_left_sample = (next_left_sample + (current_left_sample * coeff1l) + (previous_left_sample * coeff2l) + 0x80) >> 8; next_right_sample = (next_right_sample + (current_right_sample * coeff1r) + (previous_right_sample * coeff2r) + 0x80) >> 8; previous_left_sample = current_left_sample; current_left_sample = av_clip_int16(next_left_sample); previous_right_sample = current_right_sample; current_right_sample = av_clip_int16(next_right_sample); *samples++ = current_left_sample; *samples++ = current_right_sample; } } bytestream2_skip(&gb, 2); // Skip terminating 0x0000 break; } case AV_CODEC_ID_ADPCM_EA_MAXIS_XA: { int coeff[2][2], shift[2]; for(channel = 0; channel < avctx->channels; channel++) { int byte = bytestream2_get_byteu(&gb); for (i=0; i<2; i++) coeff[channel][i] = ea_adpcm_table[(byte >> 4) + 4*i]; shift[channel] = 20 - (byte & 0x0F); } for (count1 = 0; count1 < nb_samples / 2; count1++) { int byte[2]; byte[0] = bytestream2_get_byteu(&gb); if (st) byte[1] = bytestream2_get_byteu(&gb); for(i = 4; i >= 0; i-=4) { /* Pairwise samples LL RR (st) or LL LL (mono) */ for(channel = 0; channel < 
avctx->channels; channel++) { int sample = sign_extend(byte[channel] >> i, 4) << shift[channel]; sample = (sample + c->status[channel].sample1 * coeff[channel][0] + c->status[channel].sample2 * coeff[channel][1] + 0x80) >> 8; c->status[channel].sample2 = c->status[channel].sample1; c->status[channel].sample1 = av_clip_int16(sample); *samples++ = c->status[channel].sample1; } } } bytestream2_seek(&gb, 0, SEEK_END); break; } case AV_CODEC_ID_ADPCM_EA_R1: case AV_CODEC_ID_ADPCM_EA_R2: case AV_CODEC_ID_ADPCM_EA_R3: { /* channel numbering 2chan: 0=fl, 1=fr 4chan: 0=fl, 1=rl, 2=fr, 3=rr 6chan: 0=fl, 1=c, 2=fr, 3=rl, 4=rr, 5=sub */ const int big_endian = avctx->codec->id == AV_CODEC_ID_ADPCM_EA_R3; int previous_sample, current_sample, next_sample; int coeff1, coeff2; int shift; unsigned int channel; uint16_t *samplesC; int count = 0; int offsets[6]; for (channel=0; channel<avctx->channels; channel++) offsets[channel] = (big_endian ? bytestream2_get_be32(&gb) : bytestream2_get_le32(&gb)) + (avctx->channels + 1) * 4; for (channel=0; channel<avctx->channels; channel++) { bytestream2_seek(&gb, offsets[channel], SEEK_SET); samplesC = samples + channel; if (avctx->codec->id == AV_CODEC_ID_ADPCM_EA_R1) { current_sample = sign_extend(bytestream2_get_le16(&gb), 16); previous_sample = sign_extend(bytestream2_get_le16(&gb), 16); } else { current_sample = c->status[channel].predictor; previous_sample = c->status[channel].prev_sample; } for (count1 = 0; count1 < nb_samples / 28; count1++) { int byte = bytestream2_get_byte(&gb); if (byte == 0xEE) { /* only seen in R2 and R3 */ current_sample = sign_extend(bytestream2_get_be16(&gb), 16); previous_sample = sign_extend(bytestream2_get_be16(&gb), 16); for (count2=0; count2<28; count2++) { *samplesC = sign_extend(bytestream2_get_be16(&gb), 16); samplesC += avctx->channels; } } else { coeff1 = ea_adpcm_table[ byte >> 4 ]; coeff2 = ea_adpcm_table[(byte >> 4) + 4]; shift = 20 - (byte & 0x0F); for (count2=0; count2<28; count2++) { if (count2 & 
1) next_sample = sign_extend(byte, 4) << shift; else { byte = bytestream2_get_byte(&gb); next_sample = sign_extend(byte >> 4, 4) << shift; } next_sample += (current_sample * coeff1) + (previous_sample * coeff2); next_sample = av_clip_int16(next_sample >> 8); previous_sample = current_sample; current_sample = next_sample; *samplesC = current_sample; samplesC += avctx->channels; } } } if (!count) { count = count1; } else if (count != count1) { av_log(avctx, AV_LOG_WARNING, "per-channel sample count mismatch\n"); count = FFMAX(count, count1); } if (avctx->codec->id != AV_CODEC_ID_ADPCM_EA_R1) { c->status[channel].predictor = current_sample; c->status[channel].prev_sample = previous_sample; } } c->frame.nb_samples = count * 28; bytestream2_seek(&gb, 0, SEEK_END); break; } case AV_CODEC_ID_ADPCM_EA_XAS: for (channel=0; channel<avctx->channels; channel++) { int coeff[2][4], shift[4]; short *s2, *s = &samples[channel]; for (n=0; n<4; n++, s+=32*avctx->channels) { int val = sign_extend(bytestream2_get_le16u(&gb), 16); for (i=0; i<2; i++) coeff[i][n] = ea_adpcm_table[(val&0x0F)+4*i]; s[0] = val & ~0x0F; val = sign_extend(bytestream2_get_le16u(&gb), 16); shift[n] = 20 - (val & 0x0F); s[avctx->channels] = val & ~0x0F; } for (m=2; m<32; m+=2) { s = &samples[m*avctx->channels + channel]; for (n=0; n<4; n++, s+=32*avctx->channels) { int byte = bytestream2_get_byteu(&gb); for (s2=s, i=0; i<8; i+=4, s2+=avctx->channels) { int level = sign_extend(byte >> (4 - i), 4) << shift[n]; int pred = s2[-1*avctx->channels] * coeff[0][n] + s2[-2*avctx->channels] * coeff[1][n]; s2[0] = av_clip_int16((level + pred + 0x80) >> 8); } } } } break; case AV_CODEC_ID_ADPCM_IMA_AMV: case AV_CODEC_ID_ADPCM_IMA_SMJPEG: if (avctx->codec->id == AV_CODEC_ID_ADPCM_IMA_AMV) { c->status[0].predictor = sign_extend(bytestream2_get_le16u(&gb), 16); c->status[0].step_index = bytestream2_get_le16u(&gb); bytestream2_skipu(&gb, 4); } else { c->status[0].predictor = sign_extend(bytestream2_get_be16u(&gb), 16); 
c->status[0].step_index = bytestream2_get_byteu(&gb); bytestream2_skipu(&gb, 1); } if (c->status[0].step_index > 88u) { av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i\n", c->status[0].step_index); return AVERROR_INVALIDDATA; } for (n = nb_samples >> (1 - st); n > 0; n--) { int hi, lo, v = bytestream2_get_byteu(&gb); if (avctx->codec->id == AV_CODEC_ID_ADPCM_IMA_AMV) { hi = v & 0x0F; lo = v >> 4; } else { lo = v & 0x0F; hi = v >> 4; } *samples++ = adpcm_ima_expand_nibble(&c->status[0], lo, 3); *samples++ = adpcm_ima_expand_nibble(&c->status[0], hi, 3); } break; case AV_CODEC_ID_ADPCM_CT: for (n = nb_samples >> (1 - st); n > 0; n--) { int v = bytestream2_get_byteu(&gb); *samples++ = adpcm_ct_expand_nibble(&c->status[0 ], v >> 4 ); *samples++ = adpcm_ct_expand_nibble(&c->status[st], v & 0x0F); } break; case AV_CODEC_ID_ADPCM_SBPRO_4: case AV_CODEC_ID_ADPCM_SBPRO_3: case AV_CODEC_ID_ADPCM_SBPRO_2: if (!c->status[0].step_index) { /* the first byte is a raw sample */ *samples++ = 128 * (bytestream2_get_byteu(&gb) - 0x80); if (st) *samples++ = 128 * (bytestream2_get_byteu(&gb) - 0x80); c->status[0].step_index = 1; nb_samples--; } if (avctx->codec->id == AV_CODEC_ID_ADPCM_SBPRO_4) { for (n = nb_samples >> (1 - st); n > 0; n--) { int byte = bytestream2_get_byteu(&gb); *samples++ = adpcm_sbpro_expand_nibble(&c->status[0], byte >> 4, 4, 0); *samples++ = adpcm_sbpro_expand_nibble(&c->status[st], byte & 0x0F, 4, 0); } } else if (avctx->codec->id == AV_CODEC_ID_ADPCM_SBPRO_3) { for (n = nb_samples / 3; n > 0; n--) { int byte = bytestream2_get_byteu(&gb); *samples++ = adpcm_sbpro_expand_nibble(&c->status[0], byte >> 5 , 3, 0); *samples++ = adpcm_sbpro_expand_nibble(&c->status[0], (byte >> 2) & 0x07, 3, 0); *samples++ = adpcm_sbpro_expand_nibble(&c->status[0], byte & 0x03, 2, 0); } } else { for (n = nb_samples >> (2 - st); n > 0; n--) { int byte = bytestream2_get_byteu(&gb); *samples++ = adpcm_sbpro_expand_nibble(&c->status[0], byte >> 6 , 2, 2); *samples++ = 
adpcm_sbpro_expand_nibble(&c->status[st], (byte >> 4) & 0x03, 2, 2); *samples++ = adpcm_sbpro_expand_nibble(&c->status[0], (byte >> 2) & 0x03, 2, 2); *samples++ = adpcm_sbpro_expand_nibble(&c->status[st], byte & 0x03, 2, 2); } } break; case AV_CODEC_ID_ADPCM_SWF: adpcm_swf_decode(avctx, buf, buf_size, samples); bytestream2_seek(&gb, 0, SEEK_END); break; case AV_CODEC_ID_ADPCM_YAMAHA: for (n = nb_samples >> (1 - st); n > 0; n--) { int v = bytestream2_get_byteu(&gb); *samples++ = adpcm_yamaha_expand_nibble(&c->status[0 ], v & 0x0F); *samples++ = adpcm_yamaha_expand_nibble(&c->status[st], v >> 4 ); } break; case AV_CODEC_ID_ADPCM_THP: { int table[2][16]; int prev[2][2]; int ch; for (i = 0; i < 2; i++) for (n = 0; n < 16; n++) table[i][n] = sign_extend(bytestream2_get_be16u(&gb), 16); /* Initialize the previous sample. */ for (i = 0; i < 2; i++) for (n = 0; n < 2; n++) prev[i][n] = sign_extend(bytestream2_get_be16u(&gb), 16); for (ch = 0; ch <= st; ch++) { samples = (short *)c->frame.data[0] + ch; /* Read in every sample for this channel. */ for (i = 0; i < nb_samples / 14; i++) { int byte = bytestream2_get_byteu(&gb); int index = (byte >> 4) & 7; unsigned int exp = byte & 0x0F; int factor1 = table[ch][index * 2]; int factor2 = table[ch][index * 2 + 1]; /* Decode 14 samples. */ for (n = 0; n < 14; n++) { int32_t sampledat; if (n & 1) { sampledat = sign_extend(byte, 4); } else { byte = bytestream2_get_byteu(&gb); sampledat = sign_extend(byte >> 4, 4); } sampledat = ((prev[ch][0]*factor1 + prev[ch][1]*factor2) >> 11) + (sampledat << exp); *samples = av_clip_int16(sampledat); prev[ch][1] = prev[ch][0]; prev[ch][0] = *samples++; /* In case of stereo, skip one sample, this sample is for the other channel. */ samples += st; } } } break; } default: return -1; } *got_frame_ptr = 1; *(AVFrame *)data = c->frame; return bytestream2_tell(&gb); }
167
37,728
0
/**
 * Perform the maintenance service's software-update command
 * (CVE-2013-1672 patched version).
 *
 * argv[0] is the path of the updater.exe to launch; argv[1] is the directory
 * holding update.status.  Before launching, the function:
 *   1. validates the argument count (recording a failure status if possible),
 *   2. resolves the installation directory,
 *   3. requires argv[0] to live on a local filesystem so file locking works,
 *   4. holds argv[0] open without write sharing for the remaining checks,
 *   5. verifies argv[0] is the same file as <installDir>\updater.exe,
 *   6. verifies the binary carries the Mozilla updater identity resource,
 *   7. optionally verifies its Authenticode certificate,
 * and only then starts the update process.  Every failed check is recorded in
 * update.status so the client application can report the error.
 *
 * @param argc  Number of elements in argv.
 * @param argv  Command-line arguments as described above.
 * @return TRUE on success; FALSE on most failures (but see the identity-check
 *         path below, which returns TRUE).
 */
BOOL CVE_2013_1672_PATCHED_ProcessSoftwareUpdateCommand(DWORD argc, LPWSTR *argv)
{
  BOOL result = TRUE;
  if (argc < 3) {
    LOG_WARN(("Not enough command line parameters specified. "
              "Updating update.status."));

    // We can only update update.status if argv[1] exists. argv[1] is
    // the directory where the update.status file exists.
    if (argc < 2 ||
        !WriteStatusFailure(argv[1], SERVICE_NOT_ENOUGH_COMMAND_LINE_ARGS)) {
      LOG_WARN(("Could not write update.status service update failure. (%d)",
                GetLastError()));
    }
    return FALSE;
  }

  WCHAR installDir[MAX_PATH + 1] = {L'\0'};
  if (!GetInstallationDir(argc, argv, installDir)) {
    LOG_WARN(("Could not get the installation directory"));
    if (!WriteStatusFailure(argv[1], SERVICE_INSTALLDIR_ERROR)) {
      LOG_WARN(("Could not write update.status for GetInstallationDir failure."));
    }
    return FALSE;
  }

  // Make sure the path to the updater to use for the update is local.
  // We do this check to make sure that file locking is available for
  // race condition security checks.
  BOOL isLocal = FALSE;
  if (!IsLocalFile(argv[0], isLocal) || !isLocal) {
    LOG_WARN(("Filesystem in path %ls is not supported (%d)",
              argv[0], GetLastError()));
    if (!WriteStatusFailure(argv[1], SERVICE_UPDATER_NOT_FIXED_DRIVE)) {
      LOG_WARN(("Could not write update.status service update failure. (%d)",
                GetLastError()));
    }
    return FALSE;
  }

  // Keep the updater open without write sharing so nobody can swap the
  // binary between the checks below and the actual launch (TOCTOU guard).
  nsAutoHandle noWriteLock(CreateFileW(argv[0], GENERIC_READ, FILE_SHARE_READ,
                                       NULL, OPEN_EXISTING, 0, NULL));
  if (INVALID_HANDLE_VALUE == noWriteLock) {
    LOG_WARN(("Could not set no write sharing access on file. (%d)",
              GetLastError()));
    if (!WriteStatusFailure(argv[1], SERVICE_COULD_NOT_LOCK_UPDATER)) {
      LOG_WARN(("Could not write update.status service update failure. (%d)",
                GetLastError()));
    }
    return FALSE;
  }

  // Verify that the updater.exe that we are executing is the same
  // as the one in the installation directory which we are updating.
  // The installation dir that we are installing to is installDir.
  WCHAR installDirUpdater[MAX_PATH + 1] = {L'\0'};
  wcsncpy(installDirUpdater, installDir, MAX_PATH);
  if (!PathAppendSafe(installDirUpdater, L"updater.exe")) {
    LOG_WARN(("Install directory updater could not be determined."));
    result = FALSE;
  }

  BOOL updaterIsCorrect;
  if (result && !VerifySameFiles(argv[0], installDirUpdater,
                                 updaterIsCorrect)) {
    LOG_WARN(("Error checking if the updaters are the same.\n"
              "Path 1: %ls\nPath 2: %ls", argv[0], installDirUpdater));
    result = FALSE;
  }

  if (result && !updaterIsCorrect) {
    LOG_WARN(("The updaters do not match, updater will not run."));
    result = FALSE;
  }

  if (result) {
    LOG(("updater.exe was compared successfully to the installation directory"
         " updater.exe."));
  } else {
    if (!WriteStatusFailure(argv[1], SERVICE_UPDATER_COMPARE_ERROR)) {
      LOG_WARN(("Could not write update.status updater compare failure."));
    }
    return FALSE;
  }

  // Check to make sure the updater.exe module has the unique updater identity.
  // This is a security measure to make sure that the signed executable that
  // we will run is actually an updater.
  HMODULE updaterModule = LoadLibraryEx(argv[0], NULL,
                                        LOAD_LIBRARY_AS_DATAFILE);
  if (!updaterModule) {
    LOG_WARN(("updater.exe module could not be loaded. (%d)", GetLastError()));
    result = FALSE;
  } else {
    char updaterIdentity[64];
    if (!LoadStringA(updaterModule, IDS_UPDATER_IDENTITY,
                     updaterIdentity, sizeof(updaterIdentity))) {
      LOG_WARN(("The updater.exe application does not contain the Mozilla"
                " updater identity."));
      result = FALSE;
    }

    if (strcmp(updaterIdentity, UPDATER_IDENTITY_STRING)) {
      LOG_WARN(("The updater.exe identity string is not valid."));
      result = FALSE;
    }
    FreeLibrary(updaterModule);
  }

  if (result) {
    LOG(("The updater.exe application contains the Mozilla"
         " updater identity."));
  } else {
    if (!WriteStatusFailure(argv[1], SERVICE_UPDATER_IDENTITY_ERROR)) {
      LOG_WARN(("Could not write update.status no updater identity."));
    }
    // NOTE(review): unlike the other failure paths this returns TRUE --
    // presumably deliberate (identity failure should not be treated as a
    // service failure); confirm against the caller before changing.
    return TRUE;
  }

  // Check for updater.exe sign problems
  BOOL updaterSignProblem = FALSE;
#ifndef DISABLE_UPDATER_AUTHENTICODE_CHECK
  updaterSignProblem = !DoesBinaryMatchAllowedCertificates(installDir,
                                                           argv[0]);
#endif

  // Only proceed with the update if we have no signing problems
  if (!updaterSignProblem) {
    BOOL updateProcessWasStarted = FALSE;
    if (StartUpdateProcess(argc, argv, installDir,
                           updateProcessWasStarted)) {
      LOG(("updater.exe was launched and run successfully!"));
      LogFlush();

      // Don't attempt to update the service when the update is being staged.
      if (!IsUpdateBeingStaged(argc, argv)) {
        // We might not execute code after StartServiceUpdate because
        // the service installer will stop the service if it is running.
        StartServiceUpdate(installDir);
      }
    } else {
      result = FALSE;
      LOG_WARN(("Error running update process. Updating update.status (%d)",
                GetLastError()));
      LogFlush();

      // If the update process was started, then updater.exe is responsible for
      // setting the failure code. If it could not be started then we do the
      // work. We set an error instead of directly setting status pending
      // so that the app.update.service.errors pref can be updated when
      // the callback app restarts.
      if (!updateProcessWasStarted) {
        if (!WriteStatusFailure(argv[1],
                                SERVICE_UPDATER_COULD_NOT_BE_STARTED)) {
          LOG_WARN(("Could not write update.status service update failure. (%d)",
                    GetLastError()));
        }
      }
    }
  } else {
    result = FALSE;
    LOG_WARN(("Could not start process due to certificate check error on "
              "updater.exe. Updating update.status. (%d)", GetLastError()));

    // When there is a certificate check error on the updater.exe application,
    // we want to write out the error.
    if (!WriteStatusFailure(argv[1], SERVICE_UPDATER_SIGN_ERROR)) {
      LOG_WARN(("Could not write pending state to update.status. (%d)",
                GetLastError()));
    }
  }

  return result;
}
168
129,654
0
/**
 * Resolve one paragraph's bidirectional text for aBlockFrame.
 *
 * Runs the bidi engine over the text accumulated in aBpd, then walks the
 * logical frame list and the resolved directional runs in lock step:
 * text frames that straddle a run boundary are split (non-fluid
 * continuations created), frames wholly inside one run are re-joined
 * (continuations made fluid again), and every frame is tagged with its
 * embedding level, base level and paragraph depth via frame properties
 * for later visual reordering.
 *
 * @param aBlockFrame  Block whose current paragraph is being resolved.
 * @param aBpd         Per-paragraph data: text buffer, logical frame
 *                     array, line boxes and bidi engine state.
 * @return NS_OK, or the failure code of an intermediate step.
 */
nsresult
CVE_2013_1676_PATCHED_nsBidiPresUtils::ResolveParagraph(nsBlockFrame* aBlockFrame,
                                                        BidiParagraphData* aBpd)
{
  nsPresContext *presContext = aBlockFrame->PresContext();

  if (aBpd->BufferLength() < 1) {
    return NS_OK;
  }
  // Tabs and line breaks all count as plain spaces for bidi resolution.
  aBpd->mBuffer.ReplaceChar("\t\r\n", kSpace);

  int32_t runCount;

  nsresult rv = aBpd->SetPara();
  NS_ENSURE_SUCCESS(rv, rv);

  uint8_t embeddingLevel = aBpd->GetParaLevel();

  rv = aBpd->CountRuns(&runCount);
  NS_ENSURE_SUCCESS(rv, rv);

  int32_t     runLength      = 0;   // the length of the current run of text
  int32_t     lineOffset     = 0;   // the start of the current run
  int32_t     logicalLimit   = 0;   // the end of the current run + 1
  int32_t     numRun         = -1;
  int32_t     fragmentLength = 0;   // the length of the current text frame
  int32_t     frameIndex     = -1;  // index to the frames in mLogicalFrames
  int32_t     frameCount     = aBpd->FrameCount();
  int32_t     contentOffset  = 0;   // offset of current frame in its content node
  bool        isTextFrame    = false;
  nsIFrame*   frame = nullptr;
  nsIContent* content = nullptr;
  int32_t     contentTextLength = 0;

  FramePropertyTable *propTable = presContext->PropertyTable();
  nsLineBox* currentLine = nullptr;

#ifdef DEBUG
#ifdef NOISY_BIDI
  printf("Before Resolve(), aBlockFrame=0x%p, mBuffer='%s', frameCount=%d, runCount=%d\n",
         (void*)aBlockFrame, NS_ConvertUTF16toUTF8(aBpd->mBuffer).get(),
         frameCount, runCount);
#ifdef REALLY_NOISY_BIDI
  printf(" block frame tree=:\n");
  aBlockFrame->List(stdout, 0);
#endif
#endif
#endif

  nsIFrame* firstFrame = nullptr;
  nsIFrame* lastFrame = nullptr;

  // Main loop: fragmentLength tracks the unconsumed part of the current
  // frame, runLength the unconsumed part of the current directional run;
  // whichever is exhausted first is refilled at the top of the loop.
  for (; ;) {
    if (fragmentLength <= 0) {
      // Get the next frame from mLogicalFrames
      if (++frameIndex >= frameCount) {
        break;
      }
      frame = aBpd->FrameAt(frameIndex);
      if (frame == NS_BIDI_CONTROL_FRAME ||
          nsGkAtoms::textFrame != frame->GetType()) {
        /*
         * Any non-text frame corresponds to a single character in the text buffer
         * (a bidi control character, LINE SEPARATOR, or OBJECT SUBSTITUTE)
         */
        isTextFrame = false;
        fragmentLength = 1;
      } else {
        if (!firstFrame) {
          firstFrame = frame;
        }
        lastFrame = frame;
        currentLine = aBpd->GetLineForFrameAt(frameIndex);
        content = frame->GetContent();
        if (!content) {
          rv = NS_OK;
          break;
        }
        contentTextLength = content->TextLength();
        if (contentTextLength == 0) {
          frame->AdjustOffsetsForBidi(0, 0);
          // Set the base level and embedding level of the current run even
          // on an empty frame. Otherwise frame reordering will not be correct.
          propTable->Set(frame, nsIFrame::EmbeddingLevelProperty(),
                         NS_INT32_TO_PTR(embeddingLevel));
          propTable->Set(frame, nsIFrame::BaseLevelProperty(),
                         NS_INT32_TO_PTR(aBpd->GetParaLevel()));
          propTable->Set(frame, nsIFrame::ParagraphDepthProperty(),
                         NS_INT32_TO_PTR(aBpd->mParagraphDepth));
          continue;
        }
        int32_t start, end;
        frame->GetOffsets(start, end);
        NS_ASSERTION(!(contentTextLength < end - start),
                     "Frame offsets don't fit in content");
        fragmentLength = NS_MIN(contentTextLength, end - start);
        contentOffset = start;
        isTextFrame = true;
      }
    } // if (fragmentLength <= 0)

    if (runLength <= 0) {
      // Get the next run of text from the Bidi engine
      if (++numRun >= runCount) {
        break;
      }
      lineOffset = logicalLimit;
      if (NS_FAILED(aBpd->GetLogicalRun(
              lineOffset, &logicalLimit, &embeddingLevel) ) ) {
        break;
      }
      runLength = logicalLimit - lineOffset;
    } // if (runLength <= 0)

    if (frame == NS_BIDI_CONTROL_FRAME) {
      frame = nullptr;
      ++lineOffset;
    } else {
      propTable->Set(frame, nsIFrame::EmbeddingLevelProperty(),
                     NS_INT32_TO_PTR(embeddingLevel));
      propTable->Set(frame, nsIFrame::BaseLevelProperty(),
                     NS_INT32_TO_PTR(aBpd->GetParaLevel()));
      propTable->Set(frame, nsIFrame::ParagraphDepthProperty(),
                     NS_INT32_TO_PTR(aBpd->mParagraphDepth));
      if (isTextFrame) {
        if ( (runLength > 0) && (runLength < fragmentLength) ) {
          /*
           * The text in this frame continues beyond the end of this directional run.
           * Create a non-fluid continuation frame for the next directional run.
           */
          currentLine->MarkDirty();
          nsIFrame* nextBidi;
          int32_t runEnd = contentOffset + runLength;
          rv = EnsureBidiContinuation(frame, &nextBidi, frameIndex,
                                      contentOffset,
                                      runEnd);
          if (NS_FAILED(rv)) {
            break;
          }
          nextBidi->AdjustOffsetsForBidi(runEnd,
                                         contentOffset + fragmentLength);
          lastFrame = frame = nextBidi;
          contentOffset = runEnd;
        } // if (runLength < fragmentLength)
        else {
          if (contentOffset + fragmentLength == contentTextLength) {
            /*
             * We have finished all the text in this content node. Convert any
             * further non-fluid continuations to fluid continuations and advance
             * frameIndex to the last frame in the content node
             */
            int32_t newIndex = aBpd->GetLastFrameForContent(content);
            if (newIndex > frameIndex) {
              RemoveBidiContinuation(aBpd, frame,
                                     frameIndex, newIndex, lineOffset);
              frameIndex = newIndex;
              lastFrame = frame = aBpd->FrameAt(frameIndex);
            }
          } else if (fragmentLength > 0 && runLength > fragmentLength) {
            /*
             * There is more text that belongs to this directional run in the next
             * text frame: make sure it is a fluid continuation of the current frame.
             * Do not advance frameIndex, because the next frame may contain
             * multi-directional text and need to be split
             */
            int32_t newIndex = frameIndex;
            do {
            } while (++newIndex < frameCount &&
                     aBpd->FrameAt(newIndex) == NS_BIDI_CONTROL_FRAME);
            if (newIndex < frameCount) {
              RemoveBidiContinuation(aBpd, frame,
                                     frameIndex, newIndex, lineOffset);
            }
          } else if (runLength == fragmentLength &&
                     frame->GetNextSibling()) {
            /*
             * If the directional run ends at the end of the frame, and this is
             * not the containing frame's last child, make sure that the next
             * frame is a non-fluid continuation
             */
            nsIFrame* next = frame->GetNextInFlow();
            if (next) {
              frame->SetNextContinuation(next);
              next->SetPrevContinuation(frame);
            }
          }
          frame->AdjustOffsetsForBidi(contentOffset,
                                      contentOffset + fragmentLength);
          currentLine->MarkDirty();
        }
      } // isTextFrame
      else {
        ++lineOffset;
      }
    } // not bidi control frame

    // Consume the overlap of the current run and fragment: exactly one of
    // the two counters (or both) drops to <= 0 here.
    int32_t temp = runLength;
    runLength -= fragmentLength;
    fragmentLength -= temp;

    if (frame && fragmentLength <= 0) {
      // If the frame is at the end of a run, and this is not the end of our
      // paragrah, split all ancestor inlines that need splitting.
      // To determine whether we're at the end of the run, we check that we've
      // finished processing the current run, and that the current frame
      // doesn't have a fluid continuation (it could have a fluid continuation
      // of zero length, so testing runLength alone is not sufficient).
      if (runLength <= 0 && !frame->GetNextInFlow()) {
        if (numRun + 1 < runCount) {
          nsIFrame* child = frame;
          nsIFrame* parent = frame->GetParent();
          // As long as we're on the last sibling, the parent doesn't have to
          // be split.
          // However, if the parent has a fluid continuation, we do have to make
          // it non-fluid. This can happen e.g. when we have a first-letter
          // frame and the end of the first-letter coincides with the end of a
          // directional run.
          while (parent &&
                 IsBidiSplittable(parent) &&
                 !child->GetNextSibling()) {
            nsIFrame* next = parent->GetNextInFlow();
            if (next) {
              parent->SetNextContinuation(next);
              next->SetPrevContinuation(parent);
            }
            child = parent;
            parent = child->GetParent();
          }
          if (parent && IsBidiSplittable(parent)) {
            SplitInlineAncestors(parent, child);
          }
        }
      } else {
        // We're not at an end of a run. If |frame| is the last child of its
        // parent, and its ancestors happen to have bidi continuations, convert
        // them into fluid continuations.
        JoinInlineAncestors(frame);
      }
    }
  } // for

  // For nested paragraphs, split the ancestor inlines around the first and
  // last frames of this paragraph.
  if (aBpd->mParagraphDepth > 1) {
    nsIFrame* child;
    nsIFrame* parent;
    if (firstFrame) {
      child = firstFrame->GetParent();
      if (child) {
        parent = child->GetParent();
        if (parent && IsBidiSplittable(parent)) {
          nsIFrame* prev = child->GetPrevSibling();
          if (prev) {
            SplitInlineAncestors(parent, prev);
          }
        }
      }
    }
    if (lastFrame) {
      child = lastFrame->GetParent();
      if (child) {
        parent = child->GetParent();
        if (parent && IsBidiSplittable(parent)) {
          SplitInlineAncestors(parent, child);
        }
      }
    }
  }

#ifdef DEBUG
#ifdef REALLY_NOISY_BIDI
  printf("---\nAfter Resolve(), frameTree =:\n");
  aBlockFrame->List(stdout, 0);
  printf("===\n");
#endif
#endif

  return rv;
}
169
26,781
0
/*
 * Install the uid-specific keyring and the default user-session keyring for
 * the current process's user, creating them if they do not already exist
 * (CVE-2013-1792 patched version: creation and installation are serialised
 * under key_user_keyring_mutex, and both pointers are published only after
 * both keyrings were obtained successfully).
 *
 * Returns 0 on success or a negative errno from keyring allocation/linking.
 */
int CVE_2013_1792_PATCHED_install_user_keyrings(void)
{
	struct user_struct *user;
	const struct cred *cred;
	struct key *uid_keyring, *session_keyring;
	char buf[20];
	int ret;
	uid_t uid;

	cred = current_cred();
	user = cred->user;
	uid = from_kuid(cred->user_ns, user->uid);

	kenter("%p{%u}", user, uid);

	/* Fast path: both keyrings already installed.  This is an unlocked
	 * check; the uid_keyring pointer is re-tested under the mutex below. */
	if (user->uid_keyring && user->session_keyring) {
		kleave(" = 0 [exist]");
		return 0;
	}

	mutex_lock(&key_user_keyring_mutex);
	ret = 0;

	if (!user->uid_keyring) {
		/* get the UID-specific keyring
		 * - there may be one in existence already as it may have been
		 *   pinned by a session, but the user_struct pointing to it
		 *   may have been destroyed by setuid */
		sprintf(buf, "_uid.%u", uid);

		uid_keyring = find_keyring_by_name(buf, true);
		if (IS_ERR(uid_keyring)) {
			uid_keyring = keyring_alloc(buf, user->uid, INVALID_GID,
						    cred, KEY_ALLOC_IN_QUOTA,
						    NULL);
			if (IS_ERR(uid_keyring)) {
				ret = PTR_ERR(uid_keyring);
				goto error;
			}
		}

		/* get a default session keyring (which might also exist
		 * already) */
		sprintf(buf, "_uid_ses.%u", uid);

		session_keyring = find_keyring_by_name(buf, true);
		if (IS_ERR(session_keyring)) {
			session_keyring =
				keyring_alloc(buf, user->uid, INVALID_GID,
					      cred, KEY_ALLOC_IN_QUOTA, NULL);
			if (IS_ERR(session_keyring)) {
				ret = PTR_ERR(session_keyring);
				goto error_release;
			}

			/* we install a link from the user session keyring to
			 * the user keyring */
			ret = key_link(session_keyring, uid_keyring);
			if (ret < 0)
				goto error_release_both;
		}

		/* install the keyrings */
		user->uid_keyring = uid_keyring;
		user->session_keyring = session_keyring;
	}

	mutex_unlock(&key_user_keyring_mutex);
	kleave(" = 0");
	return 0;

	/* Error unwinding: drop the references taken above, newest first. */
error_release_both:
	key_put(session_keyring);
error_release:
	key_put(uid_keyring);
error:
	mutex_unlock(&key_user_keyring_mutex);
	kleave(" = %d", ret);
	return ret;
}
170
135,548
0
static int CVE_2013_2094_PATCHED_perf_swevent_init(struct perf_event *event) { u64 event_id = event->attr.config; if (event->attr.type != PERF_TYPE_SOFTWARE) return -ENOENT; /* * no branch sampling for software events */ if (has_branch_stack(event)) return -EOPNOTSUPP; switch (event_id) { case PERF_COUNT_SW_CPU_CLOCK: case PERF_COUNT_SW_TASK_CLOCK: return -ENOENT; default: break; } if (event_id >= PERF_COUNT_SW_MAX) return -ENOENT; if (!event->parent) { int err; err = swevent_hlist_get(event); if (err) return err; static_key_slow_inc(&perf_swevent_enabled[event_id]); event->destroy = sw_perf_event_destroy; } return 0; }
171
22,411
0
static int CVE_2013_2548_PATCHED_crypto_report_one(struct crypto_alg *alg, struct crypto_user_alg *ualg, struct sk_buff *skb) { strncpy(ualg->cru_name, alg->cra_name, sizeof(ualg->cru_name)); strncpy(ualg->cru_driver_name, alg->cra_driver_name, sizeof(ualg->cru_driver_name)); strncpy(ualg->cru_module_name, module_name(alg->cra_module), sizeof(ualg->cru_module_name)); ualg->cru_type = 0; ualg->cru_mask = 0; ualg->cru_flags = alg->cra_flags; ualg->cru_refcnt = atomic_read(&alg->cra_refcnt); if (nla_put_u32(skb, CRYPTOCFGA_PRIORITY_VAL, alg->cra_priority)) goto nla_put_failure; if (alg->cra_flags & CRYPTO_ALG_LARVAL) { struct crypto_report_larval rl; strncpy(rl.type, "larval", sizeof(rl.type)); if (nla_put(skb, CRYPTOCFGA_REPORT_LARVAL, sizeof(struct crypto_report_larval), &rl)) goto nla_put_failure; goto out; } if (alg->cra_type && alg->cra_type->report) { if (alg->cra_type->report(skb, alg)) goto nla_put_failure; goto out; } switch (alg->cra_flags & (CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_LARVAL)) { case CRYPTO_ALG_TYPE_CIPHER: if (crypto_report_cipher(skb, alg)) goto nla_put_failure; break; case CRYPTO_ALG_TYPE_COMPRESS: if (crypto_report_comp(skb, alg)) goto nla_put_failure; break; } out: return 0; nla_put_failure: return -EMSGSIZE; }
172
180,229
0
/**
 * Decode one CD+Graphics packet into the decoder's persistent frame
 * (CVE-2013-3674 patched version: the packet size is validated against both
 * CDG_MINIMUM_PKT_SIZE and CDG_HEADER_SIZE + CDG_DATA_SIZE before the
 * payload is copied into the fixed-size cdg_data[] buffer, and the handlers
 * that read a full data block re-check the remaining payload length).
 *
 * @param avctx      Codec context (priv_data is a CDGraphicsContext).
 * @param data       Output AVFrame (copy of the persistent frame).
 * @param got_frame  Set to 1 when a CDG command was processed, else 0.
 * @param avpkt      Input packet.
 * @return number of bytes consumed, or a negative AVERROR code.
 */
static int CVE_2013_3674_PATCHED_cdg_decode_frame(AVCodecContext *avctx,
                            void *data, int *got_frame, AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size       = avpkt->size;
    int ret;
    uint8_t command, inst;
    uint8_t cdg_data[CDG_DATA_SIZE];
    AVFrame new_frame;
    CDGraphicsContext *cc = avctx->priv_data;

    /* Reject packets that are too small or too large for a CDG record. */
    if (buf_size < CDG_MINIMUM_PKT_SIZE) {
        av_log(avctx, AV_LOG_ERROR, "buffer too small for decoder\n");
        return AVERROR(EINVAL);
    }
    if (buf_size > CDG_HEADER_SIZE + CDG_DATA_SIZE) {
        av_log(avctx, AV_LOG_ERROR, "buffer too big for decoder\n");
        return AVERROR(EINVAL);
    }

    ret = avctx->reget_buffer(avctx, &cc->frame);
    if (ret) {
        av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
        return ret;
    }
    /* Clear the image and palette on the very first frame. */
    if (!avctx->frame_number) {
        memset(cc->frame.data[0], 0, cc->frame.linesize[0] * avctx->height);
        memset(cc->frame.data[1], 0, AVPALETTE_SIZE);
    }

    command = bytestream_get_byte(&buf);
    inst    = bytestream_get_byte(&buf);
    inst    &= CDG_MASK;
    buf += 2;  /// skipping 2 unneeded bytes

    if (buf_size > CDG_HEADER_SIZE)
        bytestream_get_buffer(&buf, cdg_data, buf_size - CDG_HEADER_SIZE);

    if ((command & CDG_MASK) == CDG_COMMAND) {
        switch (inst) {
        case CDG_INST_MEMORY_PRESET:
            if (!(cdg_data[1] & 0x0F))
                memset(cc->frame.data[0], cdg_data[0] & 0x0F,
                       cc->frame.linesize[0] * CDG_FULL_HEIGHT);
            break;
        case CDG_INST_LOAD_PAL_LO:
        case CDG_INST_LOAD_PAL_HIGH:
            /* Palette loads need a full data block. */
            if (buf_size - CDG_HEADER_SIZE < CDG_DATA_SIZE) {
                av_log(avctx, AV_LOG_ERROR,
                       "buffer too small for loading palette\n");
                return AVERROR(EINVAL);
            }

            cdg_load_palette(cc, cdg_data, inst == CDG_INST_LOAD_PAL_LO);
            break;
        case CDG_INST_BORDER_PRESET:
            cdg_border_preset(cc, cdg_data);
            break;
        case CDG_INST_TILE_BLOCK_XOR:
        case CDG_INST_TILE_BLOCK:
            /* Tile draws need a full data block. */
            if (buf_size - CDG_HEADER_SIZE < CDG_DATA_SIZE) {
                av_log(avctx, AV_LOG_ERROR,
                       "buffer too small for drawing tile\n");
                return AVERROR(EINVAL);
            }

            ret = cdg_tile_block(cc, cdg_data, inst == CDG_INST_TILE_BLOCK_XOR);
            if (ret) {
                av_log(avctx, AV_LOG_ERROR, "tile is out of range\n");
                return ret;
            }
            break;
        case CDG_INST_SCROLL_PRESET:
        case CDG_INST_SCROLL_COPY:
            if (buf_size - CDG_HEADER_SIZE < CDG_MINIMUM_SCROLL_SIZE) {
                av_log(avctx, AV_LOG_ERROR,
                       "buffer too small for scrolling\n");
                return AVERROR(EINVAL);
            }

            /* Scrolling renders into a fresh frame, then swaps it in. */
            cdg_init_frame(&new_frame);
            ret = ff_get_buffer(avctx, &new_frame);
            if (ret) {
                av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
                return ret;
            }

            cdg_scroll(cc, cdg_data, &new_frame, inst == CDG_INST_SCROLL_COPY);
            avctx->release_buffer(avctx, &cc->frame);
            cc->frame = new_frame;
            break;
        default:
            break;
        }

        *got_frame = 1;
    } else {
        *got_frame = 0;
        buf_size   = 0;
    }

    *(AVFrame *) data = cc->frame;
    return buf_size;
}
173
140,502
0
/**
 * Sequential read callback for Ixia IxVeriWave capture files
 * (CVE-2013-4082 patched version: the record size is validated against the
 * per-file STATS_LEN before the record is handed to the per-FPGA decoders,
 * preventing them from reading past the end of the record buffer).
 *
 * Reads the next frame record into a local buffer, reserves enough space in
 * the wtap frame buffer for the record plus the synthesized pseudo-header,
 * and dispatches to the decoder matching the capture hardware.
 *
 * Returns TRUE and sets *data_offset on success; FALSE on error or EOF with
 * *err / *err_info filled in.
 */
static gboolean CVE_2013_4082_PATCHED_vwr_read(wtap *wth, int *err, gchar **err_info, gint64 *data_offset)
{
    vwr_t *vwr = (vwr_t *)wth->priv;
    guint8 rec[B_SIZE];                    /* local buffer (holds input record) */
    int rec_size = 0, IS_TX;
    guint8 *data_ptr;
    guint16 pkt_len;                       /* length of radiotap headers */

    /* read the next frame record header in the capture file; if no more frames, return */
    if (!vwr_read_rec_header(vwr, wth->fh, &rec_size, &IS_TX, err, err_info))
        return(FALSE);                     /* Read error or EOF */

    *data_offset = (file_tell(wth->fh) - 16); /* set offset for random seek @PLCP */

    /* got a frame record; read over entire record (frame + trailer) into a local buffer */
    /* if we don't get it all, then declare an error, we can't process the frame */
    if (file_read(rec, rec_size, wth->fh) != rec_size) {
        *err = file_error(wth->fh, err_info);
        if (*err == 0)
            *err = WTAP_ERR_SHORT_READ;
        return(FALSE);
    }

    /* Records shorter than the statistics trailer cannot be decoded. */
    if (rec_size < (int)vwr->STATS_LEN) {
        *err = file_error(wth->fh, err_info);
        if (*err == 0)
            *err_info = g_strdup_printf("vwr: Invalid record length %d (must be at least %u)",
                                        rec_size, vwr->STATS_LEN);
        *err = WTAP_ERR_BAD_FILE;
        return(FALSE);
    }

    /* before writing anything out, make sure the buffer has enough space for everything */
    if ((vwr->FPGA_VERSION == vVW510021_W_FPGA) ||
        (vwr->FPGA_VERSION == vVW510006_W_FPGA) )
        /* frames are always 802.11 with an extended radiotap header */
        pkt_len = (guint16)(rec_size + STATS_COMMON_FIELDS_LEN + EXT_RTAP_FIELDS_LEN);
    else
        /* frames are always ethernet with an extended ethernettap header */
        pkt_len = (guint16)(rec_size + STATS_COMMON_FIELDS_LEN + STATS_ETHERNETTAP_FIELDS_LEN);
    buffer_assure_space(wth->frame_buffer, pkt_len);
    data_ptr = buffer_start_ptr(wth->frame_buffer);

    /* now format up the frame data */
    switch (vwr->FPGA_VERSION)
    {
        case vVW510006_W_FPGA:
            vwr_read_rec_data(wth, data_ptr, rec, rec_size);
            break;
        case vVW510021_W_FPGA:
            vwr_read_rec_data_vVW510021(wth, data_ptr, rec, rec_size, IS_TX);
            break;
        case vVW510012_E_FPGA:
            vwr_read_rec_data_ethernet(wth, data_ptr, rec, rec_size, IS_TX);
            break;
        case vVW510024_E_FPGA:
            vwr_read_rec_data_ethernet(wth, data_ptr, rec, rec_size, IS_TX);
            break;
    }

    /* If the per-file encapsulation isn't known, set it to this packet's encapsulation */
    /* If it *is* known, and it isn't this packet's encapsulation, set it to */
    /* WTAP_ENCAP_PER_PACKET, as this file doesn't have a single encapsulation for all */
    /* packets in the file */
    if (wth->file_encap == WTAP_ENCAP_UNKNOWN)
        wth->file_encap = wth->phdr.pkt_encap;
    else {
        if (wth->file_encap != wth->phdr.pkt_encap)
            wth->file_encap = WTAP_ENCAP_PER_PACKET;
    }

    return(TRUE);
}
174
128,681
0
/**
 * Restore a VirtIODevice's state from the incoming migration stream @f
 * (CVE-2013-4151 patched version: the queue count read from the stream is
 * bounded by VIRTIO_PCI_QUEUE_MAX before it is used to index vdev->vq[],
 * and each queue's guest/host indices are cross-checked).
 *
 * Returns 0 on success, or a negative value / -1 when the transport hook
 * fails or the stream contains inconsistent state.
 */
int CVE_2013_4151_PATCHED_virtio_load(VirtIODevice *vdev, QEMUFile *f)
{
    int i, ret;
    uint32_t num;
    uint32_t features;
    uint32_t supported_features;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    /* Let the transport (e.g. virtio-pci) restore its own config first. */
    if (k->load_config) {
        ret = k->load_config(qbus->parent, f);
        if (ret)
            return ret;
    }

    qemu_get_8s(f, &vdev->status);
    qemu_get_8s(f, &vdev->isr);
    qemu_get_be16s(f, &vdev->queue_sel);
    qemu_get_be32s(f, &features);

    /* The negotiated feature set must be acceptable to this device. */
    if (virtio_set_features(vdev, features) < 0) {
        supported_features = k->get_features(qbus->parent);
        error_report("Features 0x%x unsupported. Allowed features: 0x%x",
                     features, supported_features);
        return -1;
    }
    vdev->config_len = qemu_get_be32(f);
    qemu_get_buffer(f, vdev->config, vdev->config_len);

    num = qemu_get_be32(f);

    /* Bound the queue count before indexing vdev->vq[] below. */
    if (num > VIRTIO_PCI_QUEUE_MAX) {
        error_report("Invalid number of PCI queues: 0x%x", num);
        return -1;
    }

    for (i = 0; i < num; i++) {
        vdev->vq[i].vring.num = qemu_get_be32(f);
        if (k->has_variable_vring_alignment) {
            vdev->vq[i].vring.align = qemu_get_be32(f);
        }
        vdev->vq[i].pa = qemu_get_be64(f);
        qemu_get_be16s(f, &vdev->vq[i].last_avail_idx);
        vdev->vq[i].signalled_used_valid = false;
        vdev->vq[i].notification = true;

        if (vdev->vq[i].pa) {
            uint16_t nheads;
            virtqueue_init(&vdev->vq[i]);
            nheads = vring_avail_idx(&vdev->vq[i]) - vdev->vq[i].last_avail_idx;
            /* Check it isn't doing very strange things with descriptor numbers. */
            if (nheads > vdev->vq[i].vring.num) {
                error_report("VQ %d size 0x%x Guest index 0x%x "
                             "inconsistent with Host index 0x%x: delta 0x%x",
                             i, vdev->vq[i].vring.num,
                             vring_avail_idx(&vdev->vq[i]),
                             vdev->vq[i].last_avail_idx, nheads);
                return -1;
            }
        } else if (vdev->vq[i].last_avail_idx) {
            /* A queue with no ring address must not claim progress. */
            error_report("VQ %d address 0x0 "
                         "inconsistent with Host index 0x%x",
                         i, vdev->vq[i].last_avail_idx);
            return -1;
        }
        if (k->load_queue) {
            ret = k->load_queue(qbus->parent, i, f);
            if (ret)
                return ret;
        }
    }

    virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);

    return 0;
}
175
75,374
0
/*
 * Build the UDP header and checksum for the corked data queued on @sk and
 * hand it to ip6_push_pending_frames() for transmission.
 *
 * CVE-2013-4162 patched version: if the pending data was corked via the
 * IPv4 path (up->pending == AF_INET, e.g. an IPv4-mapped destination on a
 * v6 socket), delegate to udp_push_pending_frames() instead of touching
 * the IPv6 flow information.
 *
 * Returns 0 on success or a negative errno.
 */
static int CVE_2013_4162_PATCHED_udp_v6_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb;
	struct udphdr *uh;
	struct udp_sock  *up = udp_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct flowi6 *fl6;
	int err = 0;
	int is_udplite = IS_UDPLITE(sk);
	__wsum csum = 0;

	if (up->pending == AF_INET)
		return udp_push_pending_frames(sk);

	fl6 = &inet->cork.fl.u.ip6;

	/* Grab the skbuff where UDP header space exists. */
	if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)
		goto out;

	/*
	 * Create a UDP header
	 */
	uh = udp_hdr(skb);
	uh->source = fl6->fl6_sport;
	uh->dest = fl6->fl6_dport;
	uh->len = htons(up->len);
	uh->check = 0;

	if (is_udplite)
		csum = udplite_csum_outgoing(sk, skb);
	else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
		udp6_hwcsum_outgoing(sk, skb, &fl6->saddr, &fl6->daddr,
				     up->len);
		goto send;
	} else
		csum = udp_csum_outgoing(sk, skb);

	/* add protocol-dependent pseudo-header */
	uh->check = csum_ipv6_magic(&fl6->saddr, &fl6->daddr,
				    up->len, fl6->flowi6_proto, csum);
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0; /* 0 would mean "no checksum" */

send:
	err = ip6_push_pending_frames(sk);
	if (err) {
		/* -ENOBUFS is swallowed unless the app asked for errors. */
		if (err == -ENOBUFS && !inet6_sk(sk)->recverr) {
			UDP6_INC_STATS_USER(sock_net(sk),
					    UDP_MIB_SNDBUFERRORS, is_udplite);
			err = 0;
		}
	} else
		UDP6_INC_STATS_USER(sock_net(sk),
				    UDP_MIB_OUTDATAGRAMS, is_udplite);
out:
	/* The cork is always cleared, even on error. */
	up->len = 0;
	up->pending = 0;
	return err;
}
176
73,421
0
static __inline__ int CVE_2013_4300_PATCHED_scm_check_creds(struct ucred *creds) { const struct cred *cred = current_cred(); kuid_t uid = make_kuid(cred->user_ns, creds->uid); kgid_t gid = make_kgid(cred->user_ns, creds->gid); if (!uid_valid(uid) || !gid_valid(gid)) return -EINVAL; if ((creds->pid == task_tgid_vnr(current) || ns_capable(task_active_pid_ns(current)->user_ns, CAP_SYS_ADMIN)) && ((uid_eq(uid, cred->uid) || uid_eq(uid, cred->euid) || uid_eq(uid, cred->suid)) || nsown_capable(CAP_SETUID)) && ((gid_eq(gid, cred->gid) || gid_eq(gid, cred->egid) || gid_eq(gid, cred->sgid)) || nsown_capable(CAP_SETGID))) { return 0; } return -EPERM; }
177
153,647
0
/*
 * Dissect the body of a DCOM ActivationProperties blob
 * (CVE-2013-4924 patched version: the number of properties dissected is
 * clamped to the smaller of the CLSID count and the size count collected
 * earlier, so a mismatched header cannot drive the loop past either array).
 *
 * Returns the offset just past the last dissected property.
 */
static int CVE_2013_4924_PATCHED_dissect_dcom_ActivationPropertiesBody(tvbuff_t *tvb, gint offset, packet_info *pinfo,
				       proto_tree *tree, guint8 *drep)
{
	gint              start_offset;
	proto_item       *props_item;
	proto_tree       *props_tree;
	dcerpc_info      *info;
	property_guids_t *guids;
	guint32           idx;
	guint32           safe_count;

	info  = (dcerpc_info *)pinfo->private_data;
	guids = (property_guids_t *)info->private_data;

	/* Dissect only as many properties as have both a CLSID and a size. */
	if (guids->id_idx == guids->size_idx) {
		safe_count = guids->id_idx;
	} else {
		/* TODO: expert info */
		safe_count = MIN(guids->id_idx, guids->size_idx);
	}

	props_item = proto_tree_add_text(tree, tvb, offset, 0, "Properties");
	props_tree = proto_item_add_subtree(props_item, ett_properties);
	start_offset = offset;

	for (idx = 0; idx < safe_count; idx++) {
		offset = dissect_dcom_ActivationProperty(tvb, offset, pinfo,
							 props_tree, drep,
							 &guids->guid[idx],
							 guids->size[idx]);
	}
	proto_item_set_len(props_item, offset - start_offset);

	return offset;
}
178
115,909
0
/*
 * CVE-2013-4933 patched netmon_open(): probe and open a Microsoft
 * Network Monitor capture file.
 *
 * Returns 1 on success, 0 when the magic does not match (not a NetMon
 * file), or -1 on error with *err / *err_info set.
 *
 * Flow visible in the body:
 *  - read and check the magic, then the remainder of the header;
 *  - accept only major versions 1 and 2; map the network type through
 *    netmon_encap[], rejecting unknown/unsupported encapsulations;
 *  - allocate the per-file netmon_t state and install the subtype
 *    read / seek_read / sequential_close callbacks;
 *  - convert the header's per-field start timestamp with mktime()
 *    (local-time based; the limitations are discussed in the body);
 *  - read the frame index table.  The CVE-2013-4933 hardening lives
 *    here: the table length must be a non-zero multiple of
 *    sizeof(guint32) and is clamped to 512MB before g_try_malloc(), so
 *    a bogus header value cannot drive a huge or zero-sized allocation;
 *  - byte-swap the table on big-endian hosts and pick the timestamp
 *    precision (msec for v1, nsec reported for v2) from the version.
 *
 * NOTE: the body below is kept byte-identical on its original collapsed
 * physical lines — the line breaks fall inside block comments and a
 * "#ifdef WORDS_BIGENDIAN" region sits mid-line, so do not reflow.
 */
int CVE_2013_4933_PATCHED_netmon_open(wtap *wth, int *err, gchar **err_info) { int bytes_read; char magic[MAGIC_SIZE]; struct netmon_hdr hdr; int file_type; struct tm tm; guint32 frame_table_offset; guint32 frame_table_length; guint32 frame_table_size; guint32 *frame_table; #ifdef WORDS_BIGENDIAN unsigned int i; #endif netmon_t *netmon; /* Read in the string that should be at the start of a Network * Monitor file */ errno = WTAP_ERR_CANT_READ; bytes_read = file_read(magic, MAGIC_SIZE, wth->fh); if (bytes_read != MAGIC_SIZE) { *err = file_error(wth->fh, err_info); if (*err != 0 && *err != WTAP_ERR_SHORT_READ) return -1; return 0; } if (memcmp(magic, netmon_1_x_magic, MAGIC_SIZE) != 0 && memcmp(magic, netmon_2_x_magic, MAGIC_SIZE) != 0) { return 0; } /* Read the rest of the header. */ errno = WTAP_ERR_CANT_READ; bytes_read = file_read(&hdr, sizeof hdr, wth->fh); if (bytes_read != sizeof hdr) { *err = file_error(wth->fh, err_info); if (*err == 0) *err = WTAP_ERR_SHORT_READ; return -1; } switch (hdr.ver_major) { case 1: file_type = WTAP_FILE_NETMON_1_x; break; case 2: file_type = WTAP_FILE_NETMON_2_x; break; default: *err = WTAP_ERR_UNSUPPORTED; *err_info = g_strdup_printf("netmon: major version %u unsupported", hdr.ver_major); return -1; } hdr.network = pletohs(&hdr.network); if (hdr.network >= NUM_NETMON_ENCAPS || netmon_encap[hdr.network] == WTAP_ENCAP_UNKNOWN) { *err = WTAP_ERR_UNSUPPORTED_ENCAP; *err_info = g_strdup_printf("netmon: network type %u unknown or unsupported", hdr.network); return -1; } /* This is a netmon file */ wth->file_type = file_type; netmon = (netmon_t *)g_malloc(sizeof(netmon_t)); wth->priv = (void *)netmon; wth->subtype_read = netmon_read; wth->subtype_seek_read = netmon_seek_read; wth->subtype_sequential_close = netmon_sequential_close; /* NetMon capture file formats v2.1+ use per-packet encapsulation types. NetMon 3 sets the value in * the header to 1 (Ethernet) for backwards compability. 
*/ if((hdr.ver_major == 2 && hdr.ver_minor >= 1) || hdr.ver_major > 2) wth->file_encap = WTAP_ENCAP_PER_PACKET; else wth->file_encap = netmon_encap[hdr.network]; wth->snapshot_length = 0; /* not available in header */ /* * Convert the time stamp to a "time_t" and a number of * milliseconds. */ tm.tm_year = pletohs(&hdr.ts_year) - 1900; tm.tm_mon = pletohs(&hdr.ts_month) - 1; tm.tm_mday = pletohs(&hdr.ts_day); tm.tm_hour = pletohs(&hdr.ts_hour); tm.tm_min = pletohs(&hdr.ts_min); tm.tm_sec = pletohs(&hdr.ts_sec); tm.tm_isdst = -1; netmon->start_secs = mktime(&tm); /* * XXX - what if "secs" is -1? Unlikely, but if the capture was * done in a time zone that switches between standard and summer * time sometime other than when we do, and thus the time was one * that doesn't exist here because a switch from standard to summer * time zips over it, it could happen. * * On the other hand, if the capture was done in a different time * zone, this won't work right anyway; unfortunately, the time * zone isn't stored in the capture file (why the hell didn't * they stuff a FILETIME, which is the number of 100-nanosecond * intervals since 1601-01-01 00:00:00 "UTC", there, instead * of stuffing a SYSTEMTIME, which is time-zone-dependent, there?). */ netmon->start_nsecs = pletohs(&hdr.ts_msec)*1000000; netmon->version_major = hdr.ver_major; netmon->version_minor = hdr.ver_minor; /* * Get the offset of the frame index table. */ frame_table_offset = pletohl(&hdr.frametableoffset); /* * It appears that some NetMon 2.x files don't have the * first packet starting exactly 128 bytes into the file. * * Furthermore, it also appears that there are "holes" in * the file, i.e. frame N+1 doesn't always follow immediately * after frame N. * * Therefore, we must read the frame table, and use the offsets * in it as the offsets of the frames. 
*/ frame_table_length = pletohl(&hdr.frametablelength); frame_table_size = frame_table_length / (guint32)sizeof (guint32); if ((frame_table_size * sizeof (guint32)) != frame_table_length) { *err = WTAP_ERR_BAD_FILE; *err_info = g_strdup_printf("netmon: frame table length is %u, which is not a multiple of the size of an entry", frame_table_length); return -1; } if (frame_table_size == 0) { *err = WTAP_ERR_BAD_FILE; *err_info = g_strdup_printf("netmon: frame table length is %u, which means it's less than one entry in size", frame_table_length); return -1; } /* * XXX - clamp the size of the frame table, so that we don't * attempt to allocate a huge frame table and fail. * * Given that file offsets in the frame table are 32-bit, * a NetMon file cannot be bigger than 2^32 bytes. * Given that a NetMon 1.x-format packet header is 8 bytes, * that means a NetMon file cannot have more than * 512*2^20 packets. We'll pick that as the limit for * now; it's 1/8th of a 32-bit address space, which is * probably not going to exhaust the address space all by * itself, and probably won't exhaust the backing store. */ if (frame_table_size > 512*1024*1024) { *err = WTAP_ERR_BAD_FILE; *err_info = g_strdup_printf("netmon: frame table length is %u, which is larger than we support", frame_table_length); return -1; } if (file_seek(wth->fh, frame_table_offset, SEEK_SET, err) == -1) { return -1; } frame_table = (guint32 *)g_try_malloc(frame_table_length); if (frame_table_length != 0 && frame_table == NULL) { *err = ENOMEM; /* we assume we're out of memory */ return -1; } errno = WTAP_ERR_CANT_READ; bytes_read = file_read(frame_table, frame_table_length, wth->fh); if ((guint32)bytes_read != frame_table_length) { *err = file_error(wth->fh, err_info); if (*err == 0) *err = WTAP_ERR_SHORT_READ; g_free(frame_table); return -1; } netmon->frame_table_size = frame_table_size; netmon->frame_table = frame_table; #ifdef WORDS_BIGENDIAN /* * OK, now byte-swap the frame table. 
*/ for (i = 0; i < frame_table_size; i++) frame_table[i] = pletohl(&frame_table[i]); #endif /* Set up to start reading at the first frame. */ netmon->current_frame = 0; switch (netmon->version_major) { case 1: /* * Version 1.x of the file format supports * millisecond precision. */ wth->tsprecision = WTAP_FILE_TSPREC_MSEC; break; case 2: /* * Version 1.x of the file format supports * 100-nanosecond precision; we don't * currently support that, so say * "nanosecond precision" for now. */ wth->tsprecision = WTAP_FILE_TSPREC_NSEC; break; } return 1; }
179
118,430
0
/*
 * CVE-2013-6380 patched aac_send_raw_srb(): aacraid ioctl path that
 * forwards a caller-built raw SRB (SCSI request block) to the adapter.
 *
 * Flow visible in the body:
 *  - requires CAP_SYS_ADMIN and an adapter that is not in reset;
 *  - copies the user-supplied fibsize and validates it against
 *    [sizeof(user_aac_srb) - sizeof(user_sgentry),
 *     dev->max_fib_size - sizeof(aac_fibhdr)] before kmalloc + copy;
 *  - rejects user_srbcmd->sg.count > ARRAY_SIZE(sg_list) (32) — the
 *    CVE-2013-6380 bound check that keeps the scatter-gather loop from
 *    overflowing the fixed sg_user[] / sg_list[] arrays;
 *  - cross-checks the user fibsize against the size recomputed from
 *    sg.count for both the 32-bit and 64-bit sgmap layouts, and rejects
 *    sg entries with DMA_NONE direction;
 *  - for each SG entry: bounds-check the per-entry byte count, kmalloc
 *    a bounce buffer, copy user data in for SRB_DataOut, then
 *    pci_map_single() it into the adapter-visible sgmap;
 *  - sends the fib (ScsiPortCommand64 or ScsiPortCommand), copies the
 *    bounce buffers back to user space for SRB_DataIn, and copies the
 *    aac_srb_reply located at arg + fibsize;
 *  - "cleanup:" frees the srb copy and all bounce buffers, and
 *    completes/frees the fib unless interrupted by -ERESTARTSYS.
 *
 * NOTE: the body below is kept byte-identical on its original collapsed
 * physical lines.  Do NOT reflow it: it contains "//" comments whose
 * extent depends on the existing physical line breaks.
 */
static int CVE_2013_6380_PATCHED_aac_send_raw_srb(struct aac_dev* dev, void __user * arg) { struct fib* srbfib; int status; struct aac_srb *srbcmd = NULL; struct user_aac_srb *user_srbcmd = NULL; struct user_aac_srb __user *user_srb = arg; struct aac_srb_reply __user *user_reply; struct aac_srb_reply* reply; u32 fibsize = 0; u32 flags = 0; s32 rcode = 0; u32 data_dir; void __user *sg_user[32]; void *sg_list[32]; u32 sg_indx = 0; u32 byte_count = 0; u32 actual_fibsize64, actual_fibsize = 0; int i; if (dev->in_reset) { dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n")); return -EBUSY; } if (!capable(CAP_SYS_ADMIN)){ dprintk((KERN_DEBUG"aacraid: No permission to send raw srb\n")); return -EPERM; } /* * Allocate and initialize a Fib then setup a SRB command */ if (!(srbfib = aac_fib_alloc(dev))) { return -ENOMEM; } aac_fib_init(srbfib); /* raw_srb FIB is not FastResponseCapable */ srbfib->hw_fib_va->header.XferState &= ~cpu_to_le32(FastResponseCapable); srbcmd = (struct aac_srb*) fib_data(srbfib); memset(sg_list, 0, sizeof(sg_list)); /* cleanup may take issue */ if(copy_from_user(&fibsize, &user_srb->count,sizeof(u32))){ dprintk((KERN_DEBUG"aacraid: Could not copy data size from user\n")); rcode = -EFAULT; goto cleanup; } if ((fibsize < (sizeof(struct user_aac_srb) - sizeof(struct user_sgentry))) || (fibsize > (dev->max_fib_size - sizeof(struct aac_fibhdr)))) { rcode = -EINVAL; goto cleanup; } user_srbcmd = kmalloc(fibsize, GFP_KERNEL); if (!user_srbcmd) { dprintk((KERN_DEBUG"aacraid: Could not make a copy of the srb\n")); rcode = -ENOMEM; goto cleanup; } if(copy_from_user(user_srbcmd, user_srb,fibsize)){ dprintk((KERN_DEBUG"aacraid: Could not copy srb from user\n")); rcode = -EFAULT; goto cleanup; } user_reply = arg+fibsize; flags = user_srbcmd->flags; /* from user in cpu order */ // Fix up srb for endian and force some values srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi); // Force this srbcmd->channel = cpu_to_le32(user_srbcmd->channel); srbcmd->id = 
cpu_to_le32(user_srbcmd->id); srbcmd->lun = cpu_to_le32(user_srbcmd->lun); srbcmd->timeout = cpu_to_le32(user_srbcmd->timeout); srbcmd->flags = cpu_to_le32(flags); srbcmd->retry_limit = 0; // Obsolete parameter srbcmd->cdb_size = cpu_to_le32(user_srbcmd->cdb_size); memcpy(srbcmd->cdb, user_srbcmd->cdb, sizeof(srbcmd->cdb)); switch (flags & (SRB_DataIn | SRB_DataOut)) { case SRB_DataOut: data_dir = DMA_TO_DEVICE; break; case (SRB_DataIn | SRB_DataOut): data_dir = DMA_BIDIRECTIONAL; break; case SRB_DataIn: data_dir = DMA_FROM_DEVICE; break; default: data_dir = DMA_NONE; } if (user_srbcmd->sg.count > ARRAY_SIZE(sg_list)) { dprintk((KERN_DEBUG"aacraid: too many sg entries %d\n", le32_to_cpu(srbcmd->sg.count))); rcode = -EINVAL; goto cleanup; } actual_fibsize = sizeof(struct aac_srb) - sizeof(struct sgentry) + ((user_srbcmd->sg.count & 0xff) * sizeof(struct sgentry)); actual_fibsize64 = actual_fibsize + (user_srbcmd->sg.count & 0xff) * (sizeof(struct sgentry64) - sizeof(struct sgentry)); /* User made a mistake - should not continue */ if ((actual_fibsize != fibsize) && (actual_fibsize64 != fibsize)) { dprintk((KERN_DEBUG"aacraid: Bad Size specified in " "Raw SRB command calculated fibsize=%lu;%lu " "user_srbcmd->sg.count=%d aac_srb=%lu sgentry=%lu;%lu " "issued fibsize=%d\n", actual_fibsize, actual_fibsize64, user_srbcmd->sg.count, sizeof(struct aac_srb), sizeof(struct sgentry), sizeof(struct sgentry64), fibsize)); rcode = -EINVAL; goto cleanup; } if ((data_dir == DMA_NONE) && user_srbcmd->sg.count) { dprintk((KERN_DEBUG"aacraid: SG with no direction specified in Raw SRB command\n")); rcode = -EINVAL; goto cleanup; } byte_count = 0; if (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64) { struct user_sgmap64* upsg = (struct user_sgmap64*)&user_srbcmd->sg; struct sgmap64* psg = (struct sgmap64*)&srbcmd->sg; /* * This should also catch if user used the 32 bit sgmap */ if (actual_fibsize64 == fibsize) { actual_fibsize = actual_fibsize64; for (i = 0; i < upsg->count; i++) { 
u64 addr; void* p; if (upsg->sg[i].count > ((dev->adapter_info.options & AAC_OPT_NEW_COMM) ? (dev->scsi_host_ptr->max_sectors << 9) : 65536)) { rcode = -EINVAL; goto cleanup; } /* Does this really need to be GFP_DMA? */ p = kmalloc(upsg->sg[i].count,GFP_KERNEL|__GFP_DMA); if(!p) { dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n", upsg->sg[i].count,i,upsg->count)); rcode = -ENOMEM; goto cleanup; } addr = (u64)upsg->sg[i].addr[0]; addr += ((u64)upsg->sg[i].addr[1]) << 32; sg_user[i] = (void __user *)(uintptr_t)addr; sg_list[i] = p; // save so we can clean up later sg_indx = i; if (flags & SRB_DataOut) { if(copy_from_user(p,sg_user[i],upsg->sg[i].count)){ dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n")); rcode = -EFAULT; goto cleanup; } } addr = pci_map_single(dev->pdev, p, upsg->sg[i].count, data_dir); psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff); psg->sg[i].addr[1] = cpu_to_le32(addr>>32); byte_count += upsg->sg[i].count; psg->sg[i].count = cpu_to_le32(upsg->sg[i].count); } } else { struct user_sgmap* usg; usg = kmalloc(actual_fibsize - sizeof(struct aac_srb) + sizeof(struct sgmap), GFP_KERNEL); if (!usg) { dprintk((KERN_DEBUG"aacraid: Allocation error in Raw SRB command\n")); rcode = -ENOMEM; goto cleanup; } memcpy (usg, upsg, actual_fibsize - sizeof(struct aac_srb) + sizeof(struct sgmap)); actual_fibsize = actual_fibsize64; for (i = 0; i < usg->count; i++) { u64 addr; void* p; if (usg->sg[i].count > ((dev->adapter_info.options & AAC_OPT_NEW_COMM) ? (dev->scsi_host_ptr->max_sectors << 9) : 65536)) { kfree(usg); rcode = -EINVAL; goto cleanup; } /* Does this really need to be GFP_DMA? 
*/ p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA); if(!p) { dprintk((KERN_DEBUG "aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n", usg->sg[i].count,i,usg->count)); kfree(usg); rcode = -ENOMEM; goto cleanup; } sg_user[i] = (void __user *)(uintptr_t)usg->sg[i].addr; sg_list[i] = p; // save so we can clean up later sg_indx = i; if (flags & SRB_DataOut) { if(copy_from_user(p,sg_user[i],upsg->sg[i].count)){ kfree (usg); dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n")); rcode = -EFAULT; goto cleanup; } } addr = pci_map_single(dev->pdev, p, usg->sg[i].count, data_dir); psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff); psg->sg[i].addr[1] = cpu_to_le32(addr>>32); byte_count += usg->sg[i].count; psg->sg[i].count = cpu_to_le32(usg->sg[i].count); } kfree (usg); } srbcmd->count = cpu_to_le32(byte_count); psg->count = cpu_to_le32(sg_indx+1); status = aac_fib_send(ScsiPortCommand64, srbfib, actual_fibsize, FsaNormal, 1, 1,NULL,NULL); } else { struct user_sgmap* upsg = &user_srbcmd->sg; struct sgmap* psg = &srbcmd->sg; if (actual_fibsize64 == fibsize) { struct user_sgmap64* usg = (struct user_sgmap64 *)upsg; for (i = 0; i < upsg->count; i++) { uintptr_t addr; void* p; if (usg->sg[i].count > ((dev->adapter_info.options & AAC_OPT_NEW_COMM) ? (dev->scsi_host_ptr->max_sectors << 9) : 65536)) { rcode = -EINVAL; goto cleanup; } /* Does this really need to be GFP_DMA? 
*/ p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA); if(!p) { dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n", usg->sg[i].count,i,usg->count)); rcode = -ENOMEM; goto cleanup; } addr = (u64)usg->sg[i].addr[0]; addr += ((u64)usg->sg[i].addr[1]) << 32; sg_user[i] = (void __user *)addr; sg_list[i] = p; // save so we can clean up later sg_indx = i; if (flags & SRB_DataOut) { if(copy_from_user(p,sg_user[i],usg->sg[i].count)){ dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n")); rcode = -EFAULT; goto cleanup; } } addr = pci_map_single(dev->pdev, p, usg->sg[i].count, data_dir); psg->sg[i].addr = cpu_to_le32(addr & 0xffffffff); byte_count += usg->sg[i].count; psg->sg[i].count = cpu_to_le32(usg->sg[i].count); } } else { for (i = 0; i < upsg->count; i++) { dma_addr_t addr; void* p; if (upsg->sg[i].count > ((dev->adapter_info.options & AAC_OPT_NEW_COMM) ? (dev->scsi_host_ptr->max_sectors << 9) : 65536)) { rcode = -EINVAL; goto cleanup; } p = kmalloc(upsg->sg[i].count, GFP_KERNEL); if (!p) { dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n", upsg->sg[i].count, i, upsg->count)); rcode = -ENOMEM; goto cleanup; } sg_user[i] = (void __user *)(uintptr_t)upsg->sg[i].addr; sg_list[i] = p; // save so we can clean up later sg_indx = i; if (flags & SRB_DataOut) { if(copy_from_user(p, sg_user[i], upsg->sg[i].count)) { dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n")); rcode = -EFAULT; goto cleanup; } } addr = pci_map_single(dev->pdev, p, upsg->sg[i].count, data_dir); psg->sg[i].addr = cpu_to_le32(addr); byte_count += upsg->sg[i].count; psg->sg[i].count = cpu_to_le32(upsg->sg[i].count); } } srbcmd->count = cpu_to_le32(byte_count); psg->count = cpu_to_le32(sg_indx+1); status = aac_fib_send(ScsiPortCommand, srbfib, actual_fibsize, FsaNormal, 1, 1, NULL, NULL); } if (status == -ERESTARTSYS) { rcode = -ERESTARTSYS; goto cleanup; } if (status != 0){ 
dprintk((KERN_DEBUG"aacraid: Could not send raw srb fib to hba\n")); rcode = -ENXIO; goto cleanup; } if (flags & SRB_DataIn) { for(i = 0 ; i <= sg_indx; i++){ byte_count = le32_to_cpu( (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64) ? ((struct sgmap64*)&srbcmd->sg)->sg[i].count : srbcmd->sg.sg[i].count); if(copy_to_user(sg_user[i], sg_list[i], byte_count)){ dprintk((KERN_DEBUG"aacraid: Could not copy sg data to user\n")); rcode = -EFAULT; goto cleanup; } } } reply = (struct aac_srb_reply *) fib_data(srbfib); if(copy_to_user(user_reply,reply,sizeof(struct aac_srb_reply))){ dprintk((KERN_DEBUG"aacraid: Could not copy reply to user\n")); rcode = -EFAULT; goto cleanup; } cleanup: kfree(user_srbcmd); for(i=0; i <= sg_indx; i++){ kfree(sg_list[i]); } if (rcode != -ERESTARTSYS) { aac_fib_complete(srbfib); aac_fib_free(srbfib); } return rcode; }
180
183,705
0
/*
 * CVE-2013-6436 patched lxcDomainGetMemoryParameters(): report up to
 * LXC_NB_MEM_PARAM memory tunables of an LXC domain.
 *
 * Visible behavior:
 *  - flags may select VIR_DOMAIN_AFFECT_LIVE and/or _CONFIG;
 *    virDomainLiveConfigHelperMethod() resolves them and fills vmdef for
 *    the persistent-config case;
 *  - access is gated by virDomainGetMemoryParametersEnsureACL();
 *  - LIVE queries require the cgroup memory controller to be mounted,
 *    otherwise VIR_ERR_OPERATION_INVALID is reported;
 *  - *nparams == 0 is a size probe: returns 0 with *nparams set to
 *    LXC_NB_MEM_PARAM;
 *  - params[0..2] are filled with the memory hard limit, memory soft
 *    limit and swap hard limit, each read either from vmdef (CONFIG,
 *    with 0 mapped to VIR_DOMAIN_MEMORY_PARAM_UNLIMITED) or from the
 *    live cgroup;
 *  - *nparams is clamped to LXC_NB_MEM_PARAM on success;
 *  - returns 0 on success, -1 on failure; "cleanup:" always unlocks the
 *    domain object and unrefs caps.
 *
 * NOTE: body kept byte-identical on its original collapsed physical
 * lines (the line break falls mid-ternary); do not reflow.
 */
static int CVE_2013_6436_PATCHED_lxcDomainGetMemoryParameters(virDomainPtr dom, virTypedParameterPtr params, int *nparams, unsigned int flags) { virCapsPtr caps = NULL; virDomainDefPtr vmdef = NULL; virDomainObjPtr vm = NULL; virLXCDomainObjPrivatePtr priv = NULL; virLXCDriverPtr driver = dom->conn->privateData; unsigned long long val; int ret = -1; size_t i; virCheckFlags(VIR_DOMAIN_AFFECT_LIVE | VIR_DOMAIN_AFFECT_CONFIG, -1); if (!(vm = lxcDomObjFromDomain(dom))) goto cleanup; priv = vm->privateData; if (virDomainGetMemoryParametersEnsureACL(dom->conn, vm->def) < 0 || !(caps = virLXCDriverGetCapabilities(driver, false)) || virDomainLiveConfigHelperMethod(caps, driver->xmlopt, vm, &flags, &vmdef) < 0) goto cleanup; if (flags & VIR_DOMAIN_AFFECT_LIVE && !virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_MEMORY)) { virReportError(VIR_ERR_OPERATION_INVALID, "%s", _("cgroup memory controller is not mounted")); goto cleanup; } if ((*nparams) == 0) { /* Current number of memory parameters supported by cgroups */ *nparams = LXC_NB_MEM_PARAM; ret = 0; goto cleanup; } for (i = 0; i < LXC_NB_MEM_PARAM && i < *nparams; i++) { virTypedParameterPtr param = &params[i]; val = 0; switch (i) { case 0: /* fill memory hard limit here */ if (flags & VIR_DOMAIN_AFFECT_CONFIG) { val = vmdef->mem.hard_limit; val = val ? val : VIR_DOMAIN_MEMORY_PARAM_UNLIMITED; } else if (virCgroupGetMemoryHardLimit(priv->cgroup, &val) < 0) { goto cleanup; } if (virTypedParameterAssign(param, VIR_DOMAIN_MEMORY_HARD_LIMIT, VIR_TYPED_PARAM_ULLONG, val) < 0) goto cleanup; break; case 1: /* fill memory soft limit here */ if (flags & VIR_DOMAIN_AFFECT_CONFIG) { val = vmdef->mem.soft_limit; val = val ? 
val : VIR_DOMAIN_MEMORY_PARAM_UNLIMITED; } else if (virCgroupGetMemorySoftLimit(priv->cgroup, &val) < 0) { goto cleanup; } if (virTypedParameterAssign(param, VIR_DOMAIN_MEMORY_SOFT_LIMIT, VIR_TYPED_PARAM_ULLONG, val) < 0) goto cleanup; break; case 2: /* fill swap hard limit here */ if (flags & VIR_DOMAIN_AFFECT_CONFIG) { val = vmdef->mem.swap_hard_limit; val = val ? val : VIR_DOMAIN_MEMORY_PARAM_UNLIMITED; } else if (virCgroupGetMemSwapHardLimit(priv->cgroup, &val) < 0) { goto cleanup; } if (virTypedParameterAssign(param, VIR_DOMAIN_MEMORY_SWAP_HARD_LIMIT, VIR_TYPED_PARAM_ULLONG, val) < 0) goto cleanup; break; /* coverity[dead_error_begin] */ default: break; /* should not hit here */ } } if (*nparams > LXC_NB_MEM_PARAM) *nparams = LXC_NB_MEM_PARAM; ret = 0; cleanup: if (vm) virObjectUnlock(vm); virObjectUnref(caps); return ret; }
181
49,163
0
/*
 * Parse an H.264 slice header from h->gb and (re)configure the decoder
 * state accordingly: PPS/SPS activation, dimension / pixel-format /
 * context reinitialization, field-pair bookkeeping (first_field matching,
 * PICT_TOP_FIELD / PICT_BOTTOM_FIELD pairing), frame_num-gap concealment
 * (dummy frames copied from the previous short-term ref), reference-list
 * construction and reordering, weighted-prediction tables, QP, deblocking
 * parameters, and the per-slice ref2frm mapping used by error concealment.
 *
 * h is the per-slice (possibly per-thread) context, h0 the master context;
 * when h != h0 reinitialization is forbidden (parallel decoding).
 *
 * Returns 0 on success, a negative AVERROR on invalid data, or 1 in the
 * "deblocking switched inside frame" case.
 *
 * NOTE(review): the flattened one-statement-per-long-line formatting of the
 * original extract is kept verbatim below; only comments were added.
 */
static int CVE_2013_7008_PATCHED_decode_slice_header(H264Context *h, H264Context *h0) { unsigned int first_mb_in_slice; unsigned int pps_id; int num_ref_idx_active_override_flag, ret; unsigned int slice_type, tmp, i, j; int last_pic_structure, last_pic_droppable; int must_reinit; int needs_reinit = 0; h->me.qpel_put = h->h264qpel.put_h264_qpel_pixels_tab; h->me.qpel_avg = h->h264qpel.avg_h264_qpel_pixels_tab; first_mb_in_slice = get_ue_golomb_long(&h->gb); if (first_mb_in_slice == 0) { // FIXME better field boundary detection if (h0->current_slice && FIELD_PICTURE(h)) { field_end(h, 1); } h0->current_slice = 0; if (!h0->first_field) { if (h->cur_pic_ptr && !h->droppable) { ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, h->picture_structure == PICT_BOTTOM_FIELD); } h->cur_pic_ptr = NULL; } } slice_type = get_ue_golomb_31(&h->gb); if (slice_type > 9) { av_log(h->avctx, AV_LOG_ERROR, "slice type too large (%d) at %d %d\n", slice_type, h->mb_x, h->mb_y); return -1; } if (slice_type > 4) { slice_type -= 5; h->slice_type_fixed = 1; } else h->slice_type_fixed = 0; slice_type = golomb_to_pict_type[slice_type]; h->slice_type = slice_type; h->slice_type_nos = slice_type & 3; // to make a few old functions happy, it's wrong though h->pict_type = h->slice_type; pps_id = get_ue_golomb(&h->gb); if (pps_id >= MAX_PPS_COUNT) { av_log(h->avctx, AV_LOG_ERROR, "pps_id %d out of range\n", pps_id); return -1; } if (!h0->pps_buffers[pps_id]) { av_log(h->avctx, AV_LOG_ERROR, "non-existing PPS %u referenced\n", pps_id); return -1; } h->pps = *h0->pps_buffers[pps_id]; if (!h0->sps_buffers[h->pps.sps_id]) { av_log(h->avctx, AV_LOG_ERROR, "non-existing SPS %u referenced\n", h->pps.sps_id); return -1; } if (h->pps.sps_id != h->current_sps_id || h0->sps_buffers[h->pps.sps_id]->new) { h0->sps_buffers[h->pps.sps_id]->new = 0; h->current_sps_id = h->pps.sps_id; h->sps = *h0->sps_buffers[h->pps.sps_id]; if (h->mb_width != h->sps.mb_width || h->mb_height != h->sps.mb_height * (2 - 
h->sps.frame_mbs_only_flag) || h->avctx->bits_per_raw_sample != h->sps.bit_depth_luma || h->cur_chroma_format_idc != h->sps.chroma_format_idc ) needs_reinit = 1; if (h->bit_depth_luma != h->sps.bit_depth_luma || h->chroma_format_idc != h->sps.chroma_format_idc) { h->bit_depth_luma = h->sps.bit_depth_luma; h->chroma_format_idc = h->sps.chroma_format_idc; needs_reinit = 1; } if ((ret = h264_set_parameter_from_sps(h)) < 0) return ret; } h->avctx->profile = ff_h264_get_profile(&h->sps); h->avctx->level = h->sps.level_idc; h->avctx->refs = h->sps.ref_frame_count; must_reinit = (h->context_initialized && ( 16*h->sps.mb_width != h->avctx->coded_width || 16*h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag) != h->avctx->coded_height || h->avctx->bits_per_raw_sample != h->sps.bit_depth_luma || h->cur_chroma_format_idc != h->sps.chroma_format_idc || av_cmp_q(h->sps.sar, h->avctx->sample_aspect_ratio) || h->mb_width != h->sps.mb_width || h->mb_height != h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag) )); if (h0->avctx->pix_fmt != get_pixel_format(h0, 0)) must_reinit = 1; h->mb_width = h->sps.mb_width; h->mb_height = h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag); h->mb_num = h->mb_width * h->mb_height; h->mb_stride = h->mb_width + 1; h->b_stride = h->mb_width * 4; h->chroma_y_shift = h->sps.chroma_format_idc <= 1; // 400 uses yuv420p h->width = 16 * h->mb_width; h->height = 16 * h->mb_height; ret = init_dimensions(h); if (ret < 0) return ret; if (h->sps.video_signal_type_present_flag) { h->avctx->color_range = h->sps.full_range>0 ? 
AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG; if (h->sps.colour_description_present_flag) { if (h->avctx->colorspace != h->sps.colorspace) needs_reinit = 1; h->avctx->color_primaries = h->sps.color_primaries; h->avctx->color_trc = h->sps.color_trc; h->avctx->colorspace = h->sps.colorspace; } } if (h->context_initialized && (h->width != h->avctx->coded_width || h->height != h->avctx->coded_height || must_reinit || needs_reinit)) { if (h != h0) { av_log(h->avctx, AV_LOG_ERROR, "changing width/height on " "slice %d\n", h0->current_slice + 1); return AVERROR_INVALIDDATA; } flush_change(h); if ((ret = get_pixel_format(h, 1)) < 0) return ret; h->avctx->pix_fmt = ret; av_log(h->avctx, AV_LOG_INFO, "Reinit context to %dx%d, " "pix_fmt: %d\n", h->width, h->height, h->avctx->pix_fmt); if ((ret = h264_slice_header_init(h, 1)) < 0) { av_log(h->avctx, AV_LOG_ERROR, "h264_slice_header_init() failed\n"); return ret; } } if (!h->context_initialized) { if (h != h0) { av_log(h->avctx, AV_LOG_ERROR, "Cannot (re-)initialize context during parallel decoding.\n"); return -1; } if ((ret = get_pixel_format(h, 1)) < 0) return ret; h->avctx->pix_fmt = ret; if ((ret = h264_slice_header_init(h, 0)) < 0) { av_log(h->avctx, AV_LOG_ERROR, "h264_slice_header_init() failed\n"); return ret; } } if (h == h0 && h->dequant_coeff_pps != pps_id) { h->dequant_coeff_pps = pps_id; init_dequant_tables(h); } h->frame_num = get_bits(&h->gb, h->sps.log2_max_frame_num); h->mb_mbaff = 0; h->mb_aff_frame = 0; last_pic_structure = h0->picture_structure; last_pic_droppable = h0->droppable; h->droppable = h->nal_ref_idc == 0; if (h->sps.frame_mbs_only_flag) { h->picture_structure = PICT_FRAME; } else { if (!h->sps.direct_8x8_inference_flag && slice_type == AV_PICTURE_TYPE_B) { av_log(h->avctx, AV_LOG_ERROR, "This stream was generated by a broken encoder, invalid 8x8 inference\n"); return -1; } if (get_bits1(&h->gb)) { // field_pic_flag h->picture_structure = PICT_TOP_FIELD + get_bits1(&h->gb); // bottom_field_flag } else { 
h->picture_structure = PICT_FRAME; h->mb_aff_frame = h->sps.mb_aff; } } h->mb_field_decoding_flag = h->picture_structure != PICT_FRAME; if (h0->current_slice != 0) { if (last_pic_structure != h->picture_structure || last_pic_droppable != h->droppable) { av_log(h->avctx, AV_LOG_ERROR, "Changing field mode (%d -> %d) between slices is not allowed\n", last_pic_structure, h->picture_structure); h->picture_structure = last_pic_structure; h->droppable = last_pic_droppable; return AVERROR_INVALIDDATA; } else if (!h0->cur_pic_ptr) { av_log(h->avctx, AV_LOG_ERROR, "unset cur_pic_ptr on %d. slice\n", h0->current_slice + 1); return AVERROR_INVALIDDATA; } } else { /* Shorten frame num gaps so we don't have to allocate reference * frames just to throw them away */ if (h->frame_num != h->prev_frame_num && h->prev_frame_num >= 0) { int unwrap_prev_frame_num = h->prev_frame_num; int max_frame_num = 1 << h->sps.log2_max_frame_num; if (unwrap_prev_frame_num > h->frame_num) unwrap_prev_frame_num -= max_frame_num; if ((h->frame_num - unwrap_prev_frame_num) > h->sps.ref_frame_count) { unwrap_prev_frame_num = (h->frame_num - h->sps.ref_frame_count) - 1; if (unwrap_prev_frame_num < 0) unwrap_prev_frame_num += max_frame_num; h->prev_frame_num = unwrap_prev_frame_num; } } /* See if we have a decoded first field looking for a pair... * Here, we're using that to see if we should mark previously * decode frames as "finished". * We have to do that before the "dummy" in-between frame allocation, * since that can modify h->cur_pic_ptr. 
*/ if (h0->first_field) { assert(h0->cur_pic_ptr); assert(h0->cur_pic_ptr->f.data[0]); assert(h0->cur_pic_ptr->reference != DELAYED_PIC_REF); /* Mark old field/frame as completed */ if (h0->cur_pic_ptr->tf.owner == h0->avctx) { ff_thread_report_progress(&h0->cur_pic_ptr->tf, INT_MAX, last_pic_structure == PICT_BOTTOM_FIELD); } /* figure out if we have a complementary field pair */ if (!FIELD_PICTURE(h) || h->picture_structure == last_pic_structure) { /* Previous field is unmatched. Don't display it, but let it * remain for reference if marked as such. */ if (last_pic_structure != PICT_FRAME) { ff_thread_report_progress(&h0->cur_pic_ptr->tf, INT_MAX, last_pic_structure == PICT_TOP_FIELD); } } else { if (h0->cur_pic_ptr->frame_num != h->frame_num) { /* This and previous field were reference, but had * different frame_nums. Consider this field first in * pair. Throw away previous field except for reference * purposes. */ if (last_pic_structure != PICT_FRAME) { ff_thread_report_progress(&h0->cur_pic_ptr->tf, INT_MAX, last_pic_structure == PICT_TOP_FIELD); } } else { /* Second field in complementary pair */ if (!((last_pic_structure == PICT_TOP_FIELD && h->picture_structure == PICT_BOTTOM_FIELD) || (last_pic_structure == PICT_BOTTOM_FIELD && h->picture_structure == PICT_TOP_FIELD))) { av_log(h->avctx, AV_LOG_ERROR, "Invalid field mode combination %d/%d\n", last_pic_structure, h->picture_structure); h->picture_structure = last_pic_structure; h->droppable = last_pic_droppable; return AVERROR_INVALIDDATA; } else if (last_pic_droppable != h->droppable) { avpriv_request_sample(h->avctx, "Found reference and non-reference fields in the same frame, which"); h->picture_structure = last_pic_structure; h->droppable = last_pic_droppable; return AVERROR_PATCHWELCOME; } } } } while (h->frame_num != h->prev_frame_num && h->prev_frame_num >= 0 && !h0->first_field && h->frame_num != (h->prev_frame_num + 1) % (1 << h->sps.log2_max_frame_num)) { Picture *prev = h->short_ref_count ? 
h->short_ref[0] : NULL; av_log(h->avctx, AV_LOG_DEBUG, "Frame num gap %d %d\n", h->frame_num, h->prev_frame_num); if (!h->sps.gaps_in_frame_num_allowed_flag) for(i=0; i<FF_ARRAY_ELEMS(h->last_pocs); i++) h->last_pocs[i] = INT_MIN; if (h264_frame_start(h) < 0) return -1; h->prev_frame_num++; h->prev_frame_num %= 1 << h->sps.log2_max_frame_num; h->cur_pic_ptr->frame_num = h->prev_frame_num; ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 0); ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 1); if ((ret = ff_generate_sliding_window_mmcos(h, 1)) < 0 && h->avctx->err_recognition & AV_EF_EXPLODE) return ret; if (ff_h264_execute_ref_pic_marking(h, h->mmco, h->mmco_index) < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE)) return AVERROR_INVALIDDATA; /* Error concealment: if a ref is missing, copy the previous ref in its place. * FIXME: avoiding a memcpy would be nice, but ref handling makes many assumptions * about there being no actual duplicates. * FIXME: this doesn't copy padding for out-of-frame motion vectors. Given we're * concealing a lost frame, this probably isn't noticeable by comparison, but it should * be fixed. */ if (h->short_ref_count) { if (prev) { av_image_copy(h->short_ref[0]->f.data, h->short_ref[0]->f.linesize, (const uint8_t **)prev->f.data, prev->f.linesize, h->avctx->pix_fmt, h->mb_width * 16, h->mb_height * 16); h->short_ref[0]->poc = prev->poc + 2; } h->short_ref[0]->frame_num = h->prev_frame_num; } } /* See if we have a decoded first field looking for a pair... * We're using that to see whether to continue decoding in that * frame, or to allocate a new one. */ if (h0->first_field) { assert(h0->cur_pic_ptr); assert(h0->cur_pic_ptr->f.data[0]); assert(h0->cur_pic_ptr->reference != DELAYED_PIC_REF); /* figure out if we have a complementary field pair */ if (!FIELD_PICTURE(h) || h->picture_structure == last_pic_structure) { /* Previous field is unmatched. Don't display it, but let it * remain for reference if marked as such. 
*/ h0->cur_pic_ptr = NULL; h0->first_field = FIELD_PICTURE(h); } else { if (h0->cur_pic_ptr->frame_num != h->frame_num) { ff_thread_report_progress(&h0->cur_pic_ptr->tf, INT_MAX, h0->picture_structure==PICT_BOTTOM_FIELD); /* This and the previous field had different frame_nums. * Consider this field first in pair. Throw away previous * one except for reference purposes. */ h0->first_field = 1; h0->cur_pic_ptr = NULL; } else { /* Second field in complementary pair */ h0->first_field = 0; } } } else { /* Frame or first field in a potentially complementary pair */ h0->first_field = FIELD_PICTURE(h); } if (!FIELD_PICTURE(h) || h0->first_field) { if (h264_frame_start(h) < 0) { h0->first_field = 0; return -1; } } else { release_unused_pictures(h, 0); } /* Some macroblocks can be accessed before they're available in case * of lost slices, MBAFF or threading. */ if (FIELD_PICTURE(h)) { for(i = (h->picture_structure == PICT_BOTTOM_FIELD); i<h->mb_height; i++) memset(h->slice_table + i*h->mb_stride, -1, (h->mb_stride - (i+1==h->mb_height)) * sizeof(*h->slice_table)); } else { memset(h->slice_table, -1, (h->mb_height * h->mb_stride - 1) * sizeof(*h->slice_table)); } h0->last_slice_type = -1; } if (h != h0 && (ret = clone_slice(h, h0)) < 0) return ret; /* can't be in alloc_tables because linesize isn't known there. * FIXME: redo bipred weight to not require extra buffer? 
*/ for (i = 0; i < h->slice_context_count; i++) if (h->thread_context[i]) { ret = alloc_scratch_buffers(h->thread_context[i], h->linesize); if (ret < 0) return ret; } h->cur_pic_ptr->frame_num = h->frame_num; // FIXME frame_num cleanup av_assert1(h->mb_num == h->mb_width * h->mb_height); if (first_mb_in_slice << FIELD_OR_MBAFF_PICTURE(h) >= h->mb_num || first_mb_in_slice >= h->mb_num) { av_log(h->avctx, AV_LOG_ERROR, "first_mb_in_slice overflow\n"); return -1; } h->resync_mb_x = h->mb_x = first_mb_in_slice % h->mb_width; h->resync_mb_y = h->mb_y = (first_mb_in_slice / h->mb_width) << FIELD_OR_MBAFF_PICTURE(h); if (h->picture_structure == PICT_BOTTOM_FIELD) h->resync_mb_y = h->mb_y = h->mb_y + 1; av_assert1(h->mb_y < h->mb_height); if (h->picture_structure == PICT_FRAME) { h->curr_pic_num = h->frame_num; h->max_pic_num = 1 << h->sps.log2_max_frame_num; } else { h->curr_pic_num = 2 * h->frame_num + 1; h->max_pic_num = 1 << (h->sps.log2_max_frame_num + 1); } if (h->nal_unit_type == NAL_IDR_SLICE) get_ue_golomb(&h->gb); /* idr_pic_id */ if (h->sps.poc_type == 0) { h->poc_lsb = get_bits(&h->gb, h->sps.log2_max_poc_lsb); if (h->pps.pic_order_present == 1 && h->picture_structure == PICT_FRAME) h->delta_poc_bottom = get_se_golomb(&h->gb); } if (h->sps.poc_type == 1 && !h->sps.delta_pic_order_always_zero_flag) { h->delta_poc[0] = get_se_golomb(&h->gb); if (h->pps.pic_order_present == 1 && h->picture_structure == PICT_FRAME) h->delta_poc[1] = get_se_golomb(&h->gb); } ff_init_poc(h, h->cur_pic_ptr->field_poc, &h->cur_pic_ptr->poc); if (h->pps.redundant_pic_cnt_present) h->redundant_pic_count = get_ue_golomb(&h->gb); // set defaults, might be overridden a few lines later h->ref_count[0] = h->pps.ref_count[0]; h->ref_count[1] = h->pps.ref_count[1]; if (h->slice_type_nos != AV_PICTURE_TYPE_I) { unsigned max[2]; max[0] = max[1] = h->picture_structure == PICT_FRAME ? 
15 : 31; if (h->slice_type_nos == AV_PICTURE_TYPE_B) h->direct_spatial_mv_pred = get_bits1(&h->gb); num_ref_idx_active_override_flag = get_bits1(&h->gb); if (num_ref_idx_active_override_flag) { h->ref_count[0] = get_ue_golomb(&h->gb) + 1; if (h->slice_type_nos == AV_PICTURE_TYPE_B) { h->ref_count[1] = get_ue_golomb(&h->gb) + 1; } else // full range is spec-ok in this case, even for frames h->ref_count[1] = 1; } if (h->ref_count[0]-1 > max[0] || h->ref_count[1]-1 > max[1]){ av_log(h->avctx, AV_LOG_ERROR, "reference overflow %u > %u or %u > %u\n", h->ref_count[0]-1, max[0], h->ref_count[1]-1, max[1]); h->ref_count[0] = h->ref_count[1] = 0; return AVERROR_INVALIDDATA; } if (h->slice_type_nos == AV_PICTURE_TYPE_B) h->list_count = 2; else h->list_count = 1; } else { h->list_count = 0; h->ref_count[0] = h->ref_count[1] = 0; } if (slice_type != AV_PICTURE_TYPE_I && (h0->current_slice == 0 || slice_type != h0->last_slice_type || memcmp(h0->last_ref_count, h0->ref_count, sizeof(h0->ref_count)))) { ff_h264_fill_default_ref_list(h); } if (h->slice_type_nos != AV_PICTURE_TYPE_I && ff_h264_decode_ref_pic_list_reordering(h) < 0) { h->ref_count[1] = h->ref_count[0] = 0; return -1; } if ((h->pps.weighted_pred && h->slice_type_nos == AV_PICTURE_TYPE_P) || (h->pps.weighted_bipred_idc == 1 && h->slice_type_nos == AV_PICTURE_TYPE_B)) pred_weight_table(h); else if (h->pps.weighted_bipred_idc == 2 && h->slice_type_nos == AV_PICTURE_TYPE_B) { implicit_weight_table(h, -1); } else { h->use_weight = 0; for (i = 0; i < 2; i++) { h->luma_weight_flag[i] = 0; h->chroma_weight_flag[i] = 0; } } // If frame-mt is enabled, only update mmco tables for the first slice // in a field. Subsequent slices can temporarily clobber h->mmco_index // or h->mmco, which will cause ref list mix-ups and decoding errors // further down the line. This may break decoding if the first slice is // corrupt, thus we only do this if frame-mt is enabled. 
if (h->nal_ref_idc && ff_h264_decode_ref_pic_marking(h0, &h->gb, !(h->avctx->active_thread_type & FF_THREAD_FRAME) || h0->current_slice == 0) < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE)) return AVERROR_INVALIDDATA; if (FRAME_MBAFF(h)) { ff_h264_fill_mbaff_ref_list(h); if (h->pps.weighted_bipred_idc == 2 && h->slice_type_nos == AV_PICTURE_TYPE_B) { implicit_weight_table(h, 0); implicit_weight_table(h, 1); } } if (h->slice_type_nos == AV_PICTURE_TYPE_B && !h->direct_spatial_mv_pred) ff_h264_direct_dist_scale_factor(h); ff_h264_direct_ref_list_init(h); if (h->slice_type_nos != AV_PICTURE_TYPE_I && h->pps.cabac) { tmp = get_ue_golomb_31(&h->gb); if (tmp > 2) { av_log(h->avctx, AV_LOG_ERROR, "cabac_init_idc overflow\n"); return -1; } h->cabac_init_idc = tmp; } h->last_qscale_diff = 0; tmp = h->pps.init_qp + get_se_golomb(&h->gb); if (tmp > 51 + 6 * (h->sps.bit_depth_luma - 8)) { av_log(h->avctx, AV_LOG_ERROR, "QP %u out of range\n", tmp); return -1; } h->qscale = tmp; h->chroma_qp[0] = get_chroma_qp(h, 0, h->qscale); h->chroma_qp[1] = get_chroma_qp(h, 1, h->qscale); // FIXME qscale / qp ... stuff
if (h->slice_type == AV_PICTURE_TYPE_SP) get_bits1(&h->gb); /* sp_for_switch_flag */ if (h->slice_type == AV_PICTURE_TYPE_SP || h->slice_type == AV_PICTURE_TYPE_SI) get_se_golomb(&h->gb); /* slice_qs_delta */ h->deblocking_filter = 1; h->slice_alpha_c0_offset = 52; h->slice_beta_offset = 52; if (h->pps.deblocking_filter_parameters_present) { tmp = get_ue_golomb_31(&h->gb); if (tmp > 2) { av_log(h->avctx, AV_LOG_ERROR, "deblocking_filter_idc %u out of range\n", tmp); return -1; } h->deblocking_filter = tmp; if (h->deblocking_filter < 2) h->deblocking_filter ^= 1; // 1<->0 if (h->deblocking_filter) { h->slice_alpha_c0_offset += get_se_golomb(&h->gb) << 1; h->slice_beta_offset += get_se_golomb(&h->gb) << 1; if (h->slice_alpha_c0_offset > 104U || h->slice_beta_offset > 104U) { av_log(h->avctx, AV_LOG_ERROR, "deblocking filter parameters %d %d out of range\n", h->slice_alpha_c0_offset, h->slice_beta_offset); return -1; } } } if (h->avctx->skip_loop_filter >= AVDISCARD_ALL || (h->avctx->skip_loop_filter >= AVDISCARD_NONKEY && h->slice_type_nos != AV_PICTURE_TYPE_I) || (h->avctx->skip_loop_filter >= AVDISCARD_BIDIR && h->slice_type_nos == AV_PICTURE_TYPE_B) || (h->avctx->skip_loop_filter >= AVDISCARD_NONREF && h->nal_ref_idc == 0)) h->deblocking_filter = 0; if (h->deblocking_filter == 1 && h0->max_contexts > 1) { if (h->avctx->flags2 & CODEC_FLAG2_FAST) { /* Cheat slightly for speed: * Do not bother to deblock across slices. 
*/ h->deblocking_filter = 2; } else { h0->max_contexts = 1; if (!h0->single_decode_warning) { av_log(h->avctx, AV_LOG_INFO, "Cannot parallelize deblocking type 1, decoding such frames in sequential order\n"); h0->single_decode_warning = 1; } if (h != h0) { av_log(h->avctx, AV_LOG_ERROR, "Deblocking switched inside frame.\n"); return 1; } } } h->qp_thresh = 15 + 52 - FFMIN(h->slice_alpha_c0_offset, h->slice_beta_offset) - FFMAX3(0, h->pps.chroma_qp_index_offset[0], h->pps.chroma_qp_index_offset[1]) + 6 * (h->sps.bit_depth_luma - 8); h0->last_slice_type = slice_type; memcpy(h0->last_ref_count, h0->ref_count, sizeof(h0->last_ref_count)); h->slice_num = ++h0->current_slice; if (h->slice_num) h0->slice_row[(h->slice_num-1)&(MAX_SLICES-1)]= h->resync_mb_y; if ( h0->slice_row[h->slice_num&(MAX_SLICES-1)] + 3 >= h->resync_mb_y && h0->slice_row[h->slice_num&(MAX_SLICES-1)] <= h->resync_mb_y && h->slice_num >= MAX_SLICES) { //in case of ASO this check needs to be updated depending on how we decide to assign slice numbers in this case av_log(h->avctx, AV_LOG_WARNING, "Possibly too many slices (%d >= %d), increase MAX_SLICES and recompile if there are artifacts\n", h->slice_num, MAX_SLICES); } for (j = 0; j < 2; j++) { int id_list[16]; int *ref2frm = h->ref2frm[h->slice_num & (MAX_SLICES - 1)][j]; for (i = 0; i < 16; i++) { id_list[i] = 60; if (j < h->list_count && i < h->ref_count[j] && h->ref_list[j][i].f.buf[0]) { int k; AVBuffer *buf = h->ref_list[j][i].f.buf[0]->buffer; for (k = 0; k < h->short_ref_count; k++) if (h->short_ref[k]->f.buf[0]->buffer == buf) { id_list[i] = k; break; } for (k = 0; k < h->long_ref_count; k++) if (h->long_ref[k] && h->long_ref[k]->f.buf[0]->buffer == buf) { id_list[i] = h->short_ref_count + k; break; } } } ref2frm[0] = ref2frm[1] = -1; for (i = 0; i < 16; i++) ref2frm[i + 2] = 4 * id_list[i] + (h->ref_list[j][i].reference & 3); ref2frm[18 + 0] = ref2frm[18 + 1] = -1; for (i = 16; i < 48; i++) ref2frm[i + 4] = 4 * id_list[(i - 16) >> 1] + 
(h->ref_list[j][i].reference & 3); } if (h->ref_count[0]) h->er.last_pic = &h->ref_list[0][0]; if (h->ref_count[1]) h->er.next_pic = &h->ref_list[1][0]; h->er.ref_count = h->ref_count[0]; if (h->avctx->debug & FF_DEBUG_PICT_INFO) { av_log(h->avctx, AV_LOG_DEBUG, "slice:%d %s mb:%d %c%s%s pps:%u frame:%d poc:%d/%d ref:%d/%d qp:%d loop:%d:%d:%d weight:%d%s %s\n", h->slice_num, (h->picture_structure == PICT_FRAME ? "F" : h->picture_structure == PICT_TOP_FIELD ? "T" : "B"), first_mb_in_slice, av_get_picture_type_char(h->slice_type), h->slice_type_fixed ? " fix" : "", h->nal_unit_type == NAL_IDR_SLICE ? " IDR" : "", pps_id, h->frame_num, h->cur_pic_ptr->field_poc[0], h->cur_pic_ptr->field_poc[1], h->ref_count[0], h->ref_count[1], h->qscale, h->deblocking_filter, h->slice_alpha_c0_offset / 2 - 26, h->slice_beta_offset / 2 - 26, h->use_weight, h->use_weight == 1 && h->use_weight_chroma ? "c" : "", h->slice_type == AV_PICTURE_TYPE_B ? (h->direct_spatial_mv_pred ? "SPAT" : "TEMP") : ""); } return 0; }
182
99,014
0
/*
 * fps filter input callback: buffers incoming frames in s->fifo and emits
 * frames on the output link at the configured frame rate.
 *
 * Behavior per input frame "buf":
 *  - before the first timestamped frame arrives, frames with no pts are
 *    discarded (counted in s->drop); the first timestamped frame sets
 *    s->first_pts / s->pts;
 *  - delta = number of output frames owed, computed by rescaling the pts
 *    difference into the output time base with s->rounding;
 *  - delta < 1: drop everything buffered except the first frame;
 *  - delta >= 1: emit delta frames, cloning the last buffered frame when
 *    the fifo runs dry (counted in s->dup); output pts is derived from
 *    first_pts plus the running output frame counter s->frames_out.
 * The just-received frame is always re-queued for the next call.
 *
 * Returns 0 on success or a negative AVERROR; on error paths "buf" and any
 * cloned frame are freed so no AVFrame leaks.
 */
static int CVE_2013_7021_PATCHED_filter_frame(AVFilterLink *inlink, AVFrame *buf) { AVFilterContext *ctx = inlink->dst; FPSContext *s = ctx->priv; AVFilterLink *outlink = ctx->outputs[0]; int64_t delta; int i, ret; s->frames_in++; /* discard frames until we get the first timestamp */ if (s->pts == AV_NOPTS_VALUE) { if (buf->pts != AV_NOPTS_VALUE) { ret = write_to_fifo(s->fifo, buf); if (ret < 0) return ret; s->first_pts = s->pts = buf->pts; } else { av_log(ctx, AV_LOG_WARNING, "Discarding initial frame(s) with no " "timestamp.\n"); av_frame_free(&buf); s->drop++; } return 0; } /* now wait for the next timestamp */ if (buf->pts == AV_NOPTS_VALUE || av_fifo_size(s->fifo) <= 0) { return write_to_fifo(s->fifo, buf); } /* number of output frames */ delta = av_rescale_q_rnd(buf->pts - s->pts, inlink->time_base, outlink->time_base, s->rounding); if (delta < 1) { /* drop the frame and everything buffered except the first */ AVFrame *tmp; int drop = av_fifo_size(s->fifo)/sizeof(AVFrame*); av_log(ctx, AV_LOG_DEBUG, "Dropping %d frame(s).\n", drop); s->drop += drop; av_fifo_generic_read(s->fifo, &tmp, sizeof(tmp), NULL); flush_fifo(s->fifo); ret = write_to_fifo(s->fifo, tmp); av_frame_free(&buf); return ret; } /* can output >= 1 frames */ for (i = 0; i < delta; i++) { AVFrame *buf_out; av_fifo_generic_read(s->fifo, &buf_out, sizeof(buf_out), NULL); /* duplicate the frame if needed */ if (!av_fifo_size(s->fifo) && i < delta - 1) { AVFrame *dup = av_frame_clone(buf_out); av_log(ctx, AV_LOG_DEBUG, "Duplicating frame.\n"); if (dup) ret = write_to_fifo(s->fifo, dup); else ret = AVERROR(ENOMEM); if (ret < 0) { av_frame_free(&buf_out); av_frame_free(&buf); return ret; } s->dup++; } buf_out->pts = av_rescale_q(s->first_pts, inlink->time_base, outlink->time_base) + s->frames_out; if ((ret = ff_filter_frame(outlink, buf_out)) < 0) { av_frame_free(&buf); return ret; } s->frames_out++; } flush_fifo(s->fifo); ret = write_to_fifo(s->fifo, buf); s->pts = s->first_pts + 
av_rescale_q(s->frames_out, outlink->time_base, inlink->time_base); return ret; }
183
113,275
0
/*
 * Compat wrapper for recvmmsg(2): translates the 32-bit timespec from
 * userspace to a native struct timespec, delegates to __sys_recvmmsg()
 * with MSG_CMSG_COMPAT forced on, and copies the (possibly updated)
 * remaining timeout back to userspace on success.
 *
 * Returns the number of messages received, or a negative errno.  A caller
 * passing MSG_CMSG_COMPAT directly is rejected with -EINVAL, since that
 * flag is reserved for in-kernel use here.
 */
asmlinkage long CVE_2014_0038_PATCHED_compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg, unsigned int vlen, unsigned int flags, struct compat_timespec __user *timeout)
{
	struct timespec ts;
	int ret;

	/* userspace must not set the kernel-internal compat flag itself */
	if (flags & MSG_CMSG_COMPAT)
		return -EINVAL;

	/* no timeout: nothing to translate, hand straight through */
	if (!timeout)
		return __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
				      flags | MSG_CMSG_COMPAT, NULL);

	if (compat_get_timespec(&ts, timeout))
		return -EFAULT;

	ret = __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
			     flags | MSG_CMSG_COMPAT, &ts);

	/* write back the remaining time; a copy failure turns success into -EFAULT */
	if (ret > 0 && compat_put_timespec(&ts, timeout))
		ret = -EFAULT;

	return ret;
}
184
152,898
0
/*
 * Scan an HTTP request header list and return the parsed cookies from the
 * LAST "Cookie" header found (case-insensitive match).  Any cookies parsed
 * from earlier Cookie headers are destroyed before re-parsing, so exactly
 * one parsed list (or NULL if no Cookie header exists) is returned and
 * nothing leaks.  Caller owns the returned list.
 */
struct ast_variable *CVE_2014_2286_PATCHED_ast_http_get_cookies(struct ast_variable *headers)
{
	struct ast_variable *hdr = headers;
	struct ast_variable *result = NULL;

	while (hdr) {
		if (strcasecmp(hdr->name, "Cookie") == 0) {
			/* a later Cookie header supersedes an earlier one */
			ast_variables_destroy(result);
			result = parse_cookies(hdr->value);
		}
		hdr = hdr->next;
	}

	return result;
}
185
95,273
0
/*
 * Authorize creation of an unprivileged ICMP (ping) socket.
 *
 * Allowed when the task's effective gid, or any of its supplementary
 * groups, falls inside the per-netns ping group range
 * (inet_get_ping_group_range_net); otherwise returns -EACCES.
 *
 * The supplementary-group scan walks group_info block by block
 * (NGROUPS_PER_BLOCK entries per block, "count" tracking how many remain).
 * The group_info reference taken via get_current_groups() is released with
 * put_group_info() on both the match (goto out_release_group) and the
 * no-match path, so no reference is leaked.
 */
int CVE_2014_2851_PATCHED_ping_init_sock(struct sock *sk) { struct net *net = sock_net(sk); kgid_t group = current_egid(); struct group_info *group_info; int i, j, count; kgid_t low, high; int ret = 0; inet_get_ping_group_range_net(net, &low, &high); if (gid_lte(low, group) && gid_lte(group, high)) return 0; group_info = get_current_groups(); count = group_info->ngroups; for (i = 0; i < group_info->nblocks; i++) { int cp_count = min_t(int, NGROUPS_PER_BLOCK, count); for (j = 0; j < cp_count; j++) { kgid_t gid = group_info->blocks[i][j]; if (gid_lte(low, gid) && gid_lte(gid, high)) goto out_release_group; } count -= cp_count; } ret = -EACCES; out_release_group: put_group_info(group_info); return ret; }
186
134,499
0
static int CVE_2014_3610_PATCHED_wrmsr_interception(struct vcpu_svm *svm) { struct msr_data msr; u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX]; u64 data = (svm->vcpu.arch.regs[VCPU_REGS_RAX] & -1u) | ((u64)(svm->vcpu.arch.regs[VCPU_REGS_RDX] & -1u) << 32); msr.data = data; msr.index = ecx; msr.host_initiated = false; svm->next_rip = kvm_rip_read(&svm->vcpu) + 2; if (kvm_set_msr(&svm->vcpu, &msr)) { trace_kvm_msr_write_ex(ecx, data); kvm_inject_gp(&svm->vcpu, 0); } else { trace_kvm_msr_write(ecx, data); skip_emulated_instruction(&svm->vcpu); } return 1; }
187
154,714
0
/*
 * Safe LZO1X decompression: decompress in[0..in_len) into out, producing at
 * most the caller-supplied *out_len bytes.
 *
 * "Safe" means every access is bounds-checked: NEED_IP guards input reads
 * (-> input_overrun), NEED_OP guards output writes (-> output_overrun), and
 * TEST_LB validates each back-reference against the start of the output
 * (-> lookbehind_overrun).  On return *out_len holds the number of bytes
 * actually written, and the result is LZO_E_OK on a clean end-of-stream,
 * LZO_E_INPUT_NOT_CONSUMED if trailing input remains, or the matching
 * LZO_E_* error for a malformed/truncated stream.
 *
 * The t/state/next variables implement the LZO1X token state machine
 * (literal runs, short/long matches, 16-bit little-endian distance words
 * read via get_unaligned_le16).  The blocks under
 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS are 8-bytes-at-a-time fast copies,
 * enabled only when HAVE_IP/HAVE_OP show enough slack past the copy length.
 * Flattened single-line formatting kept verbatim from the original extract.
 */
int CVE_2014_4608_PATCHED_lzo1x_decompress_safe(const unsigned char *in, size_t in_len, unsigned char *out, size_t *out_len) { unsigned char *op; const unsigned char *ip; size_t t, next; size_t state = 0; const unsigned char *m_pos; const unsigned char * const ip_end = in + in_len; unsigned char * const op_end = out + *out_len; op = out; ip = in; if (unlikely(in_len < 3)) goto input_overrun; if (*ip > 17) { t = *ip++ - 17; if (t < 4) { next = t; goto match_next; } goto copy_literal_run; } for (;;) { t = *ip++; if (t < 16) { if (likely(state == 0)) { if (unlikely(t == 0)) { while (unlikely(*ip == 0)) { t += 255; ip++; NEED_IP(1, 0); } t += 15 + *ip++; } t += 3; copy_literal_run: #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) if (likely(HAVE_IP(t, 15) && HAVE_OP(t, 15))) { const unsigned char *ie = ip + t; unsigned char *oe = op + t; do { COPY8(op, ip); op += 8; ip += 8; COPY8(op, ip); op += 8; ip += 8; } while (ip < ie); ip = ie; op = oe; } else #endif { NEED_OP(t, 0); NEED_IP(t, 3); do { *op++ = *ip++; } while (--t > 0); } state = 4; continue; } else if (state != 4) { next = t & 3; m_pos = op - 1; m_pos -= t >> 2; m_pos -= *ip++ << 2; TEST_LB(m_pos); NEED_OP(2, 0); op[0] = m_pos[0]; op[1] = m_pos[1]; op += 2; goto match_next; } else { next = t & 3; m_pos = op - (1 + M2_MAX_OFFSET); m_pos -= t >> 2; m_pos -= *ip++ << 2; t = 3; } } else if (t >= 64) { next = t & 3; m_pos = op - 1; m_pos -= (t >> 2) & 7; m_pos -= *ip++ << 3; t = (t >> 5) - 1 + (3 - 1); } else if (t >= 32) { t = (t & 31) + (3 - 1); if (unlikely(t == 2)) { while (unlikely(*ip == 0)) { t += 255; ip++; NEED_IP(1, 0); } t += 31 + *ip++; NEED_IP(2, 0); } m_pos = op - 1; next = get_unaligned_le16(ip); ip += 2; m_pos -= next >> 2; next &= 3; } else { m_pos = op; m_pos -= (t & 8) << 11; t = (t & 7) + (3 - 1); if (unlikely(t == 2)) { while (unlikely(*ip == 0)) { t += 255; ip++; NEED_IP(1, 0); } t += 7 + *ip++; NEED_IP(2, 0); } next = get_unaligned_le16(ip); ip += 2; m_pos -= next >> 2; next &= 3; if (m_pos 
== op) goto eof_found; m_pos -= 0x4000; } TEST_LB(m_pos); #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) if (op - m_pos >= 8) { unsigned char *oe = op + t; if (likely(HAVE_OP(t, 15))) { do { COPY8(op, m_pos); op += 8; m_pos += 8; COPY8(op, m_pos); op += 8; m_pos += 8; } while (op < oe); op = oe; if (HAVE_IP(6, 0)) { state = next; COPY4(op, ip); op += next; ip += next; continue; } } else { NEED_OP(t, 0); do { *op++ = *m_pos++; } while (op < oe); } } else #endif { unsigned char *oe = op + t; NEED_OP(t, 0); op[0] = m_pos[0]; op[1] = m_pos[1]; op += 2; m_pos += 2; do { *op++ = *m_pos++; } while (op < oe); } match_next: state = next; t = next; #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) if (likely(HAVE_IP(6, 0) && HAVE_OP(4, 0))) { COPY4(op, ip); op += t; ip += t; } else #endif { NEED_IP(t, 3); NEED_OP(t, 0); while (t > 0) { *op++ = *ip++; t--; } } } eof_found: *out_len = op - out; return (t != 3 ? LZO_E_ERROR : ip == ip_end ? LZO_E_OK : ip < ip_end ? LZO_E_INPUT_NOT_CONSUMED : LZO_E_INPUT_OVERRUN); input_overrun: *out_len = op - out; return LZO_E_INPUT_OVERRUN; output_overrun: *out_len = op - out; return LZO_E_OUTPUT_OVERRUN; lookbehind_overrun: *out_len = op - out; return LZO_E_LOOKBEHIND_OVERRUN; }
188
119,189
0
/*
 * setsockopt() handler for PPPoL2TP sockets; only level SOL_PPPOL2TP is
 * accepted and exactly one int option value is read from userspace.
 *
 * A session whose session_id and peer_session_id are both 0 is treated as
 * a tunnel-management socket: the option is applied to the tunnel obtained
 * from ps->tunnel_sock (reference dropped after use).  Otherwise the option
 * is applied to the session itself.  sk->sk_user_data is checked before
 * resolving the session so a detached/closing socket yields -ENOTCONN, and
 * the session reference taken by pppol2tp_sock_to_session() is released via
 * sock_put(sk) at end_put_sess.
 *
 * NOTE(review): the trailing "err = 0;" unconditionally overwrites the
 * result of pppol2tp_session_setsockopt() (and of the tunnel branch when it
 * falls through), so option-handler failures are reported as success.  This
 * matches the upstream code of the era, but verify the masking is intended.
 */
static int CVE_2014_4943_PATCHED_pppol2tp_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) { struct sock *sk = sock->sk; struct l2tp_session *session; struct l2tp_tunnel *tunnel; struct pppol2tp_session *ps; int val; int err; if (level != SOL_PPPOL2TP) return -EINVAL; if (optlen < sizeof(int)) return -EINVAL; if (get_user(val, (int __user *)optval)) return -EFAULT; err = -ENOTCONN; if (sk->sk_user_data == NULL) goto end; /* Get session context from the socket */ err = -EBADF; session = pppol2tp_sock_to_session(sk); if (session == NULL) goto end; /* Special case: if session_id == 0x0000, treat as operation on tunnel */ ps = l2tp_session_priv(session); if ((session->session_id == 0) && (session->peer_session_id == 0)) { err = -EBADF; tunnel = l2tp_sock_to_tunnel(ps->tunnel_sock); if (tunnel == NULL) goto end_put_sess; err = pppol2tp_tunnel_setsockopt(sk, tunnel, optname, val); sock_put(ps->tunnel_sock); } else err = pppol2tp_session_setsockopt(sk, session, optname, val); err = 0; end_put_sess: sock_put(sk); end: return err; }
189
24,356
0
static int CVE_2014_5271_PATCHED_encode_frame(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *pic, int *got_packet) { ProresContext *ctx = avctx->priv_data; uint8_t *orig_buf, *buf, *slice_hdr, *slice_sizes, *tmp; uint8_t *picture_size_pos; PutBitContext pb; int x, y, i, mb, q = 0; int sizes[4] = { 0 }; int slice_hdr_size = 2 + 2 * (ctx->num_planes - 1); int frame_size, picture_size, slice_size; int pkt_size, ret; uint8_t frame_flags; *avctx->coded_frame = *pic; avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I; avctx->coded_frame->key_frame = 1; pkt_size = ctx->frame_size_upper_bound; if ((ret = ff_alloc_packet2(avctx, pkt, pkt_size + FF_MIN_BUFFER_SIZE)) < 0) return ret; orig_buf = pkt->data; // frame atom orig_buf += 4; // frame size bytestream_put_be32 (&orig_buf, FRAME_ID); // frame container ID buf = orig_buf; // frame header tmp = buf; buf += 2; // frame header size will be stored here bytestream_put_be16 (&buf, 0); // version 1 bytestream_put_buffer(&buf, ctx->vendor, 4); bytestream_put_be16 (&buf, avctx->width); bytestream_put_be16 (&buf, avctx->height); frame_flags = ctx->chroma_factor << 6; if (avctx->flags & CODEC_FLAG_INTERLACED_DCT) frame_flags |= pic->top_field_first ? 
0x04 : 0x08; bytestream_put_byte (&buf, frame_flags); bytestream_put_byte (&buf, 0); // reserved bytestream_put_byte (&buf, avctx->color_primaries); bytestream_put_byte (&buf, avctx->color_trc); bytestream_put_byte (&buf, avctx->colorspace); bytestream_put_byte (&buf, 0x40 | (ctx->alpha_bits >> 3)); bytestream_put_byte (&buf, 0); // reserved if (ctx->quant_sel != QUANT_MAT_DEFAULT) { bytestream_put_byte (&buf, 0x03); // matrix flags - both matrices are present // luma quantisation matrix for (i = 0; i < 64; i++) bytestream_put_byte(&buf, ctx->quant_mat[i]); // chroma quantisation matrix for (i = 0; i < 64; i++) bytestream_put_byte(&buf, ctx->quant_mat[i]); } else { bytestream_put_byte (&buf, 0x00); // matrix flags - default matrices are used } bytestream_put_be16 (&tmp, buf - orig_buf); // write back frame header size for (ctx->cur_picture_idx = 0; ctx->cur_picture_idx < ctx->pictures_per_frame; ctx->cur_picture_idx++) { // picture header picture_size_pos = buf + 1; bytestream_put_byte (&buf, 0x40); // picture header size (in bits) buf += 4; // picture data size will be stored here bytestream_put_be16 (&buf, ctx->slices_per_picture); bytestream_put_byte (&buf, av_log2(ctx->mbs_per_slice) << 4); // slice width and height in MBs // seek table - will be filled during slice encoding slice_sizes = buf; buf += ctx->slices_per_picture * 2; // slices if (!ctx->force_quant) { ret = avctx->execute2(avctx, find_quant_thread, NULL, NULL, ctx->mb_height); if (ret) return ret; } for (y = 0; y < ctx->mb_height; y++) { int mbs_per_slice = ctx->mbs_per_slice; for (x = mb = 0; x < ctx->mb_width; x += mbs_per_slice, mb++) { q = ctx->force_quant ? 
ctx->force_quant : ctx->slice_q[mb + y * ctx->slices_width]; while (ctx->mb_width - x < mbs_per_slice) mbs_per_slice >>= 1; bytestream_put_byte(&buf, slice_hdr_size << 3); slice_hdr = buf; buf += slice_hdr_size - 1; init_put_bits(&pb, buf, (pkt_size - (buf - orig_buf)) * 8); ret = encode_slice(avctx, pic, &pb, sizes, x, y, q, mbs_per_slice); if (ret < 0) return ret; bytestream_put_byte(&slice_hdr, q); slice_size = slice_hdr_size + sizes[ctx->num_planes - 1]; for (i = 0; i < ctx->num_planes - 1; i++) { bytestream_put_be16(&slice_hdr, sizes[i]); slice_size += sizes[i]; } bytestream_put_be16(&slice_sizes, slice_size); buf += slice_size - slice_hdr_size; } } picture_size = buf - (picture_size_pos - 1); bytestream_put_be32(&picture_size_pos, picture_size); } orig_buf -= 8; frame_size = buf - orig_buf; bytestream_put_be32(&orig_buf, frame_size); pkt->size = frame_size; pkt->flags |= AV_PKT_FLAG_KEY; *got_packet = 1; return 0; }
190
182,073
0
int CVE_2014_8369_PATCHED_kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot) { gfn_t gfn, end_gfn; pfn_t pfn; int r = 0; struct iommu_domain *domain = kvm->arch.iommu_domain; int flags; /* check if iommu exists and in use */ if (!domain) return 0; gfn = slot->base_gfn; end_gfn = gfn + slot->npages; flags = IOMMU_READ; if (!(slot->flags & KVM_MEM_READONLY)) flags |= IOMMU_WRITE; if (!kvm->arch.iommu_noncoherent) flags |= IOMMU_CACHE; while (gfn < end_gfn) { unsigned long page_size; /* Check if already mapped */ if (iommu_iova_to_phys(domain, gfn_to_gpa(gfn))) { gfn += 1; continue; } /* Get the page size we could use to map */ page_size = kvm_host_page_size(kvm, gfn); /* Make sure the page_size does not exceed the memslot */ while ((gfn + (page_size >> PAGE_SHIFT)) > end_gfn) page_size >>= 1; /* Make sure gfn is aligned to the page size we want to map */ while ((gfn << PAGE_SHIFT) & (page_size - 1)) page_size >>= 1; /* Make sure hva is aligned to the page size we want to map */ while (__gfn_to_hva_memslot(slot, gfn) & (page_size - 1)) page_size >>= 1; /* * Pin all pages we are about to map in memory. This is * important because we unmap and unpin in 4kb steps later. */ pfn = kvm_pin_pages(slot, gfn, page_size >> PAGE_SHIFT); if (is_error_noslot_pfn(pfn)) { gfn += 1; continue; } /* Map into IO address space */ r = iommu_map(domain, gfn_to_gpa(gfn), pfn_to_hpa(pfn), page_size, flags); if (r) { printk(KERN_ERR "kvm_iommu_map_address:" "iommu failed to map pfn=%llx\n", pfn); kvm_unpin_pages(kvm, pfn, page_size >> PAGE_SHIFT); goto unmap_pages; } gfn += page_size >> PAGE_SHIFT; } return 0; unmap_pages: kvm_iommu_put_pages(kvm, slot->base_gfn, gfn - slot->base_gfn); return r; }
191
140,576
0
nsresult CVE_2014_8643_PATCHED_XRE_InitChildProcess(int aArgc, char* aArgv[]) { NS_ENSURE_ARG_MIN(aArgc, 2); NS_ENSURE_ARG_POINTER(aArgv); NS_ENSURE_ARG_POINTER(aArgv[0]); #if defined(XP_WIN) // From the --attach-console support in nsNativeAppSupportWin.cpp, but // here we are a content child process, so we always attempt to attach // to the parent's (ie, the browser's) console. // Try to attach console to the parent process. // It will succeed when the parent process is a command line, // so that stdio will be displayed in it. if (AttachConsole(ATTACH_PARENT_PROCESS)) { // Change std handles to refer to new console handles. // Before doing so, ensure that stdout/stderr haven't been // redirected to a valid file if (_fileno(stdout) == -1 || _get_osfhandle(fileno(stdout)) == -1) freopen("CONOUT$", "w", stdout); // Merge stderr into CONOUT$ since there isn't any `CONERR$`. // http://msdn.microsoft.com/en-us/library/windows/desktop/ms683231%28v=vs.85%29.aspx if (_fileno(stderr) == -1 || _get_osfhandle(fileno(stderr)) == -1) freopen("CONOUT$", "w", stderr); if (_fileno(stdin) == -1 || _get_osfhandle(fileno(stdin)) == -1) freopen("CONIN$", "r", stdin); } #endif char aLocal; profiler_init(&aLocal); PROFILER_LABEL("Startup", "CVE_2014_8643_PATCHED_XRE_InitChildProcess", js::ProfileEntry::Category::OTHER); // Complete 'task_t' exchange for Mac OS X. This structure has the same size // regardless of architecture so we don't have any cross-arch issues here. 
#ifdef XP_MACOSX if (aArgc < 1) return NS_ERROR_FAILURE; const char* const mach_port_name = aArgv[--aArgc]; const int kTimeoutMs = 1000; MachSendMessage child_message(0); if (!child_message.AddDescriptor(MachMsgPortDescriptor(mach_task_self()))) { NS_WARNING("child AddDescriptor(mach_task_self()) failed."); return NS_ERROR_FAILURE; } ReceivePort child_recv_port; mach_port_t raw_child_recv_port = child_recv_port.GetPort(); if (!child_message.AddDescriptor(MachMsgPortDescriptor(raw_child_recv_port))) { NS_WARNING("Adding descriptor to message failed"); return NS_ERROR_FAILURE; } MachPortSender child_sender(mach_port_name); kern_return_t err = child_sender.SendMessage(child_message, kTimeoutMs); if (err != KERN_SUCCESS) { NS_WARNING("child SendMessage() failed"); return NS_ERROR_FAILURE; } MachReceiveMessage parent_message; err = child_recv_port.WaitForMessage(&parent_message, kTimeoutMs); if (err != KERN_SUCCESS) { NS_WARNING("child WaitForMessage() failed"); return NS_ERROR_FAILURE; } if (parent_message.GetTranslatedPort(0) == MACH_PORT_NULL) { NS_WARNING("child GetTranslatedPort(0) failed"); return NS_ERROR_FAILURE; } err = task_set_bootstrap_port(mach_task_self(), parent_message.GetTranslatedPort(0)); if (err != KERN_SUCCESS) { NS_WARNING("child task_set_bootstrap_port() failed"); return NS_ERROR_FAILURE; } #endif SetupErrorHandling(aArgv[0]); #if defined(MOZ_CRASHREPORTER) if (aArgc < 1) return NS_ERROR_FAILURE; const char* const crashReporterArg = aArgv[--aArgc]; # if defined(XP_WIN) || defined(XP_MACOSX) // on windows and mac, |crashReporterArg| is the named pipe on which the // server is listening for requests, or "-" if crash reporting is // disabled. 
if (0 != strcmp("-", crashReporterArg) && !XRE_SetRemoteExceptionHandler(crashReporterArg)) { // Bug 684322 will add better visibility into this condition NS_WARNING("Could not setup crash reporting\n"); } # elif defined(OS_LINUX) // on POSIX, |crashReporterArg| is "true" if crash reporting is // enabled, false otherwise if (0 != strcmp("false", crashReporterArg) && !XRE_SetRemoteExceptionHandler(nullptr)) { // Bug 684322 will add better visibility into this condition NS_WARNING("Could not setup crash reporting\n"); } # else # error "OOP crash reporting unsupported on this platform" # endif #endif // if defined(MOZ_CRASHREPORTER) gArgv = aArgv; gArgc = aArgc; #if defined(MOZ_WIDGET_GTK) g_thread_init(nullptr); #endif #if defined(MOZ_WIDGET_QT) nsQAppInstance::AddRef(); #endif if (PR_GetEnv("MOZ_DEBUG_CHILD_PROCESS")) { #ifdef OS_POSIX printf("\n\nCHILDCHILDCHILDCHILD\n debug me @ %d\n\n", getpid()); sleep(30); #elif defined(OS_WIN) // Windows has a decent JIT debugging story, so NS_DebugBreak does the // right thing. NS_DebugBreak(NS_DEBUG_BREAK, "Invoking NS_DebugBreak() to debug child process", nullptr, __FILE__, __LINE__); #endif } // child processes launched by GeckoChildProcessHost get this magic // argument appended to their command lines const char* const parentPIDString = aArgv[aArgc-1]; NS_ABORT_IF_FALSE(parentPIDString, "NULL parent PID"); --aArgc; char* end = 0; base::ProcessId parentPID = strtol(parentPIDString, &end, 10); NS_ABORT_IF_FALSE(!*end, "invalid parent PID"); // Retrieve the parent process handle. We need this for shared memory use and // for creating new transports in the child. base::ProcessHandle parentHandle = 0; if (XRE_GetProcessType() != GeckoProcessType_GMPlugin) { mozilla::DebugOnly<bool> ok = base::OpenProcessHandle(parentPID, &parentHandle); NS_ABORT_IF_FALSE(ok, "can't open handle to parent"); } #if defined(XP_WIN) // On Win7+, register the application user model id passed in by // parent. 
This insures windows created by the container properly // group with the parent app on the Win7 taskbar. const char* const appModelUserId = aArgv[--aArgc]; if (appModelUserId) { // '-' implies no support if (*appModelUserId != '-') { nsString appId; appId.AssignWithConversion(nsDependentCString(appModelUserId)); // The version string is encased in quotes appId.Trim(NS_LITERAL_CSTRING("\"").get()); // Set the id SetTaskbarGroupId(appId); } } #endif base::AtExitManager exitManager; NotificationService notificationService; NS_LogInit(); nsresult rv = XRE_InitCommandLine(aArgc, aArgv); if (NS_FAILED(rv)) { profiler_shutdown(); NS_LogTerm(); return NS_ERROR_FAILURE; } MessageLoop::Type uiLoopType; switch (XRE_GetProcessType()) { case GeckoProcessType_Content: // Content processes need the XPCOM/chromium frankenventloop uiLoopType = MessageLoop::TYPE_MOZILLA_CHILD; break; default: uiLoopType = MessageLoop::TYPE_UI; break; } { // This is a lexical scope for the MessageLoop below. We want it // to go out of scope before NS_LogTerm() so that we don't get // spurious warnings about XPCOM objects being destroyed from a // static context. 
// Associate this thread with a UI MessageLoop MessageLoop uiMessageLoop(uiLoopType); { nsAutoPtr<ProcessChild> process; #ifdef XP_WIN mozilla::ipc::windows::InitUIThread(); #endif switch (XRE_GetProcessType()) { case GeckoProcessType_Default: NS_RUNTIMEABORT("This makes no sense"); break; case GeckoProcessType_Plugin: process = new PluginProcessChild(parentHandle); break; case GeckoProcessType_Content: { process = new ContentProcess(parentHandle); // If passed in grab the application path for xpcom init nsCString appDir; for (int idx = aArgc; idx > 0; idx--) { if (aArgv[idx] && !strcmp(aArgv[idx], "-appdir")) { appDir.Assign(nsDependentCString(aArgv[idx+1])); static_cast<ContentProcess*>(process.get())->SetAppDir(appDir); break; } } } break; case GeckoProcessType_IPDLUnitTest: #ifdef MOZ_IPDL_TESTS process = new IPDLUnitTestProcessChild(parentHandle); #else NS_RUNTIMEABORT("rebuild with --enable-ipdl-tests"); #endif break; case GeckoProcessType_GMPlugin: process = new gmp::GMPProcessChild(parentHandle); break; default: NS_RUNTIMEABORT("Unknown main thread class"); } if (!process->Init()) { profiler_shutdown(); NS_LogTerm(); return NS_ERROR_FAILURE; } // Run the UI event loop on the main thread. uiMessageLoop.MessageLoop::Run(); // Allow ProcessChild to clean up after itself before going out of // scope and being deleted process->CleanUp(); mozilla::Omnijar::CleanUp(); } } profiler_shutdown(); NS_LogTerm(); return XRE_DeinitCommandLine(); }
192
126,463
0
static int CVE_2014_9316_PATCHED_mjpeg_decode_app(MJpegDecodeContext *s) { int len, id, i; len = get_bits(&s->gb, 16); if (len < 6) return AVERROR_INVALIDDATA; if (8 * len > get_bits_left(&s->gb)) return AVERROR_INVALIDDATA; id = get_bits_long(&s->gb, 32); len -= 6; if (s->avctx->debug & FF_DEBUG_STARTCODE) { char id_str[32]; av_get_codec_tag_string(id_str, sizeof(id_str), av_bswap32(id)); av_log(s->avctx, AV_LOG_DEBUG, "APPx (%s / %8X) len=%d\n", id_str, id, len); } /* Buggy AVID, it puts EOI only at every 10th frame. */ /* Also, this fourcc is used by non-avid files too, it holds some information, but it's always present in AVID-created files. */ if (id == AV_RB32("AVI1")) { /* structure: 4bytes AVI1 1bytes polarity 1bytes always zero 4bytes field_size 4bytes field_size_less_padding */ s->buggy_avid = 1; i = get_bits(&s->gb, 8); len--; av_log(s->avctx, AV_LOG_DEBUG, "polarity %d\n", i); #if 0 skip_bits(&s->gb, 8); skip_bits(&s->gb, 32); skip_bits(&s->gb, 32); len -= 10; #endif goto out; } // len -= 2; if (id == AV_RB32("JFIF")) { int t_w, t_h, v1, v2; skip_bits(&s->gb, 8); /* the trailing zero-byte */ v1 = get_bits(&s->gb, 8); v2 = get_bits(&s->gb, 8); skip_bits(&s->gb, 8); s->avctx->sample_aspect_ratio.num = get_bits(&s->gb, 16); s->avctx->sample_aspect_ratio.den = get_bits(&s->gb, 16); ff_set_sar(s->avctx, s->avctx->sample_aspect_ratio); if (s->avctx->debug & FF_DEBUG_PICT_INFO) av_log(s->avctx, AV_LOG_INFO, "mjpeg: JFIF header found (version: %x.%x) SAR=%d/%d\n", v1, v2, s->avctx->sample_aspect_ratio.num, s->avctx->sample_aspect_ratio.den); t_w = get_bits(&s->gb, 8); t_h = get_bits(&s->gb, 8); if (t_w && t_h) { /* skip thumbnail */ if (len -10 - (t_w * t_h * 3) > 0) len -= t_w * t_h * 3; } len -= 10; goto out; } if (id == AV_RB32("Adob") && (get_bits(&s->gb, 8) == 'e')) { skip_bits(&s->gb, 16); /* version */ skip_bits(&s->gb, 16); /* flags0 */ skip_bits(&s->gb, 16); /* flags1 */ s->adobe_transform = get_bits(&s->gb, 8); if (s->avctx->debug & 
FF_DEBUG_PICT_INFO) av_log(s->avctx, AV_LOG_INFO, "mjpeg: Adobe header found, transform=%d\n", s->adobe_transform); len -= 7; goto out; } if (id == AV_RB32("LJIF")) { int rgb = s->rgb; int pegasus_rct = s->pegasus_rct; if (s->avctx->debug & FF_DEBUG_PICT_INFO) av_log(s->avctx, AV_LOG_INFO, "Pegasus lossless jpeg header found\n"); skip_bits(&s->gb, 16); /* version ? */ skip_bits(&s->gb, 16); /* unknown always 0? */ skip_bits(&s->gb, 16); /* unknown always 0? */ skip_bits(&s->gb, 16); /* unknown always 0? */ switch (i=get_bits(&s->gb, 8)) { case 1: rgb = 1; pegasus_rct = 0; break; case 2: rgb = 1; pegasus_rct = 1; break; default: av_log(s->avctx, AV_LOG_ERROR, "unknown colorspace %d\n", i); } len -= 9; if (s->got_picture) if (rgb != s->rgb || pegasus_rct != s->pegasus_rct) { av_log(s->avctx, AV_LOG_WARNING, "Mismatching LJIF tag\n"); goto out; } s->rgb = rgb; s->pegasus_rct = pegasus_rct; goto out; } if (id == AV_RL32("colr") && len > 0) { s->colr = get_bits(&s->gb, 8); if (s->avctx->debug & FF_DEBUG_PICT_INFO) av_log(s->avctx, AV_LOG_INFO, "COLR %d\n", s->colr); len --; goto out; } if (id == AV_RL32("xfrm") && len > 0) { s->xfrm = get_bits(&s->gb, 8); if (s->avctx->debug & FF_DEBUG_PICT_INFO) av_log(s->avctx, AV_LOG_INFO, "XFRM %d\n", s->xfrm); len --; goto out; } /* JPS extension by VRex */ if (s->start_code == APP3 && id == AV_RB32("_JPS") && len >= 10) { int flags, layout, type; if (s->avctx->debug & FF_DEBUG_PICT_INFO) av_log(s->avctx, AV_LOG_INFO, "_JPSJPS_\n"); skip_bits(&s->gb, 32); len -= 4; /* JPS_ */ skip_bits(&s->gb, 16); len -= 2; /* block length */ skip_bits(&s->gb, 8); /* reserved */ flags = get_bits(&s->gb, 8); layout = get_bits(&s->gb, 8); type = get_bits(&s->gb, 8); len -= 4; s->stereo3d = av_stereo3d_alloc(); if (!s->stereo3d) { goto out; } if (type == 0) { s->stereo3d->type = AV_STEREO3D_2D; } else if (type == 1) { switch (layout) { case 0x01: s->stereo3d->type = AV_STEREO3D_LINES; break; case 0x02: s->stereo3d->type = AV_STEREO3D_SIDEBYSIDE; 
break; case 0x03: s->stereo3d->type = AV_STEREO3D_TOPBOTTOM; break; } if (!(flags & 0x04)) { s->stereo3d->flags = AV_STEREO3D_FLAG_INVERT; } } goto out; } /* EXIF metadata */ if (s->start_code == APP1 && id == AV_RB32("Exif") && len >= 2) { GetByteContext gbytes; int ret, le, ifd_offset, bytes_read; const uint8_t *aligned; skip_bits(&s->gb, 16); // skip padding len -= 2; // init byte wise reading aligned = align_get_bits(&s->gb); bytestream2_init(&gbytes, aligned, len); // read TIFF header ret = ff_tdecode_header(&gbytes, &le, &ifd_offset); if (ret) { av_log(s->avctx, AV_LOG_ERROR, "mjpeg: invalid TIFF header in EXIF data\n"); } else { bytestream2_seek(&gbytes, ifd_offset, SEEK_SET); // read 0th IFD and store the metadata // (return values > 0 indicate the presence of subimage metadata) ret = avpriv_exif_decode_ifd(s->avctx, &gbytes, le, 0, &s->exif_metadata); if (ret < 0) { av_log(s->avctx, AV_LOG_ERROR, "mjpeg: error decoding EXIF data\n"); } } bytes_read = bytestream2_tell(&gbytes); skip_bits(&s->gb, bytes_read << 3); len -= bytes_read; goto out; } /* Apple MJPEG-A */ if ((s->start_code == APP1) && (len > (0x28 - 8))) { id = get_bits_long(&s->gb, 32); len -= 4; /* Apple MJPEG-A */ if (id == AV_RB32("mjpg")) { #if 0 skip_bits(&s->gb, 32); /* field size */ skip_bits(&s->gb, 32); /* pad field size */ skip_bits(&s->gb, 32); /* next off */ skip_bits(&s->gb, 32); /* quant off */ skip_bits(&s->gb, 32); /* huff off */ skip_bits(&s->gb, 32); /* image off */ skip_bits(&s->gb, 32); /* scan off */ skip_bits(&s->gb, 32); /* data off */ #endif if (s->avctx->debug & FF_DEBUG_PICT_INFO) av_log(s->avctx, AV_LOG_INFO, "mjpeg: Apple MJPEG-A header found\n"); } } out: /* slow but needed for extreme adobe jpegs */ if (len < 0) av_log(s->avctx, AV_LOG_ERROR, "mjpeg: error, decode_app parser read over the end\n"); while (--len > 0) skip_bits(&s->gb, 8); return 0; }
193
4,448
0
int CVE_2014_9374_PATCHED_AST_OPTIONAL_API_NAME(ast_websocket_read)(struct ast_websocket *session, char **payload, uint64_t *payload_len, enum ast_websocket_opcode *opcode, int *fragmented) { char buf[MAXIMUM_FRAME_SIZE] = ""; int fin = 0; int mask_present = 0; char *mask = NULL, *new_payload = NULL; size_t options_len = 0, frame_size = 0; *payload = NULL; *payload_len = 0; *fragmented = 0; if (ws_safe_read(session, &buf[0], MIN_WS_HDR_SZ, opcode)) { return 0; } frame_size += MIN_WS_HDR_SZ; /* ok, now we have the first 2 bytes, so we know some flags, opcode and payload length (or whether payload length extension will be required) */ *opcode = buf[0] & 0xf; *payload_len = buf[1] & 0x7f; if (*opcode == AST_WEBSOCKET_OPCODE_TEXT || *opcode == AST_WEBSOCKET_OPCODE_BINARY || *opcode == AST_WEBSOCKET_OPCODE_CONTINUATION || *opcode == AST_WEBSOCKET_OPCODE_PING || *opcode == AST_WEBSOCKET_OPCODE_PONG) { fin = (buf[0] >> 7) & 1; mask_present = (buf[1] >> 7) & 1; /* Based on the mask flag and payload length, determine how much more we need to read before start parsing the rest of the header */ options_len += mask_present ? 4 : 0; options_len += (*payload_len == 126) ? 2 : (*payload_len == 127) ? 
8 : 0; if (options_len) { /* read the rest of the header options */ if (ws_safe_read(session, &buf[frame_size], options_len, opcode)) { return 0; } frame_size += options_len; } if (*payload_len == 126) { /* Grab the 2-byte payload length */ *payload_len = ntohs(get_unaligned_uint16(&buf[2])); mask = &buf[4]; } else if (*payload_len == 127) { /* Grab the 8-byte payload length */ *payload_len = ntohl(get_unaligned_uint64(&buf[2])); mask = &buf[10]; } else { /* Just set the mask after the small 2-byte header */ mask = &buf[2]; } /* Now read the rest of the payload */ *payload = &buf[frame_size]; /* payload will start here, at the end of the options, if any */ frame_size = frame_size + (*payload_len); /* final frame size is header + optional headers + payload data */ if (frame_size > MAXIMUM_FRAME_SIZE) { ast_log(LOG_WARNING, "Cannot fit huge websocket frame of %zu bytes\n", frame_size); /* The frame won't fit :-( */ ast_websocket_close(session, 1009); return -1; } if (ws_safe_read(session, (*payload), (*payload_len), opcode)) { return 0; } /* If a mask is present unmask the payload */ if (mask_present) { unsigned int pos; for (pos = 0; pos < *payload_len; pos++) { (*payload)[pos] ^= mask[pos % 4]; } } /* Per the RFC for PING we need to send back an opcode with the application data as received */ if ((*opcode == AST_WEBSOCKET_OPCODE_PING) && (ast_websocket_write(session, AST_WEBSOCKET_OPCODE_PONG, *payload, *payload_len))) { *payload_len = 0; ast_websocket_close(session, 1009); return 0; } if (*payload_len) { if (!(new_payload = ast_realloc(session->payload, (session->payload_len + *payload_len)))) { ast_log(LOG_WARNING, "Failed allocation: %p, %zu, %"PRIu64"\n", session->payload, session->payload_len, *payload_len); *payload_len = 0; ast_websocket_close(session, 1009); return 0; } session->payload = new_payload; memcpy((session->payload + session->payload_len), (*payload), (*payload_len)); session->payload_len += *payload_len; } else if (!session->payload_len && 
session->payload) { ast_free(session->payload); session->payload = NULL; } if (!fin && session->reconstruct && (session->payload_len < session->reconstruct)) { /* If this is not a final message we need to defer returning it until later */ if (*opcode != AST_WEBSOCKET_OPCODE_CONTINUATION) { session->opcode = *opcode; } *opcode = AST_WEBSOCKET_OPCODE_CONTINUATION; *payload_len = 0; *payload = NULL; } else { if (*opcode == AST_WEBSOCKET_OPCODE_CONTINUATION) { if (!fin) { /* If this was not actually the final message tell the user it is fragmented so they can deal with it accordingly */ *fragmented = 1; } else { /* Final frame in multi-frame so push up the actual opcode */ *opcode = session->opcode; } } *payload_len = session->payload_len; *payload = session->payload; session->payload_len = 0; } } else if (*opcode == AST_WEBSOCKET_OPCODE_CLOSE) { /* Make the payload available so the user can look at the reason code if they so desire */ if ((*payload_len) && (new_payload = ast_realloc(session->payload, *payload_len))) { if (ws_safe_read(session, &buf[frame_size], (*payload_len), opcode)) { return 0; } session->payload = new_payload; memcpy(session->payload, &buf[frame_size], *payload_len); *payload = session->payload; frame_size += (*payload_len); } session->closing = 1; } else { ast_log(LOG_WARNING, "WebSocket unknown opcode %u\n", *opcode); /* We received an opcode that we don't understand, the RFC states that 1003 is for a type of data that can't be accepted... opcodes * fit that, I think. */ ast_websocket_close(session, 1003); } return 0; }
194
123,870
0
static int CVE_2014_9676_PATCHED_seg_write_packet(AVFormatContext *s, AVPacket *pkt) { SegmentContext *seg = s->priv_data; AVFormatContext *oc = seg->avf; AVStream *st = s->streams[pkt->stream_index]; int64_t end_pts = INT64_MAX, offset; int start_frame = INT_MAX; int ret; if (seg->times) { end_pts = seg->segment_count < seg->nb_times ? seg->times[seg->segment_count] : INT64_MAX; } else if (seg->frames) { start_frame = seg->segment_count <= seg->nb_frames ? seg->frames[seg->segment_count] : INT_MAX; } else { end_pts = seg->time * (seg->segment_count+1); } av_dlog(s, "packet stream:%d pts:%s pts_time:%s is_key:%d frame:%d\n", pkt->stream_index, av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &st->time_base), pkt->flags & AV_PKT_FLAG_KEY, pkt->stream_index == seg->reference_stream_index ? seg->frame_count : -1); if (pkt->stream_index == seg->reference_stream_index && pkt->flags & AV_PKT_FLAG_KEY && (seg->frame_count >= start_frame || (pkt->pts != AV_NOPTS_VALUE && av_compare_ts(pkt->pts, st->time_base, end_pts-seg->time_delta, AV_TIME_BASE_Q) >= 0))) { if ((ret = segment_end(s, seg->individual_header_trailer, 0)) < 0) goto fail; if ((ret = segment_start(s, seg->individual_header_trailer)) < 0) goto fail; oc = seg->avf; seg->cur_entry.index = seg->segment_idx; seg->cur_entry.start_time = (double)pkt->pts * av_q2d(st->time_base); seg->cur_entry.start_pts = av_rescale_q(pkt->pts, st->time_base, AV_TIME_BASE_Q); } else if (pkt->pts != AV_NOPTS_VALUE) { seg->cur_entry.end_time = FFMAX(seg->cur_entry.end_time, (double)(pkt->pts + pkt->duration) * av_q2d(st->time_base)); } if (seg->is_first_pkt) { av_log(s, AV_LOG_DEBUG, "segment:'%s' starts with packet stream:%d pts:%s pts_time:%s frame:%d\n", seg->avf->filename, pkt->stream_index, av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &st->time_base), seg->frame_count); seg->is_first_pkt = 0; } av_log(s, AV_LOG_DEBUG, "stream:%d start_pts_time:%s pts:%s pts_time:%s dts:%s dts_time:%s", pkt->stream_index, 
av_ts2timestr(seg->cur_entry.start_pts, &AV_TIME_BASE_Q), av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &st->time_base), av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &st->time_base)); /* compute new timestamps */ offset = av_rescale_q(seg->initial_offset - (seg->reset_timestamps ? seg->cur_entry.start_pts : 0), AV_TIME_BASE_Q, st->time_base); if (pkt->pts != AV_NOPTS_VALUE) pkt->pts += offset; if (pkt->dts != AV_NOPTS_VALUE) pkt->dts += offset; av_log(s, AV_LOG_DEBUG, " -> pts:%s pts_time:%s dts:%s dts_time:%s\n", av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &st->time_base), av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &st->time_base)); ret = ff_write_chained(oc, pkt->stream_index, pkt, s); fail: if (pkt->stream_index == seg->reference_stream_index) seg->frame_count++; if (ret < 0) { if (seg->list) avio_close(seg->list_pb); avformat_free_context(seg->avf); } return ret; }
195
37,391
0
static int CVE_2015_0228_PATCHED_lua_websocket_read(lua_State *L) { apr_socket_t *sock; apr_status_t rv; int do_read = 1; int n = 0; apr_size_t len = 1; apr_size_t plen = 0; unsigned short payload_short = 0; apr_uint64_t payload_long = 0; unsigned char *mask_bytes; char byte; int plaintext; request_rec *r = ap_lua_check_request_rec(L, 1); plaintext = ap_lua_ssl_is_https(r->connection) ? 0 : 1; mask_bytes = apr_pcalloc(r->pool, 4); sock = ap_get_conn_socket(r->connection); while (do_read) { do_read = 0; /* Get opcode and FIN bit */ if (plaintext) { rv = apr_socket_recv(sock, &byte, &len); } else { rv = lua_websocket_readbytes(r->connection, &byte, 1); } if (rv == APR_SUCCESS) { unsigned char fin, opcode, mask, payload; fin = byte >> 7; opcode = (byte << 4) >> 4; /* Get the payload length and mask bit */ if (plaintext) { rv = apr_socket_recv(sock, &byte, &len); } else { rv = lua_websocket_readbytes(r->connection, &byte, 1); } if (rv == APR_SUCCESS) { mask = byte >> 7; payload = byte - 128; plen = payload; /* Extended payload? */ if (payload == 126) { len = 2; if (plaintext) { rv = apr_socket_recv(sock, (char*) &payload_short, &len); } else { rv = lua_websocket_readbytes(r->connection, (char*) &payload_short, 2); } payload_short = ntohs(payload_short); if (rv == APR_SUCCESS) { plen = payload_short; } else { return 0; } } /* Super duper extended payload? */ if (payload == 127) { len = 8; if (plaintext) { rv = apr_socket_recv(sock, (char*) &payload_long, &len); } else { rv = lua_websocket_readbytes(r->connection, (char*) &payload_long, 8); } if (rv == APR_SUCCESS) { plen = ap_ntoh64(&payload_long); } else { return 0; } } ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, "Websocket: Reading %" APR_SIZE_T_FMT " (%s) bytes, masking is %s. %s", plen, (payload >= 126) ? "extra payload" : "no extra payload", mask ? "on" : "off", fin ? 
"This is a final frame" : "more to follow"); if (mask) { len = 4; if (plaintext) { rv = apr_socket_recv(sock, (char*) mask_bytes, &len); } else { rv = lua_websocket_readbytes(r->connection, (char*) mask_bytes, 4); } if (rv != APR_SUCCESS) { return 0; } } if (plen < (HUGE_STRING_LEN*1024) && plen > 0) { apr_size_t remaining = plen; apr_size_t received; apr_off_t at = 0; char *buffer = apr_palloc(r->pool, plen+1); buffer[plen] = 0; if (plaintext) { while (remaining > 0) { received = remaining; rv = apr_socket_recv(sock, buffer+at, &received); if (received > 0 ) { remaining -= received; at += received; } } ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, "Websocket: Frame contained %" APR_OFF_T_FMT " bytes, pushed to Lua stack", at); } else { rv = lua_websocket_readbytes(r->connection, buffer, remaining); ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, "Websocket: SSL Frame contained %" APR_SIZE_T_FMT " bytes, "\ "pushed to Lua stack", remaining); } if (mask) { for (n = 0; n < plen; n++) { buffer[n] ^= mask_bytes[n%4]; } } lua_pushlstring(L, buffer, (size_t) plen); /* push to stack */ lua_pushboolean(L, fin); /* push FIN bit to stack as boolean */ return 2; } /* Decide if we need to react to the opcode or not */ if (opcode == 0x09) { /* ping */ char frame[2]; plen = 2; frame[0] = 0x8A; frame[1] = 0; apr_socket_send(sock, frame, &plen); /* Pong! */ do_read = 1; } } } } return 0; }
196
17,931
0
*/ static void CVE_2015_1158_PATCHED_authenticate_job(cupsd_client_t *con, /* I - Client connection */ ipp_attribute_t *uri) /* I - Job URI */ { ipp_attribute_t *attr, /* job-id attribute */ *auth_info; /* auth-info attribute */ int jobid; /* Job ID */ cupsd_job_t *job; /* Current job */ char scheme[HTTP_MAX_URI], /* Method portion of URI */ username[HTTP_MAX_URI], /* Username portion of URI */ host[HTTP_MAX_URI], /* Host portion of URI */ resource[HTTP_MAX_URI]; /* Resource portion of URI */ int port; /* Port portion of URI */ cupsdLogMessage(CUPSD_LOG_DEBUG2, "CVE_2015_1158_PATCHED_authenticate_job(%p[%d], %s)", con, con->number, uri->values[0].string.text); /* * Start with "everything is OK" status... */ con->response->request.status.status_code = IPP_OK; /* * See if we have a job URI or a printer URI... */ if (!strcmp(uri->name, "printer-uri")) { /* * Got a printer URI; see if we also have a job-id attribute... */ if ((attr = ippFindAttribute(con->request, "job-id", IPP_TAG_INTEGER)) == NULL) { send_ipp_status(con, IPP_BAD_REQUEST, _("Got a printer-uri attribute but no job-id.")); return; } jobid = attr->values[0].integer; } else { /* * Got a job URI; parse it to get the job ID... */ httpSeparateURI(HTTP_URI_CODING_ALL, uri->values[0].string.text, scheme, sizeof(scheme), username, sizeof(username), host, sizeof(host), &port, resource, sizeof(resource)); if (strncmp(resource, "/jobs/", 6)) { /* * Not a valid URI! */ send_ipp_status(con, IPP_BAD_REQUEST, _("Bad job-uri \"%s\"."), uri->values[0].string.text); return; } jobid = atoi(resource + 6); } /* * See if the job exists... */ if ((job = cupsdFindJob(jobid)) == NULL) { /* * Nope - return a "not found" error... */ send_ipp_status(con, IPP_NOT_FOUND, _("Job #%d does not exist."), jobid); return; } /* * See if the job has been completed... */ if (job->state_value != IPP_JOB_HELD) { /* * Return a "not-possible" error... 
*/ send_ipp_status(con, IPP_NOT_POSSIBLE, _("Job #%d is not held for authentication."), jobid); return; } /* * See if we have already authenticated... */ auth_info = ippFindAttribute(con->request, "auth-info", IPP_TAG_TEXT); if (!con->username[0] && !auth_info) { cupsd_printer_t *printer; /* Job destination */ /* * No auth data. If we need to authenticate via Kerberos, send a * HTTP auth challenge, otherwise just return an IPP error... */ printer = cupsdFindDest(job->dest); if (printer && printer->num_auth_info_required > 0 && !strcmp(printer->auth_info_required[0], "negotiate")) send_http_error(con, HTTP_UNAUTHORIZED, printer); else send_ipp_status(con, IPP_NOT_AUTHORIZED, _("No authentication information provided.")); return; } /* * See if the job is owned by the requesting user... */ if (!validate_user(job, con, job->username, username, sizeof(username))) { send_http_error(con, con->username[0] ? HTTP_FORBIDDEN : HTTP_UNAUTHORIZED, cupsdFindDest(job->dest)); return; } /* * Save the authentication information for this job... */ save_auth_info(con, job, auth_info); /* * Reset the job-hold-until value to "no-hold"... */ if ((attr = ippFindAttribute(job->attrs, "job-hold-until", IPP_TAG_KEYWORD)) == NULL) attr = ippFindAttribute(job->attrs, "job-hold-until", IPP_TAG_NAME); if (attr) { ippSetValueTag(job->attrs, &attr, IPP_TAG_KEYWORD); ippSetString(job->attrs, &attr, 0, "no-hold"); } /* * Release the job and return... */ cupsdReleaseJob(job); cupsdAddEvent(CUPSD_EVENT_JOB_STATE, NULL, job, "Job authenticated by user"); cupsdLogJob(job, CUPSD_LOG_INFO, "Authenticated by \"%s\".", con->username); cupsdCheckJobs(); }
197
135,862
0
/*
 * Decode one frame of 4-bit (16-color palettized) Microsoft RLE video.
 *
 * avctx - codec context supplying the frame width/height
 * pic   - destination picture; 4-bit palette indices are written one per
 *         byte into pic->data[0] (rows addressed via pic->linesize[0])
 * gb    - byte reader positioned at the start of the compressed frame data
 *
 * Returns 0 on success, or AVERROR_INVALIDDATA when the bitstream runs dry
 * or an RLE opcode would write outside the frame bounds.
 *
 * Fix vs. previous revision: the escape code 2 ("delta") branch read the
 * second (y) delta byte but discarded it; it must be applied to `line`
 * (frames decode bottom-up, so the delta moves toward row 0).
 */
static int CVE_2015_3395_PATCHED_msrle_decode_pal4(AVCodecContext *avctx, AVPicture *pic,
                                                   GetByteContext *gb)
{
    unsigned char rle_code;
    unsigned char extra_byte, odd_pixel;
    unsigned char stream_byte;
    unsigned int pixel_ptr = 0;
    int line = avctx->height - 1;   /* MS RLE stores rows bottom-up */
    int i;

    while (line >= 0 && pixel_ptr <= avctx->width) {
        if (bytestream2_get_bytes_left(gb) <= 0) {
            av_log(avctx, AV_LOG_ERROR,
                   "MS RLE: bytestream overrun, %dx%d left\n",
                   avctx->width - pixel_ptr, line);
            return AVERROR_INVALIDDATA;
        }
        rle_code = stream_byte = bytestream2_get_byteu(gb);
        if (rle_code == 0) {
            /* fetch the next byte to see how to handle escape code */
            stream_byte = bytestream2_get_byte(gb);
            if (stream_byte == 0) {
                /* line is done, goto the next one */
                line--;
                pixel_ptr = 0;
            } else if (stream_byte == 1) {
                /* decode is done */
                return 0;
            } else if (stream_byte == 2) {
                /* reposition frame decode coordinates: x delta, then y delta */
                stream_byte = bytestream2_get_byte(gb);
                pixel_ptr += stream_byte;
                stream_byte = bytestream2_get_byte(gb);
                /* apply the y delta (was dropped before); a resulting
                 * negative line is caught by the loop condition */
                line -= stream_byte;
            } else {
                /* copy pixels from encoded stream: stream_byte pixels,
                 * packed two per source byte */
                odd_pixel =  stream_byte & 1;
                rle_code = (stream_byte + 1) / 2;
                extra_byte = rle_code & 0x01;
                if (pixel_ptr + 2*rle_code - odd_pixel > avctx->width
                    || bytestream2_get_bytes_left(gb) < rle_code) {
                    av_log(avctx, AV_LOG_ERROR,
                           "MS RLE: frame/stream ptr just went out of bounds (copy)\n");
                    return AVERROR_INVALIDDATA;
                }

                for (i = 0; i < rle_code; i++) {
                    if (pixel_ptr >= avctx->width)
                        break;
                    stream_byte = bytestream2_get_byteu(gb);
                    pic->data[0][line * pic->linesize[0] + pixel_ptr] = stream_byte >> 4;
                    pixel_ptr++;
                    /* an odd count means the last source byte holds one pixel */
                    if (i + 1 == rle_code && odd_pixel)
                        break;
                    if (pixel_ptr >= avctx->width)
                        break;
                    pic->data[0][line * pic->linesize[0] + pixel_ptr] = stream_byte & 0x0F;
                    pixel_ptr++;
                }

                /* if the RLE code is odd, skip a byte in the stream (pad) */
                if (extra_byte)
                    bytestream2_skip(gb, 1);
            }
        } else {
            /* decode a run of data: rle_code pixels alternating between the
             * high and low nibble of the next stream byte */
            if (pixel_ptr + rle_code > avctx->width + 1) {
                av_log(avctx, AV_LOG_ERROR,
                       "MS RLE: frame ptr just went out of bounds (run)\n");
                return AVERROR_INVALIDDATA;
            }
            stream_byte = bytestream2_get_byte(gb);
            for (i = 0; i < rle_code; i++) {
                if (pixel_ptr >= avctx->width)
                    break;
                if ((i & 1) == 0)
                    pic->data[0][line * pic->linesize[0] + pixel_ptr] = stream_byte >> 4;
                else
                    pic->data[0][line * pic->linesize[0] + pixel_ptr] = stream_byte & 0x0F;
                pixel_ptr++;
            }
        }
    }

    /* one last sanity check on the way out: the stream should be consumed */
    if (bytestream2_get_bytes_left(gb)) {
        av_log(avctx, AV_LOG_ERROR,
               "MS RLE: ended frame decode with %d bytes left over\n",
               bytestream2_get_bytes_left(gb));
        return AVERROR_INVALIDDATA;
    }

    return 0;
}
198
175,730
0
/*
 * Dissect an LBMR PSER record into the protocol tree.
 *
 * tvb    - packet buffer
 * offset - start of the PSER record within tvb
 * pinfo  - packet info (used only for expert-info reporting)
 * tree   - tree to attach the dissected fields to
 *
 * Returns the number of bytes consumed (fixed header + variable topic name +
 * optional options block).  The topic length is derived from the on-the-wire
 * header length field, so a malformed length is handled by tvb bounds
 * checking in the proto_tree_add_item() calls.
 */
static int CVE_2015_3809_PATCHED_dissect_lbmr_pser(tvbuff_t * tvb, int offset, packet_info * pinfo, proto_tree * tree)
{
    int hdr_len = 0;                     /* total header length from the wire */
    int len = 0;                         /* running count of bytes dissected */
    int topic_len = 0;                   /* variable-length topic name size */
    proto_tree * flags_tree = NULL;
    proto_item * flags_item = NULL;
    int curr_offset = offset;
    guint16 flags = 0;

    /* Fixed-size header: the length field covers the fixed part plus the
       trailing topic string, so topic_len = hdr_len - sizeof(fixed part). */
    hdr_len = (int)tvb_get_ntohs(tvb, curr_offset + O_LBMR_PSER_T_LEN);
    flags = tvb_get_ntohs(tvb, curr_offset + O_LBMR_PSER_T_FLAGS);
    topic_len = hdr_len - L_LBMR_PSER_T;
    proto_tree_add_item(tree, hf_lbmr_pser_dep_type, tvb, offset + O_LBMR_PSER_T_DEP_TYPE, L_LBMR_PSER_T_DEP_TYPE, ENC_BIG_ENDIAN);
    proto_tree_add_item(tree, hf_lbmr_pser_len, tvb, offset + O_LBMR_PSER_T_LEN, L_LBMR_PSER_T_LEN, ENC_BIG_ENDIAN);
    flags_item = proto_tree_add_none_format(tree, hf_lbmr_pser_flags, tvb, offset + O_LBMR_PSER_T_FLAGS, L_LBMR_PSER_T_FLAGS, "Flags (0x%04x)", flags);
    flags_tree = proto_item_add_subtree(flags_item, ett_lbmr_pser_flags);
    proto_tree_add_item(flags_tree, hf_lbmr_pser_flags_option, tvb, offset + O_LBMR_PSER_T_FLAGS, L_LBMR_PSER_T_FLAGS, ENC_BIG_ENDIAN);
    proto_tree_add_item(tree, hf_lbmr_pser_source_ip, tvb, offset + O_LBMR_PSER_T_SOURCE_IP, L_LBMR_PSER_T_SOURCE_IP, ENC_BIG_ENDIAN);
    proto_tree_add_item(tree, hf_lbmr_pser_store_ip, tvb, offset + O_LBMR_PSER_T_STORE_IP, L_LBMR_PSER_T_STORE_IP, ENC_BIG_ENDIAN);
    proto_tree_add_item(tree, hf_lbmr_pser_transport_idx, tvb, offset + O_LBMR_PSER_T_TRANSPORT_IDX, L_LBMR_PSER_T_TRANSPORT_IDX, ENC_BIG_ENDIAN);
    proto_tree_add_item(tree, hf_lbmr_pser_topic_idx, tvb, offset + O_LBMR_PSER_T_TOPIC_IDX, L_LBMR_PSER_T_TOPIC_IDX, ENC_BIG_ENDIAN);
    proto_tree_add_item(tree, hf_lbmr_pser_source_port, tvb, offset + O_LBMR_PSER_T_SOURCE_PORT, L_LBMR_PSER_T_SOURCE_PORT, ENC_BIG_ENDIAN);
    proto_tree_add_item(tree, hf_lbmr_pser_store_port, tvb, offset + O_LBMR_PSER_T_STORE_PORT, L_LBMR_PSER_T_STORE_PORT, ENC_BIG_ENDIAN);
    proto_tree_add_item(tree, hf_lbmr_pser_topic, tvb, offset + O_LBMR_PSER_T_TOPIC, topic_len, ENC_ASCII|ENC_NA);
    curr_offset += hdr_len;
    len = hdr_len;

    /* Optional options block, present only when the OPT flag is set. */
    if ((flags & LBMR_PSER_OPT_FLAG) != 0)
    {
        proto_tree * opts_tree = NULL;
        proto_item * opts_item = NULL;
        proto_tree * optlen_tree = NULL;
        proto_tree * optlen_item = NULL;
        guint16 opt_len = 0;

        /* Options header: total options length, then a sequence of TLVs. */
        opt_len = tvb_get_ntohs(tvb, curr_offset + O_LBMR_PSER_OPTLEN_T_OPTLEN);
        opts_item = proto_tree_add_item(tree, hf_lbmr_pser_opts, tvb, curr_offset, -1, ENC_NA);
        opts_tree = proto_item_add_subtree(opts_item, ett_lbmr_pser_opts);
        optlen_item = proto_tree_add_item(opts_tree, hf_lbmr_pser_optlen, tvb, curr_offset, L_LBMR_PSER_OPTLEN_T, ENC_NA);
        optlen_tree = proto_item_add_subtree(optlen_item, ett_lbmr_pser_opt_len);
        proto_tree_add_item(optlen_tree, hf_lbmr_pser_optlen_type, tvb, curr_offset + O_LBMR_PSER_OPTLEN_T_TYPE, L_LBMR_PSER_OPTLEN_T_TYPE, ENC_BIG_ENDIAN);
        proto_tree_add_item(optlen_tree, hf_lbmr_pser_optlen_optlen, tvb, curr_offset + O_LBMR_PSER_OPTLEN_T_OPTLEN, L_LBMR_PSER_OPTLEN_T_OPTLEN, ENC_BIG_ENDIAN);
        proto_item_set_len(opts_item, opt_len);
        len += L_LBMR_PSER_OPTLEN_T;
        curr_offset += L_LBMR_PSER_OPTLEN_T;
        opt_len -= L_LBMR_PSER_OPTLEN_T;

        /* Walk the option TLVs until the declared options length is consumed. */
        while (opt_len > 0)
        {
            proto_tree * ctxinst_tree = NULL;
            proto_item * ctxinst_item = NULL;
            guint8 opt_type = tvb_get_guint8(tvb, curr_offset + O_LBMR_PSER_OPT_HDR_T_TYPE);
            guint8 option_len = tvb_get_guint8(tvb, curr_offset + O_LBMR_PSER_OPT_HDR_T_LEN);

            switch (opt_type)
            {
                case LBMR_PSER_OPT_SRC_CTXINST_TYPE:
                case LBMR_PSER_OPT_STORE_CTXINST_TYPE:
                    /* Context-instance option: fixed-size, dissected field by field. */
                    ctxinst_item = proto_tree_add_item(opts_tree, hf_lbmr_pser_opt_ctxinst, tvb, curr_offset, L_LBMR_PSER_OPT_CTXINST_T, ENC_NA);
                    ctxinst_tree = proto_item_add_subtree(ctxinst_item, ett_lbmr_pser_opt_ctxinst);
                    proto_tree_add_item(ctxinst_tree, hf_lbmr_pser_opt_ctxinst_len, tvb, curr_offset + O_LBMR_PSER_OPT_CTXINST_T_LEN, L_LBMR_PSER_OPT_CTXINST_T_LEN, ENC_BIG_ENDIAN);
                    proto_tree_add_item(ctxinst_tree, hf_lbmr_pser_opt_ctxinst_type, tvb, curr_offset + O_LBMR_PSER_OPT_CTXINST_T_TYPE, L_LBMR_PSER_OPT_CTXINST_T_TYPE, ENC_BIG_ENDIAN);
                    proto_tree_add_item(ctxinst_tree, hf_lbmr_pser_opt_ctxinst_ctxinst, tvb, curr_offset + O_LBMR_PSER_OPT_CTXINST_T_CTXINST, L_LBMR_PSER_OPT_CTXINST_T_CTXINST, ENC_NA);
                    len += L_LBMR_PSER_OPT_CTXINST_T;
                    curr_offset += L_LBMR_PSER_OPT_CTXINST_T;
                    opt_len -= L_LBMR_PSER_OPT_CTXINST_T;
                    break;
                default:
                    /* Unknown option: skip by its declared length and flag it. */
                    len += option_len;
                    curr_offset += option_len;
                    opt_len -= option_len;
                    expert_add_info_format(pinfo, NULL, &ei_lbmr_analysis_invalid_value, "Unknown LBMR PSER option 0x%02x", opt_type);
                    /* A zero option_len would never advance and loop forever
                       on a crafted packet (CVE-2015-3809 fix): bail out. */
                    if (option_len == 0)
                    {
                        return (len);
                    }
                    break;
            }
        }
    }
    return (len);
}
199