Dataset columns:
  id: stringlengths 25-30
  content: stringlengths 14-942k
  max_stars_repo_path: stringlengths 49-55
crossvul-cpp_data_good_3351_7
/* * XDR support for nfsd * * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de> */ #include "vfs.h" #include "xdr.h" #include "auth.h" #define NFSDDBG_FACILITY NFSDDBG_XDR /* * Mapping of S_IF* types to NFS file types */ static u32 nfs_ftypes[] = { NFNON, NFCHR, NFCHR, NFBAD, NFDIR, NFBAD, NFBLK, NFBAD, NFREG, NFBAD, NFLNK, NFBAD, NFSOCK, NFBAD, NFLNK, NFBAD, }; /* * XDR functions for basic NFS types */ static __be32 * decode_fh(__be32 *p, struct svc_fh *fhp) { fh_init(fhp, NFS_FHSIZE); memcpy(&fhp->fh_handle.fh_base, p, NFS_FHSIZE); fhp->fh_handle.fh_size = NFS_FHSIZE; /* FIXME: Look up export pointer here and verify * Sun Secure RPC if requested */ return p + (NFS_FHSIZE >> 2); } /* Helper function for NFSv2 ACL code */ __be32 *nfs2svc_decode_fh(__be32 *p, struct svc_fh *fhp) { return decode_fh(p, fhp); } static __be32 * encode_fh(__be32 *p, struct svc_fh *fhp) { memcpy(p, &fhp->fh_handle.fh_base, NFS_FHSIZE); return p + (NFS_FHSIZE>> 2); } /* * Decode a file name and make sure that the path contains * no slashes or null bytes. */ static __be32 * decode_filename(__be32 *p, char **namp, unsigned int *lenp) { char *name; unsigned int i; if ((p = xdr_decode_string_inplace(p, namp, lenp, NFS_MAXNAMLEN)) != NULL) { for (i = 0, name = *namp; i < *lenp; i++, name++) { if (*name == '\0' || *name == '/') return NULL; } } return p; } static __be32 * decode_pathname(__be32 *p, char **namp, unsigned int *lenp) { char *name; unsigned int i; if ((p = xdr_decode_string_inplace(p, namp, lenp, NFS_MAXPATHLEN)) != NULL) { for (i = 0, name = *namp; i < *lenp; i++, name++) { if (*name == '\0') return NULL; } } return p; } static __be32 * decode_sattr(__be32 *p, struct iattr *iap) { u32 tmp, tmp1; iap->ia_valid = 0; /* Sun client bug compatibility check: some sun clients seem to * put 0xffff in the mode field when they mean 0xffffffff. * Quoting the 4.4BSD nfs server code: Nah nah nah nah na nah. */ if ((tmp = ntohl(*p++)) != (u32)-1 && tmp != 0xffff) { iap->ia_valid |= ATTR_MODE; iap->ia_mode = tmp; } if ((tmp = ntohl(*p++)) != (u32)-1) { iap->ia_uid = make_kuid(&init_user_ns, tmp); if (uid_valid(iap->ia_uid)) iap->ia_valid |= ATTR_UID; } if ((tmp = ntohl(*p++)) != (u32)-1) { iap->ia_gid = make_kgid(&init_user_ns, tmp); if (gid_valid(iap->ia_gid)) iap->ia_valid |= ATTR_GID; } if ((tmp = ntohl(*p++)) != (u32)-1) { iap->ia_valid |= ATTR_SIZE; iap->ia_size = tmp; } tmp = ntohl(*p++); tmp1 = ntohl(*p++); if (tmp != (u32)-1 && tmp1 != (u32)-1) { iap->ia_valid |= ATTR_ATIME | ATTR_ATIME_SET; iap->ia_atime.tv_sec = tmp; iap->ia_atime.tv_nsec = tmp1 * 1000; } tmp = ntohl(*p++); tmp1 = ntohl(*p++); if (tmp != (u32)-1 && tmp1 != (u32)-1) { iap->ia_valid |= ATTR_MTIME | ATTR_MTIME_SET; iap->ia_mtime.tv_sec = tmp; iap->ia_mtime.tv_nsec = tmp1 * 1000; /* * Passing the invalid value useconds=1000000 for mtime * is a Sun convention for "set both mtime and atime to * current server time". It's needed to make permissions * checks for the "touch" program across v2 mounts to * Solaris and Irix boxes work correctly. 
See description of * sattr in section 6.1 of "NFS Illustrated" by * Brent Callaghan, Addison-Wesley, ISBN 0-201-32750-5 */ if (tmp1 == 1000000) iap->ia_valid &= ~(ATTR_ATIME_SET|ATTR_MTIME_SET); } return p; } static __be32 * encode_fattr(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp, struct kstat *stat) { struct dentry *dentry = fhp->fh_dentry; int type; struct timespec time; u32 f; type = (stat->mode & S_IFMT); *p++ = htonl(nfs_ftypes[type >> 12]); *p++ = htonl((u32) stat->mode); *p++ = htonl((u32) stat->nlink); *p++ = htonl((u32) from_kuid(&init_user_ns, stat->uid)); *p++ = htonl((u32) from_kgid(&init_user_ns, stat->gid)); if (S_ISLNK(type) && stat->size > NFS_MAXPATHLEN) { *p++ = htonl(NFS_MAXPATHLEN); } else { *p++ = htonl((u32) stat->size); } *p++ = htonl((u32) stat->blksize); if (S_ISCHR(type) || S_ISBLK(type)) *p++ = htonl(new_encode_dev(stat->rdev)); else *p++ = htonl(0xffffffff); *p++ = htonl((u32) stat->blocks); switch (fsid_source(fhp)) { default: case FSIDSOURCE_DEV: *p++ = htonl(new_encode_dev(stat->dev)); break; case FSIDSOURCE_FSID: *p++ = htonl((u32) fhp->fh_export->ex_fsid); break; case FSIDSOURCE_UUID: f = ((u32*)fhp->fh_export->ex_uuid)[0]; f ^= ((u32*)fhp->fh_export->ex_uuid)[1]; f ^= ((u32*)fhp->fh_export->ex_uuid)[2]; f ^= ((u32*)fhp->fh_export->ex_uuid)[3]; *p++ = htonl(f); break; } *p++ = htonl((u32) stat->ino); *p++ = htonl((u32) stat->atime.tv_sec); *p++ = htonl(stat->atime.tv_nsec ? stat->atime.tv_nsec / 1000 : 0); lease_get_mtime(d_inode(dentry), &time); *p++ = htonl((u32) time.tv_sec); *p++ = htonl(time.tv_nsec ? time.tv_nsec / 1000 : 0); *p++ = htonl((u32) stat->ctime.tv_sec); *p++ = htonl(stat->ctime.tv_nsec ? stat->ctime.tv_nsec / 1000 : 0); return p; } /* Helper function for NFSv2 ACL code */ __be32 *nfs2svc_encode_fattr(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp, struct kstat *stat) { return encode_fattr(rqstp, p, fhp, stat); } /* * XDR decode functions */ int nfssvc_decode_void(struct svc_rqst *rqstp, __be32 *p, void *dummy) { return xdr_argsize_check(rqstp, p); } int nfssvc_decode_fhandle(struct svc_rqst *rqstp, __be32 *p, struct nfsd_fhandle *args) { p = decode_fh(p, &args->fh); if (!p) return 0; return xdr_argsize_check(rqstp, p); } int nfssvc_decode_sattrargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd_sattrargs *args) { p = decode_fh(p, &args->fh); if (!p) return 0; p = decode_sattr(p, &args->attrs); return xdr_argsize_check(rqstp, p); } int nfssvc_decode_diropargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd_diropargs *args) { if (!(p = decode_fh(p, &args->fh)) || !(p = decode_filename(p, &args->name, &args->len))) return 0; return xdr_argsize_check(rqstp, p); } int nfssvc_decode_readargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd_readargs *args) { unsigned int len; int v; p = decode_fh(p, &args->fh); if (!p) return 0; args->offset = ntohl(*p++); len = args->count = ntohl(*p++); p++; /* totalcount - unused */ if (!xdr_argsize_check(rqstp, p)) return 0; len = min_t(unsigned int, len, NFSSVC_MAXBLKSIZE_V2); /* set up somewhere to store response. 
* We take pages, put them on reslist and include in iovec */ v=0; while (len > 0) { struct page *p = *(rqstp->rq_next_page++); rqstp->rq_vec[v].iov_base = page_address(p); rqstp->rq_vec[v].iov_len = min_t(unsigned int, len, PAGE_SIZE); len -= rqstp->rq_vec[v].iov_len; v++; } args->vlen = v; return 1; } int nfssvc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd_writeargs *args) { unsigned int len, hdr, dlen; struct kvec *head = rqstp->rq_arg.head; int v; p = decode_fh(p, &args->fh); if (!p) return 0; p++; /* beginoffset */ args->offset = ntohl(*p++); /* offset */ p++; /* totalcount */ len = args->len = ntohl(*p++); /* * The protocol specifies a maximum of 8192 bytes. */ if (len > NFSSVC_MAXBLKSIZE_V2) return 0; /* * Check to make sure that we got the right number of * bytes. */ hdr = (void*)p - head->iov_base; if (hdr > head->iov_len) return 0; dlen = head->iov_len + rqstp->rq_arg.page_len - hdr; /* * Round the length of the data which was specified up to * the next multiple of XDR units and then compare that * against the length which was actually received. * Note that when RPCSEC/GSS (for example) is used, the * data buffer can be padded so dlen might be larger * than required. It must never be smaller. */ if (dlen < XDR_QUADLEN(len)*4) return 0; rqstp->rq_vec[0].iov_base = (void*)p; rqstp->rq_vec[0].iov_len = head->iov_len - hdr; v = 0; while (len > rqstp->rq_vec[v].iov_len) { len -= rqstp->rq_vec[v].iov_len; v++; rqstp->rq_vec[v].iov_base = page_address(rqstp->rq_pages[v]); rqstp->rq_vec[v].iov_len = PAGE_SIZE; } rqstp->rq_vec[v].iov_len = len; args->vlen = v + 1; return 1; } int nfssvc_decode_createargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd_createargs *args) { if ( !(p = decode_fh(p, &args->fh)) || !(p = decode_filename(p, &args->name, &args->len))) return 0; p = decode_sattr(p, &args->attrs); return xdr_argsize_check(rqstp, p); } int nfssvc_decode_renameargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd_renameargs *args) { if (!(p = decode_fh(p, &args->ffh)) || !(p = decode_filename(p, &args->fname, &args->flen)) || !(p = decode_fh(p, &args->tfh)) || !(p = decode_filename(p, &args->tname, &args->tlen))) return 0; return xdr_argsize_check(rqstp, p); } int nfssvc_decode_readlinkargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd_readlinkargs *args) { p = decode_fh(p, &args->fh); if (!p) return 0; if (!xdr_argsize_check(rqstp, p)) return 0; args->buffer = page_address(*(rqstp->rq_next_page++)); return 1; } int nfssvc_decode_linkargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd_linkargs *args) { if (!(p = decode_fh(p, &args->ffh)) || !(p = decode_fh(p, &args->tfh)) || !(p = decode_filename(p, &args->tname, &args->tlen))) return 0; return xdr_argsize_check(rqstp, p); } int nfssvc_decode_symlinkargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd_symlinkargs *args) { if ( !(p = decode_fh(p, &args->ffh)) || !(p = decode_filename(p, &args->fname, &args->flen)) || !(p = decode_pathname(p, &args->tname, &args->tlen))) return 0; p = decode_sattr(p, &args->attrs); return xdr_argsize_check(rqstp, p); } int nfssvc_decode_readdirargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd_readdirargs *args) { p = decode_fh(p, &args->fh); if (!p) return 0; args->cookie = ntohl(*p++); args->count = ntohl(*p++); args->count = min_t(u32, args->count, PAGE_SIZE); if (!xdr_argsize_check(rqstp, p)) return 0; args->buffer = page_address(*(rqstp->rq_next_page++)); return 1; } /* * XDR encode functions */ int nfssvc_encode_void(struct svc_rqst *rqstp, __be32 *p, void *dummy) { return 
xdr_ressize_check(rqstp, p); } int nfssvc_encode_attrstat(struct svc_rqst *rqstp, __be32 *p, struct nfsd_attrstat *resp) { p = encode_fattr(rqstp, p, &resp->fh, &resp->stat); return xdr_ressize_check(rqstp, p); } int nfssvc_encode_diropres(struct svc_rqst *rqstp, __be32 *p, struct nfsd_diropres *resp) { p = encode_fh(p, &resp->fh); p = encode_fattr(rqstp, p, &resp->fh, &resp->stat); return xdr_ressize_check(rqstp, p); } int nfssvc_encode_readlinkres(struct svc_rqst *rqstp, __be32 *p, struct nfsd_readlinkres *resp) { *p++ = htonl(resp->len); xdr_ressize_check(rqstp, p); rqstp->rq_res.page_len = resp->len; if (resp->len & 3) { /* need to pad the tail */ rqstp->rq_res.tail[0].iov_base = p; *p = 0; rqstp->rq_res.tail[0].iov_len = 4 - (resp->len&3); } return 1; } int nfssvc_encode_readres(struct svc_rqst *rqstp, __be32 *p, struct nfsd_readres *resp) { p = encode_fattr(rqstp, p, &resp->fh, &resp->stat); *p++ = htonl(resp->count); xdr_ressize_check(rqstp, p); /* now update rqstp->rq_res to reflect data as well */ rqstp->rq_res.page_len = resp->count; if (resp->count & 3) { /* need to pad the tail */ rqstp->rq_res.tail[0].iov_base = p; *p = 0; rqstp->rq_res.tail[0].iov_len = 4 - (resp->count&3); } return 1; } int nfssvc_encode_readdirres(struct svc_rqst *rqstp, __be32 *p, struct nfsd_readdirres *resp) { xdr_ressize_check(rqstp, p); p = resp->buffer; *p++ = 0; /* no more entries */ *p++ = htonl((resp->common.err == nfserr_eof)); rqstp->rq_res.page_len = (((unsigned long)p-1) & ~PAGE_MASK)+1; return 1; } int nfssvc_encode_statfsres(struct svc_rqst *rqstp, __be32 *p, struct nfsd_statfsres *resp) { struct kstatfs *stat = &resp->stats; *p++ = htonl(NFSSVC_MAXBLKSIZE_V2); /* max transfer size */ *p++ = htonl(stat->f_bsize); *p++ = htonl(stat->f_blocks); *p++ = htonl(stat->f_bfree); *p++ = htonl(stat->f_bavail); return xdr_ressize_check(rqstp, p); } int nfssvc_encode_entry(void *ccdv, const char *name, int namlen, loff_t offset, u64 ino, unsigned int d_type) { struct readdir_cd *ccd = ccdv; struct nfsd_readdirres *cd = container_of(ccd, struct nfsd_readdirres, common); __be32 *p = cd->buffer; int buflen, slen; /* dprintk("nfsd: entry(%.*s off %ld ino %ld)\n", namlen, name, offset, ino); */ if (offset > ~((u32) 0)) { cd->common.err = nfserr_fbig; return -EINVAL; } if (cd->offset) *cd->offset = htonl(offset); /* truncate filename */ namlen = min(namlen, NFS2_MAXNAMLEN); slen = XDR_QUADLEN(namlen); if ((buflen = cd->buflen - slen - 4) < 0) { cd->common.err = nfserr_toosmall; return -EINVAL; } if (ino > ~((u32) 0)) { cd->common.err = nfserr_fbig; return -EINVAL; } *p++ = xdr_one; /* mark entry present */ *p++ = htonl((u32) ino); /* file id */ p = xdr_encode_array(p, name, namlen);/* name length & name */ cd->offset = p; /* remember pointer */ *p++ = htonl(~0U); /* offset of next entry */ cd->buflen = buflen; cd->buffer = p; cd->common.err = nfs_ok; return 0; } /* * XDR release functions */ int nfssvc_release_fhandle(struct svc_rqst *rqstp, __be32 *p, struct nfsd_fhandle *resp) { fh_put(&resp->fh); return 1; }
./CrossVul/dataset_final_sorted/CWE-404/c/good_3351_7
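The decode_filename()/decode_pathname() helpers in the record above first let xdr_decode_string_inplace() bound the length, then reject any name that still contains an embedded NUL or a '/'. Below is a minimal user-space sketch of that same check, not kernel code; the helper name and the main() driver are hypothetical and only illustrate the validation rule.

```c
#include <stdio.h>
#include <stddef.h>

/*
 * Sketch of the check decode_filename() applies once the XDR layer has
 * bounded the length: reject a name with an embedded NUL or a path
 * separator, since either could escape the intended directory entry.
 * The function name is hypothetical.
 */
static int nfs2_name_is_valid(const char *name, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++) {
		if (name[i] == '\0' || name[i] == '/')
			return 0;	/* reject */
	}
	return 1;
}

int main(void)
{
	printf("%d\n", nfs2_name_is_valid("file.txt", 8));	/* prints 1 */
	printf("%d\n", nfs2_name_is_valid("a/b", 3));		/* prints 0 */
	return 0;
}
```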
crossvul-cpp_data_good_3351_2
/* * linux/fs/nfs/callback.c * * Copyright (C) 2004 Trond Myklebust * * NFSv4 callback handling */ #include <linux/completion.h> #include <linux/ip.h> #include <linux/module.h> #include <linux/sched/signal.h> #include <linux/sunrpc/svc.h> #include <linux/sunrpc/svcsock.h> #include <linux/nfs_fs.h> #include <linux/errno.h> #include <linux/mutex.h> #include <linux/freezer.h> #include <linux/kthread.h> #include <linux/sunrpc/svcauth_gss.h> #include <linux/sunrpc/bc_xprt.h> #include <net/inet_sock.h> #include "nfs4_fs.h" #include "callback.h" #include "internal.h" #include "netns.h" #define NFSDBG_FACILITY NFSDBG_CALLBACK struct nfs_callback_data { unsigned int users; struct svc_serv *serv; }; static struct nfs_callback_data nfs_callback_info[NFS4_MAX_MINOR_VERSION + 1]; static DEFINE_MUTEX(nfs_callback_mutex); static struct svc_program nfs4_callback_program; static int nfs4_callback_up_net(struct svc_serv *serv, struct net *net) { int ret; struct nfs_net *nn = net_generic(net, nfs_net_id); ret = svc_create_xprt(serv, "tcp", net, PF_INET, nfs_callback_set_tcpport, SVC_SOCK_ANONYMOUS); if (ret <= 0) goto out_err; nn->nfs_callback_tcpport = ret; dprintk("NFS: Callback listener port = %u (af %u, net %p)\n", nn->nfs_callback_tcpport, PF_INET, net); ret = svc_create_xprt(serv, "tcp", net, PF_INET6, nfs_callback_set_tcpport, SVC_SOCK_ANONYMOUS); if (ret > 0) { nn->nfs_callback_tcpport6 = ret; dprintk("NFS: Callback listener port = %u (af %u, net %p)\n", nn->nfs_callback_tcpport6, PF_INET6, net); } else if (ret != -EAFNOSUPPORT) goto out_err; return 0; out_err: return (ret) ? ret : -ENOMEM; } /* * This is the NFSv4 callback kernel thread. */ static int nfs4_callback_svc(void *vrqstp) { int err; struct svc_rqst *rqstp = vrqstp; set_freezable(); while (!kthread_freezable_should_stop(NULL)) { if (signal_pending(current)) flush_signals(current); /* * Listen for a request on the socket */ err = svc_recv(rqstp, MAX_SCHEDULE_TIMEOUT); if (err == -EAGAIN || err == -EINTR) continue; svc_process(rqstp); } svc_exit_thread(rqstp); module_put_and_exit(0); return 0; } #if defined(CONFIG_NFS_V4_1) /* * The callback service for NFSv4.1 callbacks */ static int nfs41_callback_svc(void *vrqstp) { struct svc_rqst *rqstp = vrqstp; struct svc_serv *serv = rqstp->rq_server; struct rpc_rqst *req; int error; DEFINE_WAIT(wq); set_freezable(); while (!kthread_freezable_should_stop(NULL)) { if (signal_pending(current)) flush_signals(current); prepare_to_wait(&serv->sv_cb_waitq, &wq, TASK_INTERRUPTIBLE); spin_lock_bh(&serv->sv_cb_lock); if (!list_empty(&serv->sv_cb_list)) { req = list_first_entry(&serv->sv_cb_list, struct rpc_rqst, rq_bc_list); list_del(&req->rq_bc_list); spin_unlock_bh(&serv->sv_cb_lock); finish_wait(&serv->sv_cb_waitq, &wq); dprintk("Invoking bc_svc_process()\n"); error = bc_svc_process(serv, req, rqstp); dprintk("bc_svc_process() returned w/ error code= %d\n", error); } else { spin_unlock_bh(&serv->sv_cb_lock); if (!kthread_should_stop()) schedule(); finish_wait(&serv->sv_cb_waitq, &wq); } } svc_exit_thread(rqstp); module_put_and_exit(0); return 0; } static inline void nfs_callback_bc_serv(u32 minorversion, struct rpc_xprt *xprt, struct svc_serv *serv) { if (minorversion) /* * Save the svc_serv in the transport so that it can * be referenced when the session backchannel is initialized */ xprt->bc_serv = serv; } #else static inline void nfs_callback_bc_serv(u32 minorversion, struct rpc_xprt *xprt, struct svc_serv *serv) { } #endif /* CONFIG_NFS_V4_1 */ static int nfs_callback_start_svc(int minorversion, struct 
rpc_xprt *xprt, struct svc_serv *serv) { int nrservs = nfs_callback_nr_threads; int ret; nfs_callback_bc_serv(minorversion, xprt, serv); if (nrservs < NFS4_MIN_NR_CALLBACK_THREADS) nrservs = NFS4_MIN_NR_CALLBACK_THREADS; if (serv->sv_nrthreads-1 == nrservs) return 0; ret = serv->sv_ops->svo_setup(serv, NULL, nrservs); if (ret) { serv->sv_ops->svo_setup(serv, NULL, 0); return ret; } dprintk("nfs_callback_up: service started\n"); return 0; } static void nfs_callback_down_net(u32 minorversion, struct svc_serv *serv, struct net *net) { struct nfs_net *nn = net_generic(net, nfs_net_id); if (--nn->cb_users[minorversion]) return; dprintk("NFS: destroy per-net callback data; net=%p\n", net); svc_shutdown_net(serv, net); } static int nfs_callback_up_net(int minorversion, struct svc_serv *serv, struct net *net, struct rpc_xprt *xprt) { struct nfs_net *nn = net_generic(net, nfs_net_id); int ret; if (nn->cb_users[minorversion]++) return 0; dprintk("NFS: create per-net callback data; net=%p\n", net); ret = svc_bind(serv, net); if (ret < 0) { printk(KERN_WARNING "NFS: bind callback service failed\n"); goto err_bind; } ret = -EPROTONOSUPPORT; if (!IS_ENABLED(CONFIG_NFS_V4_1) || minorversion == 0) ret = nfs4_callback_up_net(serv, net); else if (xprt->ops->bc_up) ret = xprt->ops->bc_up(serv, net); if (ret < 0) { printk(KERN_ERR "NFS: callback service start failed\n"); goto err_socks; } return 0; err_socks: svc_rpcb_cleanup(serv, net); err_bind: nn->cb_users[minorversion]--; dprintk("NFS: Couldn't create callback socket: err = %d; " "net = %p\n", ret, net); return ret; } static struct svc_serv_ops nfs40_cb_sv_ops = { .svo_function = nfs4_callback_svc, .svo_enqueue_xprt = svc_xprt_do_enqueue, .svo_setup = svc_set_num_threads_sync, .svo_module = THIS_MODULE, }; #if defined(CONFIG_NFS_V4_1) static struct svc_serv_ops nfs41_cb_sv_ops = { .svo_function = nfs41_callback_svc, .svo_enqueue_xprt = svc_xprt_do_enqueue, .svo_setup = svc_set_num_threads_sync, .svo_module = THIS_MODULE, }; static struct svc_serv_ops *nfs4_cb_sv_ops[] = { [0] = &nfs40_cb_sv_ops, [1] = &nfs41_cb_sv_ops, }; #else static struct svc_serv_ops *nfs4_cb_sv_ops[] = { [0] = &nfs40_cb_sv_ops, [1] = NULL, }; #endif static struct svc_serv *nfs_callback_create_svc(int minorversion) { struct nfs_callback_data *cb_info = &nfs_callback_info[minorversion]; struct svc_serv *serv; struct svc_serv_ops *sv_ops; /* * Check whether we're already up and running. */ if (cb_info->serv) { /* * Note: increase service usage, because later in case of error * svc_destroy() will be called. */ svc_get(cb_info->serv); return cb_info->serv; } switch (minorversion) { case 0: sv_ops = nfs4_cb_sv_ops[0]; break; default: sv_ops = nfs4_cb_sv_ops[1]; } if (sv_ops == NULL) return ERR_PTR(-ENOTSUPP); /* * Sanity check: if there's no task, * we should be the first user ... */ if (cb_info->users) printk(KERN_WARNING "nfs_callback_create_svc: no kthread, %d users??\n", cb_info->users); serv = svc_create_pooled(&nfs4_callback_program, NFS4_CALLBACK_BUFSIZE, sv_ops); if (!serv) { printk(KERN_ERR "nfs_callback_create_svc: create service failed\n"); return ERR_PTR(-ENOMEM); } cb_info->serv = serv; /* As there is only one thread we need to over-ride the * default maximum of 80 connections */ serv->sv_maxconn = 1024; dprintk("nfs_callback_create_svc: service created\n"); return serv; } /* * Bring up the callback thread if it is not already up. 
*/ int nfs_callback_up(u32 minorversion, struct rpc_xprt *xprt) { struct svc_serv *serv; struct nfs_callback_data *cb_info = &nfs_callback_info[minorversion]; int ret; struct net *net = xprt->xprt_net; mutex_lock(&nfs_callback_mutex); serv = nfs_callback_create_svc(minorversion); if (IS_ERR(serv)) { ret = PTR_ERR(serv); goto err_create; } ret = nfs_callback_up_net(minorversion, serv, net, xprt); if (ret < 0) goto err_net; ret = nfs_callback_start_svc(minorversion, xprt, serv); if (ret < 0) goto err_start; cb_info->users++; /* * svc_create creates the svc_serv with sv_nrthreads == 1, and then * svc_prepare_thread increments that. So we need to call svc_destroy * on both success and failure so that the refcount is 1 when the * thread exits. */ err_net: if (!cb_info->users) cb_info->serv = NULL; svc_destroy(serv); err_create: mutex_unlock(&nfs_callback_mutex); return ret; err_start: nfs_callback_down_net(minorversion, serv, net); dprintk("NFS: Couldn't create server thread; err = %d\n", ret); goto err_net; } /* * Kill the callback thread if it's no longer being used. */ void nfs_callback_down(int minorversion, struct net *net) { struct nfs_callback_data *cb_info = &nfs_callback_info[minorversion]; struct svc_serv *serv; mutex_lock(&nfs_callback_mutex); serv = cb_info->serv; nfs_callback_down_net(minorversion, serv, net); cb_info->users--; if (cb_info->users == 0) { svc_get(serv); serv->sv_ops->svo_setup(serv, NULL, 0); svc_destroy(serv); dprintk("nfs_callback_down: service destroyed\n"); cb_info->serv = NULL; } mutex_unlock(&nfs_callback_mutex); } /* Boolean check of RPC_AUTH_GSS principal */ int check_gss_callback_principal(struct nfs_client *clp, struct svc_rqst *rqstp) { char *p = rqstp->rq_cred.cr_principal; if (rqstp->rq_authop->flavour != RPC_AUTH_GSS) return 1; /* No RPC_AUTH_GSS on NFSv4.1 back channel yet */ if (clp->cl_minorversion != 0) return 0; /* * It might just be a normal user principal, in which case * userspace won't bother to tell us the name at all. */ if (p == NULL) return 0; /* * Did we get the acceptor from userland during the SETCLIENID * negotiation? */ if (clp->cl_acceptor) return !strcmp(p, clp->cl_acceptor); /* * Otherwise try to verify it using the cl_hostname. Note that this * doesn't work if a non-canonical hostname was used in the devname. */ /* Expect a GSS_C_NT_HOSTBASED_NAME like "nfs@serverhostname" */ if (memcmp(p, "nfs@", 4) != 0) return 0; p += 4; if (strcmp(p, clp->cl_hostname) != 0) return 0; return 1; } /* * pg_authenticate method for nfsv4 callback threads. * * The authflavor has been negotiated, so an incorrect flavor is a server * bug. Deny packets with incorrect authflavor. 
* * All other checking done after NFS decoding where the nfs_client can be * found in nfs4_callback_compound */ static int nfs_callback_authenticate(struct svc_rqst *rqstp) { switch (rqstp->rq_authop->flavour) { case RPC_AUTH_NULL: if (rqstp->rq_proc != CB_NULL) return SVC_DENIED; break; case RPC_AUTH_GSS: /* No RPC_AUTH_GSS support yet in NFSv4.1 */ if (svc_is_backchannel(rqstp)) return SVC_DENIED; } return SVC_OK; } /* * Define NFS4 callback program */ static struct svc_version *nfs4_callback_version[] = { [1] = &nfs4_callback_version1, [4] = &nfs4_callback_version4, }; static struct svc_stat nfs4_callback_stats; static struct svc_program nfs4_callback_program = { .pg_prog = NFS4_CALLBACK, /* RPC service number */ .pg_nvers = ARRAY_SIZE(nfs4_callback_version), /* Number of entries */ .pg_vers = nfs4_callback_version, /* version table */ .pg_name = "NFSv4 callback", /* service name */ .pg_class = "nfs", /* authentication class */ .pg_stats = &nfs4_callback_stats, .pg_authenticate = nfs_callback_authenticate, };
./CrossVul/dataset_final_sorted/CWE-404/c/good_3351_2
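A pattern worth noting in the record above is the goto-based unwind in nfs_callback_up_net(): svc_bind() is undone via svc_rpcb_cleanup() and the per-net user count is decremented on the failure path, in reverse order of acquisition. The following stand-alone sketch shows the same idiom with hypothetical acquire/release helpers stubbed out so it compiles; it is an illustration of the cleanup ordering, not the kernel routine itself.

```c
#include <stdio.h>

/* Hypothetical resources; stubs so the sketch compiles stand-alone. */
static int acquire_a(void) { return 0; }
static void release_a(void) { }
static int acquire_b(void) { return -1; }	/* simulate a failure */

/*
 * Goto-unwind idiom: each successfully acquired resource gets a matching
 * release on the error path, unwound in reverse order of acquisition.
 */
static int setup_both(void)
{
	int ret;

	ret = acquire_a();
	if (ret < 0)
		goto out;
	ret = acquire_b();
	if (ret < 0)
		goto err_release_a;
	return 0;

err_release_a:
	release_a();		/* undo step one before reporting failure */
out:
	return ret;
}

int main(void)
{
	printf("setup_both() = %d\n", setup_both());	/* prints -1 */
	return 0;
}
```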
crossvul-cpp_data_bad_3351_8
/* * File operations used by nfsd. Some of these have been ripped from * other parts of the kernel because they weren't exported, others * are partial duplicates with added or changed functionality. * * Note that several functions dget() the dentry upon which they want * to act, most notably those that create directory entries. Response * dentry's are dput()'d if necessary in the release callback. * So if you notice code paths that apparently fail to dput() the * dentry, don't worry--they have been taken care of. * * Copyright (C) 1995-1999 Olaf Kirch <okir@monad.swb.de> * Zerocpy NFS support (C) 2002 Hirokazu Takahashi <taka@valinux.co.jp> */ #include <linux/fs.h> #include <linux/file.h> #include <linux/splice.h> #include <linux/falloc.h> #include <linux/fcntl.h> #include <linux/namei.h> #include <linux/delay.h> #include <linux/fsnotify.h> #include <linux/posix_acl_xattr.h> #include <linux/xattr.h> #include <linux/jhash.h> #include <linux/ima.h> #include <linux/slab.h> #include <linux/uaccess.h> #include <linux/exportfs.h> #include <linux/writeback.h> #include <linux/security.h> #ifdef CONFIG_NFSD_V3 #include "xdr3.h" #endif /* CONFIG_NFSD_V3 */ #ifdef CONFIG_NFSD_V4 #include "../internal.h" #include "acl.h" #include "idmap.h" #endif /* CONFIG_NFSD_V4 */ #include "nfsd.h" #include "vfs.h" #include "trace.h" #define NFSDDBG_FACILITY NFSDDBG_FILEOP /* * This is a cache of readahead params that help us choose the proper * readahead strategy. Initially, we set all readahead parameters to 0 * and let the VFS handle things. * If you increase the number of cached files very much, you'll need to * add a hash table here. */ struct raparms { struct raparms *p_next; unsigned int p_count; ino_t p_ino; dev_t p_dev; int p_set; struct file_ra_state p_ra; unsigned int p_hindex; }; struct raparm_hbucket { struct raparms *pb_head; spinlock_t pb_lock; } ____cacheline_aligned_in_smp; #define RAPARM_HASH_BITS 4 #define RAPARM_HASH_SIZE (1<<RAPARM_HASH_BITS) #define RAPARM_HASH_MASK (RAPARM_HASH_SIZE-1) static struct raparm_hbucket raparm_hash[RAPARM_HASH_SIZE]; /* * Called from nfsd_lookup and encode_dirent. Check if we have crossed * a mount point. * Returns -EAGAIN or -ETIMEDOUT leaving *dpp and *expp unchanged, * or nfs_ok having possibly changed *dpp and *expp */ int nfsd_cross_mnt(struct svc_rqst *rqstp, struct dentry **dpp, struct svc_export **expp) { struct svc_export *exp = *expp, *exp2 = NULL; struct dentry *dentry = *dpp; struct path path = {.mnt = mntget(exp->ex_path.mnt), .dentry = dget(dentry)}; int err = 0; err = follow_down(&path); if (err < 0) goto out; exp2 = rqst_exp_get_by_name(rqstp, &path); if (IS_ERR(exp2)) { err = PTR_ERR(exp2); /* * We normally allow NFS clients to continue * "underneath" a mountpoint that is not exported. * The exception is V4ROOT, where no traversal is ever * allowed without an explicit export of the new * directory. */ if (err == -ENOENT && !(exp->ex_flags & NFSEXP_V4ROOT)) err = 0; path_put(&path); goto out; } if (nfsd_v4client(rqstp) || (exp->ex_flags & NFSEXP_CROSSMOUNT) || EX_NOHIDE(exp2)) { /* successfully crossed mount point */ /* * This is subtle: path.dentry is *not* on path.mnt * at this point. 
The only reason we are safe is that * original mnt is pinned down by exp, so we should * put path *before* putting exp */ *dpp = path.dentry; path.dentry = dentry; *expp = exp2; exp2 = exp; } path_put(&path); exp_put(exp2); out: return err; } static void follow_to_parent(struct path *path) { struct dentry *dp; while (path->dentry == path->mnt->mnt_root && follow_up(path)) ; dp = dget_parent(path->dentry); dput(path->dentry); path->dentry = dp; } static int nfsd_lookup_parent(struct svc_rqst *rqstp, struct dentry *dparent, struct svc_export **exp, struct dentry **dentryp) { struct svc_export *exp2; struct path path = {.mnt = mntget((*exp)->ex_path.mnt), .dentry = dget(dparent)}; follow_to_parent(&path); exp2 = rqst_exp_parent(rqstp, &path); if (PTR_ERR(exp2) == -ENOENT) { *dentryp = dget(dparent); } else if (IS_ERR(exp2)) { path_put(&path); return PTR_ERR(exp2); } else { *dentryp = dget(path.dentry); exp_put(*exp); *exp = exp2; } path_put(&path); return 0; } /* * For nfsd purposes, we treat V4ROOT exports as though there was an * export at *every* directory. */ int nfsd_mountpoint(struct dentry *dentry, struct svc_export *exp) { if (d_mountpoint(dentry)) return 1; if (nfsd4_is_junction(dentry)) return 1; if (!(exp->ex_flags & NFSEXP_V4ROOT)) return 0; return d_inode(dentry) != NULL; } __be32 nfsd_lookup_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp, const char *name, unsigned int len, struct svc_export **exp_ret, struct dentry **dentry_ret) { struct svc_export *exp; struct dentry *dparent; struct dentry *dentry; int host_err; dprintk("nfsd: nfsd_lookup(fh %s, %.*s)\n", SVCFH_fmt(fhp), len,name); dparent = fhp->fh_dentry; exp = exp_get(fhp->fh_export); /* Lookup the name, but don't follow links */ if (isdotent(name, len)) { if (len==1) dentry = dget(dparent); else if (dparent != exp->ex_path.dentry) dentry = dget_parent(dparent); else if (!EX_NOHIDE(exp) && !nfsd_v4client(rqstp)) dentry = dget(dparent); /* .. == . just like at / */ else { /* checking mountpoint crossing is very different when stepping up */ host_err = nfsd_lookup_parent(rqstp, dparent, &exp, &dentry); if (host_err) goto out_nfserr; } } else { /* * In the nfsd4_open() case, this may be held across * subsequent open and delegation acquisition which may * need to take the child's i_mutex: */ fh_lock_nested(fhp, I_MUTEX_PARENT); dentry = lookup_one_len(name, dparent, len); host_err = PTR_ERR(dentry); if (IS_ERR(dentry)) goto out_nfserr; if (nfsd_mountpoint(dentry, exp)) { /* * We don't need the i_mutex after all. It's * still possible we could open this (regular * files can be mountpoints too), but the * i_mutex is just there to prevent renames of * something that we might be about to delegate, * and a mountpoint won't be renamed: */ fh_unlock(fhp); if ((host_err = nfsd_cross_mnt(rqstp, &dentry, &exp))) { dput(dentry); goto out_nfserr; } } } *dentry_ret = dentry; *exp_ret = exp; return 0; out_nfserr: exp_put(exp); return nfserrno(host_err); } /* * Look up one component of a pathname. * N.B. After this call _both_ fhp and resfh need an fh_put * * If the lookup would cross a mountpoint, and the mounted filesystem * is exported to the client with NFSEXP_NOHIDE, then the lookup is * accepted as it stands and the mounted directory is * returned. Otherwise the covered directory is returned. 
* NOTE: this mountpoint crossing is not supported properly by all * clients and is explicitly disallowed for NFSv3 * NeilBrown <neilb@cse.unsw.edu.au> */ __be32 nfsd_lookup(struct svc_rqst *rqstp, struct svc_fh *fhp, const char *name, unsigned int len, struct svc_fh *resfh) { struct svc_export *exp; struct dentry *dentry; __be32 err; err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_EXEC); if (err) return err; err = nfsd_lookup_dentry(rqstp, fhp, name, len, &exp, &dentry); if (err) return err; err = check_nfsd_access(exp, rqstp); if (err) goto out; /* * Note: we compose the file handle now, but as the * dentry may be negative, it may need to be updated. */ err = fh_compose(resfh, exp, dentry, fhp); if (!err && d_really_is_negative(dentry)) err = nfserr_noent; out: dput(dentry); exp_put(exp); return err; } /* * Commit metadata changes to stable storage. */ static int commit_metadata(struct svc_fh *fhp) { struct inode *inode = d_inode(fhp->fh_dentry); const struct export_operations *export_ops = inode->i_sb->s_export_op; if (!EX_ISSYNC(fhp->fh_export)) return 0; if (export_ops->commit_metadata) return export_ops->commit_metadata(inode); return sync_inode_metadata(inode, 1); } /* * Go over the attributes and take care of the small differences between * NFS semantics and what Linux expects. */ static void nfsd_sanitize_attrs(struct inode *inode, struct iattr *iap) { /* sanitize the mode change */ if (iap->ia_valid & ATTR_MODE) { iap->ia_mode &= S_IALLUGO; iap->ia_mode |= (inode->i_mode & ~S_IALLUGO); } /* Revoke setuid/setgid on chown */ if (!S_ISDIR(inode->i_mode) && ((iap->ia_valid & ATTR_UID) || (iap->ia_valid & ATTR_GID))) { iap->ia_valid |= ATTR_KILL_PRIV; if (iap->ia_valid & ATTR_MODE) { /* we're setting mode too, just clear the s*id bits */ iap->ia_mode &= ~S_ISUID; if (iap->ia_mode & S_IXGRP) iap->ia_mode &= ~S_ISGID; } else { /* set ATTR_KILL_* bits and let VFS handle it */ iap->ia_valid |= (ATTR_KILL_SUID | ATTR_KILL_SGID); } } } static __be32 nfsd_get_write_access(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap) { struct inode *inode = d_inode(fhp->fh_dentry); int host_err; if (iap->ia_size < inode->i_size) { __be32 err; err = nfsd_permission(rqstp, fhp->fh_export, fhp->fh_dentry, NFSD_MAY_TRUNC | NFSD_MAY_OWNER_OVERRIDE); if (err) return err; } host_err = get_write_access(inode); if (host_err) goto out_nfserrno; host_err = locks_verify_truncate(inode, NULL, iap->ia_size); if (host_err) goto out_put_write_access; return 0; out_put_write_access: put_write_access(inode); out_nfserrno: return nfserrno(host_err); } /* * Set various file attributes. After this call fhp needs an fh_put. 
*/ __be32 nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap, int check_guard, time_t guardtime) { struct dentry *dentry; struct inode *inode; int accmode = NFSD_MAY_SATTR; umode_t ftype = 0; __be32 err; int host_err; bool get_write_count; bool size_change = (iap->ia_valid & ATTR_SIZE); if (iap->ia_valid & (ATTR_ATIME | ATTR_MTIME | ATTR_SIZE)) accmode |= NFSD_MAY_WRITE|NFSD_MAY_OWNER_OVERRIDE; if (iap->ia_valid & ATTR_SIZE) ftype = S_IFREG; /* Callers that do fh_verify should do the fh_want_write: */ get_write_count = !fhp->fh_dentry; /* Get inode */ err = fh_verify(rqstp, fhp, ftype, accmode); if (err) return err; if (get_write_count) { host_err = fh_want_write(fhp); if (host_err) goto out; } dentry = fhp->fh_dentry; inode = d_inode(dentry); /* Ignore any mode updates on symlinks */ if (S_ISLNK(inode->i_mode)) iap->ia_valid &= ~ATTR_MODE; if (!iap->ia_valid) return 0; nfsd_sanitize_attrs(inode, iap); if (check_guard && guardtime != inode->i_ctime.tv_sec) return nfserr_notsync; /* * The size case is special, it changes the file in addition to the * attributes, and file systems don't expect it to be mixed with * "random" attribute changes. We thus split out the size change * into a separate call to ->setattr, and do the rest as a separate * setattr call. */ if (size_change) { err = nfsd_get_write_access(rqstp, fhp, iap); if (err) return err; } fh_lock(fhp); if (size_change) { /* * RFC5661, Section 18.30.4: * Changing the size of a file with SETATTR indirectly * changes the time_modify and change attributes. * * (and similar for the older RFCs) */ struct iattr size_attr = { .ia_valid = ATTR_SIZE | ATTR_CTIME | ATTR_MTIME, .ia_size = iap->ia_size, }; host_err = notify_change(dentry, &size_attr, NULL); if (host_err) goto out_unlock; iap->ia_valid &= ~ATTR_SIZE; /* * Avoid the additional setattr call below if the only other * attribute that the client sends is the mtime, as we update * it as part of the size change above. */ if ((iap->ia_valid & ~ATTR_MTIME) == 0) goto out_unlock; } iap->ia_valid |= ATTR_CTIME; host_err = notify_change(dentry, iap, NULL); out_unlock: fh_unlock(fhp); if (size_change) put_write_access(inode); out: if (!host_err) host_err = commit_metadata(fhp); return nfserrno(host_err); } #if defined(CONFIG_NFSD_V4) /* * NFS junction information is stored in an extended attribute. */ #define NFSD_JUNCTION_XATTR_NAME XATTR_TRUSTED_PREFIX "junction.nfs" /** * nfsd4_is_junction - Test if an object could be an NFS junction * * @dentry: object to test * * Returns 1 if "dentry" appears to contain NFS junction information. * Otherwise 0 is returned. 
*/ int nfsd4_is_junction(struct dentry *dentry) { struct inode *inode = d_inode(dentry); if (inode == NULL) return 0; if (inode->i_mode & S_IXUGO) return 0; if (!(inode->i_mode & S_ISVTX)) return 0; if (vfs_getxattr(dentry, NFSD_JUNCTION_XATTR_NAME, NULL, 0) <= 0) return 0; return 1; } #ifdef CONFIG_NFSD_V4_SECURITY_LABEL __be32 nfsd4_set_nfs4_label(struct svc_rqst *rqstp, struct svc_fh *fhp, struct xdr_netobj *label) { __be32 error; int host_error; struct dentry *dentry; error = fh_verify(rqstp, fhp, 0 /* S_IFREG */, NFSD_MAY_SATTR); if (error) return error; dentry = fhp->fh_dentry; inode_lock(d_inode(dentry)); host_error = security_inode_setsecctx(dentry, label->data, label->len); inode_unlock(d_inode(dentry)); return nfserrno(host_error); } #else __be32 nfsd4_set_nfs4_label(struct svc_rqst *rqstp, struct svc_fh *fhp, struct xdr_netobj *label) { return nfserr_notsupp; } #endif __be32 nfsd4_clone_file_range(struct file *src, u64 src_pos, struct file *dst, u64 dst_pos, u64 count) { return nfserrno(do_clone_file_range(src, src_pos, dst, dst_pos, count)); } ssize_t nfsd_copy_file_range(struct file *src, u64 src_pos, struct file *dst, u64 dst_pos, u64 count) { /* * Limit copy to 4MB to prevent indefinitely blocking an nfsd * thread and client rpc slot. The choice of 4MB is somewhat * arbitrary. We might instead base this on r/wsize, or make it * tunable, or use a time instead of a byte limit, or implement * asynchronous copy. In theory a client could also recognize a * limit like this and pipeline multiple COPY requests. */ count = min_t(u64, count, 1 << 22); return vfs_copy_file_range(src, src_pos, dst, dst_pos, count, 0); } __be32 nfsd4_vfs_fallocate(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file, loff_t offset, loff_t len, int flags) { int error; if (!S_ISREG(file_inode(file)->i_mode)) return nfserr_inval; error = vfs_fallocate(file, flags, offset, len); if (!error) error = commit_metadata(fhp); return nfserrno(error); } #endif /* defined(CONFIG_NFSD_V4) */ #ifdef CONFIG_NFSD_V3 /* * Check server access rights to a file system object */ struct accessmap { u32 access; int how; }; static struct accessmap nfs3_regaccess[] = { { NFS3_ACCESS_READ, NFSD_MAY_READ }, { NFS3_ACCESS_EXECUTE, NFSD_MAY_EXEC }, { NFS3_ACCESS_MODIFY, NFSD_MAY_WRITE|NFSD_MAY_TRUNC }, { NFS3_ACCESS_EXTEND, NFSD_MAY_WRITE }, { 0, 0 } }; static struct accessmap nfs3_diraccess[] = { { NFS3_ACCESS_READ, NFSD_MAY_READ }, { NFS3_ACCESS_LOOKUP, NFSD_MAY_EXEC }, { NFS3_ACCESS_MODIFY, NFSD_MAY_EXEC|NFSD_MAY_WRITE|NFSD_MAY_TRUNC}, { NFS3_ACCESS_EXTEND, NFSD_MAY_EXEC|NFSD_MAY_WRITE }, { NFS3_ACCESS_DELETE, NFSD_MAY_REMOVE }, { 0, 0 } }; static struct accessmap nfs3_anyaccess[] = { /* Some clients - Solaris 2.6 at least, make an access call * to the server to check for access for things like /dev/null * (which really, the server doesn't care about). 
So * We provide simple access checking for them, looking * mainly at mode bits, and we make sure to ignore read-only * filesystem checks */ { NFS3_ACCESS_READ, NFSD_MAY_READ }, { NFS3_ACCESS_EXECUTE, NFSD_MAY_EXEC }, { NFS3_ACCESS_MODIFY, NFSD_MAY_WRITE|NFSD_MAY_LOCAL_ACCESS }, { NFS3_ACCESS_EXTEND, NFSD_MAY_WRITE|NFSD_MAY_LOCAL_ACCESS }, { 0, 0 } }; __be32 nfsd_access(struct svc_rqst *rqstp, struct svc_fh *fhp, u32 *access, u32 *supported) { struct accessmap *map; struct svc_export *export; struct dentry *dentry; u32 query, result = 0, sresult = 0; __be32 error; error = fh_verify(rqstp, fhp, 0, NFSD_MAY_NOP); if (error) goto out; export = fhp->fh_export; dentry = fhp->fh_dentry; if (d_is_reg(dentry)) map = nfs3_regaccess; else if (d_is_dir(dentry)) map = nfs3_diraccess; else map = nfs3_anyaccess; query = *access; for (; map->access; map++) { if (map->access & query) { __be32 err2; sresult |= map->access; err2 = nfsd_permission(rqstp, export, dentry, map->how); switch (err2) { case nfs_ok: result |= map->access; break; /* the following error codes just mean the access was not allowed, * rather than an error occurred */ case nfserr_rofs: case nfserr_acces: case nfserr_perm: /* simply don't "or" in the access bit. */ break; default: error = err2; goto out; } } } *access = result; if (supported) *supported = sresult; out: return error; } #endif /* CONFIG_NFSD_V3 */ static int nfsd_open_break_lease(struct inode *inode, int access) { unsigned int mode; if (access & NFSD_MAY_NOT_BREAK_LEASE) return 0; mode = (access & NFSD_MAY_WRITE) ? O_WRONLY : O_RDONLY; return break_lease(inode, mode | O_NONBLOCK); } /* * Open an existing file or directory. * The may_flags argument indicates the type of open (read/write/lock) * and additional flags. * N.B. After this call fhp needs an fh_put */ __be32 nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type, int may_flags, struct file **filp) { struct path path; struct inode *inode; struct file *file; int flags = O_RDONLY|O_LARGEFILE; __be32 err; int host_err = 0; validate_process_creds(); /* * If we get here, then the client has already done an "open", * and (hopefully) checked permission - so allow OWNER_OVERRIDE * in case a chmod has now revoked permission. * * Arguably we should also allow the owner override for * directories, but we never have and it doesn't seem to have * caused anyone a problem. If we were to change this, note * also that our filldir callbacks would need a variant of * lookup_one_len that doesn't check permissions. */ if (type == S_IFREG) may_flags |= NFSD_MAY_OWNER_OVERRIDE; err = fh_verify(rqstp, fhp, type, may_flags); if (err) goto out; path.mnt = fhp->fh_export->ex_path.mnt; path.dentry = fhp->fh_dentry; inode = d_inode(path.dentry); /* Disallow write access to files with the append-only bit set * or any access when mandatory locking enabled */ err = nfserr_perm; if (IS_APPEND(inode) && (may_flags & NFSD_MAY_WRITE)) goto out; /* * We must ignore files (but only files) which might have mandatory * locks on them because there is no way to know if the accesser has * the lock. 
*/ if (S_ISREG((inode)->i_mode) && mandatory_lock(inode)) goto out; if (!inode->i_fop) goto out; host_err = nfsd_open_break_lease(inode, may_flags); if (host_err) /* NOMEM or WOULDBLOCK */ goto out_nfserr; if (may_flags & NFSD_MAY_WRITE) { if (may_flags & NFSD_MAY_READ) flags = O_RDWR|O_LARGEFILE; else flags = O_WRONLY|O_LARGEFILE; } file = dentry_open(&path, flags, current_cred()); if (IS_ERR(file)) { host_err = PTR_ERR(file); goto out_nfserr; } host_err = ima_file_check(file, may_flags, 0); if (host_err) { fput(file); goto out_nfserr; } if (may_flags & NFSD_MAY_64BIT_COOKIE) file->f_mode |= FMODE_64BITHASH; else file->f_mode |= FMODE_32BITHASH; *filp = file; out_nfserr: err = nfserrno(host_err); out: validate_process_creds(); return err; } struct raparms * nfsd_init_raparms(struct file *file) { struct inode *inode = file_inode(file); dev_t dev = inode->i_sb->s_dev; ino_t ino = inode->i_ino; struct raparms *ra, **rap, **frap = NULL; int depth = 0; unsigned int hash; struct raparm_hbucket *rab; hash = jhash_2words(dev, ino, 0xfeedbeef) & RAPARM_HASH_MASK; rab = &raparm_hash[hash]; spin_lock(&rab->pb_lock); for (rap = &rab->pb_head; (ra = *rap); rap = &ra->p_next) { if (ra->p_ino == ino && ra->p_dev == dev) goto found; depth++; if (ra->p_count == 0) frap = rap; } depth = nfsdstats.ra_size; if (!frap) { spin_unlock(&rab->pb_lock); return NULL; } rap = frap; ra = *frap; ra->p_dev = dev; ra->p_ino = ino; ra->p_set = 0; ra->p_hindex = hash; found: if (rap != &rab->pb_head) { *rap = ra->p_next; ra->p_next = rab->pb_head; rab->pb_head = ra; } ra->p_count++; nfsdstats.ra_depth[depth*10/nfsdstats.ra_size]++; spin_unlock(&rab->pb_lock); if (ra->p_set) file->f_ra = ra->p_ra; return ra; } void nfsd_put_raparams(struct file *file, struct raparms *ra) { struct raparm_hbucket *rab = &raparm_hash[ra->p_hindex]; spin_lock(&rab->pb_lock); ra->p_ra = file->f_ra; ra->p_set = 1; ra->p_count--; spin_unlock(&rab->pb_lock); } /* * Grab and keep cached pages associated with a file in the svc_rqst * so that they can be passed to the network sendmsg/sendpage routines * directly. They will be released after the sending has completed. 
*/ static int nfsd_splice_actor(struct pipe_inode_info *pipe, struct pipe_buffer *buf, struct splice_desc *sd) { struct svc_rqst *rqstp = sd->u.data; struct page **pp = rqstp->rq_next_page; struct page *page = buf->page; size_t size; size = sd->len; if (rqstp->rq_res.page_len == 0) { get_page(page); put_page(*rqstp->rq_next_page); *(rqstp->rq_next_page++) = page; rqstp->rq_res.page_base = buf->offset; rqstp->rq_res.page_len = size; } else if (page != pp[-1]) { get_page(page); if (*rqstp->rq_next_page) put_page(*rqstp->rq_next_page); *(rqstp->rq_next_page++) = page; rqstp->rq_res.page_len += size; } else rqstp->rq_res.page_len += size; return size; } static int nfsd_direct_splice_actor(struct pipe_inode_info *pipe, struct splice_desc *sd) { return __splice_from_pipe(pipe, sd, nfsd_splice_actor); } static __be32 nfsd_finish_read(struct file *file, unsigned long *count, int host_err) { if (host_err >= 0) { nfsdstats.io_read += host_err; *count = host_err; fsnotify_access(file); return 0; } else return nfserrno(host_err); } __be32 nfsd_splice_read(struct svc_rqst *rqstp, struct file *file, loff_t offset, unsigned long *count) { struct splice_desc sd = { .len = 0, .total_len = *count, .pos = offset, .u.data = rqstp, }; int host_err; rqstp->rq_next_page = rqstp->rq_respages + 1; host_err = splice_direct_to_actor(file, &sd, nfsd_direct_splice_actor); return nfsd_finish_read(file, count, host_err); } __be32 nfsd_readv(struct file *file, loff_t offset, struct kvec *vec, int vlen, unsigned long *count) { mm_segment_t oldfs; int host_err; oldfs = get_fs(); set_fs(KERNEL_DS); host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset, 0); set_fs(oldfs); return nfsd_finish_read(file, count, host_err); } static __be32 nfsd_vfs_read(struct svc_rqst *rqstp, struct file *file, loff_t offset, struct kvec *vec, int vlen, unsigned long *count) { if (file->f_op->splice_read && test_bit(RQ_SPLICE_OK, &rqstp->rq_flags)) return nfsd_splice_read(rqstp, file, offset, count); else return nfsd_readv(file, offset, vec, vlen, count); } /* * Gathered writes: If another process is currently writing to the file, * there's a high chance this is another nfsd (triggered by a bulk write * from a client's biod). Rather than syncing the file with each write * request, we sleep for 10 msec. * * I don't know if this roughly approximates C. Juszak's idea of * gathered writes, but it's a nice and simple solution (IMHO), and it * seems to work:-) * * Note: we do this only in the NFSv2 case, since v3 and higher have a * better tool (separate unstable writes and commits) for solving this * problem. 
*/ static int wait_for_concurrent_writes(struct file *file) { struct inode *inode = file_inode(file); static ino_t last_ino; static dev_t last_dev; int err = 0; if (atomic_read(&inode->i_writecount) > 1 || (last_ino == inode->i_ino && last_dev == inode->i_sb->s_dev)) { dprintk("nfsd: write defer %d\n", task_pid_nr(current)); msleep(10); dprintk("nfsd: write resume %d\n", task_pid_nr(current)); } if (inode->i_state & I_DIRTY) { dprintk("nfsd: write sync %d\n", task_pid_nr(current)); err = vfs_fsync(file, 0); } last_ino = inode->i_ino; last_dev = inode->i_sb->s_dev; return err; } __be32 nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file, loff_t offset, struct kvec *vec, int vlen, unsigned long *cnt, int stable) { struct svc_export *exp; mm_segment_t oldfs; __be32 err = 0; int host_err; int use_wgather; loff_t pos = offset; unsigned int pflags = current->flags; int flags = 0; if (test_bit(RQ_LOCAL, &rqstp->rq_flags)) /* * We want less throttling in balance_dirty_pages() * and shrink_inactive_list() so that nfs to * localhost doesn't cause nfsd to lock up due to all * the client's dirty pages or its congested queue. */ current->flags |= PF_LESS_THROTTLE; exp = fhp->fh_export; use_wgather = (rqstp->rq_vers == 2) && EX_WGATHER(exp); if (!EX_ISSYNC(exp)) stable = NFS_UNSTABLE; if (stable && !use_wgather) flags |= RWF_SYNC; /* Write the data. */ oldfs = get_fs(); set_fs(KERNEL_DS); host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &pos, flags); set_fs(oldfs); if (host_err < 0) goto out_nfserr; *cnt = host_err; nfsdstats.io_write += host_err; fsnotify_modify(file); if (stable && use_wgather) host_err = wait_for_concurrent_writes(file); out_nfserr: dprintk("nfsd: write complete host_err=%d\n", host_err); if (host_err >= 0) err = 0; else err = nfserrno(host_err); if (test_bit(RQ_LOCAL, &rqstp->rq_flags)) current_restore_flags(pflags, PF_LESS_THROTTLE); return err; } /* * Read data from a file. count must contain the requested read count * on entry. On return, *count contains the number of bytes actually read. * N.B. After this call fhp needs an fh_put */ __be32 nfsd_read(struct svc_rqst *rqstp, struct svc_fh *fhp, loff_t offset, struct kvec *vec, int vlen, unsigned long *count) { struct file *file; struct raparms *ra; __be32 err; trace_read_start(rqstp, fhp, offset, vlen); err = nfsd_open(rqstp, fhp, S_IFREG, NFSD_MAY_READ, &file); if (err) return err; ra = nfsd_init_raparms(file); trace_read_opened(rqstp, fhp, offset, vlen); err = nfsd_vfs_read(rqstp, file, offset, vec, vlen, count); trace_read_io_done(rqstp, fhp, offset, vlen); if (ra) nfsd_put_raparams(file, ra); fput(file); trace_read_done(rqstp, fhp, offset, vlen); return err; } /* * Write data to a file. * The stable flag requests synchronous writes. * N.B. After this call fhp needs an fh_put */ __be32 nfsd_write(struct svc_rqst *rqstp, struct svc_fh *fhp, loff_t offset, struct kvec *vec, int vlen, unsigned long *cnt, int stable) { struct file *file = NULL; __be32 err = 0; trace_write_start(rqstp, fhp, offset, vlen); err = nfsd_open(rqstp, fhp, S_IFREG, NFSD_MAY_WRITE, &file); if (err) goto out; trace_write_opened(rqstp, fhp, offset, vlen); err = nfsd_vfs_write(rqstp, fhp, file, offset, vec, vlen, cnt, stable); trace_write_io_done(rqstp, fhp, offset, vlen); fput(file); out: trace_write_done(rqstp, fhp, offset, vlen); return err; } #ifdef CONFIG_NFSD_V3 /* * Commit all pending writes to stable storage. 
* * Note: we only guarantee that data that lies within the range specified * by the 'offset' and 'count' parameters will be synced. * * Unfortunately we cannot lock the file to make sure we return full WCC * data to the client, as locking happens lower down in the filesystem. */ __be32 nfsd_commit(struct svc_rqst *rqstp, struct svc_fh *fhp, loff_t offset, unsigned long count) { struct file *file; loff_t end = LLONG_MAX; __be32 err = nfserr_inval; if (offset < 0) goto out; if (count != 0) { end = offset + (loff_t)count - 1; if (end < offset) goto out; } err = nfsd_open(rqstp, fhp, S_IFREG, NFSD_MAY_WRITE|NFSD_MAY_NOT_BREAK_LEASE, &file); if (err) goto out; if (EX_ISSYNC(fhp->fh_export)) { int err2 = vfs_fsync_range(file, offset, end, 0); if (err2 != -EINVAL) err = nfserrno(err2); else err = nfserr_notsupp; } fput(file); out: return err; } #endif /* CONFIG_NFSD_V3 */ static __be32 nfsd_create_setattr(struct svc_rqst *rqstp, struct svc_fh *resfhp, struct iattr *iap) { /* * Mode has already been set earlier in create: */ iap->ia_valid &= ~ATTR_MODE; /* * Setting uid/gid works only for root. Irix appears to * send along the gid on create when it tries to implement * setgid directories via NFS: */ if (!uid_eq(current_fsuid(), GLOBAL_ROOT_UID)) iap->ia_valid &= ~(ATTR_UID|ATTR_GID); if (iap->ia_valid) return nfsd_setattr(rqstp, resfhp, iap, 0, (time_t)0); /* Callers expect file metadata to be committed here */ return nfserrno(commit_metadata(resfhp)); } /* HPUX client sometimes creates a file in mode 000, and sets size to 0. * setting size to 0 may fail for some specific file systems by the permission * checking which requires WRITE permission but the mode is 000. * we ignore the resizing(to 0) on the just new created file, since the size is * 0 after file created. * * call this only after vfs_create() is called. * */ static void nfsd_check_ignore_resizing(struct iattr *iap) { if ((iap->ia_valid & ATTR_SIZE) && (iap->ia_size == 0)) iap->ia_valid &= ~ATTR_SIZE; } /* The parent directory should already be locked: */ __be32 nfsd_create_locked(struct svc_rqst *rqstp, struct svc_fh *fhp, char *fname, int flen, struct iattr *iap, int type, dev_t rdev, struct svc_fh *resfhp) { struct dentry *dentry, *dchild; struct inode *dirp; __be32 err; __be32 err2; int host_err; dentry = fhp->fh_dentry; dirp = d_inode(dentry); dchild = dget(resfhp->fh_dentry); if (!fhp->fh_locked) { WARN_ONCE(1, "nfsd_create: parent %pd2 not locked!\n", dentry); err = nfserr_io; goto out; } err = nfsd_permission(rqstp, fhp->fh_export, dentry, NFSD_MAY_CREATE); if (err) goto out; if (!(iap->ia_valid & ATTR_MODE)) iap->ia_mode = 0; iap->ia_mode = (iap->ia_mode & S_IALLUGO) | type; err = 0; host_err = 0; switch (type) { case S_IFREG: host_err = vfs_create(dirp, dchild, iap->ia_mode, true); if (!host_err) nfsd_check_ignore_resizing(iap); break; case S_IFDIR: host_err = vfs_mkdir(dirp, dchild, iap->ia_mode); break; case S_IFCHR: case S_IFBLK: case S_IFIFO: case S_IFSOCK: host_err = vfs_mknod(dirp, dchild, iap->ia_mode, rdev); break; default: printk(KERN_WARNING "nfsd: bad file type %o in nfsd_create\n", type); host_err = -EINVAL; } if (host_err < 0) goto out_nfserr; err = nfsd_create_setattr(rqstp, resfhp, iap); /* * nfsd_create_setattr already committed the child. Transactional * filesystems had a chance to commit changes for both parent and * child simultaneously making the following commit_metadata a * noop. */ err2 = nfserrno(commit_metadata(fhp)); if (err2) err = err2; /* * Update the file handle to get the new inode info. 
*/ if (!err) err = fh_update(resfhp); out: dput(dchild); return err; out_nfserr: err = nfserrno(host_err); goto out; } /* * Create a filesystem object (regular, directory, special). * Note that the parent directory is left locked. * * N.B. Every call to nfsd_create needs an fh_put for _both_ fhp and resfhp */ __be32 nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp, char *fname, int flen, struct iattr *iap, int type, dev_t rdev, struct svc_fh *resfhp) { struct dentry *dentry, *dchild = NULL; struct inode *dirp; __be32 err; int host_err; if (isdotent(fname, flen)) return nfserr_exist; err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_NOP); if (err) return err; dentry = fhp->fh_dentry; dirp = d_inode(dentry); host_err = fh_want_write(fhp); if (host_err) return nfserrno(host_err); fh_lock_nested(fhp, I_MUTEX_PARENT); dchild = lookup_one_len(fname, dentry, flen); host_err = PTR_ERR(dchild); if (IS_ERR(dchild)) return nfserrno(host_err); err = fh_compose(resfhp, fhp->fh_export, dchild, fhp); /* * We unconditionally drop our ref to dchild as fh_compose will have * already grabbed its own ref for it. */ dput(dchild); if (err) return err; return nfsd_create_locked(rqstp, fhp, fname, flen, iap, type, rdev, resfhp); } #ifdef CONFIG_NFSD_V3 /* * NFSv3 and NFSv4 version of nfsd_create */ __be32 do_nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp, char *fname, int flen, struct iattr *iap, struct svc_fh *resfhp, int createmode, u32 *verifier, bool *truncp, bool *created) { struct dentry *dentry, *dchild = NULL; struct inode *dirp; __be32 err; int host_err; __u32 v_mtime=0, v_atime=0; err = nfserr_perm; if (!flen) goto out; err = nfserr_exist; if (isdotent(fname, flen)) goto out; if (!(iap->ia_valid & ATTR_MODE)) iap->ia_mode = 0; err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_EXEC); if (err) goto out; dentry = fhp->fh_dentry; dirp = d_inode(dentry); host_err = fh_want_write(fhp); if (host_err) goto out_nfserr; fh_lock_nested(fhp, I_MUTEX_PARENT); /* * Compose the response file handle. */ dchild = lookup_one_len(fname, dentry, flen); host_err = PTR_ERR(dchild); if (IS_ERR(dchild)) goto out_nfserr; /* If file doesn't exist, check for permissions to create one */ if (d_really_is_negative(dchild)) { err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_CREATE); if (err) goto out; } err = fh_compose(resfhp, fhp->fh_export, dchild, fhp); if (err) goto out; if (nfsd_create_is_exclusive(createmode)) { /* solaris7 gets confused (bugid 4218508) if these have * the high bit set, so just clear the high bits. If this is * ever changed to use different attrs for storing the * verifier, then do_open_lookup() will also need to be fixed * accordingly. */ v_mtime = verifier[0]&0x7fffffff; v_atime = verifier[1]&0x7fffffff; } if (d_really_is_positive(dchild)) { err = 0; switch (createmode) { case NFS3_CREATE_UNCHECKED: if (! d_is_reg(dchild)) goto out; else if (truncp) { /* in nfsv4, we need to treat this case a little * differently. we don't want to truncate the * file now; this would be wrong if the OPEN * fails for some other reason. furthermore, * if the size is nonzero, we should ignore it * according to spec! 
*/ *truncp = (iap->ia_valid & ATTR_SIZE) && !iap->ia_size; } else { iap->ia_valid &= ATTR_SIZE; goto set_attr; } break; case NFS3_CREATE_EXCLUSIVE: if ( d_inode(dchild)->i_mtime.tv_sec == v_mtime && d_inode(dchild)->i_atime.tv_sec == v_atime && d_inode(dchild)->i_size == 0 ) { if (created) *created = 1; break; } case NFS4_CREATE_EXCLUSIVE4_1: if ( d_inode(dchild)->i_mtime.tv_sec == v_mtime && d_inode(dchild)->i_atime.tv_sec == v_atime && d_inode(dchild)->i_size == 0 ) { if (created) *created = 1; goto set_attr; } /* fallthru */ case NFS3_CREATE_GUARDED: err = nfserr_exist; } fh_drop_write(fhp); goto out; } host_err = vfs_create(dirp, dchild, iap->ia_mode, true); if (host_err < 0) { fh_drop_write(fhp); goto out_nfserr; } if (created) *created = 1; nfsd_check_ignore_resizing(iap); if (nfsd_create_is_exclusive(createmode)) { /* Cram the verifier into atime/mtime */ iap->ia_valid = ATTR_MTIME|ATTR_ATIME | ATTR_MTIME_SET|ATTR_ATIME_SET; /* XXX someone who knows this better please fix it for nsec */ iap->ia_mtime.tv_sec = v_mtime; iap->ia_atime.tv_sec = v_atime; iap->ia_mtime.tv_nsec = 0; iap->ia_atime.tv_nsec = 0; } set_attr: err = nfsd_create_setattr(rqstp, resfhp, iap); /* * nfsd_create_setattr already committed the child * (and possibly also the parent). */ if (!err) err = nfserrno(commit_metadata(fhp)); /* * Update the filehandle to get the new inode info. */ if (!err) err = fh_update(resfhp); out: fh_unlock(fhp); if (dchild && !IS_ERR(dchild)) dput(dchild); fh_drop_write(fhp); return err; out_nfserr: err = nfserrno(host_err); goto out; } #endif /* CONFIG_NFSD_V3 */ /* * Read a symlink. On entry, *lenp must contain the maximum path length that * fits into the buffer. On return, it contains the true length. * N.B. After this call fhp needs an fh_put */ __be32 nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp) { mm_segment_t oldfs; __be32 err; int host_err; struct path path; err = fh_verify(rqstp, fhp, S_IFLNK, NFSD_MAY_NOP); if (err) goto out; path.mnt = fhp->fh_export->ex_path.mnt; path.dentry = fhp->fh_dentry; err = nfserr_inval; if (!d_is_symlink(path.dentry)) goto out; touch_atime(&path); /* N.B. Why does this call need a get_fs()?? * Remove the set_fs and watch the fireworks:-) --okir */ oldfs = get_fs(); set_fs(KERNEL_DS); host_err = vfs_readlink(path.dentry, (char __user *)buf, *lenp); set_fs(oldfs); if (host_err < 0) goto out_nfserr; *lenp = host_err; err = 0; out: return err; out_nfserr: err = nfserrno(host_err); goto out; } /* * Create a symlink and look up its inode * N.B. After this call _both_ fhp and resfhp need an fh_put */ __be32 nfsd_symlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *fname, int flen, char *path, struct svc_fh *resfhp) { struct dentry *dentry, *dnew; __be32 err, cerr; int host_err; err = nfserr_noent; if (!flen || path[0] == '\0') goto out; err = nfserr_exist; if (isdotent(fname, flen)) goto out; err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_CREATE); if (err) goto out; host_err = fh_want_write(fhp); if (host_err) goto out_nfserr; fh_lock(fhp); dentry = fhp->fh_dentry; dnew = lookup_one_len(fname, dentry, flen); host_err = PTR_ERR(dnew); if (IS_ERR(dnew)) goto out_nfserr; host_err = vfs_symlink(d_inode(dentry), dnew, path); err = nfserrno(host_err); if (!err) err = nfserrno(commit_metadata(fhp)); fh_unlock(fhp); fh_drop_write(fhp); cerr = fh_compose(resfhp, fhp->fh_export, dnew, fhp); dput(dnew); if (err==0) err = cerr; out: return err; out_nfserr: err = nfserrno(host_err); goto out; } /* * Create a hardlink * N.B. 
After this call _both_ ffhp and tfhp need an fh_put */ __be32 nfsd_link(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *name, int len, struct svc_fh *tfhp) { struct dentry *ddir, *dnew, *dold; struct inode *dirp; __be32 err; int host_err; err = fh_verify(rqstp, ffhp, S_IFDIR, NFSD_MAY_CREATE); if (err) goto out; err = fh_verify(rqstp, tfhp, 0, NFSD_MAY_NOP); if (err) goto out; err = nfserr_isdir; if (d_is_dir(tfhp->fh_dentry)) goto out; err = nfserr_perm; if (!len) goto out; err = nfserr_exist; if (isdotent(name, len)) goto out; host_err = fh_want_write(tfhp); if (host_err) { err = nfserrno(host_err); goto out; } fh_lock_nested(ffhp, I_MUTEX_PARENT); ddir = ffhp->fh_dentry; dirp = d_inode(ddir); dnew = lookup_one_len(name, ddir, len); host_err = PTR_ERR(dnew); if (IS_ERR(dnew)) goto out_nfserr; dold = tfhp->fh_dentry; err = nfserr_noent; if (d_really_is_negative(dold)) goto out_dput; host_err = vfs_link(dold, dirp, dnew, NULL); if (!host_err) { err = nfserrno(commit_metadata(ffhp)); if (!err) err = nfserrno(commit_metadata(tfhp)); } else { if (host_err == -EXDEV && rqstp->rq_vers == 2) err = nfserr_acces; else err = nfserrno(host_err); } out_dput: dput(dnew); out_unlock: fh_unlock(ffhp); fh_drop_write(tfhp); out: return err; out_nfserr: err = nfserrno(host_err); goto out_unlock; } /* * Rename a file * N.B. After this call _both_ ffhp and tfhp need an fh_put */ __be32 nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen, struct svc_fh *tfhp, char *tname, int tlen) { struct dentry *fdentry, *tdentry, *odentry, *ndentry, *trap; struct inode *fdir, *tdir; __be32 err; int host_err; err = fh_verify(rqstp, ffhp, S_IFDIR, NFSD_MAY_REMOVE); if (err) goto out; err = fh_verify(rqstp, tfhp, S_IFDIR, NFSD_MAY_CREATE); if (err) goto out; fdentry = ffhp->fh_dentry; fdir = d_inode(fdentry); tdentry = tfhp->fh_dentry; tdir = d_inode(tdentry); err = nfserr_perm; if (!flen || isdotent(fname, flen) || !tlen || isdotent(tname, tlen)) goto out; host_err = fh_want_write(ffhp); if (host_err) { err = nfserrno(host_err); goto out; } /* cannot use fh_lock as we need deadlock protective ordering * so do it by hand */ trap = lock_rename(tdentry, fdentry); ffhp->fh_locked = tfhp->fh_locked = true; fill_pre_wcc(ffhp); fill_pre_wcc(tfhp); odentry = lookup_one_len(fname, fdentry, flen); host_err = PTR_ERR(odentry); if (IS_ERR(odentry)) goto out_nfserr; host_err = -ENOENT; if (d_really_is_negative(odentry)) goto out_dput_old; host_err = -EINVAL; if (odentry == trap) goto out_dput_old; ndentry = lookup_one_len(tname, tdentry, tlen); host_err = PTR_ERR(ndentry); if (IS_ERR(ndentry)) goto out_dput_old; host_err = -ENOTEMPTY; if (ndentry == trap) goto out_dput_new; host_err = -EXDEV; if (ffhp->fh_export->ex_path.mnt != tfhp->fh_export->ex_path.mnt) goto out_dput_new; if (ffhp->fh_export->ex_path.dentry != tfhp->fh_export->ex_path.dentry) goto out_dput_new; host_err = vfs_rename(fdir, odentry, tdir, ndentry, NULL, 0); if (!host_err) { host_err = commit_metadata(tfhp); if (!host_err) host_err = commit_metadata(ffhp); } out_dput_new: dput(ndentry); out_dput_old: dput(odentry); out_nfserr: err = nfserrno(host_err); /* * We cannot rely on fh_unlock on the two filehandles, * as that would do the wrong thing if the two directories * were the same, so again we do it by hand. */ fill_post_wcc(ffhp); fill_post_wcc(tfhp); unlock_rename(tdentry, fdentry); ffhp->fh_locked = tfhp->fh_locked = false; fh_drop_write(ffhp); out: return err; } /* * Unlink a file or directory * N.B. 
After this call fhp needs an fh_put */ __be32 nfsd_unlink(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, char *fname, int flen) { struct dentry *dentry, *rdentry; struct inode *dirp; __be32 err; int host_err; err = nfserr_acces; if (!flen || isdotent(fname, flen)) goto out; err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_REMOVE); if (err) goto out; host_err = fh_want_write(fhp); if (host_err) goto out_nfserr; fh_lock_nested(fhp, I_MUTEX_PARENT); dentry = fhp->fh_dentry; dirp = d_inode(dentry); rdentry = lookup_one_len(fname, dentry, flen); host_err = PTR_ERR(rdentry); if (IS_ERR(rdentry)) goto out_nfserr; if (d_really_is_negative(rdentry)) { dput(rdentry); err = nfserr_noent; goto out; } if (!type) type = d_inode(rdentry)->i_mode & S_IFMT; if (type != S_IFDIR) host_err = vfs_unlink(dirp, rdentry, NULL); else host_err = vfs_rmdir(dirp, rdentry); if (!host_err) host_err = commit_metadata(fhp); dput(rdentry); out_nfserr: err = nfserrno(host_err); out: return err; } /* * We do this buffering because we must not call back into the file * system's ->lookup() method from the filldir callback. That may well * deadlock a number of file systems. * * This is based heavily on the implementation of same in XFS. */ struct buffered_dirent { u64 ino; loff_t offset; int namlen; unsigned int d_type; char name[]; }; struct readdir_data { struct dir_context ctx; char *dirent; size_t used; int full; }; static int nfsd_buffered_filldir(struct dir_context *ctx, const char *name, int namlen, loff_t offset, u64 ino, unsigned int d_type) { struct readdir_data *buf = container_of(ctx, struct readdir_data, ctx); struct buffered_dirent *de = (void *)(buf->dirent + buf->used); unsigned int reclen; reclen = ALIGN(sizeof(struct buffered_dirent) + namlen, sizeof(u64)); if (buf->used + reclen > PAGE_SIZE) { buf->full = 1; return -EINVAL; } de->namlen = namlen; de->offset = offset; de->ino = ino; de->d_type = d_type; memcpy(de->name, name, namlen); buf->used += reclen; return 0; } static __be32 nfsd_buffered_readdir(struct file *file, nfsd_filldir_t func, struct readdir_cd *cdp, loff_t *offsetp) { struct buffered_dirent *de; int host_err; int size; loff_t offset; struct readdir_data buf = { .ctx.actor = nfsd_buffered_filldir, .dirent = (void *)__get_free_page(GFP_KERNEL) }; if (!buf.dirent) return nfserrno(-ENOMEM); offset = *offsetp; while (1) { unsigned int reclen; cdp->err = nfserr_eof; /* will be cleared on successful read */ buf.used = 0; buf.full = 0; host_err = iterate_dir(file, &buf.ctx); if (buf.full) host_err = 0; if (host_err < 0) break; size = buf.used; if (!size) break; de = (struct buffered_dirent *)buf.dirent; while (size > 0) { offset = de->offset; if (func(cdp, de->name, de->namlen, de->offset, de->ino, de->d_type)) break; if (cdp->err != nfs_ok) break; reclen = ALIGN(sizeof(*de) + de->namlen, sizeof(u64)); size -= reclen; de = (struct buffered_dirent *)((char *)de + reclen); } if (size > 0) /* We bailed out early */ break; offset = vfs_llseek(file, 0, SEEK_CUR); } free_page((unsigned long)(buf.dirent)); if (host_err) return nfserrno(host_err); *offsetp = offset; return cdp->err; } /* * Read entries from a directory. * The NFSv3/4 verifier we ignore for now. 
*/ __be32 nfsd_readdir(struct svc_rqst *rqstp, struct svc_fh *fhp, loff_t *offsetp, struct readdir_cd *cdp, nfsd_filldir_t func) { __be32 err; struct file *file; loff_t offset = *offsetp; int may_flags = NFSD_MAY_READ; /* NFSv2 only supports 32 bit cookies */ if (rqstp->rq_vers > 2) may_flags |= NFSD_MAY_64BIT_COOKIE; err = nfsd_open(rqstp, fhp, S_IFDIR, may_flags, &file); if (err) goto out; offset = vfs_llseek(file, offset, SEEK_SET); if (offset < 0) { err = nfserrno((int)offset); goto out_close; } err = nfsd_buffered_readdir(file, func, cdp, offsetp); if (err == nfserr_eof || err == nfserr_toosmall) err = nfs_ok; /* can still be found in ->err */ out_close: fput(file); out: return err; } /* * Get file system stats * N.B. After this call fhp needs an fh_put */ __be32 nfsd_statfs(struct svc_rqst *rqstp, struct svc_fh *fhp, struct kstatfs *stat, int access) { __be32 err; err = fh_verify(rqstp, fhp, 0, NFSD_MAY_NOP | access); if (!err) { struct path path = { .mnt = fhp->fh_export->ex_path.mnt, .dentry = fhp->fh_dentry, }; if (vfs_statfs(&path, stat)) err = nfserr_io; } return err; } static int exp_rdonly(struct svc_rqst *rqstp, struct svc_export *exp) { return nfsexp_flags(rqstp, exp) & NFSEXP_READONLY; } /* * Check for a user's access permissions to this inode. */ __be32 nfsd_permission(struct svc_rqst *rqstp, struct svc_export *exp, struct dentry *dentry, int acc) { struct inode *inode = d_inode(dentry); int err; if ((acc & NFSD_MAY_MASK) == NFSD_MAY_NOP) return 0; #if 0 dprintk("nfsd: permission 0x%x%s%s%s%s%s%s%s mode 0%o%s%s%s\n", acc, (acc & NFSD_MAY_READ)? " read" : "", (acc & NFSD_MAY_WRITE)? " write" : "", (acc & NFSD_MAY_EXEC)? " exec" : "", (acc & NFSD_MAY_SATTR)? " sattr" : "", (acc & NFSD_MAY_TRUNC)? " trunc" : "", (acc & NFSD_MAY_LOCK)? " lock" : "", (acc & NFSD_MAY_OWNER_OVERRIDE)? " owneroverride" : "", inode->i_mode, IS_IMMUTABLE(inode)? " immut" : "", IS_APPEND(inode)? " append" : "", __mnt_is_readonly(exp->ex_path.mnt)? " ro" : ""); dprintk(" owner %d/%d user %d/%d\n", inode->i_uid, inode->i_gid, current_fsuid(), current_fsgid()); #endif /* Normally we reject any write/sattr etc access on a read-only file * system. But if it is IRIX doing check on write-access for a * device special file, we ignore rofs. */ if (!(acc & NFSD_MAY_LOCAL_ACCESS)) if (acc & (NFSD_MAY_WRITE | NFSD_MAY_SATTR | NFSD_MAY_TRUNC)) { if (exp_rdonly(rqstp, exp) || __mnt_is_readonly(exp->ex_path.mnt)) return nfserr_rofs; if (/* (acc & NFSD_MAY_WRITE) && */ IS_IMMUTABLE(inode)) return nfserr_perm; } if ((acc & NFSD_MAY_TRUNC) && IS_APPEND(inode)) return nfserr_perm; if (acc & NFSD_MAY_LOCK) { /* If we cannot rely on authentication in NLM requests, * just allow locks, otherwise require read permission, or * ownership */ if (exp->ex_flags & NFSEXP_NOAUTHNLM) return 0; else acc = NFSD_MAY_READ | NFSD_MAY_OWNER_OVERRIDE; } /* * The file owner always gets access permission for accesses that * would normally be checked at open time. This is to make * file access work even when the client has done a fchmod(fd, 0). * * However, `cp foo bar' should fail nevertheless when bar is * readonly. A sensible way to do this might be to reject all * attempts to truncate a read-only file, because a creat() call * always implies file truncation. * ... but this isn't really fair. A process may reasonably call * ftruncate on an open file descriptor on a file with perm 000. * We must trust the client to do permission checking - using "ACCESS" * with NFSv3. 
*/ if ((acc & NFSD_MAY_OWNER_OVERRIDE) && uid_eq(inode->i_uid, current_fsuid())) return 0; /* This assumes NFSD_MAY_{READ,WRITE,EXEC} == MAY_{READ,WRITE,EXEC} */ err = inode_permission(inode, acc & (MAY_READ|MAY_WRITE|MAY_EXEC)); /* Allow read access to binaries even when mode 111 */ if (err == -EACCES && S_ISREG(inode->i_mode) && (acc == (NFSD_MAY_READ | NFSD_MAY_OWNER_OVERRIDE) || acc == (NFSD_MAY_READ | NFSD_MAY_READ_IF_EXEC))) err = inode_permission(inode, MAY_EXEC); return err? nfserrno(err) : 0; } void nfsd_racache_shutdown(void) { struct raparms *raparm, *last_raparm; unsigned int i; dprintk("nfsd: freeing readahead buffers.\n"); for (i = 0; i < RAPARM_HASH_SIZE; i++) { raparm = raparm_hash[i].pb_head; while(raparm) { last_raparm = raparm; raparm = raparm->p_next; kfree(last_raparm); } raparm_hash[i].pb_head = NULL; } } /* * Initialize readahead param cache */ int nfsd_racache_init(int cache_size) { int i; int j = 0; int nperbucket; struct raparms **raparm = NULL; if (raparm_hash[0].pb_head) return 0; nperbucket = DIV_ROUND_UP(cache_size, RAPARM_HASH_SIZE); nperbucket = max(2, nperbucket); cache_size = nperbucket * RAPARM_HASH_SIZE; dprintk("nfsd: allocating %d readahead buffers.\n", cache_size); for (i = 0; i < RAPARM_HASH_SIZE; i++) { spin_lock_init(&raparm_hash[i].pb_lock); raparm = &raparm_hash[i].pb_head; for (j = 0; j < nperbucket; j++) { *raparm = kzalloc(sizeof(struct raparms), GFP_KERNEL); if (!*raparm) goto out_nomem; raparm = &(*raparm)->p_next; } *raparm = NULL; } nfsdstats.ra_size = cache_size; return 0; out_nomem: dprintk("nfsd: kmalloc failed, freeing readahead buffers\n"); nfsd_racache_shutdown(); return -ENOMEM; }
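/*
 * Editor's note (not part of the original file): the create/link/unlink
 * helpers above are built around one unwind discipline: take the write
 * reference (fh_want_write), take the parent lock (fh_lock_nested), look
 * the child up, and on any exit release what was taken in reverse order
 * via the out/out_nfserr labels. The sketch below is a minimal,
 * self-contained user-space analogue of that goto-based cleanup idiom,
 * compilable outside the kernel; all names (journal, parent_lock,
 * create_child) are hypothetical and chosen only for illustration.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <pthread.h>

static pthread_mutex_t parent_lock = PTHREAD_MUTEX_INITIALIZER;

static int create_child(const char *name)
{
	char *child = NULL;
	FILE *journal;
	int err;

	/* step 1: acquire the first resource (analogue of fh_want_write) */
	journal = fopen("/tmp/journal.log", "a");
	if (!journal)
		return -1;

	/* step 2: lock the "parent" (analogue of fh_lock_nested) */
	pthread_mutex_lock(&parent_lock);

	/* step 3: look up / allocate the child */
	child = malloc(strlen(name) + 1);
	if (!child) {
		err = -1;
		goto out_unlock;	/* undo steps 2 and 1, in that order */
	}
	strcpy(child, name);

	fprintf(journal, "created %s\n", child);
	err = 0;

	free(child);
out_unlock:
	pthread_mutex_unlock(&parent_lock);
	fclose(journal);
	return err;
}

int main(void)
{
	return create_child("example") ? EXIT_FAILURE : EXIT_SUCCESS;
}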
./CrossVul/dataset_final_sorted/CWE-404/c/bad_3351_8
crossvul-cpp_data_good_3320_0
/* * hid-cp2112.c - Silicon Labs HID USB to SMBus master bridge * Copyright (c) 2013,2014 Uplogix, Inc. * David Barksdale <dbarksdale@uplogix.com> * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ /* * The Silicon Labs CP2112 chip is a USB HID device which provides an * SMBus controller for talking to slave devices and 8 GPIO pins. The * host communicates with the CP2112 via raw HID reports. * * Data Sheet: * http://www.silabs.com/Support%20Documents/TechnicalDocs/CP2112.pdf * Programming Interface Specification: * http://www.silabs.com/Support%20Documents/TechnicalDocs/AN495.pdf */ #include <linux/gpio.h> #include <linux/gpio/driver.h> #include <linux/hid.h> #include <linux/i2c.h> #include <linux/module.h> #include <linux/nls.h> #include <linux/usb/ch9.h> #include "hid-ids.h" #define CP2112_REPORT_MAX_LENGTH 64 #define CP2112_GPIO_CONFIG_LENGTH 5 #define CP2112_GPIO_GET_LENGTH 2 #define CP2112_GPIO_SET_LENGTH 3 enum { CP2112_GPIO_CONFIG = 0x02, CP2112_GPIO_GET = 0x03, CP2112_GPIO_SET = 0x04, CP2112_GET_VERSION_INFO = 0x05, CP2112_SMBUS_CONFIG = 0x06, CP2112_DATA_READ_REQUEST = 0x10, CP2112_DATA_WRITE_READ_REQUEST = 0x11, CP2112_DATA_READ_FORCE_SEND = 0x12, CP2112_DATA_READ_RESPONSE = 0x13, CP2112_DATA_WRITE_REQUEST = 0x14, CP2112_TRANSFER_STATUS_REQUEST = 0x15, CP2112_TRANSFER_STATUS_RESPONSE = 0x16, CP2112_CANCEL_TRANSFER = 0x17, CP2112_LOCK_BYTE = 0x20, CP2112_USB_CONFIG = 0x21, CP2112_MANUFACTURER_STRING = 0x22, CP2112_PRODUCT_STRING = 0x23, CP2112_SERIAL_STRING = 0x24, }; enum { STATUS0_IDLE = 0x00, STATUS0_BUSY = 0x01, STATUS0_COMPLETE = 0x02, STATUS0_ERROR = 0x03, }; enum { STATUS1_TIMEOUT_NACK = 0x00, STATUS1_TIMEOUT_BUS = 0x01, STATUS1_ARBITRATION_LOST = 0x02, STATUS1_READ_INCOMPLETE = 0x03, STATUS1_WRITE_INCOMPLETE = 0x04, STATUS1_SUCCESS = 0x05, }; struct cp2112_smbus_config_report { u8 report; /* CP2112_SMBUS_CONFIG */ __be32 clock_speed; /* Hz */ u8 device_address; /* Stored in the upper 7 bits */ u8 auto_send_read; /* 1 = enabled, 0 = disabled */ __be16 write_timeout; /* ms, 0 = no timeout */ __be16 read_timeout; /* ms, 0 = no timeout */ u8 scl_low_timeout; /* 1 = enabled, 0 = disabled */ __be16 retry_time; /* # of retries, 0 = no limit */ } __packed; struct cp2112_usb_config_report { u8 report; /* CP2112_USB_CONFIG */ __le16 vid; /* Vendor ID */ __le16 pid; /* Product ID */ u8 max_power; /* Power requested in 2mA units */ u8 power_mode; /* 0x00 = bus powered 0x01 = self powered & regulator off 0x02 = self powered & regulator on */ u8 release_major; u8 release_minor; u8 mask; /* What fields to program */ } __packed; struct cp2112_read_req_report { u8 report; /* CP2112_DATA_READ_REQUEST */ u8 slave_address; __be16 length; } __packed; struct cp2112_write_read_req_report { u8 report; /* CP2112_DATA_WRITE_READ_REQUEST */ u8 slave_address; __be16 length; u8 target_address_length; u8 target_address[16]; } __packed; struct cp2112_write_req_report { u8 report; /* CP2112_DATA_WRITE_REQUEST */ u8 slave_address; u8 length; u8 data[61]; } __packed; struct cp2112_force_read_report { u8 report; /* CP2112_DATA_READ_FORCE_SEND */ __be16 length; } __packed; struct cp2112_xfer_status_report { u8 report; /* 
CP2112_TRANSFER_STATUS_RESPONSE */ u8 status0; /* STATUS0_* */ u8 status1; /* STATUS1_* */ __be16 retries; __be16 length; } __packed; struct cp2112_string_report { u8 dummy; /* force .string to be aligned */ u8 report; /* CP2112_*_STRING */ u8 length; /* length in bytes of everyting after .report */ u8 type; /* USB_DT_STRING */ wchar_t string[30]; /* UTF16_LITTLE_ENDIAN string */ } __packed; /* Number of times to request transfer status before giving up waiting for a transfer to complete. This may need to be changed if SMBUS clock, retries, or read/write/scl_low timeout settings are changed. */ static const int XFER_STATUS_RETRIES = 10; /* Time in ms to wait for a CP2112_DATA_READ_RESPONSE or CP2112_TRANSFER_STATUS_RESPONSE. */ static const int RESPONSE_TIMEOUT = 50; static const struct hid_device_id cp2112_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_CP2112) }, { } }; MODULE_DEVICE_TABLE(hid, cp2112_devices); struct cp2112_device { struct i2c_adapter adap; struct hid_device *hdev; wait_queue_head_t wait; u8 read_data[61]; u8 read_length; u8 hwversion; int xfer_status; atomic_t read_avail; atomic_t xfer_avail; struct gpio_chip gc; u8 *in_out_buffer; struct mutex lock; struct gpio_desc *desc[8]; bool gpio_poll; struct delayed_work gpio_poll_worker; unsigned long irq_mask; u8 gpio_prev_state; }; static int gpio_push_pull = 0xFF; module_param(gpio_push_pull, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(gpio_push_pull, "GPIO push-pull configuration bitmask"); static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset) { struct cp2112_device *dev = gpiochip_get_data(chip); struct hid_device *hdev = dev->hdev; u8 *buf = dev->in_out_buffer; int ret; mutex_lock(&dev->lock); ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf, CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT, HID_REQ_GET_REPORT); if (ret != CP2112_GPIO_CONFIG_LENGTH) { hid_err(hdev, "error requesting GPIO config: %d\n", ret); goto exit; } buf[1] &= ~(1 << offset); buf[2] = gpio_push_pull; ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf, CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT, HID_REQ_SET_REPORT); if (ret < 0) { hid_err(hdev, "error setting GPIO config: %d\n", ret); goto exit; } ret = 0; exit: mutex_unlock(&dev->lock); return ret <= 0 ? ret : -EIO; } static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value) { struct cp2112_device *dev = gpiochip_get_data(chip); struct hid_device *hdev = dev->hdev; u8 *buf = dev->in_out_buffer; int ret; mutex_lock(&dev->lock); buf[0] = CP2112_GPIO_SET; buf[1] = value ? 0xff : 0; buf[2] = 1 << offset; ret = hid_hw_raw_request(hdev, CP2112_GPIO_SET, buf, CP2112_GPIO_SET_LENGTH, HID_FEATURE_REPORT, HID_REQ_SET_REPORT); if (ret < 0) hid_err(hdev, "error setting GPIO values: %d\n", ret); mutex_unlock(&dev->lock); } static int cp2112_gpio_get_all(struct gpio_chip *chip) { struct cp2112_device *dev = gpiochip_get_data(chip); struct hid_device *hdev = dev->hdev; u8 *buf = dev->in_out_buffer; int ret; mutex_lock(&dev->lock); ret = hid_hw_raw_request(hdev, CP2112_GPIO_GET, buf, CP2112_GPIO_GET_LENGTH, HID_FEATURE_REPORT, HID_REQ_GET_REPORT); if (ret != CP2112_GPIO_GET_LENGTH) { hid_err(hdev, "error requesting GPIO values: %d\n", ret); ret = ret < 0 ? 
ret : -EIO; goto exit; } ret = buf[1]; exit: mutex_unlock(&dev->lock); return ret; } static int cp2112_gpio_get(struct gpio_chip *chip, unsigned int offset) { int ret; ret = cp2112_gpio_get_all(chip); if (ret < 0) return ret; return (ret >> offset) & 1; } static int cp2112_gpio_direction_output(struct gpio_chip *chip, unsigned offset, int value) { struct cp2112_device *dev = gpiochip_get_data(chip); struct hid_device *hdev = dev->hdev; u8 *buf = dev->in_out_buffer; int ret; mutex_lock(&dev->lock); ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf, CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT, HID_REQ_GET_REPORT); if (ret != CP2112_GPIO_CONFIG_LENGTH) { hid_err(hdev, "error requesting GPIO config: %d\n", ret); goto fail; } buf[1] |= 1 << offset; buf[2] = gpio_push_pull; ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf, CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT, HID_REQ_SET_REPORT); if (ret < 0) { hid_err(hdev, "error setting GPIO config: %d\n", ret); goto fail; } mutex_unlock(&dev->lock); /* * Set gpio value when output direction is already set, * as specified in AN495, Rev. 0.2, cpt. 4.4 */ cp2112_gpio_set(chip, offset, value); return 0; fail: mutex_unlock(&dev->lock); return ret < 0 ? ret : -EIO; } static int cp2112_hid_get(struct hid_device *hdev, unsigned char report_number, u8 *data, size_t count, unsigned char report_type) { u8 *buf; int ret; buf = kmalloc(count, GFP_KERNEL); if (!buf) return -ENOMEM; ret = hid_hw_raw_request(hdev, report_number, buf, count, report_type, HID_REQ_GET_REPORT); memcpy(data, buf, count); kfree(buf); return ret; } static int cp2112_hid_output(struct hid_device *hdev, u8 *data, size_t count, unsigned char report_type) { u8 *buf; int ret; buf = kmemdup(data, count, GFP_KERNEL); if (!buf) return -ENOMEM; if (report_type == HID_OUTPUT_REPORT) ret = hid_hw_output_report(hdev, buf, count); else ret = hid_hw_raw_request(hdev, buf[0], buf, count, report_type, HID_REQ_SET_REPORT); kfree(buf); return ret; } static int cp2112_wait(struct cp2112_device *dev, atomic_t *avail) { int ret = 0; /* We have sent either a CP2112_TRANSFER_STATUS_REQUEST or a * CP2112_DATA_READ_FORCE_SEND and we are waiting for the response to * come in cp2112_raw_event or timeout. There will only be one of these * in flight at any one time. The timeout is extremely large and is a * last resort if the CP2112 has died. If we do timeout we don't expect * to receive the response which would cause data races, it's not like * we can do anything about it anyway. 
*/ ret = wait_event_interruptible_timeout(dev->wait, atomic_read(avail), msecs_to_jiffies(RESPONSE_TIMEOUT)); if (-ERESTARTSYS == ret) return ret; if (!ret) return -ETIMEDOUT; atomic_set(avail, 0); return 0; } static int cp2112_xfer_status(struct cp2112_device *dev) { struct hid_device *hdev = dev->hdev; u8 buf[2]; int ret; buf[0] = CP2112_TRANSFER_STATUS_REQUEST; buf[1] = 0x01; atomic_set(&dev->xfer_avail, 0); ret = cp2112_hid_output(hdev, buf, 2, HID_OUTPUT_REPORT); if (ret < 0) { hid_warn(hdev, "Error requesting status: %d\n", ret); return ret; } ret = cp2112_wait(dev, &dev->xfer_avail); if (ret) return ret; return dev->xfer_status; } static int cp2112_read(struct cp2112_device *dev, u8 *data, size_t size) { struct hid_device *hdev = dev->hdev; struct cp2112_force_read_report report; int ret; if (size > sizeof(dev->read_data)) size = sizeof(dev->read_data); report.report = CP2112_DATA_READ_FORCE_SEND; report.length = cpu_to_be16(size); atomic_set(&dev->read_avail, 0); ret = cp2112_hid_output(hdev, &report.report, sizeof(report), HID_OUTPUT_REPORT); if (ret < 0) { hid_warn(hdev, "Error requesting data: %d\n", ret); return ret; } ret = cp2112_wait(dev, &dev->read_avail); if (ret) return ret; hid_dbg(hdev, "read %d of %zd bytes requested\n", dev->read_length, size); if (size > dev->read_length) size = dev->read_length; memcpy(data, dev->read_data, size); return dev->read_length; } static int cp2112_read_req(void *buf, u8 slave_address, u16 length) { struct cp2112_read_req_report *report = buf; if (length < 1 || length > 512) return -EINVAL; report->report = CP2112_DATA_READ_REQUEST; report->slave_address = slave_address << 1; report->length = cpu_to_be16(length); return sizeof(*report); } static int cp2112_write_read_req(void *buf, u8 slave_address, u16 length, u8 command, u8 *data, u8 data_length) { struct cp2112_write_read_req_report *report = buf; if (length < 1 || length > 512 || data_length > sizeof(report->target_address) - 1) return -EINVAL; report->report = CP2112_DATA_WRITE_READ_REQUEST; report->slave_address = slave_address << 1; report->length = cpu_to_be16(length); report->target_address_length = data_length + 1; report->target_address[0] = command; memcpy(&report->target_address[1], data, data_length); return data_length + 6; } static int cp2112_write_req(void *buf, u8 slave_address, u8 command, u8 *data, u8 data_length) { struct cp2112_write_req_report *report = buf; if (data_length > sizeof(report->data) - 1) return -EINVAL; report->report = CP2112_DATA_WRITE_REQUEST; report->slave_address = slave_address << 1; report->length = data_length + 1; report->data[0] = command; memcpy(&report->data[1], data, data_length); return data_length + 4; } static int cp2112_i2c_write_req(void *buf, u8 slave_address, u8 *data, u8 data_length) { struct cp2112_write_req_report *report = buf; if (data_length > sizeof(report->data)) return -EINVAL; report->report = CP2112_DATA_WRITE_REQUEST; report->slave_address = slave_address << 1; report->length = data_length; memcpy(report->data, data, data_length); return data_length + 3; } static int cp2112_i2c_write_read_req(void *buf, u8 slave_address, u8 *addr, int addr_length, int read_length) { struct cp2112_write_read_req_report *report = buf; if (read_length < 1 || read_length > 512 || addr_length > sizeof(report->target_address)) return -EINVAL; report->report = CP2112_DATA_WRITE_READ_REQUEST; report->slave_address = slave_address << 1; report->length = cpu_to_be16(read_length); report->target_address_length = addr_length; 
memcpy(report->target_address, addr, addr_length); return addr_length + 5; } static int cp2112_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) { struct cp2112_device *dev = (struct cp2112_device *)adap->algo_data; struct hid_device *hdev = dev->hdev; u8 buf[64]; ssize_t count; ssize_t read_length = 0; u8 *read_buf = NULL; unsigned int retries; int ret; hid_dbg(hdev, "I2C %d messages\n", num); if (num == 1) { if (msgs->flags & I2C_M_RD) { hid_dbg(hdev, "I2C read %#04x len %d\n", msgs->addr, msgs->len); read_length = msgs->len; read_buf = msgs->buf; count = cp2112_read_req(buf, msgs->addr, msgs->len); } else { hid_dbg(hdev, "I2C write %#04x len %d\n", msgs->addr, msgs->len); count = cp2112_i2c_write_req(buf, msgs->addr, msgs->buf, msgs->len); } if (count < 0) return count; } else if (dev->hwversion > 1 && /* no repeated start in rev 1 */ num == 2 && msgs[0].addr == msgs[1].addr && !(msgs[0].flags & I2C_M_RD) && (msgs[1].flags & I2C_M_RD)) { hid_dbg(hdev, "I2C write-read %#04x wlen %d rlen %d\n", msgs[0].addr, msgs[0].len, msgs[1].len); read_length = msgs[1].len; read_buf = msgs[1].buf; count = cp2112_i2c_write_read_req(buf, msgs[0].addr, msgs[0].buf, msgs[0].len, msgs[1].len); if (count < 0) return count; } else { hid_err(hdev, "Multi-message I2C transactions not supported\n"); return -EOPNOTSUPP; } ret = hid_hw_power(hdev, PM_HINT_FULLON); if (ret < 0) { hid_err(hdev, "power management error: %d\n", ret); return ret; } ret = cp2112_hid_output(hdev, buf, count, HID_OUTPUT_REPORT); if (ret < 0) { hid_warn(hdev, "Error starting transaction: %d\n", ret); goto power_normal; } for (retries = 0; retries < XFER_STATUS_RETRIES; ++retries) { ret = cp2112_xfer_status(dev); if (-EBUSY == ret) continue; if (ret < 0) goto power_normal; break; } if (XFER_STATUS_RETRIES <= retries) { hid_warn(hdev, "Transfer timed out, cancelling.\n"); buf[0] = CP2112_CANCEL_TRANSFER; buf[1] = 0x01; ret = cp2112_hid_output(hdev, buf, 2, HID_OUTPUT_REPORT); if (ret < 0) hid_warn(hdev, "Error cancelling transaction: %d\n", ret); ret = -ETIMEDOUT; goto power_normal; } for (count = 0; count < read_length;) { ret = cp2112_read(dev, read_buf + count, read_length - count); if (ret < 0) goto power_normal; if (ret == 0) { hid_err(hdev, "read returned 0\n"); ret = -EIO; goto power_normal; } count += ret; if (count > read_length) { /* * The hardware returned too much data. * This is mostly harmless because cp2112_read() * has a limit check so didn't overrun our * buffer. Nevertheless, we return an error * because something is seriously wrong and * it shouldn't go unnoticed. */ hid_err(hdev, "long read: %d > %zd\n", ret, read_length - count + ret); ret = -EIO; goto power_normal; } } /* return the number of transferred messages */ ret = num; power_normal: hid_hw_power(hdev, PM_HINT_NORMAL); hid_dbg(hdev, "I2C transfer finished: %d\n", ret); return ret; } static int cp2112_xfer(struct i2c_adapter *adap, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data *data) { struct cp2112_device *dev = (struct cp2112_device *)adap->algo_data; struct hid_device *hdev = dev->hdev; u8 buf[64]; __le16 word; ssize_t count; size_t read_length = 0; unsigned int retries; int ret; hid_dbg(hdev, "%s addr 0x%x flags 0x%x cmd 0x%x size %d\n", read_write == I2C_SMBUS_WRITE ? 
"write" : "read", addr, flags, command, size); switch (size) { case I2C_SMBUS_BYTE: read_length = 1; if (I2C_SMBUS_READ == read_write) count = cp2112_read_req(buf, addr, read_length); else count = cp2112_write_req(buf, addr, command, NULL, 0); break; case I2C_SMBUS_BYTE_DATA: read_length = 1; if (I2C_SMBUS_READ == read_write) count = cp2112_write_read_req(buf, addr, read_length, command, NULL, 0); else count = cp2112_write_req(buf, addr, command, &data->byte, 1); break; case I2C_SMBUS_WORD_DATA: read_length = 2; word = cpu_to_le16(data->word); if (I2C_SMBUS_READ == read_write) count = cp2112_write_read_req(buf, addr, read_length, command, NULL, 0); else count = cp2112_write_req(buf, addr, command, (u8 *)&word, 2); break; case I2C_SMBUS_PROC_CALL: size = I2C_SMBUS_WORD_DATA; read_write = I2C_SMBUS_READ; read_length = 2; word = cpu_to_le16(data->word); count = cp2112_write_read_req(buf, addr, read_length, command, (u8 *)&word, 2); break; case I2C_SMBUS_I2C_BLOCK_DATA: size = I2C_SMBUS_BLOCK_DATA; /* fallthrough */ case I2C_SMBUS_BLOCK_DATA: if (I2C_SMBUS_READ == read_write) { count = cp2112_write_read_req(buf, addr, I2C_SMBUS_BLOCK_MAX, command, NULL, 0); } else { count = cp2112_write_req(buf, addr, command, data->block, data->block[0] + 1); } break; case I2C_SMBUS_BLOCK_PROC_CALL: size = I2C_SMBUS_BLOCK_DATA; read_write = I2C_SMBUS_READ; count = cp2112_write_read_req(buf, addr, I2C_SMBUS_BLOCK_MAX, command, data->block, data->block[0] + 1); break; default: hid_warn(hdev, "Unsupported transaction %d\n", size); return -EOPNOTSUPP; } if (count < 0) return count; ret = hid_hw_power(hdev, PM_HINT_FULLON); if (ret < 0) { hid_err(hdev, "power management error: %d\n", ret); return ret; } ret = cp2112_hid_output(hdev, buf, count, HID_OUTPUT_REPORT); if (ret < 0) { hid_warn(hdev, "Error starting transaction: %d\n", ret); goto power_normal; } for (retries = 0; retries < XFER_STATUS_RETRIES; ++retries) { ret = cp2112_xfer_status(dev); if (-EBUSY == ret) continue; if (ret < 0) goto power_normal; break; } if (XFER_STATUS_RETRIES <= retries) { hid_warn(hdev, "Transfer timed out, cancelling.\n"); buf[0] = CP2112_CANCEL_TRANSFER; buf[1] = 0x01; ret = cp2112_hid_output(hdev, buf, 2, HID_OUTPUT_REPORT); if (ret < 0) hid_warn(hdev, "Error cancelling transaction: %d\n", ret); ret = -ETIMEDOUT; goto power_normal; } if (I2C_SMBUS_WRITE == read_write) { ret = 0; goto power_normal; } if (I2C_SMBUS_BLOCK_DATA == size) read_length = ret; ret = cp2112_read(dev, buf, read_length); if (ret < 0) goto power_normal; if (ret != read_length) { hid_warn(hdev, "short read: %d < %zd\n", ret, read_length); ret = -EIO; goto power_normal; } switch (size) { case I2C_SMBUS_BYTE: case I2C_SMBUS_BYTE_DATA: data->byte = buf[0]; break; case I2C_SMBUS_WORD_DATA: data->word = le16_to_cpup((__le16 *)buf); break; case I2C_SMBUS_BLOCK_DATA: if (read_length > I2C_SMBUS_BLOCK_MAX) { ret = -EPROTO; goto power_normal; } memcpy(data->block, buf, read_length); break; } ret = 0; power_normal: hid_hw_power(hdev, PM_HINT_NORMAL); hid_dbg(hdev, "transfer finished: %d\n", ret); return ret; } static u32 cp2112_functionality(struct i2c_adapter *adap) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_BLOCK_DATA | I2C_FUNC_SMBUS_I2C_BLOCK | I2C_FUNC_SMBUS_PROC_CALL | I2C_FUNC_SMBUS_BLOCK_PROC_CALL; } static const struct i2c_algorithm smbus_algorithm = { .master_xfer = cp2112_i2c_xfer, .smbus_xfer = cp2112_xfer, .functionality = cp2112_functionality, }; static int cp2112_get_usb_config(struct 
hid_device *hdev, struct cp2112_usb_config_report *cfg) { int ret; ret = cp2112_hid_get(hdev, CP2112_USB_CONFIG, (u8 *)cfg, sizeof(*cfg), HID_FEATURE_REPORT); if (ret != sizeof(*cfg)) { hid_err(hdev, "error reading usb config: %d\n", ret); if (ret < 0) return ret; return -EIO; } return 0; } static int cp2112_set_usb_config(struct hid_device *hdev, struct cp2112_usb_config_report *cfg) { int ret; BUG_ON(cfg->report != CP2112_USB_CONFIG); ret = cp2112_hid_output(hdev, (u8 *)cfg, sizeof(*cfg), HID_FEATURE_REPORT); if (ret != sizeof(*cfg)) { hid_err(hdev, "error writing usb config: %d\n", ret); if (ret < 0) return ret; return -EIO; } return 0; } static void chmod_sysfs_attrs(struct hid_device *hdev); #define CP2112_CONFIG_ATTR(name, store, format, ...) \ static ssize_t name##_store(struct device *kdev, \ struct device_attribute *attr, const char *buf, \ size_t count) \ { \ struct hid_device *hdev = to_hid_device(kdev); \ struct cp2112_usb_config_report cfg; \ int ret = cp2112_get_usb_config(hdev, &cfg); \ if (ret) \ return ret; \ store; \ ret = cp2112_set_usb_config(hdev, &cfg); \ if (ret) \ return ret; \ chmod_sysfs_attrs(hdev); \ return count; \ } \ static ssize_t name##_show(struct device *kdev, \ struct device_attribute *attr, char *buf) \ { \ struct hid_device *hdev = to_hid_device(kdev); \ struct cp2112_usb_config_report cfg; \ int ret = cp2112_get_usb_config(hdev, &cfg); \ if (ret) \ return ret; \ return scnprintf(buf, PAGE_SIZE, format, ##__VA_ARGS__); \ } \ static DEVICE_ATTR_RW(name); CP2112_CONFIG_ATTR(vendor_id, ({ u16 vid; if (sscanf(buf, "%hi", &vid) != 1) return -EINVAL; cfg.vid = cpu_to_le16(vid); cfg.mask = 0x01; }), "0x%04x\n", le16_to_cpu(cfg.vid)); CP2112_CONFIG_ATTR(product_id, ({ u16 pid; if (sscanf(buf, "%hi", &pid) != 1) return -EINVAL; cfg.pid = cpu_to_le16(pid); cfg.mask = 0x02; }), "0x%04x\n", le16_to_cpu(cfg.pid)); CP2112_CONFIG_ATTR(max_power, ({ int mA; if (sscanf(buf, "%i", &mA) != 1) return -EINVAL; cfg.max_power = (mA + 1) / 2; cfg.mask = 0x04; }), "%u mA\n", cfg.max_power * 2); CP2112_CONFIG_ATTR(power_mode, ({ if (sscanf(buf, "%hhi", &cfg.power_mode) != 1) return -EINVAL; cfg.mask = 0x08; }), "%u\n", cfg.power_mode); CP2112_CONFIG_ATTR(release_version, ({ if (sscanf(buf, "%hhi.%hhi", &cfg.release_major, &cfg.release_minor) != 2) return -EINVAL; cfg.mask = 0x10; }), "%u.%u\n", cfg.release_major, cfg.release_minor); #undef CP2112_CONFIG_ATTR struct cp2112_pstring_attribute { struct device_attribute attr; unsigned char report; }; static ssize_t pstr_store(struct device *kdev, struct device_attribute *kattr, const char *buf, size_t count) { struct hid_device *hdev = to_hid_device(kdev); struct cp2112_pstring_attribute *attr = container_of(kattr, struct cp2112_pstring_attribute, attr); struct cp2112_string_report report; int ret; memset(&report, 0, sizeof(report)); ret = utf8s_to_utf16s(buf, count, UTF16_LITTLE_ENDIAN, report.string, ARRAY_SIZE(report.string)); report.report = attr->report; report.length = ret * sizeof(report.string[0]) + 2; report.type = USB_DT_STRING; ret = cp2112_hid_output(hdev, &report.report, report.length + 1, HID_FEATURE_REPORT); if (ret != report.length + 1) { hid_err(hdev, "error writing %s string: %d\n", kattr->attr.name, ret); if (ret < 0) return ret; return -EIO; } chmod_sysfs_attrs(hdev); return count; } static ssize_t pstr_show(struct device *kdev, struct device_attribute *kattr, char *buf) { struct hid_device *hdev = to_hid_device(kdev); struct cp2112_pstring_attribute *attr = container_of(kattr, struct cp2112_pstring_attribute, 
attr); struct cp2112_string_report report; u8 length; int ret; ret = cp2112_hid_get(hdev, attr->report, &report.report, sizeof(report) - 1, HID_FEATURE_REPORT); if (ret < 3) { hid_err(hdev, "error reading %s string: %d\n", kattr->attr.name, ret); if (ret < 0) return ret; return -EIO; } if (report.length < 2) { hid_err(hdev, "invalid %s string length: %d\n", kattr->attr.name, report.length); return -EIO; } length = report.length > ret - 1 ? ret - 1 : report.length; length = (length - 2) / sizeof(report.string[0]); ret = utf16s_to_utf8s(report.string, length, UTF16_LITTLE_ENDIAN, buf, PAGE_SIZE - 1); buf[ret++] = '\n'; return ret; } #define CP2112_PSTR_ATTR(name, _report) \ static struct cp2112_pstring_attribute dev_attr_##name = { \ .attr = __ATTR(name, (S_IWUSR | S_IRUGO), pstr_show, pstr_store), \ .report = _report, \ }; CP2112_PSTR_ATTR(manufacturer, CP2112_MANUFACTURER_STRING); CP2112_PSTR_ATTR(product, CP2112_PRODUCT_STRING); CP2112_PSTR_ATTR(serial, CP2112_SERIAL_STRING); #undef CP2112_PSTR_ATTR static const struct attribute_group cp2112_attr_group = { .attrs = (struct attribute *[]){ &dev_attr_vendor_id.attr, &dev_attr_product_id.attr, &dev_attr_max_power.attr, &dev_attr_power_mode.attr, &dev_attr_release_version.attr, &dev_attr_manufacturer.attr.attr, &dev_attr_product.attr.attr, &dev_attr_serial.attr.attr, NULL } }; /* Chmoding our sysfs attributes is simply a way to expose which fields in the * PROM have already been programmed. We do not depend on this preventing * writing to these attributes since the CP2112 will simply ignore writes to * already-programmed fields. This is why there is no sense in fixing this * racy behaviour. */ static void chmod_sysfs_attrs(struct hid_device *hdev) { struct attribute **attr; u8 buf[2]; int ret; ret = cp2112_hid_get(hdev, CP2112_LOCK_BYTE, buf, sizeof(buf), HID_FEATURE_REPORT); if (ret != sizeof(buf)) { hid_err(hdev, "error reading lock byte: %d\n", ret); return; } for (attr = cp2112_attr_group.attrs; *attr; ++attr) { umode_t mode = (buf[1] & 1) ? 
S_IWUSR | S_IRUGO : S_IRUGO; ret = sysfs_chmod_file(&hdev->dev.kobj, *attr, mode); if (ret < 0) hid_err(hdev, "error chmoding sysfs file %s\n", (*attr)->name); buf[1] >>= 1; } } static void cp2112_gpio_irq_ack(struct irq_data *d) { } static void cp2112_gpio_irq_mask(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct cp2112_device *dev = gpiochip_get_data(gc); __clear_bit(d->hwirq, &dev->irq_mask); } static void cp2112_gpio_irq_unmask(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct cp2112_device *dev = gpiochip_get_data(gc); __set_bit(d->hwirq, &dev->irq_mask); } static void cp2112_gpio_poll_callback(struct work_struct *work) { struct cp2112_device *dev = container_of(work, struct cp2112_device, gpio_poll_worker.work); struct irq_data *d; u8 gpio_mask; u8 virqs = (u8)dev->irq_mask; u32 irq_type; int irq, virq, ret; ret = cp2112_gpio_get_all(&dev->gc); if (ret == -ENODEV) /* the hardware has been disconnected */ return; if (ret < 0) goto exit; gpio_mask = ret; while (virqs) { virq = ffs(virqs) - 1; virqs &= ~BIT(virq); if (!dev->gc.to_irq) break; irq = dev->gc.to_irq(&dev->gc, virq); d = irq_get_irq_data(irq); if (!d) continue; irq_type = irqd_get_trigger_type(d); if (gpio_mask & BIT(virq)) { /* Level High */ if (irq_type & IRQ_TYPE_LEVEL_HIGH) handle_nested_irq(irq); if ((irq_type & IRQ_TYPE_EDGE_RISING) && !(dev->gpio_prev_state & BIT(virq))) handle_nested_irq(irq); } else { /* Level Low */ if (irq_type & IRQ_TYPE_LEVEL_LOW) handle_nested_irq(irq); if ((irq_type & IRQ_TYPE_EDGE_FALLING) && (dev->gpio_prev_state & BIT(virq))) handle_nested_irq(irq); } } dev->gpio_prev_state = gpio_mask; exit: if (dev->gpio_poll) schedule_delayed_work(&dev->gpio_poll_worker, 10); } static unsigned int cp2112_gpio_irq_startup(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct cp2112_device *dev = gpiochip_get_data(gc); INIT_DELAYED_WORK(&dev->gpio_poll_worker, cp2112_gpio_poll_callback); cp2112_gpio_direction_input(gc, d->hwirq); if (!dev->gpio_poll) { dev->gpio_poll = true; schedule_delayed_work(&dev->gpio_poll_worker, 0); } cp2112_gpio_irq_unmask(d); return 0; } static void cp2112_gpio_irq_shutdown(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct cp2112_device *dev = gpiochip_get_data(gc); cancel_delayed_work_sync(&dev->gpio_poll_worker); } static int cp2112_gpio_irq_type(struct irq_data *d, unsigned int type) { return 0; } static struct irq_chip cp2112_gpio_irqchip = { .name = "cp2112-gpio", .irq_startup = cp2112_gpio_irq_startup, .irq_shutdown = cp2112_gpio_irq_shutdown, .irq_ack = cp2112_gpio_irq_ack, .irq_mask = cp2112_gpio_irq_mask, .irq_unmask = cp2112_gpio_irq_unmask, .irq_set_type = cp2112_gpio_irq_type, }; static int __maybe_unused cp2112_allocate_irq(struct cp2112_device *dev, int pin) { int ret; if (dev->desc[pin]) return -EINVAL; dev->desc[pin] = gpiochip_request_own_desc(&dev->gc, pin, "HID/I2C:Event"); if (IS_ERR(dev->desc[pin])) { dev_err(dev->gc.parent, "Failed to request GPIO\n"); return PTR_ERR(dev->desc[pin]); } ret = gpiochip_lock_as_irq(&dev->gc, pin); if (ret) { dev_err(dev->gc.parent, "Failed to lock GPIO as interrupt\n"); goto err_desc; } ret = gpiod_to_irq(dev->desc[pin]); if (ret < 0) { dev_err(dev->gc.parent, "Failed to translate GPIO to IRQ\n"); goto err_lock; } return ret; err_lock: gpiochip_unlock_as_irq(&dev->gc, pin); err_desc: gpiochip_free_own_desc(dev->desc[pin]); dev->desc[pin] = NULL; return ret; } static int cp2112_probe(struct hid_device 
*hdev, const struct hid_device_id *id) { struct cp2112_device *dev; u8 buf[3]; struct cp2112_smbus_config_report config; int ret; dev = devm_kzalloc(&hdev->dev, sizeof(*dev), GFP_KERNEL); if (!dev) return -ENOMEM; dev->in_out_buffer = devm_kzalloc(&hdev->dev, CP2112_REPORT_MAX_LENGTH, GFP_KERNEL); if (!dev->in_out_buffer) return -ENOMEM; mutex_init(&dev->lock); ret = hid_parse(hdev); if (ret) { hid_err(hdev, "parse failed\n"); return ret; } ret = hid_hw_start(hdev, HID_CONNECT_HIDRAW); if (ret) { hid_err(hdev, "hw start failed\n"); return ret; } ret = hid_hw_open(hdev); if (ret) { hid_err(hdev, "hw open failed\n"); goto err_hid_stop; } ret = hid_hw_power(hdev, PM_HINT_FULLON); if (ret < 0) { hid_err(hdev, "power management error: %d\n", ret); goto err_hid_close; } ret = cp2112_hid_get(hdev, CP2112_GET_VERSION_INFO, buf, sizeof(buf), HID_FEATURE_REPORT); if (ret != sizeof(buf)) { hid_err(hdev, "error requesting version\n"); if (ret >= 0) ret = -EIO; goto err_power_normal; } hid_info(hdev, "Part Number: 0x%02X Device Version: 0x%02X\n", buf[1], buf[2]); ret = cp2112_hid_get(hdev, CP2112_SMBUS_CONFIG, (u8 *)&config, sizeof(config), HID_FEATURE_REPORT); if (ret != sizeof(config)) { hid_err(hdev, "error requesting SMBus config\n"); if (ret >= 0) ret = -EIO; goto err_power_normal; } config.retry_time = cpu_to_be16(1); ret = cp2112_hid_output(hdev, (u8 *)&config, sizeof(config), HID_FEATURE_REPORT); if (ret != sizeof(config)) { hid_err(hdev, "error setting SMBus config\n"); if (ret >= 0) ret = -EIO; goto err_power_normal; } hid_set_drvdata(hdev, (void *)dev); dev->hdev = hdev; dev->adap.owner = THIS_MODULE; dev->adap.class = I2C_CLASS_HWMON; dev->adap.algo = &smbus_algorithm; dev->adap.algo_data = dev; dev->adap.dev.parent = &hdev->dev; snprintf(dev->adap.name, sizeof(dev->adap.name), "CP2112 SMBus Bridge on hiddev%d", hdev->minor); dev->hwversion = buf[2]; init_waitqueue_head(&dev->wait); hid_device_io_start(hdev); ret = i2c_add_adapter(&dev->adap); hid_device_io_stop(hdev); if (ret) { hid_err(hdev, "error registering i2c adapter\n"); goto err_power_normal; } hid_dbg(hdev, "adapter registered\n"); dev->gc.label = "cp2112_gpio"; dev->gc.direction_input = cp2112_gpio_direction_input; dev->gc.direction_output = cp2112_gpio_direction_output; dev->gc.set = cp2112_gpio_set; dev->gc.get = cp2112_gpio_get; dev->gc.base = -1; dev->gc.ngpio = 8; dev->gc.can_sleep = 1; dev->gc.parent = &hdev->dev; ret = gpiochip_add_data(&dev->gc, dev); if (ret < 0) { hid_err(hdev, "error registering gpio chip\n"); goto err_free_i2c; } ret = sysfs_create_group(&hdev->dev.kobj, &cp2112_attr_group); if (ret < 0) { hid_err(hdev, "error creating sysfs attrs\n"); goto err_gpiochip_remove; } chmod_sysfs_attrs(hdev); hid_hw_power(hdev, PM_HINT_NORMAL); ret = gpiochip_irqchip_add(&dev->gc, &cp2112_gpio_irqchip, 0, handle_simple_irq, IRQ_TYPE_NONE); if (ret) { dev_err(dev->gc.parent, "failed to add IRQ chip\n"); goto err_sysfs_remove; } return ret; err_sysfs_remove: sysfs_remove_group(&hdev->dev.kobj, &cp2112_attr_group); err_gpiochip_remove: gpiochip_remove(&dev->gc); err_free_i2c: i2c_del_adapter(&dev->adap); err_power_normal: hid_hw_power(hdev, PM_HINT_NORMAL); err_hid_close: hid_hw_close(hdev); err_hid_stop: hid_hw_stop(hdev); return ret; } static void cp2112_remove(struct hid_device *hdev) { struct cp2112_device *dev = hid_get_drvdata(hdev); int i; sysfs_remove_group(&hdev->dev.kobj, &cp2112_attr_group); i2c_del_adapter(&dev->adap); if (dev->gpio_poll) { dev->gpio_poll = false; cancel_delayed_work_sync(&dev->gpio_poll_worker); 
} for (i = 0; i < ARRAY_SIZE(dev->desc); i++) { gpiochip_unlock_as_irq(&dev->gc, i); gpiochip_free_own_desc(dev->desc[i]); } gpiochip_remove(&dev->gc); /* i2c_del_adapter has finished removing all i2c devices from our * adapter. Well behaved devices should no longer call our cp2112_xfer * and should have waited for any pending calls to finish. It has also * waited for device_unregister(&adap->dev) to complete. Therefore we * can safely free our struct cp2112_device. */ hid_hw_close(hdev); hid_hw_stop(hdev); } static int cp2112_raw_event(struct hid_device *hdev, struct hid_report *report, u8 *data, int size) { struct cp2112_device *dev = hid_get_drvdata(hdev); struct cp2112_xfer_status_report *xfer = (void *)data; switch (data[0]) { case CP2112_TRANSFER_STATUS_RESPONSE: hid_dbg(hdev, "xfer status: %02x %02x %04x %04x\n", xfer->status0, xfer->status1, be16_to_cpu(xfer->retries), be16_to_cpu(xfer->length)); switch (xfer->status0) { case STATUS0_IDLE: dev->xfer_status = -EAGAIN; break; case STATUS0_BUSY: dev->xfer_status = -EBUSY; break; case STATUS0_COMPLETE: dev->xfer_status = be16_to_cpu(xfer->length); break; case STATUS0_ERROR: switch (xfer->status1) { case STATUS1_TIMEOUT_NACK: case STATUS1_TIMEOUT_BUS: dev->xfer_status = -ETIMEDOUT; break; default: dev->xfer_status = -EIO; break; } break; default: dev->xfer_status = -EINVAL; break; } atomic_set(&dev->xfer_avail, 1); break; case CP2112_DATA_READ_RESPONSE: hid_dbg(hdev, "read response: %02x %02x\n", data[1], data[2]); dev->read_length = data[2]; if (dev->read_length > sizeof(dev->read_data)) dev->read_length = sizeof(dev->read_data); memcpy(dev->read_data, &data[3], dev->read_length); atomic_set(&dev->read_avail, 1); break; default: hid_err(hdev, "unknown report\n"); return 0; } wake_up_interruptible(&dev->wait); return 1; } static struct hid_driver cp2112_driver = { .name = "cp2112", .id_table = cp2112_devices, .probe = cp2112_probe, .remove = cp2112_remove, .raw_event = cp2112_raw_event, }; module_hid_driver(cp2112_driver); MODULE_DESCRIPTION("Silicon Labs HID USB to SMBus master bridge"); MODULE_AUTHOR("David Barksdale <dbarksdale@uplogix.com>"); MODULE_LICENSE("GPL");
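/*
 * Editor's note (not part of the original driver): once cp2112_probe()
 * has called i2c_add_adapter(), the bridge appears as an ordinary I2C
 * bus whose transfers are served by cp2112_i2c_xfer()/cp2112_xfer(), and
 * it can be exercised from user space through the i2c-dev character
 * device. The sketch below is a minimal, hedged example of reading one
 * byte-sized register over that bus; the adapter number (/dev/i2c-1) and
 * the slave address (0x50) are assumptions for illustration only.
 */
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/i2c-dev.h>

int main(void)
{
	const char *dev = "/dev/i2c-1";	/* assumed adapter number */
	unsigned char reg = 0x00;	/* register to read */
	unsigned char val;
	int fd;

	fd = open(dev, O_RDWR);
	if (fd < 0) {
		perror("open");
		return EXIT_FAILURE;
	}

	/* bind this fd to the slave at address 0x50 (assumed) */
	if (ioctl(fd, I2C_SLAVE, 0x50) < 0) {
		perror("ioctl(I2C_SLAVE)");
		close(fd);
		return EXIT_FAILURE;
	}

	/*
	 * write() then read() are two separate single-message transfers,
	 * each handled by cp2112_i2c_xfer() with num == 1; a combined
	 * write-then-read with repeated start (supported for hwversion > 1)
	 * would instead be issued through the I2C_RDWR ioctl.
	 */
	if (write(fd, &reg, 1) != 1 || read(fd, &val, 1) != 1) {
		perror("i2c transfer");
		close(fd);
		return EXIT_FAILURE;
	}

	printf("reg 0x%02x = 0x%02x\n", reg, val);
	close(fd);
	return EXIT_SUCCESS;
}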
./CrossVul/dataset_final_sorted/CWE-404/c/good_3320_0
crossvul-cpp_data_good_3351_3
/* * XDR support for nfsd/protocol version 3. * * Copyright (C) 1995, 1996, 1997 Olaf Kirch <okir@monad.swb.de> * * 2003-08-09 Jamie Lokier: Use htonl() for nanoseconds, not htons()! */ #include <linux/namei.h> #include <linux/sunrpc/svc_xprt.h> #include "xdr3.h" #include "auth.h" #include "netns.h" #include "vfs.h" #define NFSDDBG_FACILITY NFSDDBG_XDR /* * Mapping of S_IF* types to NFS file types */ static u32 nfs3_ftypes[] = { NF3NON, NF3FIFO, NF3CHR, NF3BAD, NF3DIR, NF3BAD, NF3BLK, NF3BAD, NF3REG, NF3BAD, NF3LNK, NF3BAD, NF3SOCK, NF3BAD, NF3LNK, NF3BAD, }; /* * XDR functions for basic NFS types */ static __be32 * encode_time3(__be32 *p, struct timespec *time) { *p++ = htonl((u32) time->tv_sec); *p++ = htonl(time->tv_nsec); return p; } static __be32 * decode_time3(__be32 *p, struct timespec *time) { time->tv_sec = ntohl(*p++); time->tv_nsec = ntohl(*p++); return p; } static __be32 * decode_fh(__be32 *p, struct svc_fh *fhp) { unsigned int size; fh_init(fhp, NFS3_FHSIZE); size = ntohl(*p++); if (size > NFS3_FHSIZE) return NULL; memcpy(&fhp->fh_handle.fh_base, p, size); fhp->fh_handle.fh_size = size; return p + XDR_QUADLEN(size); } /* Helper function for NFSv3 ACL code */ __be32 *nfs3svc_decode_fh(__be32 *p, struct svc_fh *fhp) { return decode_fh(p, fhp); } static __be32 * encode_fh(__be32 *p, struct svc_fh *fhp) { unsigned int size = fhp->fh_handle.fh_size; *p++ = htonl(size); if (size) p[XDR_QUADLEN(size)-1]=0; memcpy(p, &fhp->fh_handle.fh_base, size); return p + XDR_QUADLEN(size); } /* * Decode a file name and make sure that the path contains * no slashes or null bytes. */ static __be32 * decode_filename(__be32 *p, char **namp, unsigned int *lenp) { char *name; unsigned int i; if ((p = xdr_decode_string_inplace(p, namp, lenp, NFS3_MAXNAMLEN)) != NULL) { for (i = 0, name = *namp; i < *lenp; i++, name++) { if (*name == '\0' || *name == '/') return NULL; } } return p; } static __be32 * decode_sattr3(__be32 *p, struct iattr *iap) { u32 tmp; iap->ia_valid = 0; if (*p++) { iap->ia_valid |= ATTR_MODE; iap->ia_mode = ntohl(*p++); } if (*p++) { iap->ia_uid = make_kuid(&init_user_ns, ntohl(*p++)); if (uid_valid(iap->ia_uid)) iap->ia_valid |= ATTR_UID; } if (*p++) { iap->ia_gid = make_kgid(&init_user_ns, ntohl(*p++)); if (gid_valid(iap->ia_gid)) iap->ia_valid |= ATTR_GID; } if (*p++) { u64 newsize; iap->ia_valid |= ATTR_SIZE; p = xdr_decode_hyper(p, &newsize); iap->ia_size = min_t(u64, newsize, NFS_OFFSET_MAX); } if ((tmp = ntohl(*p++)) == 1) { /* set to server time */ iap->ia_valid |= ATTR_ATIME; } else if (tmp == 2) { /* set to client time */ iap->ia_valid |= ATTR_ATIME | ATTR_ATIME_SET; iap->ia_atime.tv_sec = ntohl(*p++); iap->ia_atime.tv_nsec = ntohl(*p++); } if ((tmp = ntohl(*p++)) == 1) { /* set to server time */ iap->ia_valid |= ATTR_MTIME; } else if (tmp == 2) { /* set to client time */ iap->ia_valid |= ATTR_MTIME | ATTR_MTIME_SET; iap->ia_mtime.tv_sec = ntohl(*p++); iap->ia_mtime.tv_nsec = ntohl(*p++); } return p; } static __be32 *encode_fsid(__be32 *p, struct svc_fh *fhp) { u64 f; switch(fsid_source(fhp)) { default: case FSIDSOURCE_DEV: p = xdr_encode_hyper(p, (u64)huge_encode_dev (fhp->fh_dentry->d_sb->s_dev)); break; case FSIDSOURCE_FSID: p = xdr_encode_hyper(p, (u64) fhp->fh_export->ex_fsid); break; case FSIDSOURCE_UUID: f = ((u64*)fhp->fh_export->ex_uuid)[0]; f ^= ((u64*)fhp->fh_export->ex_uuid)[1]; p = xdr_encode_hyper(p, f); break; } return p; } static __be32 * encode_fattr3(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp, struct kstat *stat) { *p++ = 
htonl(nfs3_ftypes[(stat->mode & S_IFMT) >> 12]); *p++ = htonl((u32) (stat->mode & S_IALLUGO)); *p++ = htonl((u32) stat->nlink); *p++ = htonl((u32) from_kuid(&init_user_ns, stat->uid)); *p++ = htonl((u32) from_kgid(&init_user_ns, stat->gid)); if (S_ISLNK(stat->mode) && stat->size > NFS3_MAXPATHLEN) { p = xdr_encode_hyper(p, (u64) NFS3_MAXPATHLEN); } else { p = xdr_encode_hyper(p, (u64) stat->size); } p = xdr_encode_hyper(p, ((u64)stat->blocks) << 9); *p++ = htonl((u32) MAJOR(stat->rdev)); *p++ = htonl((u32) MINOR(stat->rdev)); p = encode_fsid(p, fhp); p = xdr_encode_hyper(p, stat->ino); p = encode_time3(p, &stat->atime); p = encode_time3(p, &stat->mtime); p = encode_time3(p, &stat->ctime); return p; } static __be32 * encode_saved_post_attr(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp) { /* Attributes to follow */ *p++ = xdr_one; return encode_fattr3(rqstp, p, fhp, &fhp->fh_post_attr); } /* * Encode post-operation attributes. * The inode may be NULL if the call failed because of a stale file * handle. In this case, no attributes are returned. */ static __be32 * encode_post_op_attr(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp) { struct dentry *dentry = fhp->fh_dentry; if (dentry && d_really_is_positive(dentry)) { __be32 err; struct kstat stat; err = fh_getattr(fhp, &stat); if (!err) { *p++ = xdr_one; /* attributes follow */ lease_get_mtime(d_inode(dentry), &stat.mtime); return encode_fattr3(rqstp, p, fhp, &stat); } } *p++ = xdr_zero; return p; } /* Helper for NFSv3 ACLs */ __be32 * nfs3svc_encode_post_op_attr(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp) { return encode_post_op_attr(rqstp, p, fhp); } /* * Enocde weak cache consistency data */ static __be32 * encode_wcc_data(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp) { struct dentry *dentry = fhp->fh_dentry; if (dentry && d_really_is_positive(dentry) && fhp->fh_post_saved) { if (fhp->fh_pre_saved) { *p++ = xdr_one; p = xdr_encode_hyper(p, (u64) fhp->fh_pre_size); p = encode_time3(p, &fhp->fh_pre_mtime); p = encode_time3(p, &fhp->fh_pre_ctime); } else { *p++ = xdr_zero; } return encode_saved_post_attr(rqstp, p, fhp); } /* no pre- or post-attrs */ *p++ = xdr_zero; return encode_post_op_attr(rqstp, p, fhp); } /* * Fill in the post_op attr for the wcc data */ void fill_post_wcc(struct svc_fh *fhp) { __be32 err; if (fhp->fh_post_saved) printk("nfsd: inode locked twice during operation.\n"); err = fh_getattr(fhp, &fhp->fh_post_attr); fhp->fh_post_change = d_inode(fhp->fh_dentry)->i_version; if (err) { fhp->fh_post_saved = false; /* Grab the ctime anyway - set_change_info might use it */ fhp->fh_post_attr.ctime = d_inode(fhp->fh_dentry)->i_ctime; } else fhp->fh_post_saved = true; } /* * XDR decode functions */ int nfs3svc_decode_fhandle(struct svc_rqst *rqstp, __be32 *p, struct nfsd_fhandle *args) { p = decode_fh(p, &args->fh); if (!p) return 0; return xdr_argsize_check(rqstp, p); } int nfs3svc_decode_sattrargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_sattrargs *args) { p = decode_fh(p, &args->fh); if (!p) return 0; p = decode_sattr3(p, &args->attrs); if ((args->check_guard = ntohl(*p++)) != 0) { struct timespec time; p = decode_time3(p, &time); args->guardtime = time.tv_sec; } return xdr_argsize_check(rqstp, p); } int nfs3svc_decode_diropargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_diropargs *args) { if (!(p = decode_fh(p, &args->fh)) || !(p = decode_filename(p, &args->name, &args->len))) return 0; return xdr_argsize_check(rqstp, p); } int nfs3svc_decode_accessargs(struct svc_rqst *rqstp, __be32 
*p, struct nfsd3_accessargs *args) { p = decode_fh(p, &args->fh); if (!p) return 0; args->access = ntohl(*p++); return xdr_argsize_check(rqstp, p); } int nfs3svc_decode_readargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_readargs *args) { unsigned int len; int v; u32 max_blocksize = svc_max_payload(rqstp); p = decode_fh(p, &args->fh); if (!p) return 0; p = xdr_decode_hyper(p, &args->offset); args->count = ntohl(*p++); if (!xdr_argsize_check(rqstp, p)) return 0; len = min(args->count, max_blocksize); /* set up the kvec */ v=0; while (len > 0) { struct page *p = *(rqstp->rq_next_page++); rqstp->rq_vec[v].iov_base = page_address(p); rqstp->rq_vec[v].iov_len = min_t(unsigned int, len, PAGE_SIZE); len -= rqstp->rq_vec[v].iov_len; v++; } args->vlen = v; return 1; } int nfs3svc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_writeargs *args) { unsigned int len, v, hdr, dlen; u32 max_blocksize = svc_max_payload(rqstp); struct kvec *head = rqstp->rq_arg.head; struct kvec *tail = rqstp->rq_arg.tail; p = decode_fh(p, &args->fh); if (!p) return 0; p = xdr_decode_hyper(p, &args->offset); args->count = ntohl(*p++); args->stable = ntohl(*p++); len = args->len = ntohl(*p++); if ((void *)p > head->iov_base + head->iov_len) return 0; /* * The count must equal the amount of data passed. */ if (args->count != args->len) return 0; /* * Check to make sure that we got the right number of * bytes. */ hdr = (void*)p - head->iov_base; dlen = head->iov_len + rqstp->rq_arg.page_len + tail->iov_len - hdr; /* * Round the length of the data which was specified up to * the next multiple of XDR units and then compare that * against the length which was actually received. * Note that when RPCSEC/GSS (for example) is used, the * data buffer can be padded so dlen might be larger * than required. It must never be smaller. */ if (dlen < XDR_QUADLEN(len)*4) return 0; if (args->count > max_blocksize) { args->count = max_blocksize; len = args->len = max_blocksize; } rqstp->rq_vec[0].iov_base = (void*)p; rqstp->rq_vec[0].iov_len = head->iov_len - hdr; v = 0; while (len > rqstp->rq_vec[v].iov_len) { len -= rqstp->rq_vec[v].iov_len; v++; rqstp->rq_vec[v].iov_base = page_address(rqstp->rq_pages[v]); rqstp->rq_vec[v].iov_len = PAGE_SIZE; } rqstp->rq_vec[v].iov_len = len; args->vlen = v + 1; return 1; } int nfs3svc_decode_createargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_createargs *args) { if (!(p = decode_fh(p, &args->fh)) || !(p = decode_filename(p, &args->name, &args->len))) return 0; switch (args->createmode = ntohl(*p++)) { case NFS3_CREATE_UNCHECKED: case NFS3_CREATE_GUARDED: p = decode_sattr3(p, &args->attrs); break; case NFS3_CREATE_EXCLUSIVE: args->verf = p; p += 2; break; default: return 0; } return xdr_argsize_check(rqstp, p); } int nfs3svc_decode_mkdirargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_createargs *args) { if (!(p = decode_fh(p, &args->fh)) || !(p = decode_filename(p, &args->name, &args->len))) return 0; p = decode_sattr3(p, &args->attrs); return xdr_argsize_check(rqstp, p); } int nfs3svc_decode_symlinkargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_symlinkargs *args) { unsigned int len, avail; char *old, *new; struct kvec *vec; if (!(p = decode_fh(p, &args->ffh)) || !(p = decode_filename(p, &args->fname, &args->flen)) ) return 0; p = decode_sattr3(p, &args->attrs); /* now decode the pathname, which might be larger than the first page. 
* As we have to check for nul's anyway, we copy it into a new page * This page appears in the rq_res.pages list, but as pages_len is always * 0, it won't get in the way */ len = ntohl(*p++); if (len == 0 || len > NFS3_MAXPATHLEN || len >= PAGE_SIZE) return 0; args->tname = new = page_address(*(rqstp->rq_next_page++)); args->tlen = len; /* first copy and check from the first page */ old = (char*)p; vec = &rqstp->rq_arg.head[0]; if ((void *)old > vec->iov_base + vec->iov_len) return 0; avail = vec->iov_len - (old - (char*)vec->iov_base); while (len && avail && *old) { *new++ = *old++; len--; avail--; } /* now copy next page if there is one */ if (len && !avail && rqstp->rq_arg.page_len) { avail = min_t(unsigned int, rqstp->rq_arg.page_len, PAGE_SIZE); old = page_address(rqstp->rq_arg.pages[0]); } while (len && avail && *old) { *new++ = *old++; len--; avail--; } *new = '\0'; if (len) return 0; return 1; } int nfs3svc_decode_mknodargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_mknodargs *args) { if (!(p = decode_fh(p, &args->fh)) || !(p = decode_filename(p, &args->name, &args->len))) return 0; args->ftype = ntohl(*p++); if (args->ftype == NF3BLK || args->ftype == NF3CHR || args->ftype == NF3SOCK || args->ftype == NF3FIFO) p = decode_sattr3(p, &args->attrs); if (args->ftype == NF3BLK || args->ftype == NF3CHR) { args->major = ntohl(*p++); args->minor = ntohl(*p++); } return xdr_argsize_check(rqstp, p); } int nfs3svc_decode_renameargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_renameargs *args) { if (!(p = decode_fh(p, &args->ffh)) || !(p = decode_filename(p, &args->fname, &args->flen)) || !(p = decode_fh(p, &args->tfh)) || !(p = decode_filename(p, &args->tname, &args->tlen))) return 0; return xdr_argsize_check(rqstp, p); } int nfs3svc_decode_readlinkargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_readlinkargs *args) { p = decode_fh(p, &args->fh); if (!p) return 0; if (!xdr_argsize_check(rqstp, p)) return 0; args->buffer = page_address(*(rqstp->rq_next_page++)); return 1; } int nfs3svc_decode_linkargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_linkargs *args) { if (!(p = decode_fh(p, &args->ffh)) || !(p = decode_fh(p, &args->tfh)) || !(p = decode_filename(p, &args->tname, &args->tlen))) return 0; return xdr_argsize_check(rqstp, p); } int nfs3svc_decode_readdirargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_readdirargs *args) { p = decode_fh(p, &args->fh); if (!p) return 0; p = xdr_decode_hyper(p, &args->cookie); args->verf = p; p += 2; args->dircount = ~0; args->count = ntohl(*p++); if (!xdr_argsize_check(rqstp, p)) return 0; args->count = min_t(u32, args->count, PAGE_SIZE); args->buffer = page_address(*(rqstp->rq_next_page++)); return 1; } int nfs3svc_decode_readdirplusargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_readdirargs *args) { int len; u32 max_blocksize = svc_max_payload(rqstp); p = decode_fh(p, &args->fh); if (!p) return 0; p = xdr_decode_hyper(p, &args->cookie); args->verf = p; p += 2; args->dircount = ntohl(*p++); args->count = ntohl(*p++); if (!xdr_argsize_check(rqstp, p)) return 0; len = args->count = min(args->count, max_blocksize); while (len > 0) { struct page *p = *(rqstp->rq_next_page++); if (!args->buffer) args->buffer = page_address(p); len -= PAGE_SIZE; } return 1; } int nfs3svc_decode_commitargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_commitargs *args) { p = decode_fh(p, &args->fh); if (!p) return 0; p = xdr_decode_hyper(p, &args->offset); args->count = ntohl(*p++); return xdr_argsize_check(rqstp, p); } /* * XDR encode functions */ /* 
* There must be an encoding function for void results so svc_process * will work properly. */ int nfs3svc_encode_voidres(struct svc_rqst *rqstp, __be32 *p, void *dummy) { return xdr_ressize_check(rqstp, p); } /* GETATTR */ int nfs3svc_encode_attrstat(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_attrstat *resp) { if (resp->status == 0) { lease_get_mtime(d_inode(resp->fh.fh_dentry), &resp->stat.mtime); p = encode_fattr3(rqstp, p, &resp->fh, &resp->stat); } return xdr_ressize_check(rqstp, p); } /* SETATTR, REMOVE, RMDIR */ int nfs3svc_encode_wccstat(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_attrstat *resp) { p = encode_wcc_data(rqstp, p, &resp->fh); return xdr_ressize_check(rqstp, p); } /* LOOKUP */ int nfs3svc_encode_diropres(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_diropres *resp) { if (resp->status == 0) { p = encode_fh(p, &resp->fh); p = encode_post_op_attr(rqstp, p, &resp->fh); } p = encode_post_op_attr(rqstp, p, &resp->dirfh); return xdr_ressize_check(rqstp, p); } /* ACCESS */ int nfs3svc_encode_accessres(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_accessres *resp) { p = encode_post_op_attr(rqstp, p, &resp->fh); if (resp->status == 0) *p++ = htonl(resp->access); return xdr_ressize_check(rqstp, p); } /* READLINK */ int nfs3svc_encode_readlinkres(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_readlinkres *resp) { p = encode_post_op_attr(rqstp, p, &resp->fh); if (resp->status == 0) { *p++ = htonl(resp->len); xdr_ressize_check(rqstp, p); rqstp->rq_res.page_len = resp->len; if (resp->len & 3) { /* need to pad the tail */ rqstp->rq_res.tail[0].iov_base = p; *p = 0; rqstp->rq_res.tail[0].iov_len = 4 - (resp->len&3); } return 1; } else return xdr_ressize_check(rqstp, p); } /* READ */ int nfs3svc_encode_readres(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_readres *resp) { p = encode_post_op_attr(rqstp, p, &resp->fh); if (resp->status == 0) { *p++ = htonl(resp->count); *p++ = htonl(resp->eof); *p++ = htonl(resp->count); /* xdr opaque count */ xdr_ressize_check(rqstp, p); /* now update rqstp->rq_res to reflect data as well */ rqstp->rq_res.page_len = resp->count; if (resp->count & 3) { /* need to pad the tail */ rqstp->rq_res.tail[0].iov_base = p; *p = 0; rqstp->rq_res.tail[0].iov_len = 4 - (resp->count & 3); } return 1; } else return xdr_ressize_check(rqstp, p); } /* WRITE */ int nfs3svc_encode_writeres(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_writeres *resp) { struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); p = encode_wcc_data(rqstp, p, &resp->fh); if (resp->status == 0) { *p++ = htonl(resp->count); *p++ = htonl(resp->committed); *p++ = htonl(nn->nfssvc_boot.tv_sec); *p++ = htonl(nn->nfssvc_boot.tv_usec); } return xdr_ressize_check(rqstp, p); } /* CREATE, MKDIR, SYMLINK, MKNOD */ int nfs3svc_encode_createres(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_diropres *resp) { if (resp->status == 0) { *p++ = xdr_one; p = encode_fh(p, &resp->fh); p = encode_post_op_attr(rqstp, p, &resp->fh); } p = encode_wcc_data(rqstp, p, &resp->dirfh); return xdr_ressize_check(rqstp, p); } /* RENAME */ int nfs3svc_encode_renameres(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_renameres *resp) { p = encode_wcc_data(rqstp, p, &resp->ffh); p = encode_wcc_data(rqstp, p, &resp->tfh); return xdr_ressize_check(rqstp, p); } /* LINK */ int nfs3svc_encode_linkres(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_linkres *resp) { p = encode_post_op_attr(rqstp, p, &resp->fh); p = encode_wcc_data(rqstp, p, &resp->tfh); return xdr_ressize_check(rqstp, p); } /* READDIR */ int 
nfs3svc_encode_readdirres(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_readdirres *resp) { p = encode_post_op_attr(rqstp, p, &resp->fh); if (resp->status == 0) { /* stupid readdir cookie */ memcpy(p, resp->verf, 8); p += 2; xdr_ressize_check(rqstp, p); if (rqstp->rq_res.head[0].iov_len + (2<<2) > PAGE_SIZE) return 1; /*No room for trailer */ rqstp->rq_res.page_len = (resp->count) << 2; /* add the 'tail' to the end of the 'head' page - page 0. */ rqstp->rq_res.tail[0].iov_base = p; *p++ = 0; /* no more entries */ *p++ = htonl(resp->common.err == nfserr_eof); rqstp->rq_res.tail[0].iov_len = 2<<2; return 1; } else return xdr_ressize_check(rqstp, p); } static __be32 * encode_entry_baggage(struct nfsd3_readdirres *cd, __be32 *p, const char *name, int namlen, u64 ino) { *p++ = xdr_one; /* mark entry present */ p = xdr_encode_hyper(p, ino); /* file id */ p = xdr_encode_array(p, name, namlen);/* name length & name */ cd->offset = p; /* remember pointer */ p = xdr_encode_hyper(p, NFS_OFFSET_MAX);/* offset of next entry */ return p; } static __be32 compose_entry_fh(struct nfsd3_readdirres *cd, struct svc_fh *fhp, const char *name, int namlen, u64 ino) { struct svc_export *exp; struct dentry *dparent, *dchild; __be32 rv = nfserr_noent; dparent = cd->fh.fh_dentry; exp = cd->fh.fh_export; if (isdotent(name, namlen)) { if (namlen == 2) { dchild = dget_parent(dparent); /* filesystem root - cannot return filehandle for ".." */ if (dchild == dparent) goto out; } else dchild = dget(dparent); } else dchild = lookup_one_len_unlocked(name, dparent, namlen); if (IS_ERR(dchild)) return rv; if (d_mountpoint(dchild)) goto out; if (d_really_is_negative(dchild)) goto out; if (dchild->d_inode->i_ino != ino) goto out; rv = fh_compose(fhp, exp, dchild, &cd->fh); out: dput(dchild); return rv; } static __be32 *encode_entryplus_baggage(struct nfsd3_readdirres *cd, __be32 *p, const char *name, int namlen, u64 ino) { struct svc_fh *fh = &cd->scratch; __be32 err; fh_init(fh, NFS3_FHSIZE); err = compose_entry_fh(cd, fh, name, namlen, ino); if (err) { *p++ = 0; *p++ = 0; goto out; } p = encode_post_op_attr(cd->rqstp, p, fh); *p++ = xdr_one; /* yes, a file handle follows */ p = encode_fh(p, fh); out: fh_put(fh); return p; } /* * Encode a directory entry. This one works for both normal readdir * and readdirplus. * The normal readdir reply requires 2 (fileid) + 1 (stringlen) * + string + 2 (cookie) + 1 (next) words, i.e. 6 + strlen. * * The readdirplus baggage is 1+21 words for post_op_attr, plus the * file handle. */ #define NFS3_ENTRY_BAGGAGE (2 + 1 + 2 + 1) #define NFS3_ENTRYPLUS_BAGGAGE (1 + 21 + 1 + (NFS3_FHSIZE >> 2)) static int encode_entry(struct readdir_cd *ccd, const char *name, int namlen, loff_t offset, u64 ino, unsigned int d_type, int plus) { struct nfsd3_readdirres *cd = container_of(ccd, struct nfsd3_readdirres, common); __be32 *p = cd->buffer; caddr_t curr_page_addr = NULL; struct page ** page; int slen; /* string (name) length */ int elen; /* estimated entry length in words */ int num_entry_words = 0; /* actual number of words */ if (cd->offset) { u64 offset64 = offset; if (unlikely(cd->offset1)) { /* we ended up with offset on a page boundary */ *cd->offset = htonl(offset64 >> 32); *cd->offset1 = htonl(offset64 & 0xffffffff); cd->offset1 = NULL; } else { xdr_encode_hyper(cd->offset, offset64); } } /* dprintk("encode_entry(%.*s @%ld%s)\n", namlen, name, (long) offset, plus? 
" plus" : ""); */ /* truncate filename if too long */ namlen = min(namlen, NFS3_MAXNAMLEN); slen = XDR_QUADLEN(namlen); elen = slen + NFS3_ENTRY_BAGGAGE + (plus? NFS3_ENTRYPLUS_BAGGAGE : 0); if (cd->buflen < elen) { cd->common.err = nfserr_toosmall; return -EINVAL; } /* determine which page in rq_respages[] we are currently filling */ for (page = cd->rqstp->rq_respages + 1; page < cd->rqstp->rq_next_page; page++) { curr_page_addr = page_address(*page); if (((caddr_t)cd->buffer >= curr_page_addr) && ((caddr_t)cd->buffer < curr_page_addr + PAGE_SIZE)) break; } if ((caddr_t)(cd->buffer + elen) < (curr_page_addr + PAGE_SIZE)) { /* encode entry in current page */ p = encode_entry_baggage(cd, p, name, namlen, ino); if (plus) p = encode_entryplus_baggage(cd, p, name, namlen, ino); num_entry_words = p - cd->buffer; } else if (*(page+1) != NULL) { /* temporarily encode entry into next page, then move back to * current and next page in rq_respages[] */ __be32 *p1, *tmp; int len1, len2; /* grab next page for temporary storage of entry */ p1 = tmp = page_address(*(page+1)); p1 = encode_entry_baggage(cd, p1, name, namlen, ino); if (plus) p1 = encode_entryplus_baggage(cd, p1, name, namlen, ino); /* determine entry word length and lengths to go in pages */ num_entry_words = p1 - tmp; len1 = curr_page_addr + PAGE_SIZE - (caddr_t)cd->buffer; if ((num_entry_words << 2) < len1) { /* the actual number of words in the entry is less * than elen and can still fit in the current page */ memmove(p, tmp, num_entry_words << 2); p += num_entry_words; /* update offset */ cd->offset = cd->buffer + (cd->offset - tmp); } else { unsigned int offset_r = (cd->offset - tmp) << 2; /* update pointer to offset location. * This is a 64bit quantity, so we need to * deal with 3 cases: * - entirely in first page * - entirely in second page * - 4 bytes in each page */ if (offset_r + 8 <= len1) { cd->offset = p + (cd->offset - tmp); } else if (offset_r >= len1) { cd->offset -= len1 >> 2; } else { /* sitting on the fence */ BUG_ON(offset_r != len1 - 4); cd->offset = p + (cd->offset - tmp); cd->offset1 = tmp; } len2 = (num_entry_words << 2) - len1; /* move from temp page to current and next pages */ memmove(p, tmp, len1); memmove(tmp, (caddr_t)tmp+len1, len2); p = tmp + (len2 >> 2); } } else { cd->common.err = nfserr_toosmall; return -EINVAL; } cd->buflen -= num_entry_words; cd->buffer = p; cd->common.err = nfs_ok; return 0; } int nfs3svc_encode_entry(void *cd, const char *name, int namlen, loff_t offset, u64 ino, unsigned int d_type) { return encode_entry(cd, name, namlen, offset, ino, d_type, 0); } int nfs3svc_encode_entry_plus(void *cd, const char *name, int namlen, loff_t offset, u64 ino, unsigned int d_type) { return encode_entry(cd, name, namlen, offset, ino, d_type, 1); } /* FSSTAT */ int nfs3svc_encode_fsstatres(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_fsstatres *resp) { struct kstatfs *s = &resp->stats; u64 bs = s->f_bsize; *p++ = xdr_zero; /* no post_op_attr */ if (resp->status == 0) { p = xdr_encode_hyper(p, bs * s->f_blocks); /* total bytes */ p = xdr_encode_hyper(p, bs * s->f_bfree); /* free bytes */ p = xdr_encode_hyper(p, bs * s->f_bavail); /* user available bytes */ p = xdr_encode_hyper(p, s->f_files); /* total inodes */ p = xdr_encode_hyper(p, s->f_ffree); /* free inodes */ p = xdr_encode_hyper(p, s->f_ffree); /* user available inodes */ *p++ = htonl(resp->invarsec); /* mean unchanged time */ } return xdr_ressize_check(rqstp, p); } /* FSINFO */ int nfs3svc_encode_fsinfores(struct svc_rqst *rqstp, __be32 *p, 
struct nfsd3_fsinfores *resp) { *p++ = xdr_zero; /* no post_op_attr */ if (resp->status == 0) { *p++ = htonl(resp->f_rtmax); *p++ = htonl(resp->f_rtpref); *p++ = htonl(resp->f_rtmult); *p++ = htonl(resp->f_wtmax); *p++ = htonl(resp->f_wtpref); *p++ = htonl(resp->f_wtmult); *p++ = htonl(resp->f_dtpref); p = xdr_encode_hyper(p, resp->f_maxfilesize); *p++ = xdr_one; *p++ = xdr_zero; *p++ = htonl(resp->f_properties); } return xdr_ressize_check(rqstp, p); } /* PATHCONF */ int nfs3svc_encode_pathconfres(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_pathconfres *resp) { *p++ = xdr_zero; /* no post_op_attr */ if (resp->status == 0) { *p++ = htonl(resp->p_link_max); *p++ = htonl(resp->p_name_max); *p++ = htonl(resp->p_no_trunc); *p++ = htonl(resp->p_chown_restricted); *p++ = htonl(resp->p_case_insensitive); *p++ = htonl(resp->p_case_preserving); } return xdr_ressize_check(rqstp, p); } /* COMMIT */ int nfs3svc_encode_commitres(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_commitres *resp) { struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); p = encode_wcc_data(rqstp, p, &resp->fh); /* Write verifier */ if (resp->status == 0) { *p++ = htonl(nn->nfssvc_boot.tv_sec); *p++ = htonl(nn->nfssvc_boot.tv_usec); } return xdr_ressize_check(rqstp, p); } /* * XDR release functions */ int nfs3svc_release_fhandle(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_attrstat *resp) { fh_put(&resp->fh); return 1; } int nfs3svc_release_fhandle2(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_fhandle_pair *resp) { fh_put(&resp->fh1); fh_put(&resp->fh2); return 1; }
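/*
 * Illustrative sketch (editorial, not part of the original file): the READ
 * and READLINK encoders above pad variable-length opaque data out to the
 * next 4-byte XDR quad boundary by writing "4 - (len & 3)" zero bytes into
 * the response tail, and nfs3svc_decode_writeargs() checks the received
 * length against XDR_QUADLEN(len) * 4. A minimal user-space model of that
 * rounding, using hypothetical helper names that do not exist in nfsd, is:
 *
 *	static unsigned int xdr_pad_bytes(unsigned int len)
 *	{
 *		return (4 - (len & 3)) & 3;	// 0..3 bytes of zero fill
 *	}
 *
 *	static unsigned int xdr_quad_words(unsigned int len)
 *	{
 *		return (len + 3) >> 2;		// matches XDR_QUADLEN(len)
 *	}
 *
 * Worked example for encode_entry() above: a 10-byte name occupies
 * xdr_quad_words(10) = 3 words, so a plain readdir entry costs
 * NFS3_ENTRY_BAGGAGE (6) + 3 = 9 words, i.e. 36 bytes on the wire.
 */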
./CrossVul/dataset_final_sorted/CWE-404/c/good_3351_3
crossvul-cpp_data_bad_3351_5
/* * Copyright (c) 2001 The Regents of the University of Michigan. * All rights reserved. * * Kendrick Smith <kmsmith@umich.edu> * Andy Adamson <kandros@umich.edu> * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ #include <linux/file.h> #include <linux/fs.h> #include <linux/slab.h> #include <linux/namei.h> #include <linux/swap.h> #include <linux/pagemap.h> #include <linux/ratelimit.h> #include <linux/sunrpc/svcauth_gss.h> #include <linux/sunrpc/addr.h> #include <linux/jhash.h> #include "xdr4.h" #include "xdr4cb.h" #include "vfs.h" #include "current_stateid.h" #include "netns.h" #include "pnfs.h" #define NFSDDBG_FACILITY NFSDDBG_PROC #define all_ones {{~0,~0},~0} static const stateid_t one_stateid = { .si_generation = ~0, .si_opaque = all_ones, }; static const stateid_t zero_stateid = { /* all fields zero */ }; static const stateid_t currentstateid = { .si_generation = 1, }; static u64 current_sessionid = 1; #define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t))) #define ONE_STATEID(stateid) (!memcmp((stateid), &one_stateid, sizeof(stateid_t))) #define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t))) /* forward declarations */ static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner); static void nfs4_free_ol_stateid(struct nfs4_stid *stid); /* Locking: */ /* * Currently used for the del_recall_lru and file hash table. In an * effort to decrease the scope of the client_mutex, this spinlock may * eventually cover more: */ static DEFINE_SPINLOCK(state_lock); /* * A waitqueue for all in-progress 4.0 CLOSE operations that are waiting for * the refcount on the open stateid to drop. 
*/ static DECLARE_WAIT_QUEUE_HEAD(close_wq); static struct kmem_cache *openowner_slab; static struct kmem_cache *lockowner_slab; static struct kmem_cache *file_slab; static struct kmem_cache *stateid_slab; static struct kmem_cache *deleg_slab; static struct kmem_cache *odstate_slab; static void free_session(struct nfsd4_session *); static const struct nfsd4_callback_ops nfsd4_cb_recall_ops; static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops; static bool is_session_dead(struct nfsd4_session *ses) { return ses->se_flags & NFS4_SESSION_DEAD; } static __be32 mark_session_dead_locked(struct nfsd4_session *ses, int ref_held_by_me) { if (atomic_read(&ses->se_ref) > ref_held_by_me) return nfserr_jukebox; ses->se_flags |= NFS4_SESSION_DEAD; return nfs_ok; } static bool is_client_expired(struct nfs4_client *clp) { return clp->cl_time == 0; } static __be32 get_client_locked(struct nfs4_client *clp) { struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); lockdep_assert_held(&nn->client_lock); if (is_client_expired(clp)) return nfserr_expired; atomic_inc(&clp->cl_refcount); return nfs_ok; } /* must be called under the client_lock */ static inline void renew_client_locked(struct nfs4_client *clp) { struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); if (is_client_expired(clp)) { WARN_ON(1); printk("%s: client (clientid %08x/%08x) already expired\n", __func__, clp->cl_clientid.cl_boot, clp->cl_clientid.cl_id); return; } dprintk("renewing client (clientid %08x/%08x)\n", clp->cl_clientid.cl_boot, clp->cl_clientid.cl_id); list_move_tail(&clp->cl_lru, &nn->client_lru); clp->cl_time = get_seconds(); } static void put_client_renew_locked(struct nfs4_client *clp) { struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); lockdep_assert_held(&nn->client_lock); if (!atomic_dec_and_test(&clp->cl_refcount)) return; if (!is_client_expired(clp)) renew_client_locked(clp); } static void put_client_renew(struct nfs4_client *clp) { struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); if (!atomic_dec_and_lock(&clp->cl_refcount, &nn->client_lock)) return; if (!is_client_expired(clp)) renew_client_locked(clp); spin_unlock(&nn->client_lock); } static __be32 nfsd4_get_session_locked(struct nfsd4_session *ses) { __be32 status; if (is_session_dead(ses)) return nfserr_badsession; status = get_client_locked(ses->se_client); if (status) return status; atomic_inc(&ses->se_ref); return nfs_ok; } static void nfsd4_put_session_locked(struct nfsd4_session *ses) { struct nfs4_client *clp = ses->se_client; struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); lockdep_assert_held(&nn->client_lock); if (atomic_dec_and_test(&ses->se_ref) && is_session_dead(ses)) free_session(ses); put_client_renew_locked(clp); } static void nfsd4_put_session(struct nfsd4_session *ses) { struct nfs4_client *clp = ses->se_client; struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); spin_lock(&nn->client_lock); nfsd4_put_session_locked(ses); spin_unlock(&nn->client_lock); } static struct nfsd4_blocked_lock * find_blocked_lock(struct nfs4_lockowner *lo, struct knfsd_fh *fh, struct nfsd_net *nn) { struct nfsd4_blocked_lock *cur, *found = NULL; spin_lock(&nn->blocked_locks_lock); list_for_each_entry(cur, &lo->lo_blocked, nbl_list) { if (fh_match(fh, &cur->nbl_fh)) { list_del_init(&cur->nbl_list); list_del_init(&cur->nbl_lru); found = cur; break; } } spin_unlock(&nn->blocked_locks_lock); if (found) posix_unblock_lock(&found->nbl_lock); return found; } static struct nfsd4_blocked_lock * find_or_allocate_block(struct 
nfs4_lockowner *lo, struct knfsd_fh *fh, struct nfsd_net *nn) { struct nfsd4_blocked_lock *nbl; nbl = find_blocked_lock(lo, fh, nn); if (!nbl) { nbl= kmalloc(sizeof(*nbl), GFP_KERNEL); if (nbl) { fh_copy_shallow(&nbl->nbl_fh, fh); locks_init_lock(&nbl->nbl_lock); nfsd4_init_cb(&nbl->nbl_cb, lo->lo_owner.so_client, &nfsd4_cb_notify_lock_ops, NFSPROC4_CLNT_CB_NOTIFY_LOCK); } } return nbl; } static void free_blocked_lock(struct nfsd4_blocked_lock *nbl) { locks_release_private(&nbl->nbl_lock); kfree(nbl); } static int nfsd4_cb_notify_lock_done(struct nfsd4_callback *cb, struct rpc_task *task) { /* * Since this is just an optimization, we don't try very hard if it * turns out not to succeed. We'll requeue it on NFS4ERR_DELAY, and * just quit trying on anything else. */ switch (task->tk_status) { case -NFS4ERR_DELAY: rpc_delay(task, 1 * HZ); return 0; default: return 1; } } static void nfsd4_cb_notify_lock_release(struct nfsd4_callback *cb) { struct nfsd4_blocked_lock *nbl = container_of(cb, struct nfsd4_blocked_lock, nbl_cb); free_blocked_lock(nbl); } static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops = { .done = nfsd4_cb_notify_lock_done, .release = nfsd4_cb_notify_lock_release, }; static inline struct nfs4_stateowner * nfs4_get_stateowner(struct nfs4_stateowner *sop) { atomic_inc(&sop->so_count); return sop; } static int same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner) { return (sop->so_owner.len == owner->len) && 0 == memcmp(sop->so_owner.data, owner->data, owner->len); } static struct nfs4_openowner * find_openstateowner_str_locked(unsigned int hashval, struct nfsd4_open *open, struct nfs4_client *clp) { struct nfs4_stateowner *so; lockdep_assert_held(&clp->cl_lock); list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[hashval], so_strhash) { if (!so->so_is_open_owner) continue; if (same_owner_str(so, &open->op_owner)) return openowner(nfs4_get_stateowner(so)); } return NULL; } static struct nfs4_openowner * find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open, struct nfs4_client *clp) { struct nfs4_openowner *oo; spin_lock(&clp->cl_lock); oo = find_openstateowner_str_locked(hashval, open, clp); spin_unlock(&clp->cl_lock); return oo; } static inline u32 opaque_hashval(const void *ptr, int nbytes) { unsigned char *cptr = (unsigned char *) ptr; u32 x = 0; while (nbytes--) { x *= 37; x += *cptr++; } return x; } static void nfsd4_free_file_rcu(struct rcu_head *rcu) { struct nfs4_file *fp = container_of(rcu, struct nfs4_file, fi_rcu); kmem_cache_free(file_slab, fp); } void put_nfs4_file(struct nfs4_file *fi) { might_lock(&state_lock); if (atomic_dec_and_lock(&fi->fi_ref, &state_lock)) { hlist_del_rcu(&fi->fi_hash); spin_unlock(&state_lock); WARN_ON_ONCE(!list_empty(&fi->fi_clnt_odstate)); WARN_ON_ONCE(!list_empty(&fi->fi_delegations)); call_rcu(&fi->fi_rcu, nfsd4_free_file_rcu); } } static struct file * __nfs4_get_fd(struct nfs4_file *f, int oflag) { if (f->fi_fds[oflag]) return get_file(f->fi_fds[oflag]); return NULL; } static struct file * find_writeable_file_locked(struct nfs4_file *f) { struct file *ret; lockdep_assert_held(&f->fi_lock); ret = __nfs4_get_fd(f, O_WRONLY); if (!ret) ret = __nfs4_get_fd(f, O_RDWR); return ret; } static struct file * find_writeable_file(struct nfs4_file *f) { struct file *ret; spin_lock(&f->fi_lock); ret = find_writeable_file_locked(f); spin_unlock(&f->fi_lock); return ret; } static struct file *find_readable_file_locked(struct nfs4_file *f) { struct file *ret; lockdep_assert_held(&f->fi_lock); ret = 
__nfs4_get_fd(f, O_RDONLY); if (!ret) ret = __nfs4_get_fd(f, O_RDWR); return ret; } static struct file * find_readable_file(struct nfs4_file *f) { struct file *ret; spin_lock(&f->fi_lock); ret = find_readable_file_locked(f); spin_unlock(&f->fi_lock); return ret; } struct file * find_any_file(struct nfs4_file *f) { struct file *ret; spin_lock(&f->fi_lock); ret = __nfs4_get_fd(f, O_RDWR); if (!ret) { ret = __nfs4_get_fd(f, O_WRONLY); if (!ret) ret = __nfs4_get_fd(f, O_RDONLY); } spin_unlock(&f->fi_lock); return ret; } static atomic_long_t num_delegations; unsigned long max_delegations; /* * Open owner state (share locks) */ /* hash tables for lock and open owners */ #define OWNER_HASH_BITS 8 #define OWNER_HASH_SIZE (1 << OWNER_HASH_BITS) #define OWNER_HASH_MASK (OWNER_HASH_SIZE - 1) static unsigned int ownerstr_hashval(struct xdr_netobj *ownername) { unsigned int ret; ret = opaque_hashval(ownername->data, ownername->len); return ret & OWNER_HASH_MASK; } /* hash table for nfs4_file */ #define FILE_HASH_BITS 8 #define FILE_HASH_SIZE (1 << FILE_HASH_BITS) static unsigned int nfsd_fh_hashval(struct knfsd_fh *fh) { return jhash2(fh->fh_base.fh_pad, XDR_QUADLEN(fh->fh_size), 0); } static unsigned int file_hashval(struct knfsd_fh *fh) { return nfsd_fh_hashval(fh) & (FILE_HASH_SIZE - 1); } static struct hlist_head file_hashtbl[FILE_HASH_SIZE]; static void __nfs4_file_get_access(struct nfs4_file *fp, u32 access) { lockdep_assert_held(&fp->fi_lock); if (access & NFS4_SHARE_ACCESS_WRITE) atomic_inc(&fp->fi_access[O_WRONLY]); if (access & NFS4_SHARE_ACCESS_READ) atomic_inc(&fp->fi_access[O_RDONLY]); } static __be32 nfs4_file_get_access(struct nfs4_file *fp, u32 access) { lockdep_assert_held(&fp->fi_lock); /* Does this access mode make sense? */ if (access & ~NFS4_SHARE_ACCESS_BOTH) return nfserr_inval; /* Does it conflict with a deny mode already set? */ if ((access & fp->fi_share_deny) != 0) return nfserr_share_denied; __nfs4_file_get_access(fp, access); return nfs_ok; } static __be32 nfs4_file_check_deny(struct nfs4_file *fp, u32 deny) { /* Common case is that there is no deny mode. */ if (deny) { /* Does this deny mode make sense? */ if (deny & ~NFS4_SHARE_DENY_BOTH) return nfserr_inval; if ((deny & NFS4_SHARE_DENY_READ) && atomic_read(&fp->fi_access[O_RDONLY])) return nfserr_share_denied; if ((deny & NFS4_SHARE_DENY_WRITE) && atomic_read(&fp->fi_access[O_WRONLY])) return nfserr_share_denied; } return nfs_ok; } static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag) { might_lock(&fp->fi_lock); if (atomic_dec_and_lock(&fp->fi_access[oflag], &fp->fi_lock)) { struct file *f1 = NULL; struct file *f2 = NULL; swap(f1, fp->fi_fds[oflag]); if (atomic_read(&fp->fi_access[1 - oflag]) == 0) swap(f2, fp->fi_fds[O_RDWR]); spin_unlock(&fp->fi_lock); if (f1) fput(f1); if (f2) fput(f2); } } static void nfs4_file_put_access(struct nfs4_file *fp, u32 access) { WARN_ON_ONCE(access & ~NFS4_SHARE_ACCESS_BOTH); if (access & NFS4_SHARE_ACCESS_WRITE) __nfs4_file_put_access(fp, O_WRONLY); if (access & NFS4_SHARE_ACCESS_READ) __nfs4_file_put_access(fp, O_RDONLY); } /* * Allocate a new open/delegation state counter. This is needed for * pNFS for proper return on close semantics. * * Note that we only allocate it for pNFS-enabled exports, otherwise * all pointers to struct nfs4_clnt_odstate are always NULL. 
*/ static struct nfs4_clnt_odstate * alloc_clnt_odstate(struct nfs4_client *clp) { struct nfs4_clnt_odstate *co; co = kmem_cache_zalloc(odstate_slab, GFP_KERNEL); if (co) { co->co_client = clp; atomic_set(&co->co_odcount, 1); } return co; } static void hash_clnt_odstate_locked(struct nfs4_clnt_odstate *co) { struct nfs4_file *fp = co->co_file; lockdep_assert_held(&fp->fi_lock); list_add(&co->co_perfile, &fp->fi_clnt_odstate); } static inline void get_clnt_odstate(struct nfs4_clnt_odstate *co) { if (co) atomic_inc(&co->co_odcount); } static void put_clnt_odstate(struct nfs4_clnt_odstate *co) { struct nfs4_file *fp; if (!co) return; fp = co->co_file; if (atomic_dec_and_lock(&co->co_odcount, &fp->fi_lock)) { list_del(&co->co_perfile); spin_unlock(&fp->fi_lock); nfsd4_return_all_file_layouts(co->co_client, fp); kmem_cache_free(odstate_slab, co); } } static struct nfs4_clnt_odstate * find_or_hash_clnt_odstate(struct nfs4_file *fp, struct nfs4_clnt_odstate *new) { struct nfs4_clnt_odstate *co; struct nfs4_client *cl; if (!new) return NULL; cl = new->co_client; spin_lock(&fp->fi_lock); list_for_each_entry(co, &fp->fi_clnt_odstate, co_perfile) { if (co->co_client == cl) { get_clnt_odstate(co); goto out; } } co = new; co->co_file = fp; hash_clnt_odstate_locked(new); out: spin_unlock(&fp->fi_lock); return co; } struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab, void (*sc_free)(struct nfs4_stid *)) { struct nfs4_stid *stid; int new_id; stid = kmem_cache_zalloc(slab, GFP_KERNEL); if (!stid) return NULL; idr_preload(GFP_KERNEL); spin_lock(&cl->cl_lock); new_id = idr_alloc_cyclic(&cl->cl_stateids, stid, 0, 0, GFP_NOWAIT); spin_unlock(&cl->cl_lock); idr_preload_end(); if (new_id < 0) goto out_free; stid->sc_free = sc_free; stid->sc_client = cl; stid->sc_stateid.si_opaque.so_id = new_id; stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid; /* Will be incremented before return to client: */ atomic_set(&stid->sc_count, 1); spin_lock_init(&stid->sc_lock); /* * It shouldn't be a problem to reuse an opaque stateid value. * I don't think it is for 4.1. But with 4.0 I worry that, for * example, a stray write retransmission could be accepted by * the server when it should have been rejected. Therefore, * adopt a trick from the sctp code to attempt to maximize the * amount of time until an id is reused, by ensuring they always * "increase" (mod INT_MAX): */ return stid; out_free: kmem_cache_free(slab, stid); return NULL; } static struct nfs4_ol_stateid * nfs4_alloc_open_stateid(struct nfs4_client *clp) { struct nfs4_stid *stid; stid = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_ol_stateid); if (!stid) return NULL; return openlockstateid(stid); } static void nfs4_free_deleg(struct nfs4_stid *stid) { kmem_cache_free(deleg_slab, stid); atomic_long_dec(&num_delegations); } /* * When we recall a delegation, we should be careful not to hand it * out again straight away. * To ensure this we keep a pair of bloom filters ('new' and 'old') * in which the filehandles of recalled delegations are "stored". * If a filehandle appear in either filter, a delegation is blocked. * When a delegation is recalled, the filehandle is stored in the "new" * filter. * Every 30 seconds we swap the filters and clear the "new" one, * unless both are empty of course. * * Each filter is 256 bits. We hash the filehandle to 32bit and use the * low 3 bytes as hash-table indices. * * 'blocked_delegations_lock', which is always taken in block_delegations(), * is used to manage concurrent access. 
Testing does not need the lock * except when swapping the two filters. */ static DEFINE_SPINLOCK(blocked_delegations_lock); static struct bloom_pair { int entries, old_entries; time_t swap_time; int new; /* index into 'set' */ DECLARE_BITMAP(set[2], 256); } blocked_delegations; static int delegation_blocked(struct knfsd_fh *fh) { u32 hash; struct bloom_pair *bd = &blocked_delegations; if (bd->entries == 0) return 0; if (seconds_since_boot() - bd->swap_time > 30) { spin_lock(&blocked_delegations_lock); if (seconds_since_boot() - bd->swap_time > 30) { bd->entries -= bd->old_entries; bd->old_entries = bd->entries; memset(bd->set[bd->new], 0, sizeof(bd->set[0])); bd->new = 1-bd->new; bd->swap_time = seconds_since_boot(); } spin_unlock(&blocked_delegations_lock); } hash = jhash(&fh->fh_base, fh->fh_size, 0); if (test_bit(hash&255, bd->set[0]) && test_bit((hash>>8)&255, bd->set[0]) && test_bit((hash>>16)&255, bd->set[0])) return 1; if (test_bit(hash&255, bd->set[1]) && test_bit((hash>>8)&255, bd->set[1]) && test_bit((hash>>16)&255, bd->set[1])) return 1; return 0; } static void block_delegations(struct knfsd_fh *fh) { u32 hash; struct bloom_pair *bd = &blocked_delegations; hash = jhash(&fh->fh_base, fh->fh_size, 0); spin_lock(&blocked_delegations_lock); __set_bit(hash&255, bd->set[bd->new]); __set_bit((hash>>8)&255, bd->set[bd->new]); __set_bit((hash>>16)&255, bd->set[bd->new]); if (bd->entries == 0) bd->swap_time = seconds_since_boot(); bd->entries += 1; spin_unlock(&blocked_delegations_lock); } static struct nfs4_delegation * alloc_init_deleg(struct nfs4_client *clp, struct svc_fh *current_fh, struct nfs4_clnt_odstate *odstate) { struct nfs4_delegation *dp; long n; dprintk("NFSD alloc_init_deleg\n"); n = atomic_long_inc_return(&num_delegations); if (n < 0 || n > max_delegations) goto out_dec; if (delegation_blocked(&current_fh->fh_handle)) goto out_dec; dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab, nfs4_free_deleg)); if (dp == NULL) goto out_dec; /* * delegation seqid's are never incremented. 
The 4.1 special * meaning of seqid 0 isn't meaningful, really, but let's avoid * 0 anyway just for consistency and use 1: */ dp->dl_stid.sc_stateid.si_generation = 1; INIT_LIST_HEAD(&dp->dl_perfile); INIT_LIST_HEAD(&dp->dl_perclnt); INIT_LIST_HEAD(&dp->dl_recall_lru); dp->dl_clnt_odstate = odstate; get_clnt_odstate(odstate); dp->dl_type = NFS4_OPEN_DELEGATE_READ; dp->dl_retries = 1; nfsd4_init_cb(&dp->dl_recall, dp->dl_stid.sc_client, &nfsd4_cb_recall_ops, NFSPROC4_CLNT_CB_RECALL); return dp; out_dec: atomic_long_dec(&num_delegations); return NULL; } void nfs4_put_stid(struct nfs4_stid *s) { struct nfs4_file *fp = s->sc_file; struct nfs4_client *clp = s->sc_client; might_lock(&clp->cl_lock); if (!atomic_dec_and_lock(&s->sc_count, &clp->cl_lock)) { wake_up_all(&close_wq); return; } idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id); spin_unlock(&clp->cl_lock); s->sc_free(s); if (fp) put_nfs4_file(fp); } void nfs4_inc_and_copy_stateid(stateid_t *dst, struct nfs4_stid *stid) { stateid_t *src = &stid->sc_stateid; spin_lock(&stid->sc_lock); if (unlikely(++src->si_generation == 0)) src->si_generation = 1; memcpy(dst, src, sizeof(*dst)); spin_unlock(&stid->sc_lock); } static void nfs4_put_deleg_lease(struct nfs4_file *fp) { struct file *filp = NULL; spin_lock(&fp->fi_lock); if (fp->fi_deleg_file && --fp->fi_delegees == 0) swap(filp, fp->fi_deleg_file); spin_unlock(&fp->fi_lock); if (filp) { vfs_setlease(filp, F_UNLCK, NULL, (void **)&fp); fput(filp); } } void nfs4_unhash_stid(struct nfs4_stid *s) { s->sc_type = 0; } /** * nfs4_get_existing_delegation - Discover if this delegation already exists * @clp: a pointer to the nfs4_client we're granting a delegation to * @fp: a pointer to the nfs4_file we're granting a delegation on * * Return: * On success: NULL if an existing delegation was not found. * * On error: -EAGAIN if one was previously granted to this nfs4_client * for this nfs4_file. * */ static int nfs4_get_existing_delegation(struct nfs4_client *clp, struct nfs4_file *fp) { struct nfs4_delegation *searchdp = NULL; struct nfs4_client *searchclp = NULL; lockdep_assert_held(&state_lock); lockdep_assert_held(&fp->fi_lock); list_for_each_entry(searchdp, &fp->fi_delegations, dl_perfile) { searchclp = searchdp->dl_stid.sc_client; if (clp == searchclp) { return -EAGAIN; } } return 0; } /** * hash_delegation_locked - Add a delegation to the appropriate lists * @dp: a pointer to the nfs4_delegation we are adding. * @fp: a pointer to the nfs4_file we're granting a delegation on * * Return: * On success: NULL if the delegation was successfully hashed. * * On error: -EAGAIN if one was previously granted to this * nfs4_client for this nfs4_file. Delegation is not hashed. 
* */ static int hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp) { int status; struct nfs4_client *clp = dp->dl_stid.sc_client; lockdep_assert_held(&state_lock); lockdep_assert_held(&fp->fi_lock); status = nfs4_get_existing_delegation(clp, fp); if (status) return status; ++fp->fi_delegees; atomic_inc(&dp->dl_stid.sc_count); dp->dl_stid.sc_type = NFS4_DELEG_STID; list_add(&dp->dl_perfile, &fp->fi_delegations); list_add(&dp->dl_perclnt, &clp->cl_delegations); return 0; } static bool unhash_delegation_locked(struct nfs4_delegation *dp) { struct nfs4_file *fp = dp->dl_stid.sc_file; lockdep_assert_held(&state_lock); if (list_empty(&dp->dl_perfile)) return false; dp->dl_stid.sc_type = NFS4_CLOSED_DELEG_STID; /* Ensure that deleg break won't try to requeue it */ ++dp->dl_time; spin_lock(&fp->fi_lock); list_del_init(&dp->dl_perclnt); list_del_init(&dp->dl_recall_lru); list_del_init(&dp->dl_perfile); spin_unlock(&fp->fi_lock); return true; } static void destroy_delegation(struct nfs4_delegation *dp) { bool unhashed; spin_lock(&state_lock); unhashed = unhash_delegation_locked(dp); spin_unlock(&state_lock); if (unhashed) { put_clnt_odstate(dp->dl_clnt_odstate); nfs4_put_deleg_lease(dp->dl_stid.sc_file); nfs4_put_stid(&dp->dl_stid); } } static void revoke_delegation(struct nfs4_delegation *dp) { struct nfs4_client *clp = dp->dl_stid.sc_client; WARN_ON(!list_empty(&dp->dl_recall_lru)); put_clnt_odstate(dp->dl_clnt_odstate); nfs4_put_deleg_lease(dp->dl_stid.sc_file); if (clp->cl_minorversion == 0) nfs4_put_stid(&dp->dl_stid); else { dp->dl_stid.sc_type = NFS4_REVOKED_DELEG_STID; spin_lock(&clp->cl_lock); list_add(&dp->dl_recall_lru, &clp->cl_revoked); spin_unlock(&clp->cl_lock); } } /* * SETCLIENTID state */ static unsigned int clientid_hashval(u32 id) { return id & CLIENT_HASH_MASK; } static unsigned int clientstr_hashval(const char *name) { return opaque_hashval(name, 8) & CLIENT_HASH_MASK; } /* * We store the NONE, READ, WRITE, and BOTH bits separately in the * st_{access,deny}_bmap field of the stateid, in order to track not * only what share bits are currently in force, but also what * combinations of share bits previous opens have used. This allows us * to enforce the recommendation of rfc 3530 14.2.19 that the server * return an error if the client attempt to downgrade to a combination * of share bits not explicable by closing some of its previous opens. * * XXX: This enforcement is actually incomplete, since we don't keep * track of access/deny bit combinations; so, e.g., we allow: * * OPEN allow read, deny write * OPEN allow both, deny none * DOWNGRADE allow read, deny none * * which we should reject. 
*/ static unsigned int bmap_to_share_mode(unsigned long bmap) { int i; unsigned int access = 0; for (i = 1; i < 4; i++) { if (test_bit(i, &bmap)) access |= i; } return access; } /* set share access for a given stateid */ static inline void set_access(u32 access, struct nfs4_ol_stateid *stp) { unsigned char mask = 1 << access; WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH); stp->st_access_bmap |= mask; } /* clear share access for a given stateid */ static inline void clear_access(u32 access, struct nfs4_ol_stateid *stp) { unsigned char mask = 1 << access; WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH); stp->st_access_bmap &= ~mask; } /* test whether a given stateid has access */ static inline bool test_access(u32 access, struct nfs4_ol_stateid *stp) { unsigned char mask = 1 << access; return (bool)(stp->st_access_bmap & mask); } /* set share deny for a given stateid */ static inline void set_deny(u32 deny, struct nfs4_ol_stateid *stp) { unsigned char mask = 1 << deny; WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH); stp->st_deny_bmap |= mask; } /* clear share deny for a given stateid */ static inline void clear_deny(u32 deny, struct nfs4_ol_stateid *stp) { unsigned char mask = 1 << deny; WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH); stp->st_deny_bmap &= ~mask; } /* test whether a given stateid is denying specific access */ static inline bool test_deny(u32 deny, struct nfs4_ol_stateid *stp) { unsigned char mask = 1 << deny; return (bool)(stp->st_deny_bmap & mask); } static int nfs4_access_to_omode(u32 access) { switch (access & NFS4_SHARE_ACCESS_BOTH) { case NFS4_SHARE_ACCESS_READ: return O_RDONLY; case NFS4_SHARE_ACCESS_WRITE: return O_WRONLY; case NFS4_SHARE_ACCESS_BOTH: return O_RDWR; } WARN_ON_ONCE(1); return O_RDONLY; } /* * A stateid that had a deny mode associated with it is being released * or downgraded. Recalculate the deny mode on the file. 
*/ static void recalculate_deny_mode(struct nfs4_file *fp) { struct nfs4_ol_stateid *stp; spin_lock(&fp->fi_lock); fp->fi_share_deny = 0; list_for_each_entry(stp, &fp->fi_stateids, st_perfile) fp->fi_share_deny |= bmap_to_share_mode(stp->st_deny_bmap); spin_unlock(&fp->fi_lock); } static void reset_union_bmap_deny(u32 deny, struct nfs4_ol_stateid *stp) { int i; bool change = false; for (i = 1; i < 4; i++) { if ((i & deny) != i) { change = true; clear_deny(i, stp); } } /* Recalculate per-file deny mode if there was a change */ if (change) recalculate_deny_mode(stp->st_stid.sc_file); } /* release all access and file references for a given stateid */ static void release_all_access(struct nfs4_ol_stateid *stp) { int i; struct nfs4_file *fp = stp->st_stid.sc_file; if (fp && stp->st_deny_bmap != 0) recalculate_deny_mode(fp); for (i = 1; i < 4; i++) { if (test_access(i, stp)) nfs4_file_put_access(stp->st_stid.sc_file, i); clear_access(i, stp); } } static inline void nfs4_free_stateowner(struct nfs4_stateowner *sop) { kfree(sop->so_owner.data); sop->so_ops->so_free(sop); } static void nfs4_put_stateowner(struct nfs4_stateowner *sop) { struct nfs4_client *clp = sop->so_client; might_lock(&clp->cl_lock); if (!atomic_dec_and_lock(&sop->so_count, &clp->cl_lock)) return; sop->so_ops->so_unhash(sop); spin_unlock(&clp->cl_lock); nfs4_free_stateowner(sop); } static bool unhash_ol_stateid(struct nfs4_ol_stateid *stp) { struct nfs4_file *fp = stp->st_stid.sc_file; lockdep_assert_held(&stp->st_stateowner->so_client->cl_lock); if (list_empty(&stp->st_perfile)) return false; spin_lock(&fp->fi_lock); list_del_init(&stp->st_perfile); spin_unlock(&fp->fi_lock); list_del(&stp->st_perstateowner); return true; } static void nfs4_free_ol_stateid(struct nfs4_stid *stid) { struct nfs4_ol_stateid *stp = openlockstateid(stid); put_clnt_odstate(stp->st_clnt_odstate); release_all_access(stp); if (stp->st_stateowner) nfs4_put_stateowner(stp->st_stateowner); kmem_cache_free(stateid_slab, stid); } static void nfs4_free_lock_stateid(struct nfs4_stid *stid) { struct nfs4_ol_stateid *stp = openlockstateid(stid); struct nfs4_lockowner *lo = lockowner(stp->st_stateowner); struct file *file; file = find_any_file(stp->st_stid.sc_file); if (file) filp_close(file, (fl_owner_t)lo); nfs4_free_ol_stateid(stid); } /* * Put the persistent reference to an already unhashed generic stateid, while * holding the cl_lock. If it's the last reference, then put it onto the * reaplist for later destruction. 
*/ static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp, struct list_head *reaplist) { struct nfs4_stid *s = &stp->st_stid; struct nfs4_client *clp = s->sc_client; lockdep_assert_held(&clp->cl_lock); WARN_ON_ONCE(!list_empty(&stp->st_locks)); if (!atomic_dec_and_test(&s->sc_count)) { wake_up_all(&close_wq); return; } idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id); list_add(&stp->st_locks, reaplist); } static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp) { lockdep_assert_held(&stp->st_stid.sc_client->cl_lock); list_del_init(&stp->st_locks); nfs4_unhash_stid(&stp->st_stid); return unhash_ol_stateid(stp); } static void release_lock_stateid(struct nfs4_ol_stateid *stp) { struct nfs4_client *clp = stp->st_stid.sc_client; bool unhashed; spin_lock(&clp->cl_lock); unhashed = unhash_lock_stateid(stp); spin_unlock(&clp->cl_lock); if (unhashed) nfs4_put_stid(&stp->st_stid); } static void unhash_lockowner_locked(struct nfs4_lockowner *lo) { struct nfs4_client *clp = lo->lo_owner.so_client; lockdep_assert_held(&clp->cl_lock); list_del_init(&lo->lo_owner.so_strhash); } /* * Free a list of generic stateids that were collected earlier after being * fully unhashed. */ static void free_ol_stateid_reaplist(struct list_head *reaplist) { struct nfs4_ol_stateid *stp; struct nfs4_file *fp; might_sleep(); while (!list_empty(reaplist)) { stp = list_first_entry(reaplist, struct nfs4_ol_stateid, st_locks); list_del(&stp->st_locks); fp = stp->st_stid.sc_file; stp->st_stid.sc_free(&stp->st_stid); if (fp) put_nfs4_file(fp); } } static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp, struct list_head *reaplist) { struct nfs4_ol_stateid *stp; lockdep_assert_held(&open_stp->st_stid.sc_client->cl_lock); while (!list_empty(&open_stp->st_locks)) { stp = list_entry(open_stp->st_locks.next, struct nfs4_ol_stateid, st_locks); WARN_ON(!unhash_lock_stateid(stp)); put_ol_stateid_locked(stp, reaplist); } } static bool unhash_open_stateid(struct nfs4_ol_stateid *stp, struct list_head *reaplist) { bool unhashed; lockdep_assert_held(&stp->st_stid.sc_client->cl_lock); unhashed = unhash_ol_stateid(stp); release_open_stateid_locks(stp, reaplist); return unhashed; } static void release_open_stateid(struct nfs4_ol_stateid *stp) { LIST_HEAD(reaplist); spin_lock(&stp->st_stid.sc_client->cl_lock); if (unhash_open_stateid(stp, &reaplist)) put_ol_stateid_locked(stp, &reaplist); spin_unlock(&stp->st_stid.sc_client->cl_lock); free_ol_stateid_reaplist(&reaplist); } static void unhash_openowner_locked(struct nfs4_openowner *oo) { struct nfs4_client *clp = oo->oo_owner.so_client; lockdep_assert_held(&clp->cl_lock); list_del_init(&oo->oo_owner.so_strhash); list_del_init(&oo->oo_perclient); } static void release_last_closed_stateid(struct nfs4_openowner *oo) { struct nfsd_net *nn = net_generic(oo->oo_owner.so_client->net, nfsd_net_id); struct nfs4_ol_stateid *s; spin_lock(&nn->client_lock); s = oo->oo_last_closed_stid; if (s) { list_del_init(&oo->oo_close_lru); oo->oo_last_closed_stid = NULL; } spin_unlock(&nn->client_lock); if (s) nfs4_put_stid(&s->st_stid); } static void release_openowner(struct nfs4_openowner *oo) { struct nfs4_ol_stateid *stp; struct nfs4_client *clp = oo->oo_owner.so_client; struct list_head reaplist; INIT_LIST_HEAD(&reaplist); spin_lock(&clp->cl_lock); unhash_openowner_locked(oo); while (!list_empty(&oo->oo_owner.so_stateids)) { stp = list_first_entry(&oo->oo_owner.so_stateids, struct nfs4_ol_stateid, st_perstateowner); if (unhash_open_stateid(stp, &reaplist)) 
put_ol_stateid_locked(stp, &reaplist); } spin_unlock(&clp->cl_lock); free_ol_stateid_reaplist(&reaplist); release_last_closed_stateid(oo); nfs4_put_stateowner(&oo->oo_owner); } static inline int hash_sessionid(struct nfs4_sessionid *sessionid) { struct nfsd4_sessionid *sid = (struct nfsd4_sessionid *)sessionid; return sid->sequence % SESSION_HASH_SIZE; } #ifdef CONFIG_SUNRPC_DEBUG static inline void dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid) { u32 *ptr = (u32 *)(&sessionid->data[0]); dprintk("%s: %u:%u:%u:%u\n", fn, ptr[0], ptr[1], ptr[2], ptr[3]); } #else static inline void dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid) { } #endif /* * Bump the seqid on cstate->replay_owner, and clear replay_owner if it * won't be used for replay. */ void nfsd4_bump_seqid(struct nfsd4_compound_state *cstate, __be32 nfserr) { struct nfs4_stateowner *so = cstate->replay_owner; if (nfserr == nfserr_replay_me) return; if (!seqid_mutating_err(ntohl(nfserr))) { nfsd4_cstate_clear_replay(cstate); return; } if (!so) return; if (so->so_is_open_owner) release_last_closed_stateid(openowner(so)); so->so_seqid++; return; } static void gen_sessionid(struct nfsd4_session *ses) { struct nfs4_client *clp = ses->se_client; struct nfsd4_sessionid *sid; sid = (struct nfsd4_sessionid *)ses->se_sessionid.data; sid->clientid = clp->cl_clientid; sid->sequence = current_sessionid++; sid->reserved = 0; } /* * The protocol defines ca_maxresponssize_cached to include the size of * the rpc header, but all we need to cache is the data starting after * the end of the initial SEQUENCE operation--the rest we regenerate * each time. Therefore we can advertise a ca_maxresponssize_cached * value that is the number of bytes in our cache plus a few additional * bytes. In order to stay on the safe side, and not promise more than * we can cache, those additional bytes must be the minimum possible: 24 * bytes of rpc header (xid through accept state, with AUTH_NULL * verifier), 12 for the compound header (with zero-length tag), and 44 * for the SEQUENCE op response: */ #define NFSD_MIN_HDR_SEQ_SZ (24 + 12 + 44) static void free_session_slots(struct nfsd4_session *ses) { int i; for (i = 0; i < ses->se_fchannel.maxreqs; i++) kfree(ses->se_slots[i]); } /* * We don't actually need to cache the rpc and session headers, so we * can allocate a little less for each slot: */ static inline u32 slot_bytes(struct nfsd4_channel_attrs *ca) { u32 size; if (ca->maxresp_cached < NFSD_MIN_HDR_SEQ_SZ) size = 0; else size = ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ; return size + sizeof(struct nfsd4_slot); } /* * XXX: If we run out of reserved DRC memory we could (up to a point) * re-negotiate active sessions and reduce their slot usage to make * room for new connections. For now we just fail the create session. 
*/ static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca) { u32 slotsize = slot_bytes(ca); u32 num = ca->maxreqs; int avail; spin_lock(&nfsd_drc_lock); avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION, nfsd_drc_max_mem - nfsd_drc_mem_used); num = min_t(int, num, avail / slotsize); nfsd_drc_mem_used += num * slotsize; spin_unlock(&nfsd_drc_lock); return num; } static void nfsd4_put_drc_mem(struct nfsd4_channel_attrs *ca) { int slotsize = slot_bytes(ca); spin_lock(&nfsd_drc_lock); nfsd_drc_mem_used -= slotsize * ca->maxreqs; spin_unlock(&nfsd_drc_lock); } static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *fattrs, struct nfsd4_channel_attrs *battrs) { int numslots = fattrs->maxreqs; int slotsize = slot_bytes(fattrs); struct nfsd4_session *new; int mem, i; BUILD_BUG_ON(NFSD_MAX_SLOTS_PER_SESSION * sizeof(struct nfsd4_slot *) + sizeof(struct nfsd4_session) > PAGE_SIZE); mem = numslots * sizeof(struct nfsd4_slot *); new = kzalloc(sizeof(*new) + mem, GFP_KERNEL); if (!new) return NULL; /* allocate each struct nfsd4_slot and data cache in one piece */ for (i = 0; i < numslots; i++) { new->se_slots[i] = kzalloc(slotsize, GFP_KERNEL); if (!new->se_slots[i]) goto out_free; } memcpy(&new->se_fchannel, fattrs, sizeof(struct nfsd4_channel_attrs)); memcpy(&new->se_bchannel, battrs, sizeof(struct nfsd4_channel_attrs)); return new; out_free: while (i--) kfree(new->se_slots[i]); kfree(new); return NULL; } static void free_conn(struct nfsd4_conn *c) { svc_xprt_put(c->cn_xprt); kfree(c); } static void nfsd4_conn_lost(struct svc_xpt_user *u) { struct nfsd4_conn *c = container_of(u, struct nfsd4_conn, cn_xpt_user); struct nfs4_client *clp = c->cn_session->se_client; spin_lock(&clp->cl_lock); if (!list_empty(&c->cn_persession)) { list_del(&c->cn_persession); free_conn(c); } nfsd4_probe_callback(clp); spin_unlock(&clp->cl_lock); } static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags) { struct nfsd4_conn *conn; conn = kmalloc(sizeof(struct nfsd4_conn), GFP_KERNEL); if (!conn) return NULL; svc_xprt_get(rqstp->rq_xprt); conn->cn_xprt = rqstp->rq_xprt; conn->cn_flags = flags; INIT_LIST_HEAD(&conn->cn_xpt_user.list); return conn; } static void __nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses) { conn->cn_session = ses; list_add(&conn->cn_persession, &ses->se_conns); } static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses) { struct nfs4_client *clp = ses->se_client; spin_lock(&clp->cl_lock); __nfsd4_hash_conn(conn, ses); spin_unlock(&clp->cl_lock); } static int nfsd4_register_conn(struct nfsd4_conn *conn) { conn->cn_xpt_user.callback = nfsd4_conn_lost; return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user); } static void nfsd4_init_conn(struct svc_rqst *rqstp, struct nfsd4_conn *conn, struct nfsd4_session *ses) { int ret; nfsd4_hash_conn(conn, ses); ret = nfsd4_register_conn(conn); if (ret) /* oops; xprt is already down: */ nfsd4_conn_lost(&conn->cn_xpt_user); /* We may have gained or lost a callback channel: */ nfsd4_probe_callback_sync(ses->se_client); } static struct nfsd4_conn *alloc_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_create_session *cses) { u32 dir = NFS4_CDFC4_FORE; if (cses->flags & SESSION4_BACK_CHAN) dir |= NFS4_CDFC4_BACK; return alloc_conn(rqstp, dir); } /* must be called under client_lock */ static void nfsd4_del_conns(struct nfsd4_session *s) { struct nfs4_client *clp = s->se_client; struct nfsd4_conn *c; spin_lock(&clp->cl_lock); while (!list_empty(&s->se_conns)) { c = 
list_first_entry(&s->se_conns, struct nfsd4_conn, cn_persession); list_del_init(&c->cn_persession); spin_unlock(&clp->cl_lock); unregister_xpt_user(c->cn_xprt, &c->cn_xpt_user); free_conn(c); spin_lock(&clp->cl_lock); } spin_unlock(&clp->cl_lock); } static void __free_session(struct nfsd4_session *ses) { free_session_slots(ses); kfree(ses); } static void free_session(struct nfsd4_session *ses) { nfsd4_del_conns(ses); nfsd4_put_drc_mem(&ses->se_fchannel); __free_session(ses); } static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, struct nfs4_client *clp, struct nfsd4_create_session *cses) { int idx; struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); new->se_client = clp; gen_sessionid(new); INIT_LIST_HEAD(&new->se_conns); new->se_cb_seq_nr = 1; new->se_flags = cses->flags; new->se_cb_prog = cses->callback_prog; new->se_cb_sec = cses->cb_sec; atomic_set(&new->se_ref, 0); idx = hash_sessionid(&new->se_sessionid); list_add(&new->se_hash, &nn->sessionid_hashtbl[idx]); spin_lock(&clp->cl_lock); list_add(&new->se_perclnt, &clp->cl_sessions); spin_unlock(&clp->cl_lock); { struct sockaddr *sa = svc_addr(rqstp); /* * This is a little silly; with sessions there's no real * use for the callback address. Use the peer address * as a reasonable default for now, but consider fixing * the rpc client not to require an address in the * future: */ rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa); clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa); } } /* caller must hold client_lock */ static struct nfsd4_session * __find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net) { struct nfsd4_session *elem; int idx; struct nfsd_net *nn = net_generic(net, nfsd_net_id); lockdep_assert_held(&nn->client_lock); dump_sessionid(__func__, sessionid); idx = hash_sessionid(sessionid); /* Search in the appropriate list */ list_for_each_entry(elem, &nn->sessionid_hashtbl[idx], se_hash) { if (!memcmp(elem->se_sessionid.data, sessionid->data, NFS4_MAX_SESSIONID_LEN)) { return elem; } } dprintk("%s: session not found\n", __func__); return NULL; } static struct nfsd4_session * find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net, __be32 *ret) { struct nfsd4_session *session; __be32 status = nfserr_badsession; session = __find_in_sessionid_hashtbl(sessionid, net); if (!session) goto out; status = nfsd4_get_session_locked(session); if (status) session = NULL; out: *ret = status; return session; } /* caller must hold client_lock */ static void unhash_session(struct nfsd4_session *ses) { struct nfs4_client *clp = ses->se_client; struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); lockdep_assert_held(&nn->client_lock); list_del(&ses->se_hash); spin_lock(&ses->se_client->cl_lock); list_del(&ses->se_perclnt); spin_unlock(&ses->se_client->cl_lock); } /* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */ static int STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn) { /* * We're assuming the clid was not given out from a boot * precisely 2^32 (about 136 years) before this one. That seems * a safe assumption: */ if (clid->cl_boot == (u32)nn->boot_time) return 0; dprintk("NFSD stale clientid (%08x/%08x) boot_time %08lx\n", clid->cl_boot, clid->cl_id, nn->boot_time); return 1; } /* * XXX Should we use a slab cache ? * This type of memory management is somewhat inefficient, but we use it * anyway since SETCLIENTID is not a common operation. 
*/ static struct nfs4_client *alloc_client(struct xdr_netobj name) { struct nfs4_client *clp; int i; clp = kzalloc(sizeof(struct nfs4_client), GFP_KERNEL); if (clp == NULL) return NULL; clp->cl_name.data = kmemdup(name.data, name.len, GFP_KERNEL); if (clp->cl_name.data == NULL) goto err_no_name; clp->cl_ownerstr_hashtbl = kmalloc(sizeof(struct list_head) * OWNER_HASH_SIZE, GFP_KERNEL); if (!clp->cl_ownerstr_hashtbl) goto err_no_hashtbl; for (i = 0; i < OWNER_HASH_SIZE; i++) INIT_LIST_HEAD(&clp->cl_ownerstr_hashtbl[i]); clp->cl_name.len = name.len; INIT_LIST_HEAD(&clp->cl_sessions); idr_init(&clp->cl_stateids); atomic_set(&clp->cl_refcount, 0); clp->cl_cb_state = NFSD4_CB_UNKNOWN; INIT_LIST_HEAD(&clp->cl_idhash); INIT_LIST_HEAD(&clp->cl_openowners); INIT_LIST_HEAD(&clp->cl_delegations); INIT_LIST_HEAD(&clp->cl_lru); INIT_LIST_HEAD(&clp->cl_revoked); #ifdef CONFIG_NFSD_PNFS INIT_LIST_HEAD(&clp->cl_lo_states); #endif spin_lock_init(&clp->cl_lock); rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table"); return clp; err_no_hashtbl: kfree(clp->cl_name.data); err_no_name: kfree(clp); return NULL; } static void free_client(struct nfs4_client *clp) { while (!list_empty(&clp->cl_sessions)) { struct nfsd4_session *ses; ses = list_entry(clp->cl_sessions.next, struct nfsd4_session, se_perclnt); list_del(&ses->se_perclnt); WARN_ON_ONCE(atomic_read(&ses->se_ref)); free_session(ses); } rpc_destroy_wait_queue(&clp->cl_cb_waitq); free_svc_cred(&clp->cl_cred); kfree(clp->cl_ownerstr_hashtbl); kfree(clp->cl_name.data); idr_destroy(&clp->cl_stateids); kfree(clp); } /* must be called under the client_lock */ static void unhash_client_locked(struct nfs4_client *clp) { struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); struct nfsd4_session *ses; lockdep_assert_held(&nn->client_lock); /* Mark the client as expired! 
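 * Zeroing cl_time below is what the rest of the expiry logic in this
 * file keys on when deciding that a client is dead.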
*/ clp->cl_time = 0; /* Make it invisible */ if (!list_empty(&clp->cl_idhash)) { list_del_init(&clp->cl_idhash); if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags)) rb_erase(&clp->cl_namenode, &nn->conf_name_tree); else rb_erase(&clp->cl_namenode, &nn->unconf_name_tree); } list_del_init(&clp->cl_lru); spin_lock(&clp->cl_lock); list_for_each_entry(ses, &clp->cl_sessions, se_perclnt) list_del_init(&ses->se_hash); spin_unlock(&clp->cl_lock); } static void unhash_client(struct nfs4_client *clp) { struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); spin_lock(&nn->client_lock); unhash_client_locked(clp); spin_unlock(&nn->client_lock); } static __be32 mark_client_expired_locked(struct nfs4_client *clp) { if (atomic_read(&clp->cl_refcount)) return nfserr_jukebox; unhash_client_locked(clp); return nfs_ok; } static void __destroy_client(struct nfs4_client *clp) { struct nfs4_openowner *oo; struct nfs4_delegation *dp; struct list_head reaplist; INIT_LIST_HEAD(&reaplist); spin_lock(&state_lock); while (!list_empty(&clp->cl_delegations)) { dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt); WARN_ON(!unhash_delegation_locked(dp)); list_add(&dp->dl_recall_lru, &reaplist); } spin_unlock(&state_lock); while (!list_empty(&reaplist)) { dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru); list_del_init(&dp->dl_recall_lru); put_clnt_odstate(dp->dl_clnt_odstate); nfs4_put_deleg_lease(dp->dl_stid.sc_file); nfs4_put_stid(&dp->dl_stid); } while (!list_empty(&clp->cl_revoked)) { dp = list_entry(clp->cl_revoked.next, struct nfs4_delegation, dl_recall_lru); list_del_init(&dp->dl_recall_lru); nfs4_put_stid(&dp->dl_stid); } while (!list_empty(&clp->cl_openowners)) { oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient); nfs4_get_stateowner(&oo->oo_owner); release_openowner(oo); } nfsd4_return_all_client_layouts(clp); nfsd4_shutdown_callback(clp); if (clp->cl_cb_conn.cb_xprt) svc_xprt_put(clp->cl_cb_conn.cb_xprt); free_client(clp); } static void destroy_client(struct nfs4_client *clp) { unhash_client(clp); __destroy_client(clp); } static void expire_client(struct nfs4_client *clp) { unhash_client(clp); nfsd4_client_record_remove(clp); __destroy_client(clp); } static void copy_verf(struct nfs4_client *target, nfs4_verifier *source) { memcpy(target->cl_verifier.data, source->data, sizeof(target->cl_verifier.data)); } static void copy_clid(struct nfs4_client *target, struct nfs4_client *source) { target->cl_clientid.cl_boot = source->cl_clientid.cl_boot; target->cl_clientid.cl_id = source->cl_clientid.cl_id; } int strdup_if_nonnull(char **target, char *source) { if (source) { *target = kstrdup(source, GFP_KERNEL); if (!*target) return -ENOMEM; } else *target = NULL; return 0; } static int copy_cred(struct svc_cred *target, struct svc_cred *source) { int ret; ret = strdup_if_nonnull(&target->cr_principal, source->cr_principal); if (ret) return ret; ret = strdup_if_nonnull(&target->cr_raw_principal, source->cr_raw_principal); if (ret) return ret; target->cr_flavor = source->cr_flavor; target->cr_uid = source->cr_uid; target->cr_gid = source->cr_gid; target->cr_group_info = source->cr_group_info; get_group_info(target->cr_group_info); target->cr_gss_mech = source->cr_gss_mech; if (source->cr_gss_mech) gss_mech_get(source->cr_gss_mech); return 0; } static int compare_blob(const struct xdr_netobj *o1, const struct xdr_netobj *o2) { if (o1->len < o2->len) return -1; if (o1->len > o2->len) return 1; return memcmp(o1->data, o2->data, o1->len); } static 
int same_name(const char *n1, const char *n2) { return 0 == memcmp(n1, n2, HEXDIR_LEN); } static int same_verf(nfs4_verifier *v1, nfs4_verifier *v2) { return 0 == memcmp(v1->data, v2->data, sizeof(v1->data)); } static int same_clid(clientid_t *cl1, clientid_t *cl2) { return (cl1->cl_boot == cl2->cl_boot) && (cl1->cl_id == cl2->cl_id); } static bool groups_equal(struct group_info *g1, struct group_info *g2) { int i; if (g1->ngroups != g2->ngroups) return false; for (i=0; i<g1->ngroups; i++) if (!gid_eq(g1->gid[i], g2->gid[i])) return false; return true; } /* * RFC 3530 language requires clid_inuse be returned when the * "principal" associated with a requests differs from that previously * used. We use uid, gid's, and gss principal string as our best * approximation. We also don't want to allow non-gss use of a client * established using gss: in theory cr_principal should catch that * change, but in practice cr_principal can be null even in the gss case * since gssd doesn't always pass down a principal string. */ static bool is_gss_cred(struct svc_cred *cr) { /* Is cr_flavor one of the gss "pseudoflavors"?: */ return (cr->cr_flavor > RPC_AUTH_MAXFLAVOR); } static bool same_creds(struct svc_cred *cr1, struct svc_cred *cr2) { if ((is_gss_cred(cr1) != is_gss_cred(cr2)) || (!uid_eq(cr1->cr_uid, cr2->cr_uid)) || (!gid_eq(cr1->cr_gid, cr2->cr_gid)) || !groups_equal(cr1->cr_group_info, cr2->cr_group_info)) return false; if (cr1->cr_principal == cr2->cr_principal) return true; if (!cr1->cr_principal || !cr2->cr_principal) return false; return 0 == strcmp(cr1->cr_principal, cr2->cr_principal); } static bool svc_rqst_integrity_protected(struct svc_rqst *rqstp) { struct svc_cred *cr = &rqstp->rq_cred; u32 service; if (!cr->cr_gss_mech) return false; service = gss_pseudoflavor_to_service(cr->cr_gss_mech, cr->cr_flavor); return service == RPC_GSS_SVC_INTEGRITY || service == RPC_GSS_SVC_PRIVACY; } bool nfsd4_mach_creds_match(struct nfs4_client *cl, struct svc_rqst *rqstp) { struct svc_cred *cr = &rqstp->rq_cred; if (!cl->cl_mach_cred) return true; if (cl->cl_cred.cr_gss_mech != cr->cr_gss_mech) return false; if (!svc_rqst_integrity_protected(rqstp)) return false; if (cl->cl_cred.cr_raw_principal) return 0 == strcmp(cl->cl_cred.cr_raw_principal, cr->cr_raw_principal); if (!cr->cr_principal) return false; return 0 == strcmp(cl->cl_cred.cr_principal, cr->cr_principal); } static void gen_confirm(struct nfs4_client *clp, struct nfsd_net *nn) { __be32 verf[2]; /* * This is opaque to client, so no need to byte-swap. 
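 * (The verifier itself is just get_seconds() plus the per-net
 * clverifier_counter, assigned just below.)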
Use * __force to keep sparse happy */ verf[0] = (__force __be32)get_seconds(); verf[1] = (__force __be32)nn->clverifier_counter++; memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data)); } static void gen_clid(struct nfs4_client *clp, struct nfsd_net *nn) { clp->cl_clientid.cl_boot = nn->boot_time; clp->cl_clientid.cl_id = nn->clientid_counter++; gen_confirm(clp, nn); } static struct nfs4_stid * find_stateid_locked(struct nfs4_client *cl, stateid_t *t) { struct nfs4_stid *ret; ret = idr_find(&cl->cl_stateids, t->si_opaque.so_id); if (!ret || !ret->sc_type) return NULL; return ret; } static struct nfs4_stid * find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, char typemask) { struct nfs4_stid *s; spin_lock(&cl->cl_lock); s = find_stateid_locked(cl, t); if (s != NULL) { if (typemask & s->sc_type) atomic_inc(&s->sc_count); else s = NULL; } spin_unlock(&cl->cl_lock); return s; } static struct nfs4_client *create_client(struct xdr_netobj name, struct svc_rqst *rqstp, nfs4_verifier *verf) { struct nfs4_client *clp; struct sockaddr *sa = svc_addr(rqstp); int ret; struct net *net = SVC_NET(rqstp); clp = alloc_client(name); if (clp == NULL) return NULL; ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred); if (ret) { free_client(clp); return NULL; } nfsd4_init_cb(&clp->cl_cb_null, clp, NULL, NFSPROC4_CLNT_CB_NULL); clp->cl_time = get_seconds(); clear_bit(0, &clp->cl_cb_slot_busy); copy_verf(clp, verf); rpc_copy_addr((struct sockaddr *) &clp->cl_addr, sa); clp->cl_cb_session = NULL; clp->net = net; return clp; } static void add_clp_to_name_tree(struct nfs4_client *new_clp, struct rb_root *root) { struct rb_node **new = &(root->rb_node), *parent = NULL; struct nfs4_client *clp; while (*new) { clp = rb_entry(*new, struct nfs4_client, cl_namenode); parent = *new; if (compare_blob(&clp->cl_name, &new_clp->cl_name) > 0) new = &((*new)->rb_left); else new = &((*new)->rb_right); } rb_link_node(&new_clp->cl_namenode, parent, new); rb_insert_color(&new_clp->cl_namenode, root); } static struct nfs4_client * find_clp_in_name_tree(struct xdr_netobj *name, struct rb_root *root) { int cmp; struct rb_node *node = root->rb_node; struct nfs4_client *clp; while (node) { clp = rb_entry(node, struct nfs4_client, cl_namenode); cmp = compare_blob(&clp->cl_name, name); if (cmp > 0) node = node->rb_left; else if (cmp < 0) node = node->rb_right; else return clp; } return NULL; } static void add_to_unconfirmed(struct nfs4_client *clp) { unsigned int idhashval; struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); lockdep_assert_held(&nn->client_lock); clear_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags); add_clp_to_name_tree(clp, &nn->unconf_name_tree); idhashval = clientid_hashval(clp->cl_clientid.cl_id); list_add(&clp->cl_idhash, &nn->unconf_id_hashtbl[idhashval]); renew_client_locked(clp); } static void move_to_confirmed(struct nfs4_client *clp) { unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id); struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); lockdep_assert_held(&nn->client_lock); dprintk("NFSD: move_to_confirm nfs4_client %p\n", clp); list_move(&clp->cl_idhash, &nn->conf_id_hashtbl[idhashval]); rb_erase(&clp->cl_namenode, &nn->unconf_name_tree); add_clp_to_name_tree(clp, &nn->conf_name_tree); set_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags); renew_client_locked(clp); } static struct nfs4_client * find_client_in_id_table(struct list_head *tbl, clientid_t *clid, bool sessions) { struct nfs4_client *clp; unsigned int idhashval = clientid_hashval(clid->cl_id); list_for_each_entry(clp, 
&tbl[idhashval], cl_idhash) { if (same_clid(&clp->cl_clientid, clid)) { if ((bool)clp->cl_minorversion != sessions) return NULL; renew_client_locked(clp); return clp; } } return NULL; } static struct nfs4_client * find_confirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn) { struct list_head *tbl = nn->conf_id_hashtbl; lockdep_assert_held(&nn->client_lock); return find_client_in_id_table(tbl, clid, sessions); } static struct nfs4_client * find_unconfirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn) { struct list_head *tbl = nn->unconf_id_hashtbl; lockdep_assert_held(&nn->client_lock); return find_client_in_id_table(tbl, clid, sessions); } static bool clp_used_exchangeid(struct nfs4_client *clp) { return clp->cl_exchange_flags != 0; } static struct nfs4_client * find_confirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn) { lockdep_assert_held(&nn->client_lock); return find_clp_in_name_tree(name, &nn->conf_name_tree); } static struct nfs4_client * find_unconfirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn) { lockdep_assert_held(&nn->client_lock); return find_clp_in_name_tree(name, &nn->unconf_name_tree); } static void gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_rqst *rqstp) { struct nfs4_cb_conn *conn = &clp->cl_cb_conn; struct sockaddr *sa = svc_addr(rqstp); u32 scopeid = rpc_get_scope_id(sa); unsigned short expected_family; /* Currently, we only support tcp and tcp6 for the callback channel */ if (se->se_callback_netid_len == 3 && !memcmp(se->se_callback_netid_val, "tcp", 3)) expected_family = AF_INET; else if (se->se_callback_netid_len == 4 && !memcmp(se->se_callback_netid_val, "tcp6", 4)) expected_family = AF_INET6; else goto out_err; conn->cb_addrlen = rpc_uaddr2sockaddr(clp->net, se->se_callback_addr_val, se->se_callback_addr_len, (struct sockaddr *)&conn->cb_addr, sizeof(conn->cb_addr)); if (!conn->cb_addrlen || conn->cb_addr.ss_family != expected_family) goto out_err; if (conn->cb_addr.ss_family == AF_INET6) ((struct sockaddr_in6 *)&conn->cb_addr)->sin6_scope_id = scopeid; conn->cb_prog = se->se_callback_prog; conn->cb_ident = se->se_callback_ident; memcpy(&conn->cb_saddr, &rqstp->rq_daddr, rqstp->rq_daddrlen); return; out_err: conn->cb_addr.ss_family = AF_UNSPEC; conn->cb_addrlen = 0; dprintk("NFSD: this client (clientid %08x/%08x) " "will not receive delegations\n", clp->cl_clientid.cl_boot, clp->cl_clientid.cl_id); return; } /* * Cache a reply. nfsd4_check_resp_size() has bounded the cache size. */ static void nfsd4_store_cache_entry(struct nfsd4_compoundres *resp) { struct xdr_buf *buf = resp->xdr.buf; struct nfsd4_slot *slot = resp->cstate.slot; unsigned int base; dprintk("--> %s slot %p\n", __func__, slot); slot->sl_opcnt = resp->opcnt; slot->sl_status = resp->cstate.status; slot->sl_flags |= NFSD4_SLOT_INITIALIZED; if (nfsd4_not_cached(resp)) { slot->sl_datalen = 0; return; } base = resp->cstate.data_offset; slot->sl_datalen = buf->len - base; if (read_bytes_from_xdr_buf(buf, base, slot->sl_data, slot->sl_datalen)) WARN(1, "%s: sessions DRC could not cache compound\n", __func__); return; } /* * Encode the replay sequence operation from the slot values. * If cachethis is FALSE encode the uncached rep error on the next * operation which sets resp->p and increments resp->opcnt for * nfs4svc_encode_compoundres. 
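 * (See nfsd4_enc_sequence_replay() below: when NFSD4_SLOT_CACHETHIS is
 * not set, the op after the replayed SEQUENCE is encoded with
 * nfserr_retry_uncached_rep.)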
* */ static __be32 nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args, struct nfsd4_compoundres *resp) { struct nfsd4_op *op; struct nfsd4_slot *slot = resp->cstate.slot; /* Encode the replayed sequence operation */ op = &args->ops[resp->opcnt - 1]; nfsd4_encode_operation(resp, op); /* Return nfserr_retry_uncached_rep in next operation. */ if (args->opcnt > 1 && !(slot->sl_flags & NFSD4_SLOT_CACHETHIS)) { op = &args->ops[resp->opcnt++]; op->status = nfserr_retry_uncached_rep; nfsd4_encode_operation(resp, op); } return op->status; } /* * The sequence operation is not cached because we can use the slot and * session values. */ static __be32 nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp, struct nfsd4_sequence *seq) { struct nfsd4_slot *slot = resp->cstate.slot; struct xdr_stream *xdr = &resp->xdr; __be32 *p; __be32 status; dprintk("--> %s slot %p\n", __func__, slot); status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp); if (status) return status; p = xdr_reserve_space(xdr, slot->sl_datalen); if (!p) { WARN_ON_ONCE(1); return nfserr_serverfault; } xdr_encode_opaque_fixed(p, slot->sl_data, slot->sl_datalen); xdr_commit_encode(xdr); resp->opcnt = slot->sl_opcnt; return slot->sl_status; } /* * Set the exchange_id flags returned by the server. */ static void nfsd4_set_ex_flags(struct nfs4_client *new, struct nfsd4_exchange_id *clid) { #ifdef CONFIG_NFSD_PNFS new->cl_exchange_flags |= EXCHGID4_FLAG_USE_PNFS_MDS; #else new->cl_exchange_flags |= EXCHGID4_FLAG_USE_NON_PNFS; #endif /* Referrals are supported, Migration is not. */ new->cl_exchange_flags |= EXCHGID4_FLAG_SUPP_MOVED_REFER; /* set the wire flags to return to client. */ clid->flags = new->cl_exchange_flags; } static bool client_has_openowners(struct nfs4_client *clp) { struct nfs4_openowner *oo; list_for_each_entry(oo, &clp->cl_openowners, oo_perclient) { if (!list_empty(&oo->oo_owner.so_stateids)) return true; } return false; } static bool client_has_state(struct nfs4_client *clp) { return client_has_openowners(clp) #ifdef CONFIG_NFSD_PNFS || !list_empty(&clp->cl_lo_states) #endif || !list_empty(&clp->cl_delegations) || !list_empty(&clp->cl_sessions); } __be32 nfsd4_exchange_id(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_exchange_id *exid) { struct nfs4_client *conf, *new; struct nfs4_client *unconf = NULL; __be32 status; char addr_str[INET6_ADDRSTRLEN]; nfs4_verifier verf = exid->verifier; struct sockaddr *sa = svc_addr(rqstp); bool update = exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A; struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); rpc_ntop(sa, addr_str, sizeof(addr_str)); dprintk("%s rqstp=%p exid=%p clname.len=%u clname.data=%p " "ip_addr=%s flags %x, spa_how %d\n", __func__, rqstp, exid, exid->clname.len, exid->clname.data, addr_str, exid->flags, exid->spa_how); if (exid->flags & ~EXCHGID4_FLAG_MASK_A) return nfserr_inval; new = create_client(exid->clname, rqstp, &verf); if (new == NULL) return nfserr_jukebox; switch (exid->spa_how) { case SP4_MACH_CRED: exid->spo_must_enforce[0] = 0; exid->spo_must_enforce[1] = ( 1 << (OP_BIND_CONN_TO_SESSION - 32) | 1 << (OP_EXCHANGE_ID - 32) | 1 << (OP_CREATE_SESSION - 32) | 1 << (OP_DESTROY_SESSION - 32) | 1 << (OP_DESTROY_CLIENTID - 32)); exid->spo_must_allow[0] &= (1 << (OP_CLOSE) | 1 << (OP_OPEN_DOWNGRADE) | 1 << (OP_LOCKU) | 1 << (OP_DELEGRETURN)); exid->spo_must_allow[1] &= ( 1 << (OP_TEST_STATEID - 32) | 1 << (OP_FREE_STATEID - 32)); if (!svc_rqst_integrity_protected(rqstp)) { status = nfserr_inval; goto out_nolock; 
} /* * Sometimes userspace doesn't give us a principal. * Which is a bug, really. Anyway, we can't enforce * MACH_CRED in that case, better to give up now: */ if (!new->cl_cred.cr_principal && !new->cl_cred.cr_raw_principal) { status = nfserr_serverfault; goto out_nolock; } new->cl_mach_cred = true; case SP4_NONE: break; default: /* checked by xdr code */ WARN_ON_ONCE(1); case SP4_SSV: status = nfserr_encr_alg_unsupp; goto out_nolock; } /* Cases below refer to rfc 5661 section 18.35.4: */ spin_lock(&nn->client_lock); conf = find_confirmed_client_by_name(&exid->clname, nn); if (conf) { bool creds_match = same_creds(&conf->cl_cred, &rqstp->rq_cred); bool verfs_match = same_verf(&verf, &conf->cl_verifier); if (update) { if (!clp_used_exchangeid(conf)) { /* buggy client */ status = nfserr_inval; goto out; } if (!nfsd4_mach_creds_match(conf, rqstp)) { status = nfserr_wrong_cred; goto out; } if (!creds_match) { /* case 9 */ status = nfserr_perm; goto out; } if (!verfs_match) { /* case 8 */ status = nfserr_not_same; goto out; } /* case 6 */ exid->flags |= EXCHGID4_FLAG_CONFIRMED_R; goto out_copy; } if (!creds_match) { /* case 3 */ if (client_has_state(conf)) { status = nfserr_clid_inuse; goto out; } goto out_new; } if (verfs_match) { /* case 2 */ conf->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R; goto out_copy; } /* case 5, client reboot */ conf = NULL; goto out_new; } if (update) { /* case 7 */ status = nfserr_noent; goto out; } unconf = find_unconfirmed_client_by_name(&exid->clname, nn); if (unconf) /* case 4, possible retry or client restart */ unhash_client_locked(unconf); /* case 1 (normal case) */ out_new: if (conf) { status = mark_client_expired_locked(conf); if (status) goto out; } new->cl_minorversion = cstate->minorversion; new->cl_spo_must_allow.u.words[0] = exid->spo_must_allow[0]; new->cl_spo_must_allow.u.words[1] = exid->spo_must_allow[1]; gen_clid(new, nn); add_to_unconfirmed(new); swap(new, conf); out_copy: exid->clientid.cl_boot = conf->cl_clientid.cl_boot; exid->clientid.cl_id = conf->cl_clientid.cl_id; exid->seqid = conf->cl_cs_slot.sl_seqid + 1; nfsd4_set_ex_flags(conf, exid); dprintk("nfsd4_exchange_id seqid %d flags %x\n", conf->cl_cs_slot.sl_seqid, conf->cl_exchange_flags); status = nfs_ok; out: spin_unlock(&nn->client_lock); out_nolock: if (new) expire_client(new); if (unconf) expire_client(unconf); return status; } static __be32 check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse) { dprintk("%s enter. seqid %d slot_seqid %d\n", __func__, seqid, slot_seqid); /* The slot is in use, and no response has been sent. */ if (slot_inuse) { if (seqid == slot_seqid) return nfserr_jukebox; else return nfserr_seq_misordered; } /* Note unsigned 32-bit arithmetic handles wraparound: */ if (likely(seqid == slot_seqid + 1)) return nfs_ok; if (seqid == slot_seqid) return nfserr_replay_cache; return nfserr_seq_misordered; } /* * Cache the create session result into the create session single DRC * slot cache by saving the xdr structure. sl_seqid has been set. * Do this for solo or embedded create session operations. 
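 * A retransmitted CREATE_SESSION carrying the same seqid is then
 * answered from this cache via nfsd4_replay_create_session().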
*/ static void nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses, struct nfsd4_clid_slot *slot, __be32 nfserr) { slot->sl_status = nfserr; memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses)); } static __be32 nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses, struct nfsd4_clid_slot *slot) { memcpy(cr_ses, &slot->sl_cr_ses, sizeof(*cr_ses)); return slot->sl_status; } #define NFSD_MIN_REQ_HDR_SEQ_SZ ((\ 2 * 2 + /* credential,verifier: AUTH_NULL, length 0 */ \ 1 + /* MIN tag is length with zero, only length */ \ 3 + /* version, opcount, opcode */ \ XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \ /* seqid, slotID, slotID, cache */ \ 4 ) * sizeof(__be32)) #define NFSD_MIN_RESP_HDR_SEQ_SZ ((\ 2 + /* verifier: AUTH_NULL, length 0 */\ 1 + /* status */ \ 1 + /* MIN tag is length with zero, only length */ \ 3 + /* opcount, opcode, opstatus*/ \ XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \ /* seqid, slotID, slotID, slotID, status */ \ 5 ) * sizeof(__be32)) static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn) { u32 maxrpc = nn->nfsd_serv->sv_max_mesg; if (ca->maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ) return nfserr_toosmall; if (ca->maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ) return nfserr_toosmall; ca->headerpadsz = 0; ca->maxreq_sz = min_t(u32, ca->maxreq_sz, maxrpc); ca->maxresp_sz = min_t(u32, ca->maxresp_sz, maxrpc); ca->maxops = min_t(u32, ca->maxops, NFSD_MAX_OPS_PER_COMPOUND); ca->maxresp_cached = min_t(u32, ca->maxresp_cached, NFSD_SLOT_CACHE_SIZE + NFSD_MIN_HDR_SEQ_SZ); ca->maxreqs = min_t(u32, ca->maxreqs, NFSD_MAX_SLOTS_PER_SESSION); /* * Note decreasing slot size below client's request may make it * difficult for client to function correctly, whereas * decreasing the number of slots will (just?) affect * performance. When short on memory we therefore prefer to * decrease number of slots instead of their size. Clients that * request larger slots than they need will get poor results: */ ca->maxreqs = nfsd4_get_drc_mem(ca); if (!ca->maxreqs) return nfserr_jukebox; return nfs_ok; } /* * Server's NFSv4.1 backchannel support is AUTH_SYS-only for now. * These are based on similar macros in linux/sunrpc/msg_prot.h . */ #define RPC_MAX_HEADER_WITH_AUTH_SYS \ (RPC_CALLHDRSIZE + 2 * (2 + UNX_CALLSLACK)) #define RPC_MAX_REPHEADER_WITH_AUTH_SYS \ (RPC_REPHDRSIZE + (2 + NUL_REPLYSLACK)) #define NFSD_CB_MAX_REQ_SZ ((NFS4_enc_cb_recall_sz + \ RPC_MAX_HEADER_WITH_AUTH_SYS) * sizeof(__be32)) #define NFSD_CB_MAX_RESP_SZ ((NFS4_dec_cb_recall_sz + \ RPC_MAX_REPHEADER_WITH_AUTH_SYS) * \ sizeof(__be32)) static __be32 check_backchannel_attrs(struct nfsd4_channel_attrs *ca) { ca->headerpadsz = 0; if (ca->maxreq_sz < NFSD_CB_MAX_REQ_SZ) return nfserr_toosmall; if (ca->maxresp_sz < NFSD_CB_MAX_RESP_SZ) return nfserr_toosmall; ca->maxresp_cached = 0; if (ca->maxops < 2) return nfserr_toosmall; return nfs_ok; } static __be32 nfsd4_check_cb_sec(struct nfsd4_cb_sec *cbs) { switch (cbs->flavor) { case RPC_AUTH_NULL: case RPC_AUTH_UNIX: return nfs_ok; default: /* * GSS case: the spec doesn't allow us to return this * error. But it also doesn't allow us not to support * GSS. 
* I'd rather this fail hard than return some error the * client might think it can already handle: */ return nfserr_encr_alg_unsupp; } } __be32 nfsd4_create_session(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_create_session *cr_ses) { struct sockaddr *sa = svc_addr(rqstp); struct nfs4_client *conf, *unconf; struct nfs4_client *old = NULL; struct nfsd4_session *new; struct nfsd4_conn *conn; struct nfsd4_clid_slot *cs_slot = NULL; __be32 status = 0; struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); if (cr_ses->flags & ~SESSION4_FLAG_MASK_A) return nfserr_inval; status = nfsd4_check_cb_sec(&cr_ses->cb_sec); if (status) return status; status = check_forechannel_attrs(&cr_ses->fore_channel, nn); if (status) return status; status = check_backchannel_attrs(&cr_ses->back_channel); if (status) goto out_release_drc_mem; status = nfserr_jukebox; new = alloc_session(&cr_ses->fore_channel, &cr_ses->back_channel); if (!new) goto out_release_drc_mem; conn = alloc_conn_from_crses(rqstp, cr_ses); if (!conn) goto out_free_session; spin_lock(&nn->client_lock); unconf = find_unconfirmed_client(&cr_ses->clientid, true, nn); conf = find_confirmed_client(&cr_ses->clientid, true, nn); WARN_ON_ONCE(conf && unconf); if (conf) { status = nfserr_wrong_cred; if (!nfsd4_mach_creds_match(conf, rqstp)) goto out_free_conn; cs_slot = &conf->cl_cs_slot; status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0); if (status) { if (status == nfserr_replay_cache) status = nfsd4_replay_create_session(cr_ses, cs_slot); goto out_free_conn; } } else if (unconf) { if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) || !rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) { status = nfserr_clid_inuse; goto out_free_conn; } status = nfserr_wrong_cred; if (!nfsd4_mach_creds_match(unconf, rqstp)) goto out_free_conn; cs_slot = &unconf->cl_cs_slot; status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0); if (status) { /* an unconfirmed replay returns misordered */ status = nfserr_seq_misordered; goto out_free_conn; } old = find_confirmed_client_by_name(&unconf->cl_name, nn); if (old) { status = mark_client_expired_locked(old); if (status) { old = NULL; goto out_free_conn; } } move_to_confirmed(unconf); conf = unconf; } else { status = nfserr_stale_clientid; goto out_free_conn; } status = nfs_ok; /* Persistent sessions are not supported */ cr_ses->flags &= ~SESSION4_PERSIST; /* Upshifting from TCP to RDMA is not supported */ cr_ses->flags &= ~SESSION4_RDMA; init_session(rqstp, new, conf, cr_ses); nfsd4_get_session_locked(new); memcpy(cr_ses->sessionid.data, new->se_sessionid.data, NFS4_MAX_SESSIONID_LEN); cs_slot->sl_seqid++; cr_ses->seqid = cs_slot->sl_seqid; /* cache solo and embedded create sessions under the client_lock */ nfsd4_cache_create_session(cr_ses, cs_slot, status); spin_unlock(&nn->client_lock); /* init connection and backchannel */ nfsd4_init_conn(rqstp, conn, new); nfsd4_put_session(new); if (old) expire_client(old); return status; out_free_conn: spin_unlock(&nn->client_lock); free_conn(conn); if (old) expire_client(old); out_free_session: __free_session(new); out_release_drc_mem: nfsd4_put_drc_mem(&cr_ses->fore_channel); return status; } static __be32 nfsd4_map_bcts_dir(u32 *dir) { switch (*dir) { case NFS4_CDFC4_FORE: case NFS4_CDFC4_BACK: return nfs_ok; case NFS4_CDFC4_FORE_OR_BOTH: case NFS4_CDFC4_BACK_OR_BOTH: *dir = NFS4_CDFC4_BOTH; return nfs_ok; }; return nfserr_inval; } __be32 nfsd4_backchannel_ctl(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 
struct nfsd4_backchannel_ctl *bc) { struct nfsd4_session *session = cstate->session; struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); __be32 status; status = nfsd4_check_cb_sec(&bc->bc_cb_sec); if (status) return status; spin_lock(&nn->client_lock); session->se_cb_prog = bc->bc_cb_program; session->se_cb_sec = bc->bc_cb_sec; spin_unlock(&nn->client_lock); nfsd4_probe_callback(session->se_client); return nfs_ok; } __be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_bind_conn_to_session *bcts) { __be32 status; struct nfsd4_conn *conn; struct nfsd4_session *session; struct net *net = SVC_NET(rqstp); struct nfsd_net *nn = net_generic(net, nfsd_net_id); if (!nfsd4_last_compound_op(rqstp)) return nfserr_not_only_op; spin_lock(&nn->client_lock); session = find_in_sessionid_hashtbl(&bcts->sessionid, net, &status); spin_unlock(&nn->client_lock); if (!session) goto out_no_session; status = nfserr_wrong_cred; if (!nfsd4_mach_creds_match(session->se_client, rqstp)) goto out; status = nfsd4_map_bcts_dir(&bcts->dir); if (status) goto out; conn = alloc_conn(rqstp, bcts->dir); status = nfserr_jukebox; if (!conn) goto out; nfsd4_init_conn(rqstp, conn, session); status = nfs_ok; out: nfsd4_put_session(session); out_no_session: return status; } static bool nfsd4_compound_in_session(struct nfsd4_session *session, struct nfs4_sessionid *sid) { if (!session) return 0; return !memcmp(sid, &session->se_sessionid, sizeof(*sid)); } __be32 nfsd4_destroy_session(struct svc_rqst *r, struct nfsd4_compound_state *cstate, struct nfsd4_destroy_session *sessionid) { struct nfsd4_session *ses; __be32 status; int ref_held_by_me = 0; struct net *net = SVC_NET(r); struct nfsd_net *nn = net_generic(net, nfsd_net_id); status = nfserr_not_only_op; if (nfsd4_compound_in_session(cstate->session, &sessionid->sessionid)) { if (!nfsd4_last_compound_op(r)) goto out; ref_held_by_me++; } dump_sessionid(__func__, &sessionid->sessionid); spin_lock(&nn->client_lock); ses = find_in_sessionid_hashtbl(&sessionid->sessionid, net, &status); if (!ses) goto out_client_lock; status = nfserr_wrong_cred; if (!nfsd4_mach_creds_match(ses->se_client, r)) goto out_put_session; status = mark_session_dead_locked(ses, 1 + ref_held_by_me); if (status) goto out_put_session; unhash_session(ses); spin_unlock(&nn->client_lock); nfsd4_probe_callback_sync(ses->se_client); spin_lock(&nn->client_lock); status = nfs_ok; out_put_session: nfsd4_put_session_locked(ses); out_client_lock: spin_unlock(&nn->client_lock); out: return status; } static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s) { struct nfsd4_conn *c; list_for_each_entry(c, &s->se_conns, cn_persession) { if (c->cn_xprt == xpt) { return c; } } return NULL; } static __be32 nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_session *ses) { struct nfs4_client *clp = ses->se_client; struct nfsd4_conn *c; __be32 status = nfs_ok; int ret; spin_lock(&clp->cl_lock); c = __nfsd4_find_conn(new->cn_xprt, ses); if (c) goto out_free; status = nfserr_conn_not_bound_to_session; if (clp->cl_mach_cred) goto out_free; __nfsd4_hash_conn(new, ses); spin_unlock(&clp->cl_lock); ret = nfsd4_register_conn(new); if (ret) /* oops; xprt is already down: */ nfsd4_conn_lost(&new->cn_xpt_user); return nfs_ok; out_free: spin_unlock(&clp->cl_lock); free_conn(new); return status; } static bool nfsd4_session_too_many_ops(struct svc_rqst *rqstp, struct nfsd4_session *session) { struct nfsd4_compoundargs *args = 
rqstp->rq_argp; return args->opcnt > session->se_fchannel.maxops; } static bool nfsd4_request_too_big(struct svc_rqst *rqstp, struct nfsd4_session *session) { struct xdr_buf *xb = &rqstp->rq_arg; return xb->len > session->se_fchannel.maxreq_sz; } __be32 nfsd4_sequence(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_sequence *seq) { struct nfsd4_compoundres *resp = rqstp->rq_resp; struct xdr_stream *xdr = &resp->xdr; struct nfsd4_session *session; struct nfs4_client *clp; struct nfsd4_slot *slot; struct nfsd4_conn *conn; __be32 status; int buflen; struct net *net = SVC_NET(rqstp); struct nfsd_net *nn = net_generic(net, nfsd_net_id); if (resp->opcnt != 1) return nfserr_sequence_pos; /* * Will be either used or freed by nfsd4_sequence_check_conn * below. */ conn = alloc_conn(rqstp, NFS4_CDFC4_FORE); if (!conn) return nfserr_jukebox; spin_lock(&nn->client_lock); session = find_in_sessionid_hashtbl(&seq->sessionid, net, &status); if (!session) goto out_no_session; clp = session->se_client; status = nfserr_too_many_ops; if (nfsd4_session_too_many_ops(rqstp, session)) goto out_put_session; status = nfserr_req_too_big; if (nfsd4_request_too_big(rqstp, session)) goto out_put_session; status = nfserr_badslot; if (seq->slotid >= session->se_fchannel.maxreqs) goto out_put_session; slot = session->se_slots[seq->slotid]; dprintk("%s: slotid %d\n", __func__, seq->slotid); /* We do not negotiate the number of slots yet, so set the * maxslots to the session maxreqs which is used to encode * sr_highest_slotid and the sr_target_slot id to maxslots */ seq->maxslots = session->se_fchannel.maxreqs; status = check_slot_seqid(seq->seqid, slot->sl_seqid, slot->sl_flags & NFSD4_SLOT_INUSE); if (status == nfserr_replay_cache) { status = nfserr_seq_misordered; if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED)) goto out_put_session; cstate->slot = slot; cstate->session = session; cstate->clp = clp; /* Return the cached reply status and set cstate->status * for nfsd4_proc_compound processing */ status = nfsd4_replay_cache_entry(resp, seq); cstate->status = nfserr_replay_cache; goto out; } if (status) goto out_put_session; status = nfsd4_sequence_check_conn(conn, session); conn = NULL; if (status) goto out_put_session; buflen = (seq->cachethis) ? session->se_fchannel.maxresp_cached : session->se_fchannel.maxresp_sz; status = (seq->cachethis) ? nfserr_rep_too_big_to_cache : nfserr_rep_too_big; if (xdr_restrict_buflen(xdr, buflen - rqstp->rq_auth_slack)) goto out_put_session; svc_reserve(rqstp, buflen); status = nfs_ok; /* Success! 
bump slot seqid */ slot->sl_seqid = seq->seqid; slot->sl_flags |= NFSD4_SLOT_INUSE; if (seq->cachethis) slot->sl_flags |= NFSD4_SLOT_CACHETHIS; else slot->sl_flags &= ~NFSD4_SLOT_CACHETHIS; cstate->slot = slot; cstate->session = session; cstate->clp = clp; out: switch (clp->cl_cb_state) { case NFSD4_CB_DOWN: seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN; break; case NFSD4_CB_FAULT: seq->status_flags = SEQ4_STATUS_BACKCHANNEL_FAULT; break; default: seq->status_flags = 0; } if (!list_empty(&clp->cl_revoked)) seq->status_flags |= SEQ4_STATUS_RECALLABLE_STATE_REVOKED; out_no_session: if (conn) free_conn(conn); spin_unlock(&nn->client_lock); return status; out_put_session: nfsd4_put_session_locked(session); goto out_no_session; } void nfsd4_sequence_done(struct nfsd4_compoundres *resp) { struct nfsd4_compound_state *cs = &resp->cstate; if (nfsd4_has_session(cs)) { if (cs->status != nfserr_replay_cache) { nfsd4_store_cache_entry(resp); cs->slot->sl_flags &= ~NFSD4_SLOT_INUSE; } /* Drop session reference that was taken in nfsd4_sequence() */ nfsd4_put_session(cs->session); } else if (cs->clp) put_client_renew(cs->clp); } __be32 nfsd4_destroy_clientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_destroy_clientid *dc) { struct nfs4_client *conf, *unconf; struct nfs4_client *clp = NULL; __be32 status = 0; struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); spin_lock(&nn->client_lock); unconf = find_unconfirmed_client(&dc->clientid, true, nn); conf = find_confirmed_client(&dc->clientid, true, nn); WARN_ON_ONCE(conf && unconf); if (conf) { if (client_has_state(conf)) { status = nfserr_clientid_busy; goto out; } status = mark_client_expired_locked(conf); if (status) goto out; clp = conf; } else if (unconf) clp = unconf; else { status = nfserr_stale_clientid; goto out; } if (!nfsd4_mach_creds_match(clp, rqstp)) { clp = NULL; status = nfserr_wrong_cred; goto out; } unhash_client_locked(clp); out: spin_unlock(&nn->client_lock); if (clp) expire_client(clp); return status; } __be32 nfsd4_reclaim_complete(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_reclaim_complete *rc) { __be32 status = 0; if (rc->rca_one_fs) { if (!cstate->current_fh.fh_dentry) return nfserr_nofilehandle; /* * We don't take advantage of the rca_one_fs case. * That's OK, it's optional, we can safely ignore it. */ return nfs_ok; } status = nfserr_complete_already; if (test_and_set_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &cstate->session->se_client->cl_flags)) goto out; status = nfserr_stale_clientid; if (is_client_expired(cstate->session->se_client)) /* * The following error isn't really legal. * But we only get here if the client just explicitly * destroyed the client. Surely it no longer cares what * error it gets back on an operation for the dead * client. 
*/ goto out; status = nfs_ok; nfsd4_client_record_create(cstate->session->se_client); out: return status; } __be32 nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_setclientid *setclid) { struct xdr_netobj clname = setclid->se_name; nfs4_verifier clverifier = setclid->se_verf; struct nfs4_client *conf, *new; struct nfs4_client *unconf = NULL; __be32 status; struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); new = create_client(clname, rqstp, &clverifier); if (new == NULL) return nfserr_jukebox; /* Cases below refer to rfc 3530 section 14.2.33: */ spin_lock(&nn->client_lock); conf = find_confirmed_client_by_name(&clname, nn); if (conf && client_has_state(conf)) { /* case 0: */ status = nfserr_clid_inuse; if (clp_used_exchangeid(conf)) goto out; if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) { char addr_str[INET6_ADDRSTRLEN]; rpc_ntop((struct sockaddr *) &conf->cl_addr, addr_str, sizeof(addr_str)); dprintk("NFSD: setclientid: string in use by client " "at %s\n", addr_str); goto out; } } unconf = find_unconfirmed_client_by_name(&clname, nn); if (unconf) unhash_client_locked(unconf); if (conf && same_verf(&conf->cl_verifier, &clverifier)) { /* case 1: probable callback update */ copy_clid(new, conf); gen_confirm(new, nn); } else /* case 4 (new client) or cases 2, 3 (client reboot): */ gen_clid(new, nn); new->cl_minorversion = 0; gen_callback(new, setclid, rqstp); add_to_unconfirmed(new); setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot; setclid->se_clientid.cl_id = new->cl_clientid.cl_id; memcpy(setclid->se_confirm.data, new->cl_confirm.data, sizeof(setclid->se_confirm.data)); new = NULL; status = nfs_ok; out: spin_unlock(&nn->client_lock); if (new) free_client(new); if (unconf) expire_client(unconf); return status; } __be32 nfsd4_setclientid_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_setclientid_confirm *setclientid_confirm) { struct nfs4_client *conf, *unconf; struct nfs4_client *old = NULL; nfs4_verifier confirm = setclientid_confirm->sc_confirm; clientid_t * clid = &setclientid_confirm->sc_clientid; __be32 status; struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); if (STALE_CLIENTID(clid, nn)) return nfserr_stale_clientid; spin_lock(&nn->client_lock); conf = find_confirmed_client(clid, false, nn); unconf = find_unconfirmed_client(clid, false, nn); /* * We try hard to give out unique clientid's, so if we get an * attempt to confirm the same clientid with a different cred, * the client may be buggy; this should never happen. * * Nevertheless, RFC 7530 recommends INUSE for this case: */ status = nfserr_clid_inuse; if (unconf && !same_creds(&unconf->cl_cred, &rqstp->rq_cred)) goto out; if (conf && !same_creds(&conf->cl_cred, &rqstp->rq_cred)) goto out; /* cases below refer to rfc 3530 section 14.2.34: */ if (!unconf || !same_verf(&confirm, &unconf->cl_confirm)) { if (conf && same_verf(&confirm, &conf->cl_confirm)) { /* case 2: probable retransmit */ status = nfs_ok; } else /* case 4: client hasn't noticed we rebooted yet? 
*/ status = nfserr_stale_clientid; goto out; } status = nfs_ok; if (conf) { /* case 1: callback update */ old = unconf; unhash_client_locked(old); nfsd4_change_callback(conf, &unconf->cl_cb_conn); } else { /* case 3: normal case; new or rebooted client */ old = find_confirmed_client_by_name(&unconf->cl_name, nn); if (old) { status = nfserr_clid_inuse; if (client_has_state(old) && !same_creds(&unconf->cl_cred, &old->cl_cred)) goto out; status = mark_client_expired_locked(old); if (status) { old = NULL; goto out; } } move_to_confirmed(unconf); conf = unconf; } get_client_locked(conf); spin_unlock(&nn->client_lock); nfsd4_probe_callback(conf); spin_lock(&nn->client_lock); put_client_renew_locked(conf); out: spin_unlock(&nn->client_lock); if (old) expire_client(old); return status; } static struct nfs4_file *nfsd4_alloc_file(void) { return kmem_cache_alloc(file_slab, GFP_KERNEL); } /* OPEN Share state helper functions */ static void nfsd4_init_file(struct knfsd_fh *fh, unsigned int hashval, struct nfs4_file *fp) { lockdep_assert_held(&state_lock); atomic_set(&fp->fi_ref, 1); spin_lock_init(&fp->fi_lock); INIT_LIST_HEAD(&fp->fi_stateids); INIT_LIST_HEAD(&fp->fi_delegations); INIT_LIST_HEAD(&fp->fi_clnt_odstate); fh_copy_shallow(&fp->fi_fhandle, fh); fp->fi_deleg_file = NULL; fp->fi_had_conflict = false; fp->fi_share_deny = 0; memset(fp->fi_fds, 0, sizeof(fp->fi_fds)); memset(fp->fi_access, 0, sizeof(fp->fi_access)); #ifdef CONFIG_NFSD_PNFS INIT_LIST_HEAD(&fp->fi_lo_states); atomic_set(&fp->fi_lo_recalls, 0); #endif hlist_add_head_rcu(&fp->fi_hash, &file_hashtbl[hashval]); } void nfsd4_free_slabs(void) { kmem_cache_destroy(odstate_slab); kmem_cache_destroy(openowner_slab); kmem_cache_destroy(lockowner_slab); kmem_cache_destroy(file_slab); kmem_cache_destroy(stateid_slab); kmem_cache_destroy(deleg_slab); } int nfsd4_init_slabs(void) { openowner_slab = kmem_cache_create("nfsd4_openowners", sizeof(struct nfs4_openowner), 0, 0, NULL); if (openowner_slab == NULL) goto out; lockowner_slab = kmem_cache_create("nfsd4_lockowners", sizeof(struct nfs4_lockowner), 0, 0, NULL); if (lockowner_slab == NULL) goto out_free_openowner_slab; file_slab = kmem_cache_create("nfsd4_files", sizeof(struct nfs4_file), 0, 0, NULL); if (file_slab == NULL) goto out_free_lockowner_slab; stateid_slab = kmem_cache_create("nfsd4_stateids", sizeof(struct nfs4_ol_stateid), 0, 0, NULL); if (stateid_slab == NULL) goto out_free_file_slab; deleg_slab = kmem_cache_create("nfsd4_delegations", sizeof(struct nfs4_delegation), 0, 0, NULL); if (deleg_slab == NULL) goto out_free_stateid_slab; odstate_slab = kmem_cache_create("nfsd4_odstate", sizeof(struct nfs4_clnt_odstate), 0, 0, NULL); if (odstate_slab == NULL) goto out_free_deleg_slab; return 0; out_free_deleg_slab: kmem_cache_destroy(deleg_slab); out_free_stateid_slab: kmem_cache_destroy(stateid_slab); out_free_file_slab: kmem_cache_destroy(file_slab); out_free_lockowner_slab: kmem_cache_destroy(lockowner_slab); out_free_openowner_slab: kmem_cache_destroy(openowner_slab); out: dprintk("nfsd4: out of memory while initializing nfsv4\n"); return -ENOMEM; } static void init_nfs4_replay(struct nfs4_replay *rp) { rp->rp_status = nfserr_serverfault; rp->rp_buflen = 0; rp->rp_buf = rp->rp_ibuf; mutex_init(&rp->rp_mutex); } static void nfsd4_cstate_assign_replay(struct nfsd4_compound_state *cstate, struct nfs4_stateowner *so) { if (!nfsd4_has_session(cstate)) { mutex_lock(&so->so_replay.rp_mutex); cstate->replay_owner = nfs4_get_stateowner(so); } } void nfsd4_cstate_clear_replay(struct 
nfsd4_compound_state *cstate) { struct nfs4_stateowner *so = cstate->replay_owner; if (so != NULL) { cstate->replay_owner = NULL; mutex_unlock(&so->so_replay.rp_mutex); nfs4_put_stateowner(so); } } static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj *owner, struct nfs4_client *clp) { struct nfs4_stateowner *sop; sop = kmem_cache_alloc(slab, GFP_KERNEL); if (!sop) return NULL; sop->so_owner.data = kmemdup(owner->data, owner->len, GFP_KERNEL); if (!sop->so_owner.data) { kmem_cache_free(slab, sop); return NULL; } sop->so_owner.len = owner->len; INIT_LIST_HEAD(&sop->so_stateids); sop->so_client = clp; init_nfs4_replay(&sop->so_replay); atomic_set(&sop->so_count, 1); return sop; } static void hash_openowner(struct nfs4_openowner *oo, struct nfs4_client *clp, unsigned int strhashval) { lockdep_assert_held(&clp->cl_lock); list_add(&oo->oo_owner.so_strhash, &clp->cl_ownerstr_hashtbl[strhashval]); list_add(&oo->oo_perclient, &clp->cl_openowners); } static void nfs4_unhash_openowner(struct nfs4_stateowner *so) { unhash_openowner_locked(openowner(so)); } static void nfs4_free_openowner(struct nfs4_stateowner *so) { struct nfs4_openowner *oo = openowner(so); kmem_cache_free(openowner_slab, oo); } static const struct nfs4_stateowner_operations openowner_ops = { .so_unhash = nfs4_unhash_openowner, .so_free = nfs4_free_openowner, }; static struct nfs4_ol_stateid * nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open) { struct nfs4_ol_stateid *local, *ret = NULL; struct nfs4_openowner *oo = open->op_openowner; lockdep_assert_held(&fp->fi_lock); list_for_each_entry(local, &fp->fi_stateids, st_perfile) { /* ignore lock owners */ if (local->st_stateowner->so_is_open_owner == 0) continue; if (local->st_stateowner == &oo->oo_owner) { ret = local; atomic_inc(&ret->st_stid.sc_count); break; } } return ret; } static struct nfs4_openowner * alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open, struct nfsd4_compound_state *cstate) { struct nfs4_client *clp = cstate->clp; struct nfs4_openowner *oo, *ret; oo = alloc_stateowner(openowner_slab, &open->op_owner, clp); if (!oo) return NULL; oo->oo_owner.so_ops = &openowner_ops; oo->oo_owner.so_is_open_owner = 1; oo->oo_owner.so_seqid = open->op_seqid; oo->oo_flags = 0; if (nfsd4_has_session(cstate)) oo->oo_flags |= NFS4_OO_CONFIRMED; oo->oo_time = 0; oo->oo_last_closed_stid = NULL; INIT_LIST_HEAD(&oo->oo_close_lru); spin_lock(&clp->cl_lock); ret = find_openstateowner_str_locked(strhashval, open, clp); if (ret == NULL) { hash_openowner(oo, clp, strhashval); ret = oo; } else nfs4_free_stateowner(&oo->oo_owner); spin_unlock(&clp->cl_lock); return ret; } static struct nfs4_ol_stateid * init_open_stateid(struct nfs4_file *fp, struct nfsd4_open *open) { struct nfs4_openowner *oo = open->op_openowner; struct nfs4_ol_stateid *retstp = NULL; struct nfs4_ol_stateid *stp; stp = open->op_stp; /* We are moving these outside of the spinlocks to avoid the warnings */ mutex_init(&stp->st_mutex); mutex_lock(&stp->st_mutex); spin_lock(&oo->oo_owner.so_client->cl_lock); spin_lock(&fp->fi_lock); retstp = nfsd4_find_existing_open(fp, open); if (retstp) goto out_unlock; open->op_stp = NULL; atomic_inc(&stp->st_stid.sc_count); stp->st_stid.sc_type = NFS4_OPEN_STID; INIT_LIST_HEAD(&stp->st_locks); stp->st_stateowner = nfs4_get_stateowner(&oo->oo_owner); get_nfs4_file(fp); stp->st_stid.sc_file = fp; stp->st_access_bmap = 0; stp->st_deny_bmap = 0; stp->st_openstp = NULL; list_add(&stp->st_perstateowner, 
&oo->oo_owner.so_stateids); list_add(&stp->st_perfile, &fp->fi_stateids); out_unlock: spin_unlock(&fp->fi_lock); spin_unlock(&oo->oo_owner.so_client->cl_lock); if (retstp) { mutex_lock(&retstp->st_mutex); /* To keep mutex tracking happy */ mutex_unlock(&stp->st_mutex); stp = retstp; } return stp; } /* * In the 4.0 case we need to keep the owners around a little while to handle * CLOSE replay. We still do need to release any file access that is held by * them before returning however. */ static void move_to_close_lru(struct nfs4_ol_stateid *s, struct net *net) { struct nfs4_ol_stateid *last; struct nfs4_openowner *oo = openowner(s->st_stateowner); struct nfsd_net *nn = net_generic(s->st_stid.sc_client->net, nfsd_net_id); dprintk("NFSD: move_to_close_lru nfs4_openowner %p\n", oo); /* * We know that we hold one reference via nfsd4_close, and another * "persistent" reference for the client. If the refcount is higher * than 2, then there are still calls in progress that are using this * stateid. We can't put the sc_file reference until they are finished. * Wait for the refcount to drop to 2. Since it has been unhashed, * there should be no danger of the refcount going back up again at * this point. */ wait_event(close_wq, atomic_read(&s->st_stid.sc_count) == 2); release_all_access(s); if (s->st_stid.sc_file) { put_nfs4_file(s->st_stid.sc_file); s->st_stid.sc_file = NULL; } spin_lock(&nn->client_lock); last = oo->oo_last_closed_stid; oo->oo_last_closed_stid = s; list_move_tail(&oo->oo_close_lru, &nn->close_lru); oo->oo_time = get_seconds(); spin_unlock(&nn->client_lock); if (last) nfs4_put_stid(&last->st_stid); } /* search file_hashtbl[] for file */ static struct nfs4_file * find_file_locked(struct knfsd_fh *fh, unsigned int hashval) { struct nfs4_file *fp; hlist_for_each_entry_rcu(fp, &file_hashtbl[hashval], fi_hash) { if (fh_match(&fp->fi_fhandle, fh)) { if (atomic_inc_not_zero(&fp->fi_ref)) return fp; } } return NULL; } struct nfs4_file * find_file(struct knfsd_fh *fh) { struct nfs4_file *fp; unsigned int hashval = file_hashval(fh); rcu_read_lock(); fp = find_file_locked(fh, hashval); rcu_read_unlock(); return fp; } static struct nfs4_file * find_or_add_file(struct nfs4_file *new, struct knfsd_fh *fh) { struct nfs4_file *fp; unsigned int hashval = file_hashval(fh); rcu_read_lock(); fp = find_file_locked(fh, hashval); rcu_read_unlock(); if (fp) return fp; spin_lock(&state_lock); fp = find_file_locked(fh, hashval); if (likely(fp == NULL)) { nfsd4_init_file(fh, hashval, new); fp = new; } spin_unlock(&state_lock); return fp; } /* * Called to check deny when READ with all zero stateid or * WRITE with all zero or all one stateid */ static __be32 nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type) { struct nfs4_file *fp; __be32 ret = nfs_ok; fp = find_file(&current_fh->fh_handle); if (!fp) return ret; /* Check for conflicting share reservations */ spin_lock(&fp->fi_lock); if (fp->fi_share_deny & deny_type) ret = nfserr_locked; spin_unlock(&fp->fi_lock); put_nfs4_file(fp); return ret; } static void nfsd4_cb_recall_prepare(struct nfsd4_callback *cb) { struct nfs4_delegation *dp = cb_to_delegation(cb); struct nfsd_net *nn = net_generic(dp->dl_stid.sc_client->net, nfsd_net_id); block_delegations(&dp->dl_stid.sc_file->fi_fhandle); /* * We can't do this in nfsd_break_deleg_cb because it is * already holding inode->i_lock. * * If the dl_time != 0, then we know that it has already been * queued for a lease break. Don't queue it again. 
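 * (Both dl_time and the del_recall_lru list are updated under
 * state_lock just below.)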
*/ spin_lock(&state_lock); if (dp->dl_time == 0) { dp->dl_time = get_seconds(); list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru); } spin_unlock(&state_lock); } static int nfsd4_cb_recall_done(struct nfsd4_callback *cb, struct rpc_task *task) { struct nfs4_delegation *dp = cb_to_delegation(cb); if (dp->dl_stid.sc_type == NFS4_CLOSED_DELEG_STID) return 1; switch (task->tk_status) { case 0: return 1; case -EBADHANDLE: case -NFS4ERR_BAD_STATEID: /* * Race: client probably got cb_recall before open reply * granting delegation. */ if (dp->dl_retries--) { rpc_delay(task, 2 * HZ); return 0; } /*FALLTHRU*/ default: return -1; } } static void nfsd4_cb_recall_release(struct nfsd4_callback *cb) { struct nfs4_delegation *dp = cb_to_delegation(cb); nfs4_put_stid(&dp->dl_stid); } static const struct nfsd4_callback_ops nfsd4_cb_recall_ops = { .prepare = nfsd4_cb_recall_prepare, .done = nfsd4_cb_recall_done, .release = nfsd4_cb_recall_release, }; static void nfsd_break_one_deleg(struct nfs4_delegation *dp) { /* * We're assuming the state code never drops its reference * without first removing the lease. Since we're in this lease * callback (and since the lease code is serialized by the kernel * lock) we know the server hasn't removed the lease yet, we know * it's safe to take a reference. */ atomic_inc(&dp->dl_stid.sc_count); nfsd4_run_cb(&dp->dl_recall); } /* Called from break_lease() with i_lock held. */ static bool nfsd_break_deleg_cb(struct file_lock *fl) { bool ret = false; struct nfs4_file *fp = (struct nfs4_file *)fl->fl_owner; struct nfs4_delegation *dp; if (!fp) { WARN(1, "(%p)->fl_owner NULL\n", fl); return ret; } if (fp->fi_had_conflict) { WARN(1, "duplicate break on %p\n", fp); return ret; } /* * We don't want the locks code to timeout the lease for us; * we'll remove it ourself if a delegation isn't returned * in time: */ fl->fl_break_time = 0; spin_lock(&fp->fi_lock); fp->fi_had_conflict = true; /* * If there are no delegations on the list, then return true * so that the lease code will go ahead and delete it. */ if (list_empty(&fp->fi_delegations)) ret = true; else list_for_each_entry(dp, &fp->fi_delegations, dl_perfile) nfsd_break_one_deleg(dp); spin_unlock(&fp->fi_lock); return ret; } static int nfsd_change_deleg_cb(struct file_lock *onlist, int arg, struct list_head *dispose) { if (arg & F_UNLCK) return lease_modify(onlist, arg, dispose); else return -EAGAIN; } static const struct lock_manager_operations nfsd_lease_mng_ops = { .lm_break = nfsd_break_deleg_cb, .lm_change = nfsd_change_deleg_cb, }; static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4_stateowner *so, u32 seqid) { if (nfsd4_has_session(cstate)) return nfs_ok; if (seqid == so->so_seqid - 1) return nfserr_replay_me; if (seqid == so->so_seqid) return nfs_ok; return nfserr_bad_seqid; } static __be32 lookup_clientid(clientid_t *clid, struct nfsd4_compound_state *cstate, struct nfsd_net *nn) { struct nfs4_client *found; if (cstate->clp) { found = cstate->clp; if (!same_clid(&found->cl_clientid, clid)) return nfserr_stale_clientid; return nfs_ok; } if (STALE_CLIENTID(clid, nn)) return nfserr_stale_clientid; /* * For v4.1+ we get the client in the SEQUENCE op. If we don't have one * cached already then we know this is for is for v4.0 and "sessions" * will be false. 
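 * In the v4.0 case below we look the client up in the confirmed table,
 * bump its refcount, and cache it in cstate->clp.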
*/ WARN_ON_ONCE(cstate->session); spin_lock(&nn->client_lock); found = find_confirmed_client(clid, false, nn); if (!found) { spin_unlock(&nn->client_lock); return nfserr_expired; } atomic_inc(&found->cl_refcount); spin_unlock(&nn->client_lock); /* Cache the nfs4_client in cstate! */ cstate->clp = found; return nfs_ok; } __be32 nfsd4_process_open1(struct nfsd4_compound_state *cstate, struct nfsd4_open *open, struct nfsd_net *nn) { clientid_t *clientid = &open->op_clientid; struct nfs4_client *clp = NULL; unsigned int strhashval; struct nfs4_openowner *oo = NULL; __be32 status; if (STALE_CLIENTID(&open->op_clientid, nn)) return nfserr_stale_clientid; /* * In case we need it later, after we've already created the * file and don't want to risk a further failure: */ open->op_file = nfsd4_alloc_file(); if (open->op_file == NULL) return nfserr_jukebox; status = lookup_clientid(clientid, cstate, nn); if (status) return status; clp = cstate->clp; strhashval = ownerstr_hashval(&open->op_owner); oo = find_openstateowner_str(strhashval, open, clp); open->op_openowner = oo; if (!oo) { goto new_owner; } if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) { /* Replace unconfirmed owners without checking for replay. */ release_openowner(oo); open->op_openowner = NULL; goto new_owner; } status = nfsd4_check_seqid(cstate, &oo->oo_owner, open->op_seqid); if (status) return status; goto alloc_stateid; new_owner: oo = alloc_init_open_stateowner(strhashval, open, cstate); if (oo == NULL) return nfserr_jukebox; open->op_openowner = oo; alloc_stateid: open->op_stp = nfs4_alloc_open_stateid(clp); if (!open->op_stp) return nfserr_jukebox; if (nfsd4_has_session(cstate) && (cstate->current_fh.fh_export->ex_flags & NFSEXP_PNFS)) { open->op_odstate = alloc_clnt_odstate(clp); if (!open->op_odstate) return nfserr_jukebox; } return nfs_ok; } static inline __be32 nfs4_check_delegmode(struct nfs4_delegation *dp, int flags) { if ((flags & WR_STATE) && (dp->dl_type == NFS4_OPEN_DELEGATE_READ)) return nfserr_openmode; else return nfs_ok; } static int share_access_to_flags(u32 share_access) { return share_access == NFS4_SHARE_ACCESS_READ ? 
RD_STATE : WR_STATE; } static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl, stateid_t *s) { struct nfs4_stid *ret; ret = find_stateid_by_type(cl, s, NFS4_DELEG_STID); if (!ret) return NULL; return delegstateid(ret); } static bool nfsd4_is_deleg_cur(struct nfsd4_open *open) { return open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR || open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH; } static __be32 nfs4_check_deleg(struct nfs4_client *cl, struct nfsd4_open *open, struct nfs4_delegation **dp) { int flags; __be32 status = nfserr_bad_stateid; struct nfs4_delegation *deleg; deleg = find_deleg_stateid(cl, &open->op_delegate_stateid); if (deleg == NULL) goto out; flags = share_access_to_flags(open->op_share_access); status = nfs4_check_delegmode(deleg, flags); if (status) { nfs4_put_stid(&deleg->dl_stid); goto out; } *dp = deleg; out: if (!nfsd4_is_deleg_cur(open)) return nfs_ok; if (status) return status; open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED; return nfs_ok; } static inline int nfs4_access_to_access(u32 nfs4_access) { int flags = 0; if (nfs4_access & NFS4_SHARE_ACCESS_READ) flags |= NFSD_MAY_READ; if (nfs4_access & NFS4_SHARE_ACCESS_WRITE) flags |= NFSD_MAY_WRITE; return flags; } static inline __be32 nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh, struct nfsd4_open *open) { struct iattr iattr = { .ia_valid = ATTR_SIZE, .ia_size = 0, }; if (!open->op_truncate) return 0; if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE)) return nfserr_inval; return nfsd_setattr(rqstp, fh, &iattr, 0, (time_t)0); } static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp, struct nfsd4_open *open) { struct file *filp = NULL; __be32 status; int oflag = nfs4_access_to_omode(open->op_share_access); int access = nfs4_access_to_access(open->op_share_access); unsigned char old_access_bmap, old_deny_bmap; spin_lock(&fp->fi_lock); /* * Are we trying to set a deny mode that would conflict with * current access? 
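 * (nfs4_file_check_deny() and nfs4_file_get_access() below both run
 * under fp->fi_lock, which was taken above.)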
*/ status = nfs4_file_check_deny(fp, open->op_share_deny); if (status != nfs_ok) { spin_unlock(&fp->fi_lock); goto out; } /* set access to the file */ status = nfs4_file_get_access(fp, open->op_share_access); if (status != nfs_ok) { spin_unlock(&fp->fi_lock); goto out; } /* Set access bits in stateid */ old_access_bmap = stp->st_access_bmap; set_access(open->op_share_access, stp); /* Set new deny mask */ old_deny_bmap = stp->st_deny_bmap; set_deny(open->op_share_deny, stp); fp->fi_share_deny |= (open->op_share_deny & NFS4_SHARE_DENY_BOTH); if (!fp->fi_fds[oflag]) { spin_unlock(&fp->fi_lock); status = nfsd_open(rqstp, cur_fh, S_IFREG, access, &filp); if (status) goto out_put_access; spin_lock(&fp->fi_lock); if (!fp->fi_fds[oflag]) { fp->fi_fds[oflag] = filp; filp = NULL; } } spin_unlock(&fp->fi_lock); if (filp) fput(filp); status = nfsd4_truncate(rqstp, cur_fh, open); if (status) goto out_put_access; out: return status; out_put_access: stp->st_access_bmap = old_access_bmap; nfs4_file_put_access(fp, open->op_share_access); reset_union_bmap_deny(bmap_to_share_mode(old_deny_bmap), stp); goto out; } static __be32 nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp, struct nfsd4_open *open) { __be32 status; unsigned char old_deny_bmap = stp->st_deny_bmap; if (!test_access(open->op_share_access, stp)) return nfs4_get_vfs_file(rqstp, fp, cur_fh, stp, open); /* test and set deny mode */ spin_lock(&fp->fi_lock); status = nfs4_file_check_deny(fp, open->op_share_deny); if (status == nfs_ok) { set_deny(open->op_share_deny, stp); fp->fi_share_deny |= (open->op_share_deny & NFS4_SHARE_DENY_BOTH); } spin_unlock(&fp->fi_lock); if (status != nfs_ok) return status; status = nfsd4_truncate(rqstp, cur_fh, open); if (status != nfs_ok) reset_union_bmap_deny(old_deny_bmap, stp); return status; } /* Should we give out recallable state?: */ static bool nfsd4_cb_channel_good(struct nfs4_client *clp) { if (clp->cl_cb_state == NFSD4_CB_UP) return true; /* * In the sessions case, since we don't have to establish a * separate connection for callbacks, we assume it's OK * until we hear otherwise: */ return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN; } static struct file_lock *nfs4_alloc_init_lease(struct nfs4_file *fp, int flag) { struct file_lock *fl; fl = locks_alloc_lock(); if (!fl) return NULL; fl->fl_lmops = &nfsd_lease_mng_ops; fl->fl_flags = FL_DELEG; fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ? F_RDLCK: F_WRLCK; fl->fl_end = OFFSET_MAX; fl->fl_owner = (fl_owner_t)fp; fl->fl_pid = current->tgid; return fl; } /** * nfs4_setlease - Obtain a delegation by requesting lease from vfs layer * @dp: a pointer to the nfs4_delegation we're adding. * * Return: * On success: Return code will be 0 on success. * * On error: -EAGAIN if there was an existing delegation. * nonzero if there is an error in other cases. * */ static int nfs4_setlease(struct nfs4_delegation *dp) { struct nfs4_file *fp = dp->dl_stid.sc_file; struct file_lock *fl; struct file *filp; int status = 0; fl = nfs4_alloc_init_lease(fp, NFS4_OPEN_DELEGATE_READ); if (!fl) return -ENOMEM; filp = find_readable_file(fp); if (!filp) { /* We should always have a readable file here */ WARN_ON_ONCE(1); locks_free_lock(fl); return -EBADF; } fl->fl_file = filp; status = vfs_setlease(filp, fl->fl_type, &fl, NULL); if (fl) locks_free_lock(fl); if (status) goto out_fput; spin_lock(&state_lock); spin_lock(&fp->fi_lock); /* Did the lease get broken before we took the lock? 
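 * (fi_had_conflict is set by the lease-break callback; if it fired while
 *  the lease was being set up we return -EAGAIN so the caller gives up on
 *  the delegation instead of handing out one that is already being
 *  recalled.)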
*/ status = -EAGAIN; if (fp->fi_had_conflict) goto out_unlock; /* Race breaker */ if (fp->fi_deleg_file) { status = hash_delegation_locked(dp, fp); goto out_unlock; } fp->fi_deleg_file = filp; fp->fi_delegees = 0; status = hash_delegation_locked(dp, fp); spin_unlock(&fp->fi_lock); spin_unlock(&state_lock); if (status) { /* Should never happen, this is a new fi_deleg_file */ WARN_ON_ONCE(1); goto out_fput; } return 0; out_unlock: spin_unlock(&fp->fi_lock); spin_unlock(&state_lock); out_fput: fput(filp); return status; } static struct nfs4_delegation * nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh, struct nfs4_file *fp, struct nfs4_clnt_odstate *odstate) { int status; struct nfs4_delegation *dp; if (fp->fi_had_conflict) return ERR_PTR(-EAGAIN); spin_lock(&state_lock); spin_lock(&fp->fi_lock); status = nfs4_get_existing_delegation(clp, fp); spin_unlock(&fp->fi_lock); spin_unlock(&state_lock); if (status) return ERR_PTR(status); dp = alloc_init_deleg(clp, fh, odstate); if (!dp) return ERR_PTR(-ENOMEM); get_nfs4_file(fp); spin_lock(&state_lock); spin_lock(&fp->fi_lock); dp->dl_stid.sc_file = fp; if (!fp->fi_deleg_file) { spin_unlock(&fp->fi_lock); spin_unlock(&state_lock); status = nfs4_setlease(dp); goto out; } if (fp->fi_had_conflict) { status = -EAGAIN; goto out_unlock; } status = hash_delegation_locked(dp, fp); out_unlock: spin_unlock(&fp->fi_lock); spin_unlock(&state_lock); out: if (status) { put_clnt_odstate(dp->dl_clnt_odstate); nfs4_put_stid(&dp->dl_stid); return ERR_PTR(status); } return dp; } static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status) { open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT; if (status == -EAGAIN) open->op_why_no_deleg = WND4_CONTENTION; else { open->op_why_no_deleg = WND4_RESOURCE; switch (open->op_deleg_want) { case NFS4_SHARE_WANT_READ_DELEG: case NFS4_SHARE_WANT_WRITE_DELEG: case NFS4_SHARE_WANT_ANY_DELEG: break; case NFS4_SHARE_WANT_CANCEL: open->op_why_no_deleg = WND4_CANCELLED; break; case NFS4_SHARE_WANT_NO_DELEG: WARN_ON_ONCE(1); } } } /* * Attempt to hand out a delegation. * * Note we don't support write delegations, and won't until the vfs has * proper support for them. 
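 *
 * Roughly, for a fresh (CLAIM_NULL/CLAIM_FH) open a read delegation is
 * only offered when the callback channel looks usable, the openowner is
 * confirmed, the grace period is over, and the OPEN neither requests
 * write access nor creates the file (either of which would make an
 * immediate recall likely).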
*/ static void nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, struct nfs4_ol_stateid *stp) { struct nfs4_delegation *dp; struct nfs4_openowner *oo = openowner(stp->st_stateowner); struct nfs4_client *clp = stp->st_stid.sc_client; int cb_up; int status = 0; cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client); open->op_recall = 0; switch (open->op_claim_type) { case NFS4_OPEN_CLAIM_PREVIOUS: if (!cb_up) open->op_recall = 1; if (open->op_delegate_type != NFS4_OPEN_DELEGATE_READ) goto out_no_deleg; break; case NFS4_OPEN_CLAIM_NULL: case NFS4_OPEN_CLAIM_FH: /* * Let's not give out any delegations till everyone's * had the chance to reclaim theirs, *and* until * NLM locks have all been reclaimed: */ if (locks_in_grace(clp->net)) goto out_no_deleg; if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED)) goto out_no_deleg; /* * Also, if the file was opened for write or * create, there's a good chance the client's * about to write to it, resulting in an * immediate recall (since we don't support * write delegations): */ if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE) goto out_no_deleg; if (open->op_create == NFS4_OPEN_CREATE) goto out_no_deleg; break; default: goto out_no_deleg; } dp = nfs4_set_delegation(clp, fh, stp->st_stid.sc_file, stp->st_clnt_odstate); if (IS_ERR(dp)) goto out_no_deleg; memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid)); dprintk("NFSD: delegation stateid=" STATEID_FMT "\n", STATEID_VAL(&dp->dl_stid.sc_stateid)); open->op_delegate_type = NFS4_OPEN_DELEGATE_READ; nfs4_put_stid(&dp->dl_stid); return; out_no_deleg: open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE; if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS && open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE) { dprintk("NFSD: WARNING: refusing delegation reclaim\n"); open->op_recall = 1; } /* 4.1 client asking for a delegation? */ if (open->op_deleg_want) nfsd4_open_deleg_none_ext(open, status); return; } static void nfsd4_deleg_xgrade_none_ext(struct nfsd4_open *open, struct nfs4_delegation *dp) { if (open->op_deleg_want == NFS4_SHARE_WANT_READ_DELEG && dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) { open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT; open->op_why_no_deleg = WND4_NOT_SUPP_DOWNGRADE; } else if (open->op_deleg_want == NFS4_SHARE_WANT_WRITE_DELEG && dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) { open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT; open->op_why_no_deleg = WND4_NOT_SUPP_UPGRADE; } /* Otherwise the client must be confused wanting a delegation * it already has, therefore we don't return * NFS4_OPEN_DELEGATE_NONE_EXT and reason. */ } __be32 nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open) { struct nfsd4_compoundres *resp = rqstp->rq_resp; struct nfs4_client *cl = open->op_openowner->oo_owner.so_client; struct nfs4_file *fp = NULL; struct nfs4_ol_stateid *stp = NULL; struct nfs4_delegation *dp = NULL; __be32 status; /* * Lookup file; if found, lookup stateid and check open request, * and check for delegations in the process of being recalled. * If not found, create the nfs4_file struct */ fp = find_or_add_file(open->op_file, &current_fh->fh_handle); if (fp != open->op_file) { status = nfs4_check_deleg(cl, open, &dp); if (status) goto out; spin_lock(&fp->fi_lock); stp = nfsd4_find_existing_open(fp, open); spin_unlock(&fp->fi_lock); } else { open->op_file = NULL; status = nfserr_bad_stateid; if (nfsd4_is_deleg_cur(open)) goto out; } /* * OPEN the file, or upgrade an existing OPEN. 
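 * (An existing open stateid found above means this openowner already has
 *  the file open, so we only OR the new access/deny bits into that stateid
 *  via nfs4_upgrade_open() rather than creating a second one.)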
* If truncate fails, the OPEN fails. */ if (stp) { /* Stateid was found, this is an OPEN upgrade */ mutex_lock(&stp->st_mutex); status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open); if (status) { mutex_unlock(&stp->st_mutex); goto out; } } else { /* stp is returned locked. */ stp = init_open_stateid(fp, open); /* See if we lost the race to some other thread */ if (stp->st_access_bmap != 0) { status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open); if (status) { mutex_unlock(&stp->st_mutex); goto out; } goto upgrade_out; } status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open); if (status) { mutex_unlock(&stp->st_mutex); release_open_stateid(stp); goto out; } stp->st_clnt_odstate = find_or_hash_clnt_odstate(fp, open->op_odstate); if (stp->st_clnt_odstate == open->op_odstate) open->op_odstate = NULL; } upgrade_out: nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid); mutex_unlock(&stp->st_mutex); if (nfsd4_has_session(&resp->cstate)) { if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) { open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT; open->op_why_no_deleg = WND4_NOT_WANTED; goto nodeleg; } } /* * Attempt to hand out a delegation. No error return, because the * OPEN succeeds even if we fail. */ nfs4_open_delegation(current_fh, open, stp); nodeleg: status = nfs_ok; dprintk("%s: stateid=" STATEID_FMT "\n", __func__, STATEID_VAL(&stp->st_stid.sc_stateid)); out: /* 4.1 client trying to upgrade/downgrade delegation? */ if (open->op_delegate_type == NFS4_OPEN_DELEGATE_NONE && dp && open->op_deleg_want) nfsd4_deleg_xgrade_none_ext(open, dp); if (fp) put_nfs4_file(fp); if (status == 0 && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS) open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED; /* * To finish the open response, we just need to set the rflags. */ open->op_rflags = NFS4_OPEN_RESULT_LOCKTYPE_POSIX; if (nfsd4_has_session(&resp->cstate)) open->op_rflags |= NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK; else if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED)) open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM; if (dp) nfs4_put_stid(&dp->dl_stid); if (stp) nfs4_put_stid(&stp->st_stid); return status; } void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate, struct nfsd4_open *open) { if (open->op_openowner) { struct nfs4_stateowner *so = &open->op_openowner->oo_owner; nfsd4_cstate_assign_replay(cstate, so); nfs4_put_stateowner(so); } if (open->op_file) kmem_cache_free(file_slab, open->op_file); if (open->op_stp) nfs4_put_stid(&open->op_stp->st_stid); if (open->op_odstate) kmem_cache_free(odstate_slab, open->op_odstate); } __be32 nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, clientid_t *clid) { struct nfs4_client *clp; __be32 status; struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); dprintk("process_renew(%08x/%08x): starting\n", clid->cl_boot, clid->cl_id); status = lookup_clientid(clid, cstate, nn); if (status) goto out; clp = cstate->clp; status = nfserr_cb_path_down; if (!list_empty(&clp->cl_delegations) && clp->cl_cb_state != NFSD4_CB_UP) goto out; status = nfs_ok; out: return status; } void nfsd4_end_grace(struct nfsd_net *nn) { /* do nothing if grace period already ended */ if (nn->grace_ended) return; dprintk("NFSD: end of grace period\n"); nn->grace_ended = true; /* * If the server goes down again right now, an NFSv4 * client will still be allowed to reclaim after it comes back up, * even if it hasn't yet had a chance to reclaim state this time. 
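 * That protection ends once nfsd4_record_grace_done() below has the
 * client-tracking backend drop the records of clients that never
 * reclaimed; from then on a crash leaves those clients unable to reclaim
 * on the next boot.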
* */ nfsd4_record_grace_done(nn); /* * At this point, NFSv4 clients can still reclaim. But if the * server crashes, any that have not yet reclaimed will be out * of luck on the next boot. * * (NFSv4.1+ clients are considered to have reclaimed once they * call RECLAIM_COMPLETE. NFSv4.0 clients are considered to * have reclaimed after their first OPEN.) */ locks_end_grace(&nn->nfsd4_manager); /* * At this point, and once lockd and/or any other containers * exit their grace period, further reclaims will fail and * regular locking can resume. */ } static time_t nfs4_laundromat(struct nfsd_net *nn) { struct nfs4_client *clp; struct nfs4_openowner *oo; struct nfs4_delegation *dp; struct nfs4_ol_stateid *stp; struct nfsd4_blocked_lock *nbl; struct list_head *pos, *next, reaplist; time_t cutoff = get_seconds() - nn->nfsd4_lease; time_t t, new_timeo = nn->nfsd4_lease; dprintk("NFSD: laundromat service - starting\n"); nfsd4_end_grace(nn); INIT_LIST_HEAD(&reaplist); spin_lock(&nn->client_lock); list_for_each_safe(pos, next, &nn->client_lru) { clp = list_entry(pos, struct nfs4_client, cl_lru); if (time_after((unsigned long)clp->cl_time, (unsigned long)cutoff)) { t = clp->cl_time - cutoff; new_timeo = min(new_timeo, t); break; } if (mark_client_expired_locked(clp)) { dprintk("NFSD: client in use (clientid %08x)\n", clp->cl_clientid.cl_id); continue; } list_add(&clp->cl_lru, &reaplist); } spin_unlock(&nn->client_lock); list_for_each_safe(pos, next, &reaplist) { clp = list_entry(pos, struct nfs4_client, cl_lru); dprintk("NFSD: purging unused client (clientid %08x)\n", clp->cl_clientid.cl_id); list_del_init(&clp->cl_lru); expire_client(clp); } spin_lock(&state_lock); list_for_each_safe(pos, next, &nn->del_recall_lru) { dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru); if (time_after((unsigned long)dp->dl_time, (unsigned long)cutoff)) { t = dp->dl_time - cutoff; new_timeo = min(new_timeo, t); break; } WARN_ON(!unhash_delegation_locked(dp)); list_add(&dp->dl_recall_lru, &reaplist); } spin_unlock(&state_lock); while (!list_empty(&reaplist)) { dp = list_first_entry(&reaplist, struct nfs4_delegation, dl_recall_lru); list_del_init(&dp->dl_recall_lru); revoke_delegation(dp); } spin_lock(&nn->client_lock); while (!list_empty(&nn->close_lru)) { oo = list_first_entry(&nn->close_lru, struct nfs4_openowner, oo_close_lru); if (time_after((unsigned long)oo->oo_time, (unsigned long)cutoff)) { t = oo->oo_time - cutoff; new_timeo = min(new_timeo, t); break; } list_del_init(&oo->oo_close_lru); stp = oo->oo_last_closed_stid; oo->oo_last_closed_stid = NULL; spin_unlock(&nn->client_lock); nfs4_put_stid(&stp->st_stid); spin_lock(&nn->client_lock); } spin_unlock(&nn->client_lock); /* * It's possible for a client to try and acquire an already held lock * that is being held for a long time, and then lose interest in it. * So, we clean out any un-revisited request after a lease period * under the assumption that the client is no longer interested. * * RFC5661, sec. 9.6 states that the client must not rely on getting * notifications and must continue to poll for locks, even when the * server supports them. Thus this shouldn't lead to clients blocking * indefinitely once the lock does become free. 
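 *
 * The reaping below is done in two passes: expired entries are moved to
 * a local reaplist while nn->blocked_locks_lock is held, and are only
 * unblocked and freed once that lock has been dropped.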
 */
	BUG_ON(!list_empty(&reaplist));
	spin_lock(&nn->blocked_locks_lock);
	while (!list_empty(&nn->blocked_locks_lru)) {
		nbl = list_first_entry(&nn->blocked_locks_lru,
					struct nfsd4_blocked_lock, nbl_lru);
		if (time_after((unsigned long)nbl->nbl_time,
			       (unsigned long)cutoff)) {
			t = nbl->nbl_time - cutoff;
			new_timeo = min(new_timeo, t);
			break;
		}
		list_move(&nbl->nbl_lru, &reaplist);
		list_del_init(&nbl->nbl_list);
	}
	spin_unlock(&nn->blocked_locks_lock);
	while (!list_empty(&reaplist)) {
		nbl = list_first_entry(&reaplist,
					struct nfsd4_blocked_lock, nbl_lru);
		list_del_init(&nbl->nbl_lru);
		posix_unblock_lock(&nbl->nbl_lock);
		free_blocked_lock(nbl);
	}
	new_timeo = max_t(time_t, new_timeo, NFSD_LAUNDROMAT_MINTIMEOUT);
	return new_timeo;
}

static struct workqueue_struct *laundry_wq;
static void laundromat_main(struct work_struct *);

static void
laundromat_main(struct work_struct *laundry)
{
	time_t t;
	struct delayed_work *dwork = to_delayed_work(laundry);
	struct nfsd_net *nn = container_of(dwork, struct nfsd_net,
					   laundromat_work);

	t = nfs4_laundromat(nn);
	dprintk("NFSD: laundromat_main - sleeping for %ld seconds\n", t);
	queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ);
}

static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stid *stp)
{
	if (!fh_match(&fhp->fh_handle, &stp->sc_file->fi_fhandle))
		return nfserr_bad_stateid;
	return nfs_ok;
}

static inline int
access_permit_read(struct nfs4_ol_stateid *stp)
{
	return test_access(NFS4_SHARE_ACCESS_READ, stp) ||
		test_access(NFS4_SHARE_ACCESS_BOTH, stp) ||
		test_access(NFS4_SHARE_ACCESS_WRITE, stp);
}

static inline int
access_permit_write(struct nfs4_ol_stateid *stp)
{
	return test_access(NFS4_SHARE_ACCESS_WRITE, stp) ||
		test_access(NFS4_SHARE_ACCESS_BOTH, stp);
}

static
__be32 nfs4_check_openmode(struct nfs4_ol_stateid *stp, int flags)
{
	__be32 status = nfserr_openmode;

	/* For lock stateid's, we test the parent open, not the lock: */
	if (stp->st_openstp)
		stp = stp->st_openstp;
	if ((flags & WR_STATE) && !access_permit_write(stp))
		goto out;
	if ((flags & RD_STATE) && !access_permit_read(stp))
		goto out;
	status = nfs_ok;
out:
	return status;
}

static inline __be32
check_special_stateids(struct net *net, svc_fh *current_fh, stateid_t *stateid, int flags)
{
	if (ONE_STATEID(stateid) && (flags & RD_STATE))
		return nfs_ok;
	else if (opens_in_grace(net)) {
		/* Answer in remaining cases depends on existence of
		 * conflicting state; so we must wait out the grace period. */
		return nfserr_grace;
	} else if (flags & WR_STATE)
		return nfs4_share_conflict(current_fh,
				NFS4_SHARE_DENY_WRITE);
	else /* (flags & RD_STATE) && ZERO_STATEID(stateid) */
		return nfs4_share_conflict(current_fh,
				NFS4_SHARE_DENY_READ);
}

/*
 * Allow READ/WRITE during grace period on recovered state only for files
 * that are not able to provide mandatory locking.
 */
static inline int
grace_disallows_io(struct net *net, struct inode *inode)
{
	return opens_in_grace(net) && mandatory_lock(inode);
}

static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session)
{
	/*
	 * When sessions are used the stateid generation number is ignored
	 * when it is zero.
	 */
	if (has_session && in->si_generation == 0)
		return nfs_ok;

	if (in->si_generation == ref->si_generation)
		return nfs_ok;

	/* If the client sends us a stateid from the future, it's buggy: */
	if (nfsd4_stateid_generation_after(in, ref))
		return nfserr_bad_stateid;
	/*
	 * However, we could see a stateid from the past, even from a
	 * non-buggy client.
For example, if the client sends a lock * while some IO is outstanding, the lock may bump si_generation * while the IO is still in flight. The client could avoid that * situation by waiting for responses on all the IO requests, * but better performance may result in retrying IO that * receives an old_stateid error if requests are rarely * reordered in flight: */ return nfserr_old_stateid; } static __be32 nfsd4_check_openowner_confirmed(struct nfs4_ol_stateid *ols) { if (ols->st_stateowner->so_is_open_owner && !(openowner(ols->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED)) return nfserr_bad_stateid; return nfs_ok; } static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid) { struct nfs4_stid *s; __be32 status = nfserr_bad_stateid; if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) return status; /* Client debugging aid. */ if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid)) { char addr_str[INET6_ADDRSTRLEN]; rpc_ntop((struct sockaddr *)&cl->cl_addr, addr_str, sizeof(addr_str)); pr_warn_ratelimited("NFSD: client %s testing state ID " "with incorrect client ID\n", addr_str); return status; } spin_lock(&cl->cl_lock); s = find_stateid_locked(cl, stateid); if (!s) goto out_unlock; status = check_stateid_generation(stateid, &s->sc_stateid, 1); if (status) goto out_unlock; switch (s->sc_type) { case NFS4_DELEG_STID: status = nfs_ok; break; case NFS4_REVOKED_DELEG_STID: status = nfserr_deleg_revoked; break; case NFS4_OPEN_STID: case NFS4_LOCK_STID: status = nfsd4_check_openowner_confirmed(openlockstateid(s)); break; default: printk("unknown stateid type %x\n", s->sc_type); /* Fallthrough */ case NFS4_CLOSED_STID: case NFS4_CLOSED_DELEG_STID: status = nfserr_bad_stateid; } out_unlock: spin_unlock(&cl->cl_lock); return status; } __be32 nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid, unsigned char typemask, struct nfs4_stid **s, struct nfsd_net *nn) { __be32 status; if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) return nfserr_bad_stateid; status = lookup_clientid(&stateid->si_opaque.so_clid, cstate, nn); if (status == nfserr_stale_clientid) { if (cstate->session) return nfserr_bad_stateid; return nfserr_stale_stateid; } if (status) return status; *s = find_stateid_by_type(cstate->clp, stateid, typemask); if (!*s) return nfserr_bad_stateid; return nfs_ok; } static struct file * nfs4_find_file(struct nfs4_stid *s, int flags) { if (!s) return NULL; switch (s->sc_type) { case NFS4_DELEG_STID: if (WARN_ON_ONCE(!s->sc_file->fi_deleg_file)) return NULL; return get_file(s->sc_file->fi_deleg_file); case NFS4_OPEN_STID: case NFS4_LOCK_STID: if (flags & RD_STATE) return find_readable_file(s->sc_file); else return find_writeable_file(s->sc_file); break; } return NULL; } static __be32 nfs4_check_olstateid(struct svc_fh *fhp, struct nfs4_ol_stateid *ols, int flags) { __be32 status; status = nfsd4_check_openowner_confirmed(ols); if (status) return status; return nfs4_check_openmode(ols, flags); } static __be32 nfs4_check_file(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfs4_stid *s, struct file **filpp, bool *tmp_file, int flags) { int acc = (flags & RD_STATE) ? 
NFSD_MAY_READ : NFSD_MAY_WRITE; struct file *file; __be32 status; file = nfs4_find_file(s, flags); if (file) { status = nfsd_permission(rqstp, fhp->fh_export, fhp->fh_dentry, acc | NFSD_MAY_OWNER_OVERRIDE); if (status) { fput(file); return status; } *filpp = file; } else { status = nfsd_open(rqstp, fhp, S_IFREG, acc, filpp); if (status) return status; if (tmp_file) *tmp_file = true; } return 0; } /* * Checks for stateid operations */ __be32 nfs4_preprocess_stateid_op(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct svc_fh *fhp, stateid_t *stateid, int flags, struct file **filpp, bool *tmp_file) { struct inode *ino = d_inode(fhp->fh_dentry); struct net *net = SVC_NET(rqstp); struct nfsd_net *nn = net_generic(net, nfsd_net_id); struct nfs4_stid *s = NULL; __be32 status; if (filpp) *filpp = NULL; if (tmp_file) *tmp_file = false; if (grace_disallows_io(net, ino)) return nfserr_grace; if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) { status = check_special_stateids(net, fhp, stateid, flags); goto done; } status = nfsd4_lookup_stateid(cstate, stateid, NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID, &s, nn); if (status) return status; status = check_stateid_generation(stateid, &s->sc_stateid, nfsd4_has_session(cstate)); if (status) goto out; switch (s->sc_type) { case NFS4_DELEG_STID: status = nfs4_check_delegmode(delegstateid(s), flags); break; case NFS4_OPEN_STID: case NFS4_LOCK_STID: status = nfs4_check_olstateid(fhp, openlockstateid(s), flags); break; default: status = nfserr_bad_stateid; break; } if (status) goto out; status = nfs4_check_fh(fhp, s); done: if (!status && filpp) status = nfs4_check_file(rqstp, fhp, s, filpp, tmp_file, flags); out: if (s) nfs4_put_stid(s); return status; } /* * Test if the stateid is valid */ __be32 nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_test_stateid *test_stateid) { struct nfsd4_test_stateid_id *stateid; struct nfs4_client *cl = cstate->session->se_client; list_for_each_entry(stateid, &test_stateid->ts_stateid_list, ts_id_list) stateid->ts_id_status = nfsd4_validate_stateid(cl, &stateid->ts_id_stateid); return nfs_ok; } static __be32 nfsd4_free_lock_stateid(stateid_t *stateid, struct nfs4_stid *s) { struct nfs4_ol_stateid *stp = openlockstateid(s); __be32 ret; mutex_lock(&stp->st_mutex); ret = check_stateid_generation(stateid, &s->sc_stateid, 1); if (ret) goto out; ret = nfserr_locks_held; if (check_for_locks(stp->st_stid.sc_file, lockowner(stp->st_stateowner))) goto out; release_lock_stateid(stp); ret = nfs_ok; out: mutex_unlock(&stp->st_mutex); nfs4_put_stid(s); return ret; } __be32 nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_free_stateid *free_stateid) { stateid_t *stateid = &free_stateid->fr_stateid; struct nfs4_stid *s; struct nfs4_delegation *dp; struct nfs4_client *cl = cstate->session->se_client; __be32 ret = nfserr_bad_stateid; spin_lock(&cl->cl_lock); s = find_stateid_locked(cl, stateid); if (!s) goto out_unlock; switch (s->sc_type) { case NFS4_DELEG_STID: ret = nfserr_locks_held; break; case NFS4_OPEN_STID: ret = check_stateid_generation(stateid, &s->sc_stateid, 1); if (ret) break; ret = nfserr_locks_held; break; case NFS4_LOCK_STID: atomic_inc(&s->sc_count); spin_unlock(&cl->cl_lock); ret = nfsd4_free_lock_stateid(stateid, s); goto out; case NFS4_REVOKED_DELEG_STID: dp = delegstateid(s); list_del_init(&dp->dl_recall_lru); spin_unlock(&cl->cl_lock); nfs4_put_stid(s); ret = nfs_ok; goto out; /* Default falls through and returns 
nfserr_bad_stateid */ } out_unlock: spin_unlock(&cl->cl_lock); out: return ret; } static inline int setlkflg (int type) { return (type == NFS4_READW_LT || type == NFS4_READ_LT) ? RD_STATE : WR_STATE; } static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_t *stateid, u32 seqid, struct nfs4_ol_stateid *stp) { struct svc_fh *current_fh = &cstate->current_fh; struct nfs4_stateowner *sop = stp->st_stateowner; __be32 status; status = nfsd4_check_seqid(cstate, sop, seqid); if (status) return status; if (stp->st_stid.sc_type == NFS4_CLOSED_STID || stp->st_stid.sc_type == NFS4_REVOKED_DELEG_STID) /* * "Closed" stateid's exist *only* to return * nfserr_replay_me from the previous step, and * revoked delegations are kept only for free_stateid. */ return nfserr_bad_stateid; mutex_lock(&stp->st_mutex); status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate)); if (status == nfs_ok) status = nfs4_check_fh(current_fh, &stp->st_stid); if (status != nfs_ok) mutex_unlock(&stp->st_mutex); return status; } /* * Checks for sequence id mutating operations. */ static __be32 nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid, stateid_t *stateid, char typemask, struct nfs4_ol_stateid **stpp, struct nfsd_net *nn) { __be32 status; struct nfs4_stid *s; struct nfs4_ol_stateid *stp = NULL; dprintk("NFSD: %s: seqid=%d stateid = " STATEID_FMT "\n", __func__, seqid, STATEID_VAL(stateid)); *stpp = NULL; status = nfsd4_lookup_stateid(cstate, stateid, typemask, &s, nn); if (status) return status; stp = openlockstateid(s); nfsd4_cstate_assign_replay(cstate, stp->st_stateowner); status = nfs4_seqid_op_checks(cstate, stateid, seqid, stp); if (!status) *stpp = stp; else nfs4_put_stid(&stp->st_stid); return status; } static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid, stateid_t *stateid, struct nfs4_ol_stateid **stpp, struct nfsd_net *nn) { __be32 status; struct nfs4_openowner *oo; struct nfs4_ol_stateid *stp; status = nfs4_preprocess_seqid_op(cstate, seqid, stateid, NFS4_OPEN_STID, &stp, nn); if (status) return status; oo = openowner(stp->st_stateowner); if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) { mutex_unlock(&stp->st_mutex); nfs4_put_stid(&stp->st_stid); return nfserr_bad_stateid; } *stpp = stp; return nfs_ok; } __be32 nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_open_confirm *oc) { __be32 status; struct nfs4_openowner *oo; struct nfs4_ol_stateid *stp; struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); dprintk("NFSD: nfsd4_open_confirm on file %pd\n", cstate->current_fh.fh_dentry); status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0); if (status) return status; status = nfs4_preprocess_seqid_op(cstate, oc->oc_seqid, &oc->oc_req_stateid, NFS4_OPEN_STID, &stp, nn); if (status) goto out; oo = openowner(stp->st_stateowner); status = nfserr_bad_stateid; if (oo->oo_flags & NFS4_OO_CONFIRMED) { mutex_unlock(&stp->st_mutex); goto put_stateid; } oo->oo_flags |= NFS4_OO_CONFIRMED; nfs4_inc_and_copy_stateid(&oc->oc_resp_stateid, &stp->st_stid); mutex_unlock(&stp->st_mutex); dprintk("NFSD: %s: success, seqid=%d stateid=" STATEID_FMT "\n", __func__, oc->oc_seqid, STATEID_VAL(&stp->st_stid.sc_stateid)); nfsd4_client_record_create(oo->oo_owner.so_client); status = nfs_ok; put_stateid: nfs4_put_stid(&stp->st_stid); out: nfsd4_bump_seqid(cstate, status); return status; } static inline void nfs4_stateid_downgrade_bit(struct nfs4_ol_stateid *stp, u32 
access) { if (!test_access(access, stp)) return; nfs4_file_put_access(stp->st_stid.sc_file, access); clear_access(access, stp); } static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_access) { switch (to_access) { case NFS4_SHARE_ACCESS_READ: nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_WRITE); nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH); break; case NFS4_SHARE_ACCESS_WRITE: nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_READ); nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH); break; case NFS4_SHARE_ACCESS_BOTH: break; default: WARN_ON_ONCE(1); } } __be32 nfsd4_open_downgrade(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_open_downgrade *od) { __be32 status; struct nfs4_ol_stateid *stp; struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); dprintk("NFSD: nfsd4_open_downgrade on file %pd\n", cstate->current_fh.fh_dentry); /* We don't yet support WANT bits: */ if (od->od_deleg_want) dprintk("NFSD: %s: od_deleg_want=0x%x ignored\n", __func__, od->od_deleg_want); status = nfs4_preprocess_confirmed_seqid_op(cstate, od->od_seqid, &od->od_stateid, &stp, nn); if (status) goto out; status = nfserr_inval; if (!test_access(od->od_share_access, stp)) { dprintk("NFSD: access not a subset of current bitmap: 0x%hhx, input access=%08x\n", stp->st_access_bmap, od->od_share_access); goto put_stateid; } if (!test_deny(od->od_share_deny, stp)) { dprintk("NFSD: deny not a subset of current bitmap: 0x%hhx, input deny=%08x\n", stp->st_deny_bmap, od->od_share_deny); goto put_stateid; } nfs4_stateid_downgrade(stp, od->od_share_access); reset_union_bmap_deny(od->od_share_deny, stp); nfs4_inc_and_copy_stateid(&od->od_stateid, &stp->st_stid); status = nfs_ok; put_stateid: mutex_unlock(&stp->st_mutex); nfs4_put_stid(&stp->st_stid); out: nfsd4_bump_seqid(cstate, status); return status; } static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s) { struct nfs4_client *clp = s->st_stid.sc_client; bool unhashed; LIST_HEAD(reaplist); s->st_stid.sc_type = NFS4_CLOSED_STID; spin_lock(&clp->cl_lock); unhashed = unhash_open_stateid(s, &reaplist); if (clp->cl_minorversion) { if (unhashed) put_ol_stateid_locked(s, &reaplist); spin_unlock(&clp->cl_lock); free_ol_stateid_reaplist(&reaplist); } else { spin_unlock(&clp->cl_lock); free_ol_stateid_reaplist(&reaplist); if (unhashed) move_to_close_lru(s, clp->net); } } /* * nfs4_unlock_state() called after encode */ __be32 nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_close *close) { __be32 status; struct nfs4_ol_stateid *stp; struct net *net = SVC_NET(rqstp); struct nfsd_net *nn = net_generic(net, nfsd_net_id); dprintk("NFSD: nfsd4_close on file %pd\n", cstate->current_fh.fh_dentry); status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid, &close->cl_stateid, NFS4_OPEN_STID|NFS4_CLOSED_STID, &stp, nn); nfsd4_bump_seqid(cstate, status); if (status) goto out; nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid); mutex_unlock(&stp->st_mutex); nfsd4_close_open_stateid(stp); /* put reference from nfs4_preprocess_seqid_op */ nfs4_put_stid(&stp->st_stid); out: return status; } __be32 nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_delegreturn *dr) { struct nfs4_delegation *dp; stateid_t *stateid = &dr->dr_stateid; struct nfs4_stid *s; __be32 status; struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0))) return status; status = 
nfsd4_lookup_stateid(cstate, stateid, NFS4_DELEG_STID, &s, nn); if (status) goto out; dp = delegstateid(s); status = check_stateid_generation(stateid, &dp->dl_stid.sc_stateid, nfsd4_has_session(cstate)); if (status) goto put_stateid; destroy_delegation(dp); put_stateid: nfs4_put_stid(&dp->dl_stid); out: return status; } static inline u64 end_offset(u64 start, u64 len) { u64 end; end = start + len; return end >= start ? end: NFS4_MAX_UINT64; } /* last octet in a range */ static inline u64 last_byte_offset(u64 start, u64 len) { u64 end; WARN_ON_ONCE(!len); end = start + len; return end > start ? end - 1: NFS4_MAX_UINT64; } /* * TODO: Linux file offsets are _signed_ 64-bit quantities, which means that * we can't properly handle lock requests that go beyond the (2^63 - 1)-th * byte, because of sign extension problems. Since NFSv4 calls for 64-bit * locking, this prevents us from being completely protocol-compliant. The * real solution to this problem is to start using unsigned file offsets in * the VFS, but this is a very deep change! */ static inline void nfs4_transform_lock_offset(struct file_lock *lock) { if (lock->fl_start < 0) lock->fl_start = OFFSET_MAX; if (lock->fl_end < 0) lock->fl_end = OFFSET_MAX; } static fl_owner_t nfsd4_fl_get_owner(fl_owner_t owner) { struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner; nfs4_get_stateowner(&lo->lo_owner); return owner; } static void nfsd4_fl_put_owner(fl_owner_t owner) { struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner; if (lo) nfs4_put_stateowner(&lo->lo_owner); } static void nfsd4_lm_notify(struct file_lock *fl) { struct nfs4_lockowner *lo = (struct nfs4_lockowner *)fl->fl_owner; struct net *net = lo->lo_owner.so_client->net; struct nfsd_net *nn = net_generic(net, nfsd_net_id); struct nfsd4_blocked_lock *nbl = container_of(fl, struct nfsd4_blocked_lock, nbl_lock); bool queue = false; /* An empty list means that something else is going to be using it */ spin_lock(&nn->blocked_locks_lock); if (!list_empty(&nbl->nbl_list)) { list_del_init(&nbl->nbl_list); list_del_init(&nbl->nbl_lru); queue = true; } spin_unlock(&nn->blocked_locks_lock); if (queue) nfsd4_run_cb(&nbl->nbl_cb); } static const struct lock_manager_operations nfsd_posix_mng_ops = { .lm_notify = nfsd4_lm_notify, .lm_get_owner = nfsd4_fl_get_owner, .lm_put_owner = nfsd4_fl_put_owner, }; static inline void nfs4_set_lock_denied(struct file_lock *fl, struct nfsd4_lock_denied *deny) { struct nfs4_lockowner *lo; if (fl->fl_lmops == &nfsd_posix_mng_ops) { lo = (struct nfs4_lockowner *) fl->fl_owner; deny->ld_owner.data = kmemdup(lo->lo_owner.so_owner.data, lo->lo_owner.so_owner.len, GFP_KERNEL); if (!deny->ld_owner.data) /* We just don't care that much */ goto nevermind; deny->ld_owner.len = lo->lo_owner.so_owner.len; deny->ld_clientid = lo->lo_owner.so_client->cl_clientid; } else { nevermind: deny->ld_owner.len = 0; deny->ld_owner.data = NULL; deny->ld_clientid.cl_boot = 0; deny->ld_clientid.cl_id = 0; } deny->ld_start = fl->fl_start; deny->ld_length = NFS4_MAX_UINT64; if (fl->fl_end != NFS4_MAX_UINT64) deny->ld_length = fl->fl_end - fl->fl_start + 1; deny->ld_type = NFS4_READ_LT; if (fl->fl_type != F_RDLCK) deny->ld_type = NFS4_WRITE_LT; } static struct nfs4_lockowner * find_lockowner_str_locked(struct nfs4_client *clp, struct xdr_netobj *owner) { unsigned int strhashval = ownerstr_hashval(owner); struct nfs4_stateowner *so; lockdep_assert_held(&clp->cl_lock); list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[strhashval], so_strhash) { if (so->so_is_open_owner) continue; if 
(same_owner_str(so, owner)) return lockowner(nfs4_get_stateowner(so)); } return NULL; } static struct nfs4_lockowner * find_lockowner_str(struct nfs4_client *clp, struct xdr_netobj *owner) { struct nfs4_lockowner *lo; spin_lock(&clp->cl_lock); lo = find_lockowner_str_locked(clp, owner); spin_unlock(&clp->cl_lock); return lo; } static void nfs4_unhash_lockowner(struct nfs4_stateowner *sop) { unhash_lockowner_locked(lockowner(sop)); } static void nfs4_free_lockowner(struct nfs4_stateowner *sop) { struct nfs4_lockowner *lo = lockowner(sop); kmem_cache_free(lockowner_slab, lo); } static const struct nfs4_stateowner_operations lockowner_ops = { .so_unhash = nfs4_unhash_lockowner, .so_free = nfs4_free_lockowner, }; /* * Alloc a lock owner structure. * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) has * occurred. * * strhashval = ownerstr_hashval */ static struct nfs4_lockowner * alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp, struct nfs4_ol_stateid *open_stp, struct nfsd4_lock *lock) { struct nfs4_lockowner *lo, *ret; lo = alloc_stateowner(lockowner_slab, &lock->lk_new_owner, clp); if (!lo) return NULL; INIT_LIST_HEAD(&lo->lo_blocked); INIT_LIST_HEAD(&lo->lo_owner.so_stateids); lo->lo_owner.so_is_open_owner = 0; lo->lo_owner.so_seqid = lock->lk_new_lock_seqid; lo->lo_owner.so_ops = &lockowner_ops; spin_lock(&clp->cl_lock); ret = find_lockowner_str_locked(clp, &lock->lk_new_owner); if (ret == NULL) { list_add(&lo->lo_owner.so_strhash, &clp->cl_ownerstr_hashtbl[strhashval]); ret = lo; } else nfs4_free_stateowner(&lo->lo_owner); spin_unlock(&clp->cl_lock); return ret; } static void init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo, struct nfs4_file *fp, struct inode *inode, struct nfs4_ol_stateid *open_stp) { struct nfs4_client *clp = lo->lo_owner.so_client; lockdep_assert_held(&clp->cl_lock); atomic_inc(&stp->st_stid.sc_count); stp->st_stid.sc_type = NFS4_LOCK_STID; stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner); get_nfs4_file(fp); stp->st_stid.sc_file = fp; stp->st_access_bmap = 0; stp->st_deny_bmap = open_stp->st_deny_bmap; stp->st_openstp = open_stp; mutex_init(&stp->st_mutex); list_add(&stp->st_locks, &open_stp->st_locks); list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids); spin_lock(&fp->fi_lock); list_add(&stp->st_perfile, &fp->fi_stateids); spin_unlock(&fp->fi_lock); } static struct nfs4_ol_stateid * find_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fp) { struct nfs4_ol_stateid *lst; struct nfs4_client *clp = lo->lo_owner.so_client; lockdep_assert_held(&clp->cl_lock); list_for_each_entry(lst, &lo->lo_owner.so_stateids, st_perstateowner) { if (lst->st_stid.sc_file == fp) { atomic_inc(&lst->st_stid.sc_count); return lst; } } return NULL; } static struct nfs4_ol_stateid * find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi, struct inode *inode, struct nfs4_ol_stateid *ost, bool *new) { struct nfs4_stid *ns = NULL; struct nfs4_ol_stateid *lst; struct nfs4_openowner *oo = openowner(ost->st_stateowner); struct nfs4_client *clp = oo->oo_owner.so_client; spin_lock(&clp->cl_lock); lst = find_lock_stateid(lo, fi); if (lst == NULL) { spin_unlock(&clp->cl_lock); ns = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_lock_stateid); if (ns == NULL) return NULL; spin_lock(&clp->cl_lock); lst = find_lock_stateid(lo, fi); if (likely(!lst)) { lst = openlockstateid(ns); init_lock_stateid(lst, lo, fi, inode, ost); ns = NULL; *new = true; } } spin_unlock(&clp->cl_lock); if (ns) 
nfs4_put_stid(ns); return lst; } static int check_lock_length(u64 offset, u64 length) { return ((length == 0) || ((length != NFS4_MAX_UINT64) && (length > ~offset))); } static void get_lock_access(struct nfs4_ol_stateid *lock_stp, u32 access) { struct nfs4_file *fp = lock_stp->st_stid.sc_file; lockdep_assert_held(&fp->fi_lock); if (test_access(access, lock_stp)) return; __nfs4_file_get_access(fp, access); set_access(access, lock_stp); } static __be32 lookup_or_create_lock_state(struct nfsd4_compound_state *cstate, struct nfs4_ol_stateid *ost, struct nfsd4_lock *lock, struct nfs4_ol_stateid **plst, bool *new) { __be32 status; struct nfs4_file *fi = ost->st_stid.sc_file; struct nfs4_openowner *oo = openowner(ost->st_stateowner); struct nfs4_client *cl = oo->oo_owner.so_client; struct inode *inode = d_inode(cstate->current_fh.fh_dentry); struct nfs4_lockowner *lo; struct nfs4_ol_stateid *lst; unsigned int strhashval; bool hashed; lo = find_lockowner_str(cl, &lock->lk_new_owner); if (!lo) { strhashval = ownerstr_hashval(&lock->lk_new_owner); lo = alloc_init_lock_stateowner(strhashval, cl, ost, lock); if (lo == NULL) return nfserr_jukebox; } else { /* with an existing lockowner, seqids must be the same */ status = nfserr_bad_seqid; if (!cstate->minorversion && lock->lk_new_lock_seqid != lo->lo_owner.so_seqid) goto out; } retry: lst = find_or_create_lock_stateid(lo, fi, inode, ost, new); if (lst == NULL) { status = nfserr_jukebox; goto out; } mutex_lock(&lst->st_mutex); /* See if it's still hashed to avoid race with FREE_STATEID */ spin_lock(&cl->cl_lock); hashed = !list_empty(&lst->st_perfile); spin_unlock(&cl->cl_lock); if (!hashed) { mutex_unlock(&lst->st_mutex); nfs4_put_stid(&lst->st_stid); goto retry; } status = nfs_ok; *plst = lst; out: nfs4_put_stateowner(&lo->lo_owner); return status; } /* * LOCK operation */ __be32 nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_lock *lock) { struct nfs4_openowner *open_sop = NULL; struct nfs4_lockowner *lock_sop = NULL; struct nfs4_ol_stateid *lock_stp = NULL; struct nfs4_ol_stateid *open_stp = NULL; struct nfs4_file *fp; struct file *filp = NULL; struct nfsd4_blocked_lock *nbl = NULL; struct file_lock *file_lock = NULL; struct file_lock *conflock = NULL; __be32 status = 0; int lkflg; int err; bool new = false; unsigned char fl_type; unsigned int fl_flags = FL_POSIX; struct net *net = SVC_NET(rqstp); struct nfsd_net *nn = net_generic(net, nfsd_net_id); dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n", (long long) lock->lk_offset, (long long) lock->lk_length); if (check_lock_length(lock->lk_offset, lock->lk_length)) return nfserr_inval; if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, NFSD_MAY_LOCK))) { dprintk("NFSD: nfsd4_lock: permission denied!\n"); return status; } if (lock->lk_is_new) { if (nfsd4_has_session(cstate)) /* See rfc 5661 18.10.3: given clientid is ignored: */ memcpy(&lock->lk_new_clientid, &cstate->session->se_client->cl_clientid, sizeof(clientid_t)); status = nfserr_stale_clientid; if (STALE_CLIENTID(&lock->lk_new_clientid, nn)) goto out; /* validate and update open stateid and open seqid */ status = nfs4_preprocess_confirmed_seqid_op(cstate, lock->lk_new_open_seqid, &lock->lk_new_open_stateid, &open_stp, nn); if (status) goto out; mutex_unlock(&open_stp->st_mutex); open_sop = openowner(open_stp->st_stateowner); status = nfserr_bad_stateid; if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid, &lock->lk_new_clientid)) goto out; status = lookup_or_create_lock_state(cstate, 
open_stp, lock, &lock_stp, &new); } else { status = nfs4_preprocess_seqid_op(cstate, lock->lk_old_lock_seqid, &lock->lk_old_lock_stateid, NFS4_LOCK_STID, &lock_stp, nn); } if (status) goto out; lock_sop = lockowner(lock_stp->st_stateowner); lkflg = setlkflg(lock->lk_type); status = nfs4_check_openmode(lock_stp, lkflg); if (status) goto out; status = nfserr_grace; if (locks_in_grace(net) && !lock->lk_reclaim) goto out; status = nfserr_no_grace; if (!locks_in_grace(net) && lock->lk_reclaim) goto out; fp = lock_stp->st_stid.sc_file; switch (lock->lk_type) { case NFS4_READW_LT: if (nfsd4_has_session(cstate)) fl_flags |= FL_SLEEP; /* Fallthrough */ case NFS4_READ_LT: spin_lock(&fp->fi_lock); filp = find_readable_file_locked(fp); if (filp) get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ); spin_unlock(&fp->fi_lock); fl_type = F_RDLCK; break; case NFS4_WRITEW_LT: if (nfsd4_has_session(cstate)) fl_flags |= FL_SLEEP; /* Fallthrough */ case NFS4_WRITE_LT: spin_lock(&fp->fi_lock); filp = find_writeable_file_locked(fp); if (filp) get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE); spin_unlock(&fp->fi_lock); fl_type = F_WRLCK; break; default: status = nfserr_inval; goto out; } if (!filp) { status = nfserr_openmode; goto out; } nbl = find_or_allocate_block(lock_sop, &fp->fi_fhandle, nn); if (!nbl) { dprintk("NFSD: %s: unable to allocate block!\n", __func__); status = nfserr_jukebox; goto out; } file_lock = &nbl->nbl_lock; file_lock->fl_type = fl_type; file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(&lock_sop->lo_owner)); file_lock->fl_pid = current->tgid; file_lock->fl_file = filp; file_lock->fl_flags = fl_flags; file_lock->fl_lmops = &nfsd_posix_mng_ops; file_lock->fl_start = lock->lk_offset; file_lock->fl_end = last_byte_offset(lock->lk_offset, lock->lk_length); nfs4_transform_lock_offset(file_lock); conflock = locks_alloc_lock(); if (!conflock) { dprintk("NFSD: %s: unable to allocate lock!\n", __func__); status = nfserr_jukebox; goto out; } if (fl_flags & FL_SLEEP) { nbl->nbl_time = jiffies; spin_lock(&nn->blocked_locks_lock); list_add_tail(&nbl->nbl_list, &lock_sop->lo_blocked); list_add_tail(&nbl->nbl_lru, &nn->blocked_locks_lru); spin_unlock(&nn->blocked_locks_lock); } err = vfs_lock_file(filp, F_SETLK, file_lock, conflock); switch (err) { case 0: /* success! */ nfs4_inc_and_copy_stateid(&lock->lk_resp_stateid, &lock_stp->st_stid); status = 0; break; case FILE_LOCK_DEFERRED: nbl = NULL; /* Fallthrough */ case -EAGAIN: /* conflock holds conflicting lock */ status = nfserr_denied; dprintk("NFSD: nfsd4_lock: conflicting lock found!\n"); nfs4_set_lock_denied(conflock, &lock->lk_denied); break; case -EDEADLK: status = nfserr_deadlock; break; default: dprintk("NFSD: nfsd4_lock: vfs_lock_file() failed! status %d\n",err); status = nfserrno(err); break; } out: if (nbl) { /* dequeue it if we queued it before */ if (fl_flags & FL_SLEEP) { spin_lock(&nn->blocked_locks_lock); list_del_init(&nbl->nbl_list); list_del_init(&nbl->nbl_lru); spin_unlock(&nn->blocked_locks_lock); } free_blocked_lock(nbl); } if (filp) fput(filp); if (lock_stp) { /* Bump seqid manually if the 4.0 replay owner is openowner */ if (cstate->replay_owner && cstate->replay_owner != &lock_sop->lo_owner && seqid_mutating_err(ntohl(status))) lock_sop->lo_owner.so_seqid++; mutex_unlock(&lock_stp->st_mutex); /* * If this is a new, never-before-used stateid, and we are * returning an error, then just go ahead and release it. 
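 * ("new" is only set when lookup_or_create_lock_state() created the lock
 *  stateid on this call, so a pre-existing stateid is never torn down by
 *  a failed LOCK.)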
*/ if (status && new) release_lock_stateid(lock_stp); nfs4_put_stid(&lock_stp->st_stid); } if (open_stp) nfs4_put_stid(&open_stp->st_stid); nfsd4_bump_seqid(cstate, status); if (conflock) locks_free_lock(conflock); return status; } /* * The NFSv4 spec allows a client to do a LOCKT without holding an OPEN, * so we do a temporary open here just to get an open file to pass to * vfs_test_lock. (Arguably perhaps test_lock should be done with an * inode operation.) */ static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock) { struct file *file; __be32 err = nfsd_open(rqstp, fhp, S_IFREG, NFSD_MAY_READ, &file); if (!err) { err = nfserrno(vfs_test_lock(file, lock)); fput(file); } return err; } /* * LOCKT operation */ __be32 nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_lockt *lockt) { struct file_lock *file_lock = NULL; struct nfs4_lockowner *lo = NULL; __be32 status; struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); if (locks_in_grace(SVC_NET(rqstp))) return nfserr_grace; if (check_lock_length(lockt->lt_offset, lockt->lt_length)) return nfserr_inval; if (!nfsd4_has_session(cstate)) { status = lookup_clientid(&lockt->lt_clientid, cstate, nn); if (status) goto out; } if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0))) goto out; file_lock = locks_alloc_lock(); if (!file_lock) { dprintk("NFSD: %s: unable to allocate lock!\n", __func__); status = nfserr_jukebox; goto out; } switch (lockt->lt_type) { case NFS4_READ_LT: case NFS4_READW_LT: file_lock->fl_type = F_RDLCK; break; case NFS4_WRITE_LT: case NFS4_WRITEW_LT: file_lock->fl_type = F_WRLCK; break; default: dprintk("NFSD: nfs4_lockt: bad lock type!\n"); status = nfserr_inval; goto out; } lo = find_lockowner_str(cstate->clp, &lockt->lt_owner); if (lo) file_lock->fl_owner = (fl_owner_t)lo; file_lock->fl_pid = current->tgid; file_lock->fl_flags = FL_POSIX; file_lock->fl_start = lockt->lt_offset; file_lock->fl_end = last_byte_offset(lockt->lt_offset, lockt->lt_length); nfs4_transform_lock_offset(file_lock); status = nfsd_test_lock(rqstp, &cstate->current_fh, file_lock); if (status) goto out; if (file_lock->fl_type != F_UNLCK) { status = nfserr_denied; nfs4_set_lock_denied(file_lock, &lockt->lt_denied); } out: if (lo) nfs4_put_stateowner(&lo->lo_owner); if (file_lock) locks_free_lock(file_lock); return status; } __be32 nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_locku *locku) { struct nfs4_ol_stateid *stp; struct file *filp = NULL; struct file_lock *file_lock = NULL; __be32 status; int err; struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); dprintk("NFSD: nfsd4_locku: start=%Ld length=%Ld\n", (long long) locku->lu_offset, (long long) locku->lu_length); if (check_lock_length(locku->lu_offset, locku->lu_length)) return nfserr_inval; status = nfs4_preprocess_seqid_op(cstate, locku->lu_seqid, &locku->lu_stateid, NFS4_LOCK_STID, &stp, nn); if (status) goto out; filp = find_any_file(stp->st_stid.sc_file); if (!filp) { status = nfserr_lock_range; goto put_stateid; } file_lock = locks_alloc_lock(); if (!file_lock) { dprintk("NFSD: %s: unable to allocate lock!\n", __func__); status = nfserr_jukebox; goto fput; } file_lock->fl_type = F_UNLCK; file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(stp->st_stateowner)); file_lock->fl_pid = current->tgid; file_lock->fl_file = filp; file_lock->fl_flags = FL_POSIX; file_lock->fl_lmops = &nfsd_posix_mng_ops; file_lock->fl_start = locku->lu_offset; 
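	/*
	 * A lu_length of NFS4_MAX_UINT64 means "to the end of the file"; the
	 * u64 result of last_byte_offset() then doesn't fit in the signed
	 * fl_end, and nfs4_transform_lock_offset() clamps it to OFFSET_MAX.
	 */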
file_lock->fl_end = last_byte_offset(locku->lu_offset, locku->lu_length); nfs4_transform_lock_offset(file_lock); err = vfs_lock_file(filp, F_SETLK, file_lock, NULL); if (err) { dprintk("NFSD: nfs4_locku: vfs_lock_file failed!\n"); goto out_nfserr; } nfs4_inc_and_copy_stateid(&locku->lu_stateid, &stp->st_stid); fput: fput(filp); put_stateid: mutex_unlock(&stp->st_mutex); nfs4_put_stid(&stp->st_stid); out: nfsd4_bump_seqid(cstate, status); if (file_lock) locks_free_lock(file_lock); return status; out_nfserr: status = nfserrno(err); goto fput; } /* * returns * true: locks held by lockowner * false: no locks held by lockowner */ static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner) { struct file_lock *fl; int status = false; struct file *filp = find_any_file(fp); struct inode *inode; struct file_lock_context *flctx; if (!filp) { /* Any valid lock stateid should have some sort of access */ WARN_ON_ONCE(1); return status; } inode = file_inode(filp); flctx = inode->i_flctx; if (flctx && !list_empty_careful(&flctx->flc_posix)) { spin_lock(&flctx->flc_lock); list_for_each_entry(fl, &flctx->flc_posix, fl_list) { if (fl->fl_owner == (fl_owner_t)lowner) { status = true; break; } } spin_unlock(&flctx->flc_lock); } fput(filp); return status; } __be32 nfsd4_release_lockowner(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_release_lockowner *rlockowner) { clientid_t *clid = &rlockowner->rl_clientid; struct nfs4_stateowner *sop; struct nfs4_lockowner *lo = NULL; struct nfs4_ol_stateid *stp; struct xdr_netobj *owner = &rlockowner->rl_owner; unsigned int hashval = ownerstr_hashval(owner); __be32 status; struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); struct nfs4_client *clp; LIST_HEAD (reaplist); dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n", clid->cl_boot, clid->cl_id); status = lookup_clientid(clid, cstate, nn); if (status) return status; clp = cstate->clp; /* Find the matching lock stateowner */ spin_lock(&clp->cl_lock); list_for_each_entry(sop, &clp->cl_ownerstr_hashtbl[hashval], so_strhash) { if (sop->so_is_open_owner || !same_owner_str(sop, owner)) continue; /* see if there are still any locks associated with it */ lo = lockowner(sop); list_for_each_entry(stp, &sop->so_stateids, st_perstateowner) { if (check_for_locks(stp->st_stid.sc_file, lo)) { status = nfserr_locks_held; spin_unlock(&clp->cl_lock); return status; } } nfs4_get_stateowner(sop); break; } if (!lo) { spin_unlock(&clp->cl_lock); return status; } unhash_lockowner_locked(lo); while (!list_empty(&lo->lo_owner.so_stateids)) { stp = list_first_entry(&lo->lo_owner.so_stateids, struct nfs4_ol_stateid, st_perstateowner); WARN_ON(!unhash_lock_stateid(stp)); put_ol_stateid_locked(stp, &reaplist); } spin_unlock(&clp->cl_lock); free_ol_stateid_reaplist(&reaplist); nfs4_put_stateowner(&lo->lo_owner); return status; } static inline struct nfs4_client_reclaim * alloc_reclaim(void) { return kmalloc(sizeof(struct nfs4_client_reclaim), GFP_KERNEL); } bool nfs4_has_reclaimed_state(const char *name, struct nfsd_net *nn) { struct nfs4_client_reclaim *crp; crp = nfsd4_find_reclaim_client(name, nn); return (crp && crp->cr_clp); } /* * failure => all reset bets are off, nfserr_no_grace... 
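 * (i.e. if the reclaim record cannot be allocated below, that client's
 *  later reclaim cannot be verified and will be refused.)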
*/ struct nfs4_client_reclaim * nfs4_client_to_reclaim(const char *name, struct nfsd_net *nn) { unsigned int strhashval; struct nfs4_client_reclaim *crp; dprintk("NFSD nfs4_client_to_reclaim NAME: %.*s\n", HEXDIR_LEN, name); crp = alloc_reclaim(); if (crp) { strhashval = clientstr_hashval(name); INIT_LIST_HEAD(&crp->cr_strhash); list_add(&crp->cr_strhash, &nn->reclaim_str_hashtbl[strhashval]); memcpy(crp->cr_recdir, name, HEXDIR_LEN); crp->cr_clp = NULL; nn->reclaim_str_hashtbl_size++; } return crp; } void nfs4_remove_reclaim_record(struct nfs4_client_reclaim *crp, struct nfsd_net *nn) { list_del(&crp->cr_strhash); kfree(crp); nn->reclaim_str_hashtbl_size--; } void nfs4_release_reclaim(struct nfsd_net *nn) { struct nfs4_client_reclaim *crp = NULL; int i; for (i = 0; i < CLIENT_HASH_SIZE; i++) { while (!list_empty(&nn->reclaim_str_hashtbl[i])) { crp = list_entry(nn->reclaim_str_hashtbl[i].next, struct nfs4_client_reclaim, cr_strhash); nfs4_remove_reclaim_record(crp, nn); } } WARN_ON_ONCE(nn->reclaim_str_hashtbl_size); } /* * called from OPEN, CLAIM_PREVIOUS with a new clientid. */ struct nfs4_client_reclaim * nfsd4_find_reclaim_client(const char *recdir, struct nfsd_net *nn) { unsigned int strhashval; struct nfs4_client_reclaim *crp = NULL; dprintk("NFSD: nfs4_find_reclaim_client for recdir %s\n", recdir); strhashval = clientstr_hashval(recdir); list_for_each_entry(crp, &nn->reclaim_str_hashtbl[strhashval], cr_strhash) { if (same_name(crp->cr_recdir, recdir)) { return crp; } } return NULL; } /* * Called from OPEN. Look for clientid in reclaim list. */ __be32 nfs4_check_open_reclaim(clientid_t *clid, struct nfsd4_compound_state *cstate, struct nfsd_net *nn) { __be32 status; /* find clientid in conf_id_hashtbl */ status = lookup_clientid(clid, cstate, nn); if (status) return nfserr_reclaim_bad; if (test_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &cstate->clp->cl_flags)) return nfserr_no_grace; if (nfsd4_client_record_check(cstate->clp)) return nfserr_reclaim_bad; return nfs_ok; } #ifdef CONFIG_NFSD_FAULT_INJECTION static inline void put_client(struct nfs4_client *clp) { atomic_dec(&clp->cl_refcount); } static struct nfs4_client * nfsd_find_client(struct sockaddr_storage *addr, size_t addr_size) { struct nfs4_client *clp; struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, nfsd_net_id); if (!nfsd_netns_ready(nn)) return NULL; list_for_each_entry(clp, &nn->client_lru, cl_lru) { if (memcmp(&clp->cl_addr, addr, addr_size) == 0) return clp; } return NULL; } u64 nfsd_inject_print_clients(void) { struct nfs4_client *clp; u64 count = 0; struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, nfsd_net_id); char buf[INET6_ADDRSTRLEN]; if (!nfsd_netns_ready(nn)) return 0; spin_lock(&nn->client_lock); list_for_each_entry(clp, &nn->client_lru, cl_lru) { rpc_ntop((struct sockaddr *)&clp->cl_addr, buf, sizeof(buf)); pr_info("NFS Client: %s\n", buf); ++count; } spin_unlock(&nn->client_lock); return count; } u64 nfsd_inject_forget_client(struct sockaddr_storage *addr, size_t addr_size) { u64 count = 0; struct nfs4_client *clp; struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, nfsd_net_id); if (!nfsd_netns_ready(nn)) return count; spin_lock(&nn->client_lock); clp = nfsd_find_client(addr, addr_size); if (clp) { if (mark_client_expired_locked(clp) == nfs_ok) ++count; else clp = NULL; } spin_unlock(&nn->client_lock); if (clp) expire_client(clp); return count; } u64 nfsd_inject_forget_clients(u64 max) { u64 count = 0; struct nfs4_client *clp, *next; struct nfsd_net *nn = 
net_generic(current->nsproxy->net_ns, nfsd_net_id); LIST_HEAD(reaplist); if (!nfsd_netns_ready(nn)) return count; spin_lock(&nn->client_lock); list_for_each_entry_safe(clp, next, &nn->client_lru, cl_lru) { if (mark_client_expired_locked(clp) == nfs_ok) { list_add(&clp->cl_lru, &reaplist); if (max != 0 && ++count >= max) break; } } spin_unlock(&nn->client_lock); list_for_each_entry_safe(clp, next, &reaplist, cl_lru) expire_client(clp); return count; } static void nfsd_print_count(struct nfs4_client *clp, unsigned int count, const char *type) { char buf[INET6_ADDRSTRLEN]; rpc_ntop((struct sockaddr *)&clp->cl_addr, buf, sizeof(buf)); printk(KERN_INFO "NFS Client: %s has %u %s\n", buf, count, type); } static void nfsd_inject_add_lock_to_list(struct nfs4_ol_stateid *lst, struct list_head *collect) { struct nfs4_client *clp = lst->st_stid.sc_client; struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, nfsd_net_id); if (!collect) return; lockdep_assert_held(&nn->client_lock); atomic_inc(&clp->cl_refcount); list_add(&lst->st_locks, collect); } static u64 nfsd_foreach_client_lock(struct nfs4_client *clp, u64 max, struct list_head *collect, bool (*func)(struct nfs4_ol_stateid *)) { struct nfs4_openowner *oop; struct nfs4_ol_stateid *stp, *st_next; struct nfs4_ol_stateid *lst, *lst_next; u64 count = 0; spin_lock(&clp->cl_lock); list_for_each_entry(oop, &clp->cl_openowners, oo_perclient) { list_for_each_entry_safe(stp, st_next, &oop->oo_owner.so_stateids, st_perstateowner) { list_for_each_entry_safe(lst, lst_next, &stp->st_locks, st_locks) { if (func) { if (func(lst)) nfsd_inject_add_lock_to_list(lst, collect); } ++count; /* * Despite the fact that these functions deal * with 64-bit integers for "count", we must * ensure that it doesn't blow up the * clp->cl_refcount. Throw a warning if we * start to approach INT_MAX here. 
*/ WARN_ON_ONCE(count == (INT_MAX / 2)); if (count == max) goto out; } } } out: spin_unlock(&clp->cl_lock); return count; } static u64 nfsd_collect_client_locks(struct nfs4_client *clp, struct list_head *collect, u64 max) { return nfsd_foreach_client_lock(clp, max, collect, unhash_lock_stateid); } static u64 nfsd_print_client_locks(struct nfs4_client *clp) { u64 count = nfsd_foreach_client_lock(clp, 0, NULL, NULL); nfsd_print_count(clp, count, "locked files"); return count; } u64 nfsd_inject_print_locks(void) { struct nfs4_client *clp; u64 count = 0; struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, nfsd_net_id); if (!nfsd_netns_ready(nn)) return 0; spin_lock(&nn->client_lock); list_for_each_entry(clp, &nn->client_lru, cl_lru) count += nfsd_print_client_locks(clp); spin_unlock(&nn->client_lock); return count; } static void nfsd_reap_locks(struct list_head *reaplist) { struct nfs4_client *clp; struct nfs4_ol_stateid *stp, *next; list_for_each_entry_safe(stp, next, reaplist, st_locks) { list_del_init(&stp->st_locks); clp = stp->st_stid.sc_client; nfs4_put_stid(&stp->st_stid); put_client(clp); } } u64 nfsd_inject_forget_client_locks(struct sockaddr_storage *addr, size_t addr_size) { unsigned int count = 0; struct nfs4_client *clp; struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, nfsd_net_id); LIST_HEAD(reaplist); if (!nfsd_netns_ready(nn)) return count; spin_lock(&nn->client_lock); clp = nfsd_find_client(addr, addr_size); if (clp) count = nfsd_collect_client_locks(clp, &reaplist, 0); spin_unlock(&nn->client_lock); nfsd_reap_locks(&reaplist); return count; } u64 nfsd_inject_forget_locks(u64 max) { u64 count = 0; struct nfs4_client *clp; struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, nfsd_net_id); LIST_HEAD(reaplist); if (!nfsd_netns_ready(nn)) return count; spin_lock(&nn->client_lock); list_for_each_entry(clp, &nn->client_lru, cl_lru) { count += nfsd_collect_client_locks(clp, &reaplist, max - count); if (max != 0 && count >= max) break; } spin_unlock(&nn->client_lock); nfsd_reap_locks(&reaplist); return count; } static u64 nfsd_foreach_client_openowner(struct nfs4_client *clp, u64 max, struct list_head *collect, void (*func)(struct nfs4_openowner *)) { struct nfs4_openowner *oop, *next; struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, nfsd_net_id); u64 count = 0; lockdep_assert_held(&nn->client_lock); spin_lock(&clp->cl_lock); list_for_each_entry_safe(oop, next, &clp->cl_openowners, oo_perclient) { if (func) { func(oop); if (collect) { atomic_inc(&clp->cl_refcount); list_add(&oop->oo_perclient, collect); } } ++count; /* * Despite the fact that these functions deal with * 64-bit integers for "count", we must ensure that * it doesn't blow up the clp->cl_refcount. Throw a * warning if we start to approach INT_MAX here. 
*/ WARN_ON_ONCE(count == (INT_MAX / 2)); if (count == max) break; } spin_unlock(&clp->cl_lock); return count; } static u64 nfsd_print_client_openowners(struct nfs4_client *clp) { u64 count = nfsd_foreach_client_openowner(clp, 0, NULL, NULL); nfsd_print_count(clp, count, "openowners"); return count; } static u64 nfsd_collect_client_openowners(struct nfs4_client *clp, struct list_head *collect, u64 max) { return nfsd_foreach_client_openowner(clp, max, collect, unhash_openowner_locked); } u64 nfsd_inject_print_openowners(void) { struct nfs4_client *clp; u64 count = 0; struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, nfsd_net_id); if (!nfsd_netns_ready(nn)) return 0; spin_lock(&nn->client_lock); list_for_each_entry(clp, &nn->client_lru, cl_lru) count += nfsd_print_client_openowners(clp); spin_unlock(&nn->client_lock); return count; } static void nfsd_reap_openowners(struct list_head *reaplist) { struct nfs4_client *clp; struct nfs4_openowner *oop, *next; list_for_each_entry_safe(oop, next, reaplist, oo_perclient) { list_del_init(&oop->oo_perclient); clp = oop->oo_owner.so_client; release_openowner(oop); put_client(clp); } } u64 nfsd_inject_forget_client_openowners(struct sockaddr_storage *addr, size_t addr_size) { unsigned int count = 0; struct nfs4_client *clp; struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, nfsd_net_id); LIST_HEAD(reaplist); if (!nfsd_netns_ready(nn)) return count; spin_lock(&nn->client_lock); clp = nfsd_find_client(addr, addr_size); if (clp) count = nfsd_collect_client_openowners(clp, &reaplist, 0); spin_unlock(&nn->client_lock); nfsd_reap_openowners(&reaplist); return count; } u64 nfsd_inject_forget_openowners(u64 max) { u64 count = 0; struct nfs4_client *clp; struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, nfsd_net_id); LIST_HEAD(reaplist); if (!nfsd_netns_ready(nn)) return count; spin_lock(&nn->client_lock); list_for_each_entry(clp, &nn->client_lru, cl_lru) { count += nfsd_collect_client_openowners(clp, &reaplist, max - count); if (max != 0 && count >= max) break; } spin_unlock(&nn->client_lock); nfsd_reap_openowners(&reaplist); return count; } static u64 nfsd_find_all_delegations(struct nfs4_client *clp, u64 max, struct list_head *victims) { struct nfs4_delegation *dp, *next; struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, nfsd_net_id); u64 count = 0; lockdep_assert_held(&nn->client_lock); spin_lock(&state_lock); list_for_each_entry_safe(dp, next, &clp->cl_delegations, dl_perclnt) { if (victims) { /* * It's not safe to mess with delegations that have a * non-zero dl_time. They might have already been broken * and could be processed by the laundromat outside of * the state_lock. Just leave them be. */ if (dp->dl_time != 0) continue; atomic_inc(&clp->cl_refcount); WARN_ON(!unhash_delegation_locked(dp)); list_add(&dp->dl_recall_lru, victims); } ++count; /* * Despite the fact that these functions deal with * 64-bit integers for "count", we must ensure that * it doesn't blow up the clp->cl_refcount. Throw a * warning if we start to approach INT_MAX here. 
*/ WARN_ON_ONCE(count == (INT_MAX / 2)); if (count == max) break; } spin_unlock(&state_lock); return count; } static u64 nfsd_print_client_delegations(struct nfs4_client *clp) { u64 count = nfsd_find_all_delegations(clp, 0, NULL); nfsd_print_count(clp, count, "delegations"); return count; } u64 nfsd_inject_print_delegations(void) { struct nfs4_client *clp; u64 count = 0; struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, nfsd_net_id); if (!nfsd_netns_ready(nn)) return 0; spin_lock(&nn->client_lock); list_for_each_entry(clp, &nn->client_lru, cl_lru) count += nfsd_print_client_delegations(clp); spin_unlock(&nn->client_lock); return count; } static void nfsd_forget_delegations(struct list_head *reaplist) { struct nfs4_client *clp; struct nfs4_delegation *dp, *next; list_for_each_entry_safe(dp, next, reaplist, dl_recall_lru) { list_del_init(&dp->dl_recall_lru); clp = dp->dl_stid.sc_client; revoke_delegation(dp); put_client(clp); } } u64 nfsd_inject_forget_client_delegations(struct sockaddr_storage *addr, size_t addr_size) { u64 count = 0; struct nfs4_client *clp; struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, nfsd_net_id); LIST_HEAD(reaplist); if (!nfsd_netns_ready(nn)) return count; spin_lock(&nn->client_lock); clp = nfsd_find_client(addr, addr_size); if (clp) count = nfsd_find_all_delegations(clp, 0, &reaplist); spin_unlock(&nn->client_lock); nfsd_forget_delegations(&reaplist); return count; } u64 nfsd_inject_forget_delegations(u64 max) { u64 count = 0; struct nfs4_client *clp; struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, nfsd_net_id); LIST_HEAD(reaplist); if (!nfsd_netns_ready(nn)) return count; spin_lock(&nn->client_lock); list_for_each_entry(clp, &nn->client_lru, cl_lru) { count += nfsd_find_all_delegations(clp, max - count, &reaplist); if (max != 0 && count >= max) break; } spin_unlock(&nn->client_lock); nfsd_forget_delegations(&reaplist); return count; } static void nfsd_recall_delegations(struct list_head *reaplist) { struct nfs4_client *clp; struct nfs4_delegation *dp, *next; list_for_each_entry_safe(dp, next, reaplist, dl_recall_lru) { list_del_init(&dp->dl_recall_lru); clp = dp->dl_stid.sc_client; /* * We skipped all entries that had a zero dl_time before, * so we can now reset the dl_time back to 0. If a delegation * break comes in now, then it won't make any difference since * we're recalling it either way. 
*/ spin_lock(&state_lock); dp->dl_time = 0; spin_unlock(&state_lock); nfsd_break_one_deleg(dp); put_client(clp); } } u64 nfsd_inject_recall_client_delegations(struct sockaddr_storage *addr, size_t addr_size) { u64 count = 0; struct nfs4_client *clp; struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, nfsd_net_id); LIST_HEAD(reaplist); if (!nfsd_netns_ready(nn)) return count; spin_lock(&nn->client_lock); clp = nfsd_find_client(addr, addr_size); if (clp) count = nfsd_find_all_delegations(clp, 0, &reaplist); spin_unlock(&nn->client_lock); nfsd_recall_delegations(&reaplist); return count; } u64 nfsd_inject_recall_delegations(u64 max) { u64 count = 0; struct nfs4_client *clp, *next; struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, nfsd_net_id); LIST_HEAD(reaplist); if (!nfsd_netns_ready(nn)) return count; spin_lock(&nn->client_lock); list_for_each_entry_safe(clp, next, &nn->client_lru, cl_lru) { count += nfsd_find_all_delegations(clp, max - count, &reaplist); if (max != 0 && ++count >= max) break; } spin_unlock(&nn->client_lock); nfsd_recall_delegations(&reaplist); return count; } #endif /* CONFIG_NFSD_FAULT_INJECTION */ /* * Since the lifetime of a delegation isn't limited to that of an open, a * client may quite reasonably hang on to a delegation as long as it has * the inode cached. This becomes an obvious problem the first time a * client's inode cache approaches the size of the server's total memory. * * For now we avoid this problem by imposing a hard limit on the number * of delegations, which varies according to the server's memory size. */ static void set_max_delegations(void) { /* * Allow at most 4 delegations per megabyte of RAM. Quick * estimates suggest that in the worst case (where every delegation * is for a different inode), a delegation could take about 1.5K, * giving a worst case usage of about 6% of memory. 
*/ max_delegations = nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT); } static int nfs4_state_create_net(struct net *net) { struct nfsd_net *nn = net_generic(net, nfsd_net_id); int i; nn->conf_id_hashtbl = kmalloc(sizeof(struct list_head) * CLIENT_HASH_SIZE, GFP_KERNEL); if (!nn->conf_id_hashtbl) goto err; nn->unconf_id_hashtbl = kmalloc(sizeof(struct list_head) * CLIENT_HASH_SIZE, GFP_KERNEL); if (!nn->unconf_id_hashtbl) goto err_unconf_id; nn->sessionid_hashtbl = kmalloc(sizeof(struct list_head) * SESSION_HASH_SIZE, GFP_KERNEL); if (!nn->sessionid_hashtbl) goto err_sessionid; for (i = 0; i < CLIENT_HASH_SIZE; i++) { INIT_LIST_HEAD(&nn->conf_id_hashtbl[i]); INIT_LIST_HEAD(&nn->unconf_id_hashtbl[i]); } for (i = 0; i < SESSION_HASH_SIZE; i++) INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]); nn->conf_name_tree = RB_ROOT; nn->unconf_name_tree = RB_ROOT; INIT_LIST_HEAD(&nn->client_lru); INIT_LIST_HEAD(&nn->close_lru); INIT_LIST_HEAD(&nn->del_recall_lru); spin_lock_init(&nn->client_lock); spin_lock_init(&nn->blocked_locks_lock); INIT_LIST_HEAD(&nn->blocked_locks_lru); INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main); get_net(net); return 0; err_sessionid: kfree(nn->unconf_id_hashtbl); err_unconf_id: kfree(nn->conf_id_hashtbl); err: return -ENOMEM; } static void nfs4_state_destroy_net(struct net *net) { int i; struct nfs4_client *clp = NULL; struct nfsd_net *nn = net_generic(net, nfsd_net_id); for (i = 0; i < CLIENT_HASH_SIZE; i++) { while (!list_empty(&nn->conf_id_hashtbl[i])) { clp = list_entry(nn->conf_id_hashtbl[i].next, struct nfs4_client, cl_idhash); destroy_client(clp); } } for (i = 0; i < CLIENT_HASH_SIZE; i++) { while (!list_empty(&nn->unconf_id_hashtbl[i])) { clp = list_entry(nn->unconf_id_hashtbl[i].next, struct nfs4_client, cl_idhash); destroy_client(clp); } } kfree(nn->sessionid_hashtbl); kfree(nn->unconf_id_hashtbl); kfree(nn->conf_id_hashtbl); put_net(net); } int nfs4_state_start_net(struct net *net) { struct nfsd_net *nn = net_generic(net, nfsd_net_id); int ret; ret = nfs4_state_create_net(net); if (ret) return ret; nn->boot_time = get_seconds(); nn->grace_ended = false; nn->nfsd4_manager.block_opens = true; locks_start_grace(net, &nn->nfsd4_manager); nfsd4_client_tracking_init(net); printk(KERN_INFO "NFSD: starting %ld-second grace period (net %p)\n", nn->nfsd4_grace, net); queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_grace * HZ); return 0; } /* initialization to perform when the nfsd service is started: */ int nfs4_state_start(void) { int ret; ret = set_callback_cred(); if (ret) return ret; laundry_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, "nfsd4"); if (laundry_wq == NULL) { ret = -ENOMEM; goto out_cleanup_cred; } ret = nfsd4_create_callback_queue(); if (ret) goto out_free_laundry; set_max_delegations(); return 0; out_free_laundry: destroy_workqueue(laundry_wq); out_cleanup_cred: cleanup_callback_cred(); return ret; } void nfs4_state_shutdown_net(struct net *net) { struct nfs4_delegation *dp = NULL; struct list_head *pos, *next, reaplist; struct nfsd_net *nn = net_generic(net, nfsd_net_id); struct nfsd4_blocked_lock *nbl; cancel_delayed_work_sync(&nn->laundromat_work); locks_end_grace(&nn->nfsd4_manager); INIT_LIST_HEAD(&reaplist); spin_lock(&state_lock); list_for_each_safe(pos, next, &nn->del_recall_lru) { dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru); WARN_ON(!unhash_delegation_locked(dp)); list_add(&dp->dl_recall_lru, &reaplist); } spin_unlock(&state_lock); list_for_each_safe(pos, next, &reaplist) { dp = list_entry (pos, struct 
nfs4_delegation, dl_recall_lru); list_del_init(&dp->dl_recall_lru); put_clnt_odstate(dp->dl_clnt_odstate); nfs4_put_deleg_lease(dp->dl_stid.sc_file); nfs4_put_stid(&dp->dl_stid); } BUG_ON(!list_empty(&reaplist)); spin_lock(&nn->blocked_locks_lock); while (!list_empty(&nn->blocked_locks_lru)) { nbl = list_first_entry(&nn->blocked_locks_lru, struct nfsd4_blocked_lock, nbl_lru); list_move(&nbl->nbl_lru, &reaplist); list_del_init(&nbl->nbl_list); } spin_unlock(&nn->blocked_locks_lock); while (!list_empty(&reaplist)) { nbl = list_first_entry(&reaplist, struct nfsd4_blocked_lock, nbl_lru); list_del_init(&nbl->nbl_lru); posix_unblock_lock(&nbl->nbl_lock); free_blocked_lock(nbl); } nfsd4_client_tracking_exit(net); nfs4_state_destroy_net(net); } void nfs4_state_shutdown(void) { destroy_workqueue(laundry_wq); nfsd4_destroy_callback_queue(); cleanup_callback_cred(); } static void get_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid) { if (HAS_STATE_ID(cstate, CURRENT_STATE_ID_FLAG) && CURRENT_STATEID(stateid)) memcpy(stateid, &cstate->current_stateid, sizeof(stateid_t)); } static void put_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid) { if (cstate->minorversion) { memcpy(&cstate->current_stateid, stateid, sizeof(stateid_t)); SET_STATE_ID(cstate, CURRENT_STATE_ID_FLAG); } } void clear_current_stateid(struct nfsd4_compound_state *cstate) { CLEAR_STATE_ID(cstate, CURRENT_STATE_ID_FLAG); } /* * functions to set current state id */ void nfsd4_set_opendowngradestateid(struct nfsd4_compound_state *cstate, struct nfsd4_open_downgrade *odp) { put_stateid(cstate, &odp->od_stateid); } void nfsd4_set_openstateid(struct nfsd4_compound_state *cstate, struct nfsd4_open *open) { put_stateid(cstate, &open->op_stateid); } void nfsd4_set_closestateid(struct nfsd4_compound_state *cstate, struct nfsd4_close *close) { put_stateid(cstate, &close->cl_stateid); } void nfsd4_set_lockstateid(struct nfsd4_compound_state *cstate, struct nfsd4_lock *lock) { put_stateid(cstate, &lock->lk_resp_stateid); } /* * functions to consume current state id */ void nfsd4_get_opendowngradestateid(struct nfsd4_compound_state *cstate, struct nfsd4_open_downgrade *odp) { get_stateid(cstate, &odp->od_stateid); } void nfsd4_get_delegreturnstateid(struct nfsd4_compound_state *cstate, struct nfsd4_delegreturn *drp) { get_stateid(cstate, &drp->dr_stateid); } void nfsd4_get_freestateid(struct nfsd4_compound_state *cstate, struct nfsd4_free_stateid *fsp) { get_stateid(cstate, &fsp->fr_stateid); } void nfsd4_get_setattrstateid(struct nfsd4_compound_state *cstate, struct nfsd4_setattr *setattr) { get_stateid(cstate, &setattr->sa_stateid); } void nfsd4_get_closestateid(struct nfsd4_compound_state *cstate, struct nfsd4_close *close) { get_stateid(cstate, &close->cl_stateid); } void nfsd4_get_lockustateid(struct nfsd4_compound_state *cstate, struct nfsd4_locku *locku) { get_stateid(cstate, &locku->lu_stateid); } void nfsd4_get_readstateid(struct nfsd4_compound_state *cstate, struct nfsd4_read *read) { get_stateid(cstate, &read->rd_stateid); } void nfsd4_get_writestateid(struct nfsd4_compound_state *cstate, struct nfsd4_write *write) { get_stateid(cstate, &write->wr_stateid); }
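/*
 * The fault-injection and shutdown paths above all follow the same idiom:
 * walk a shared list under a spinlock, move the victims onto a private
 * "reaplist", drop the lock, and only then release them, since release may
 * sleep or take further locks.  What follows is a minimal user-space sketch
 * of that pattern, not kernel code: it assumes a pthread mutex in place of
 * nn->client_lock and a toy "entry" type instead of the real nfsd
 * structures, but the collect-then-reap shape is the same.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct entry {
	int owner;                       /* stand-in for a lockowner/client id */
	struct entry *next;
};

static struct entry *shared_list;                /* protected by list_lock */
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

/* Unhash every entry belonging to @owner and hand them back on a private list. */
static struct entry *collect_victims(int owner)
{
	struct entry **pp, *e, *reaplist = NULL;

	pthread_mutex_lock(&list_lock);
	for (pp = &shared_list; (e = *pp) != NULL; ) {
		if (e->owner == owner) {
			*pp = e->next;           /* unhash while holding the lock */
			e->next = reaplist;
			reaplist = e;
		} else {
			pp = &e->next;
		}
	}
	pthread_mutex_unlock(&list_lock);
	return reaplist;
}

/* Free the collected entries with no lock held. */
static void reap(struct entry *reaplist)
{
	while (reaplist != NULL) {
		struct entry *e = reaplist;

		reaplist = e->next;
		printf("reaping entry owned by %d\n", e->owner);
		free(e);
	}
}

int main(void)
{
	int i;

	for (i = 0; i < 6; i++) {
		struct entry *e = malloc(sizeof(*e));

		e->owner = i % 2;                /* two owners: 0 and 1 */
		e->next = shared_list;
		shared_list = e;
	}
	reap(collect_victims(1));                /* "forget" everything owned by 1 */
	reap(collect_victims(0));
	return 0;
}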
/* ./CrossVul/dataset_final_sorted/CWE-404/c/bad_3351_5 */
/* crossvul-cpp_data_good_3351_1 */
/* * linux/fs/lockd/svclock.c * * Handling of server-side locks, mostly of the blocked variety. * This is the ugliest part of lockd because we tread on very thin ice. * GRANT and CANCEL calls may get stuck, meet in mid-flight, etc. * IMNSHO introducing the grant callback into the NLM protocol was one * of the worst ideas Sun ever had. Except maybe for the idea of doing * NFS file locking at all. * * I'm trying hard to avoid race conditions by protecting most accesses * to a file's list of blocked locks through a semaphore. The global * list of blocked locks is not protected in this fashion however. * Therefore, some functions (such as the RPC callback for the async grant * call) move blocked locks towards the head of the list *while some other * process might be traversing it*. This should not be a problem in * practice, because this will only cause functions traversing the list * to visit some blocks twice. * * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de> */ #include <linux/types.h> #include <linux/slab.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/sunrpc/clnt.h> #include <linux/sunrpc/svc_xprt.h> #include <linux/lockd/nlm.h> #include <linux/lockd/lockd.h> #include <linux/kthread.h> #define NLMDBG_FACILITY NLMDBG_SVCLOCK #ifdef CONFIG_LOCKD_V4 #define nlm_deadlock nlm4_deadlock #else #define nlm_deadlock nlm_lck_denied #endif static void nlmsvc_release_block(struct nlm_block *block); static void nlmsvc_insert_block(struct nlm_block *block, unsigned long); static void nlmsvc_remove_block(struct nlm_block *block); static int nlmsvc_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock); static void nlmsvc_freegrantargs(struct nlm_rqst *call); static const struct rpc_call_ops nlmsvc_grant_ops; /* * The list of blocked locks to retry */ static LIST_HEAD(nlm_blocked); static DEFINE_SPINLOCK(nlm_blocked_lock); #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) static const char *nlmdbg_cookie2a(const struct nlm_cookie *cookie) { /* * We can get away with a static buffer because this is only called * from lockd, which is single-threaded. 
*/ static char buf[2*NLM_MAXCOOKIELEN+1]; unsigned int i, len = sizeof(buf); char *p = buf; len--; /* allow for trailing \0 */ if (len < 3) return "???"; for (i = 0 ; i < cookie->len ; i++) { if (len < 2) { strcpy(p-3, "..."); break; } sprintf(p, "%02x", cookie->data[i]); p += 2; len -= 2; } *p = '\0'; return buf; } #endif /* * Insert a blocked lock into the global list */ static void nlmsvc_insert_block_locked(struct nlm_block *block, unsigned long when) { struct nlm_block *b; struct list_head *pos; dprintk("lockd: nlmsvc_insert_block(%p, %ld)\n", block, when); if (list_empty(&block->b_list)) { kref_get(&block->b_count); } else { list_del_init(&block->b_list); } pos = &nlm_blocked; if (when != NLM_NEVER) { if ((when += jiffies) == NLM_NEVER) when ++; list_for_each(pos, &nlm_blocked) { b = list_entry(pos, struct nlm_block, b_list); if (time_after(b->b_when,when) || b->b_when == NLM_NEVER) break; } /* On normal exit from the loop, pos == &nlm_blocked, * so we will be adding to the end of the list - good */ } list_add_tail(&block->b_list, pos); block->b_when = when; } static void nlmsvc_insert_block(struct nlm_block *block, unsigned long when) { spin_lock(&nlm_blocked_lock); nlmsvc_insert_block_locked(block, when); spin_unlock(&nlm_blocked_lock); } /* * Remove a block from the global list */ static inline void nlmsvc_remove_block(struct nlm_block *block) { if (!list_empty(&block->b_list)) { spin_lock(&nlm_blocked_lock); list_del_init(&block->b_list); spin_unlock(&nlm_blocked_lock); nlmsvc_release_block(block); } } /* * Find a block for a given lock */ static struct nlm_block * nlmsvc_lookup_block(struct nlm_file *file, struct nlm_lock *lock) { struct nlm_block *block; struct file_lock *fl; dprintk("lockd: nlmsvc_lookup_block f=%p pd=%d %Ld-%Ld ty=%d\n", file, lock->fl.fl_pid, (long long)lock->fl.fl_start, (long long)lock->fl.fl_end, lock->fl.fl_type); list_for_each_entry(block, &nlm_blocked, b_list) { fl = &block->b_call->a_args.lock.fl; dprintk("lockd: check f=%p pd=%d %Ld-%Ld ty=%d cookie=%s\n", block->b_file, fl->fl_pid, (long long)fl->fl_start, (long long)fl->fl_end, fl->fl_type, nlmdbg_cookie2a(&block->b_call->a_args.cookie)); if (block->b_file == file && nlm_compare_locks(fl, &lock->fl)) { kref_get(&block->b_count); return block; } } return NULL; } static inline int nlm_cookie_match(struct nlm_cookie *a, struct nlm_cookie *b) { if (a->len != b->len) return 0; if (memcmp(a->data, b->data, a->len)) return 0; return 1; } /* * Find a block with a given NLM cookie. */ static inline struct nlm_block * nlmsvc_find_block(struct nlm_cookie *cookie) { struct nlm_block *block; list_for_each_entry(block, &nlm_blocked, b_list) { if (nlm_cookie_match(&block->b_call->a_args.cookie,cookie)) goto found; } return NULL; found: dprintk("nlmsvc_find_block(%s): block=%p\n", nlmdbg_cookie2a(cookie), block); kref_get(&block->b_count); return block; } /* * Create a block and initialize it. * * Note: we explicitly set the cookie of the grant reply to that of * the blocked lock request. The spec explicitly mentions that the client * should _not_ rely on the callback containing the same cookie as the * request, but (as I found out later) that's because some implementations * do just this. Never mind the standards comittees, they support our * logging industries. * * 10 years later: I hope we can safely ignore these old and broken * clients by now. Let's fix this so we can uniquely identify an incoming * GRANTED_RES message by cookie, without having to rely on the client's IP * address. 
--okir */ static struct nlm_block * nlmsvc_create_block(struct svc_rqst *rqstp, struct nlm_host *host, struct nlm_file *file, struct nlm_lock *lock, struct nlm_cookie *cookie) { struct nlm_block *block; struct nlm_rqst *call = NULL; call = nlm_alloc_call(host); if (call == NULL) return NULL; /* Allocate memory for block, and initialize arguments */ block = kzalloc(sizeof(*block), GFP_KERNEL); if (block == NULL) goto failed; kref_init(&block->b_count); INIT_LIST_HEAD(&block->b_list); INIT_LIST_HEAD(&block->b_flist); if (!nlmsvc_setgrantargs(call, lock)) goto failed_free; /* Set notifier function for VFS, and init args */ call->a_args.lock.fl.fl_flags |= FL_SLEEP; call->a_args.lock.fl.fl_lmops = &nlmsvc_lock_operations; nlmclnt_next_cookie(&call->a_args.cookie); dprintk("lockd: created block %p...\n", block); /* Create and initialize the block */ block->b_daemon = rqstp->rq_server; block->b_host = host; block->b_file = file; file->f_count++; /* Add to file's list of blocks */ list_add(&block->b_flist, &file->f_blocks); /* Set up RPC arguments for callback */ block->b_call = call; call->a_flags = RPC_TASK_ASYNC; call->a_block = block; return block; failed_free: kfree(block); failed: nlmsvc_release_call(call); return NULL; } /* * Delete a block. * It is the caller's responsibility to check whether the file * can be closed hereafter. */ static int nlmsvc_unlink_block(struct nlm_block *block) { int status; dprintk("lockd: unlinking block %p...\n", block); /* Remove block from list */ status = posix_unblock_lock(&block->b_call->a_args.lock.fl); nlmsvc_remove_block(block); return status; } static void nlmsvc_free_block(struct kref *kref) { struct nlm_block *block = container_of(kref, struct nlm_block, b_count); struct nlm_file *file = block->b_file; dprintk("lockd: freeing block %p...\n", block); /* Remove block from file's list of blocks */ list_del_init(&block->b_flist); mutex_unlock(&file->f_mutex); nlmsvc_freegrantargs(block->b_call); nlmsvc_release_call(block->b_call); nlm_release_file(block->b_file); kfree(block); } static void nlmsvc_release_block(struct nlm_block *block) { if (block != NULL) kref_put_mutex(&block->b_count, nlmsvc_free_block, &block->b_file->f_mutex); } /* * Loop over all blocks and delete blocks held by * a matching host. */ void nlmsvc_traverse_blocks(struct nlm_host *host, struct nlm_file *file, nlm_host_match_fn_t match) { struct nlm_block *block, *next; restart: mutex_lock(&file->f_mutex); list_for_each_entry_safe(block, next, &file->f_blocks, b_flist) { if (!match(block->b_host, host)) continue; /* Do not destroy blocks that are not on * the global retry list - why? */ if (list_empty(&block->b_list)) continue; kref_get(&block->b_count); mutex_unlock(&file->f_mutex); nlmsvc_unlink_block(block); nlmsvc_release_block(block); goto restart; } mutex_unlock(&file->f_mutex); } /* * Initialize arguments for GRANTED call. The nlm_rqst structure * has been cleared already. 
*/ static int nlmsvc_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock) { locks_copy_lock(&call->a_args.lock.fl, &lock->fl); memcpy(&call->a_args.lock.fh, &lock->fh, sizeof(call->a_args.lock.fh)); call->a_args.lock.caller = utsname()->nodename; call->a_args.lock.oh.len = lock->oh.len; /* set default data area */ call->a_args.lock.oh.data = call->a_owner; call->a_args.lock.svid = lock->fl.fl_pid; if (lock->oh.len > NLMCLNT_OHSIZE) { void *data = kmalloc(lock->oh.len, GFP_KERNEL); if (!data) return 0; call->a_args.lock.oh.data = (u8 *) data; } memcpy(call->a_args.lock.oh.data, lock->oh.data, lock->oh.len); return 1; } static void nlmsvc_freegrantargs(struct nlm_rqst *call) { if (call->a_args.lock.oh.data != call->a_owner) kfree(call->a_args.lock.oh.data); locks_release_private(&call->a_args.lock.fl); } /* * Deferred lock request handling for non-blocking lock */ static __be32 nlmsvc_defer_lock_rqst(struct svc_rqst *rqstp, struct nlm_block *block) { __be32 status = nlm_lck_denied_nolocks; block->b_flags |= B_QUEUED; nlmsvc_insert_block(block, NLM_TIMEOUT); block->b_cache_req = &rqstp->rq_chandle; if (rqstp->rq_chandle.defer) { block->b_deferred_req = rqstp->rq_chandle.defer(block->b_cache_req); if (block->b_deferred_req != NULL) status = nlm_drop_reply; } dprintk("lockd: nlmsvc_defer_lock_rqst block %p flags %d status %d\n", block, block->b_flags, ntohl(status)); return status; } /* * Attempt to establish a lock, and if it can't be granted, block it * if required. */ __be32 nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file, struct nlm_host *host, struct nlm_lock *lock, int wait, struct nlm_cookie *cookie, int reclaim) { struct nlm_block *block = NULL; int error; __be32 ret; dprintk("lockd: nlmsvc_lock(%s/%ld, ty=%d, pi=%d, %Ld-%Ld, bl=%d)\n", file_inode(file->f_file)->i_sb->s_id, file_inode(file->f_file)->i_ino, lock->fl.fl_type, lock->fl.fl_pid, (long long)lock->fl.fl_start, (long long)lock->fl.fl_end, wait); /* Lock file against concurrent access */ mutex_lock(&file->f_mutex); /* Get existing block (in case client is busy-waiting) * or create new block */ block = nlmsvc_lookup_block(file, lock); if (block == NULL) { block = nlmsvc_create_block(rqstp, host, file, lock, cookie); ret = nlm_lck_denied_nolocks; if (block == NULL) goto out; lock = &block->b_call->a_args.lock; } else lock->fl.fl_flags &= ~FL_SLEEP; if (block->b_flags & B_QUEUED) { dprintk("lockd: nlmsvc_lock deferred block %p flags %d\n", block, block->b_flags); if (block->b_granted) { nlmsvc_unlink_block(block); ret = nlm_granted; goto out; } if (block->b_flags & B_TIMED_OUT) { nlmsvc_unlink_block(block); ret = nlm_lck_denied; goto out; } ret = nlm_drop_reply; goto out; } if (locks_in_grace(SVC_NET(rqstp)) && !reclaim) { ret = nlm_lck_denied_grace_period; goto out; } if (reclaim && !locks_in_grace(SVC_NET(rqstp))) { ret = nlm_lck_denied_grace_period; goto out; } if (!wait) lock->fl.fl_flags &= ~FL_SLEEP; error = vfs_lock_file(file->f_file, F_SETLK, &lock->fl, NULL); lock->fl.fl_flags &= ~FL_SLEEP; dprintk("lockd: vfs_lock_file returned %d\n", error); switch (error) { case 0: ret = nlm_granted; goto out; case -EAGAIN: /* * If this is a blocking request for an * already pending lock request then we need * to put it back on lockd's block list */ if (wait) break; ret = nlm_lck_denied; goto out; case FILE_LOCK_DEFERRED: if (wait) break; /* Filesystem lock operation is in progress Add it to the queue waiting for callback */ ret = nlmsvc_defer_lock_rqst(rqstp, block); goto out; case -EDEADLK: ret = nlm_deadlock; goto 
out; default: /* includes ENOLCK */ ret = nlm_lck_denied_nolocks; goto out; } ret = nlm_lck_blocked; /* Append to list of blocked */ nlmsvc_insert_block(block, NLM_NEVER); out: mutex_unlock(&file->f_mutex); nlmsvc_release_block(block); dprintk("lockd: nlmsvc_lock returned %u\n", ret); return ret; } /* * Test for presence of a conflicting lock. */ __be32 nlmsvc_testlock(struct svc_rqst *rqstp, struct nlm_file *file, struct nlm_host *host, struct nlm_lock *lock, struct nlm_lock *conflock, struct nlm_cookie *cookie) { int error; __be32 ret; dprintk("lockd: nlmsvc_testlock(%s/%ld, ty=%d, %Ld-%Ld)\n", file_inode(file->f_file)->i_sb->s_id, file_inode(file->f_file)->i_ino, lock->fl.fl_type, (long long)lock->fl.fl_start, (long long)lock->fl.fl_end); if (locks_in_grace(SVC_NET(rqstp))) { ret = nlm_lck_denied_grace_period; goto out; } error = vfs_test_lock(file->f_file, &lock->fl); if (error) { /* We can't currently deal with deferred test requests */ if (error == FILE_LOCK_DEFERRED) WARN_ON_ONCE(1); ret = nlm_lck_denied_nolocks; goto out; } if (lock->fl.fl_type == F_UNLCK) { ret = nlm_granted; goto out; } dprintk("lockd: conflicting lock(ty=%d, %Ld-%Ld)\n", lock->fl.fl_type, (long long)lock->fl.fl_start, (long long)lock->fl.fl_end); conflock->caller = "somehost"; /* FIXME */ conflock->len = strlen(conflock->caller); conflock->oh.len = 0; /* don't return OH info */ conflock->svid = lock->fl.fl_pid; conflock->fl.fl_type = lock->fl.fl_type; conflock->fl.fl_start = lock->fl.fl_start; conflock->fl.fl_end = lock->fl.fl_end; locks_release_private(&lock->fl); ret = nlm_lck_denied; out: return ret; } /* * Remove a lock. * This implies a CANCEL call: We send a GRANT_MSG, the client replies * with a GRANT_RES call which gets lost, and calls UNLOCK immediately * afterwards. In this case the block will still be there, and hence * must be removed. */ __be32 nlmsvc_unlock(struct net *net, struct nlm_file *file, struct nlm_lock *lock) { int error; dprintk("lockd: nlmsvc_unlock(%s/%ld, pi=%d, %Ld-%Ld)\n", file_inode(file->f_file)->i_sb->s_id, file_inode(file->f_file)->i_ino, lock->fl.fl_pid, (long long)lock->fl.fl_start, (long long)lock->fl.fl_end); /* First, cancel any lock that might be there */ nlmsvc_cancel_blocked(net, file, lock); lock->fl.fl_type = F_UNLCK; error = vfs_lock_file(file->f_file, F_SETLK, &lock->fl, NULL); return (error < 0)? nlm_lck_denied_nolocks : nlm_granted; } /* * Cancel a previously blocked request. * * A cancel request always overrides any grant that may currently * be in progress. * The calling procedure must check whether the file can be closed. */ __be32 nlmsvc_cancel_blocked(struct net *net, struct nlm_file *file, struct nlm_lock *lock) { struct nlm_block *block; int status = 0; dprintk("lockd: nlmsvc_cancel(%s/%ld, pi=%d, %Ld-%Ld)\n", file_inode(file->f_file)->i_sb->s_id, file_inode(file->f_file)->i_ino, lock->fl.fl_pid, (long long)lock->fl.fl_start, (long long)lock->fl.fl_end); if (locks_in_grace(net)) return nlm_lck_denied_grace_period; mutex_lock(&file->f_mutex); block = nlmsvc_lookup_block(file, lock); mutex_unlock(&file->f_mutex); if (block != NULL) { vfs_cancel_lock(block->b_file->f_file, &block->b_call->a_args.lock.fl); status = nlmsvc_unlink_block(block); nlmsvc_release_block(block); } return status ? nlm_lck_denied : nlm_granted; } /* * This is a callback from the filesystem for VFS file lock requests. * It will be used if lm_grant is defined and the filesystem can not * respond to the request immediately. * For SETLK or SETLKW request it will get the local posix lock. 
* In all cases it will move the block to the head of nlm_blocked q where * nlmsvc_retry_blocked() can send back a reply for SETLKW or revisit the * deferred rpc for GETLK and SETLK. */ static void nlmsvc_update_deferred_block(struct nlm_block *block, int result) { block->b_flags |= B_GOT_CALLBACK; if (result == 0) block->b_granted = 1; else block->b_flags |= B_TIMED_OUT; } static int nlmsvc_grant_deferred(struct file_lock *fl, int result) { struct nlm_block *block; int rc = -ENOENT; spin_lock(&nlm_blocked_lock); list_for_each_entry(block, &nlm_blocked, b_list) { if (nlm_compare_locks(&block->b_call->a_args.lock.fl, fl)) { dprintk("lockd: nlmsvc_notify_blocked block %p flags %d\n", block, block->b_flags); if (block->b_flags & B_QUEUED) { if (block->b_flags & B_TIMED_OUT) { rc = -ENOLCK; break; } nlmsvc_update_deferred_block(block, result); } else if (result == 0) block->b_granted = 1; nlmsvc_insert_block_locked(block, 0); svc_wake_up(block->b_daemon); rc = 0; break; } } spin_unlock(&nlm_blocked_lock); if (rc == -ENOENT) printk(KERN_WARNING "lockd: grant for unknown block\n"); return rc; } /* * Unblock a blocked lock request. This is a callback invoked from the * VFS layer when a lock on which we blocked is removed. * * This function doesn't grant the blocked lock instantly, but rather moves * the block to the head of nlm_blocked where it can be picked up by lockd. */ static void nlmsvc_notify_blocked(struct file_lock *fl) { struct nlm_block *block; dprintk("lockd: VFS unblock notification for block %p\n", fl); spin_lock(&nlm_blocked_lock); list_for_each_entry(block, &nlm_blocked, b_list) { if (nlm_compare_locks(&block->b_call->a_args.lock.fl, fl)) { nlmsvc_insert_block_locked(block, 0); spin_unlock(&nlm_blocked_lock); svc_wake_up(block->b_daemon); return; } } spin_unlock(&nlm_blocked_lock); printk(KERN_WARNING "lockd: notification for unknown block!\n"); } static int nlmsvc_same_owner(struct file_lock *fl1, struct file_lock *fl2) { return fl1->fl_owner == fl2->fl_owner && fl1->fl_pid == fl2->fl_pid; } /* * Since NLM uses two "keys" for tracking locks, we need to hash them down * to one for the blocked_hash. Here, we're just xor'ing the host address * with the pid in order to create a key value for picking a hash bucket. */ static unsigned long nlmsvc_owner_key(struct file_lock *fl) { return (unsigned long)fl->fl_owner ^ (unsigned long)fl->fl_pid; } const struct lock_manager_operations nlmsvc_lock_operations = { .lm_compare_owner = nlmsvc_same_owner, .lm_owner_key = nlmsvc_owner_key, .lm_notify = nlmsvc_notify_blocked, .lm_grant = nlmsvc_grant_deferred, }; /* * Try to claim a lock that was previously blocked. * * Note that we use both the RPC_GRANTED_MSG call _and_ an async * RPC thread when notifying the client. This seems like overkill... * Here's why: * - we don't want to use a synchronous RPC thread, otherwise * we might find ourselves hanging on a dead portmapper. * - Some lockd implementations (e.g. HP) don't react to * RPC_GRANTED calls; they seem to insist on RPC_GRANTED_MSG calls. */ static void nlmsvc_grant_blocked(struct nlm_block *block) { struct nlm_file *file = block->b_file; struct nlm_lock *lock = &block->b_call->a_args.lock; int error; loff_t fl_start, fl_end; dprintk("lockd: grant blocked lock %p\n", block); kref_get(&block->b_count); /* Unlink block request from list */ nlmsvc_unlink_block(block); /* If b_granted is true this means we've been here before. 
* Just retry the grant callback, possibly refreshing the RPC * binding */ if (block->b_granted) { nlm_rebind_host(block->b_host); goto callback; } /* Try the lock operation again */ /* vfs_lock_file() can mangle fl_start and fl_end, but we need * them unchanged for the GRANT_MSG */ lock->fl.fl_flags |= FL_SLEEP; fl_start = lock->fl.fl_start; fl_end = lock->fl.fl_end; error = vfs_lock_file(file->f_file, F_SETLK, &lock->fl, NULL); lock->fl.fl_flags &= ~FL_SLEEP; lock->fl.fl_start = fl_start; lock->fl.fl_end = fl_end; switch (error) { case 0: break; case FILE_LOCK_DEFERRED: dprintk("lockd: lock still blocked error %d\n", error); nlmsvc_insert_block(block, NLM_NEVER); nlmsvc_release_block(block); return; default: printk(KERN_WARNING "lockd: unexpected error %d in %s!\n", -error, __func__); nlmsvc_insert_block(block, 10 * HZ); nlmsvc_release_block(block); return; } callback: /* Lock was granted by VFS. */ dprintk("lockd: GRANTing blocked lock.\n"); block->b_granted = 1; /* keep block on the list, but don't reattempt until the RPC * completes or the submission fails */ nlmsvc_insert_block(block, NLM_NEVER); /* Call the client -- use a soft RPC task since nlmsvc_retry_blocked * will queue up a new one if this one times out */ error = nlm_async_call(block->b_call, NLMPROC_GRANTED_MSG, &nlmsvc_grant_ops); /* RPC submission failed, wait a bit and retry */ if (error < 0) nlmsvc_insert_block(block, 10 * HZ); } /* * This is the callback from the RPC layer when the NLM_GRANTED_MSG * RPC call has succeeded or timed out. * Like all RPC callbacks, it is invoked by the rpciod process, so it * better not sleep. Therefore, we put the blocked lock on the nlm_blocked * chain once more in order to have it removed by lockd itself (which can * then sleep on the file semaphore without disrupting e.g. the nfs client). */ static void nlmsvc_grant_callback(struct rpc_task *task, void *data) { struct nlm_rqst *call = data; struct nlm_block *block = call->a_block; unsigned long timeout; dprintk("lockd: GRANT_MSG RPC callback\n"); spin_lock(&nlm_blocked_lock); /* if the block is not on a list at this point then it has * been invalidated. Don't try to requeue it. * * FIXME: it's possible that the block is removed from the list * after this check but before the nlmsvc_insert_block. In that * case it will be added back. Perhaps we need better locking * for nlm_blocked? */ if (list_empty(&block->b_list)) goto out; /* Technically, we should down the file semaphore here. Since we * move the block towards the head of the queue only, no harm * can be done, though. */ if (task->tk_status < 0) { /* RPC error: Re-insert for retransmission */ timeout = 10 * HZ; } else { /* Call was successful, now wait for client callback */ timeout = 60 * HZ; } nlmsvc_insert_block_locked(block, timeout); svc_wake_up(block->b_daemon); out: spin_unlock(&nlm_blocked_lock); } /* * FIXME: nlmsvc_release_block() grabs a mutex. This is not allowed for an * .rpc_release rpc_call_op */ static void nlmsvc_grant_release(void *data) { struct nlm_rqst *call = data; nlmsvc_release_block(call->a_block); } static const struct rpc_call_ops nlmsvc_grant_ops = { .rpc_call_done = nlmsvc_grant_callback, .rpc_release = nlmsvc_grant_release, }; /* * We received a GRANT_RES callback. Try to find the corresponding * block. 
*/ void nlmsvc_grant_reply(struct nlm_cookie *cookie, __be32 status) { struct nlm_block *block; dprintk("grant_reply: looking for cookie %x, s=%d \n", *(unsigned int *)(cookie->data), status); if (!(block = nlmsvc_find_block(cookie))) return; if (status == nlm_lck_denied_grace_period) { /* Try again in a couple of seconds */ nlmsvc_insert_block(block, 10 * HZ); } else { /* * Lock is now held by client, or has been rejected. * In both cases, the block should be removed. */ nlmsvc_unlink_block(block); } nlmsvc_release_block(block); } /* Helper function to handle retry of a deferred block. * If it is a blocking lock, call grant_blocked. * For a non-blocking lock or test lock, revisit the request. */ static void retry_deferred_block(struct nlm_block *block) { if (!(block->b_flags & B_GOT_CALLBACK)) block->b_flags |= B_TIMED_OUT; nlmsvc_insert_block(block, NLM_TIMEOUT); dprintk("revisit block %p flags %d\n", block, block->b_flags); if (block->b_deferred_req) { block->b_deferred_req->revisit(block->b_deferred_req, 0); block->b_deferred_req = NULL; } } /* * Retry all blocked locks that have been notified. This is where lockd * picks up locks that can be granted, or grant notifications that must * be retransmitted. */ unsigned long nlmsvc_retry_blocked(void) { unsigned long timeout = MAX_SCHEDULE_TIMEOUT; struct nlm_block *block; spin_lock(&nlm_blocked_lock); while (!list_empty(&nlm_blocked) && !kthread_should_stop()) { block = list_entry(nlm_blocked.next, struct nlm_block, b_list); if (block->b_when == NLM_NEVER) break; if (time_after(block->b_when, jiffies)) { timeout = block->b_when - jiffies; break; } spin_unlock(&nlm_blocked_lock); dprintk("nlmsvc_retry_blocked(%p, when=%ld)\n", block, block->b_when); if (block->b_flags & B_QUEUED) { dprintk("nlmsvc_retry_blocked delete block (%p, granted=%d, flags=%d)\n", block, block->b_granted, block->b_flags); retry_deferred_block(block); } else nlmsvc_grant_blocked(block); spin_lock(&nlm_blocked_lock); } spin_unlock(&nlm_blocked_lock); return timeout; }
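/*
 * nlmsvc_insert_block() above keeps nlm_blocked sorted by absolute wakeup
 * time (b_when), with NLM_NEVER entries parked at the tail, so that
 * nlmsvc_retry_blocked() only has to look at the head of the list to decide
 * how long lockd may sleep.  Below is a small user-space sketch of that
 * ordering, not lockd code: it assumes plain time_t seconds instead of
 * jiffies and a simple singly linked list instead of struct nlm_block.
 */
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define WHEN_NEVER ((time_t) LONG_MAX)           /* stand-in for NLM_NEVER */

struct blocked {
	const char *name;
	time_t when;                     /* absolute deadline, or WHEN_NEVER */
	struct blocked *next;
};

static struct blocked *blocked_list;

/* Insert so the list stays ordered by deadline; "never" entries go last. */
static void insert_block(struct blocked *b, time_t delay)
{
	struct blocked **pp;

	b->when = (delay == WHEN_NEVER) ? WHEN_NEVER : time(NULL) + delay;
	for (pp = &blocked_list; *pp != NULL; pp = &(*pp)->next)
		if ((*pp)->when > b->when || (*pp)->when == WHEN_NEVER)
			break;
	b->next = *pp;
	*pp = b;
}

/* Handle everything that is due and report how long the caller may sleep. */
static time_t retry_blocked(void)
{
	time_t now = time(NULL);

	while (blocked_list != NULL && blocked_list->when != WHEN_NEVER) {
		struct blocked *b = blocked_list;

		if (b->when > now)
			return b->when - now;    /* sleep until the head is due */
		blocked_list = b->next;
		printf("retrying %s\n", b->name);
		free(b);
	}
	return WHEN_NEVER;                       /* nothing pending, sleep "forever" */
}

int main(void)
{
	struct blocked *a = malloc(sizeof(*a));
	struct blocked *b = malloc(sizeof(*b));

	a->name = "blocked lock waiting for grant";
	insert_block(a, WHEN_NEVER);
	b->name = "grant retry due now";
	insert_block(b, 0);

	printf("next wakeup in %ld seconds\n", (long) retry_blocked());
	free(blocked_list);                      /* drop the remaining "never" entry */
	return 0;
}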
/* ./CrossVul/dataset_final_sorted/CWE-404/c/good_3351_1 */
/* crossvul-cpp_data_good_2525_1 */
/************************************************* * Exim - an Internet mail transport agent * *************************************************/ /* Copyright (c) University of Cambridge 1995 - 2016 */ /* See the file NOTICE for conditions of use and distribution. */ /* The main function: entry point, initialization, and high-level control. Also a few functions that don't naturally fit elsewhere. */ #include "exim.h" #if defined(__GLIBC__) && !defined(__UCLIBC__) # include <gnu/libc-version.h> #endif #ifdef USE_GNUTLS # include <gnutls/gnutls.h> # if GNUTLS_VERSION_NUMBER < 0x030103 && !defined(DISABLE_OCSP) # define DISABLE_OCSP # endif #endif extern void init_lookup_list(void); /************************************************* * Function interface to store functions * *************************************************/ /* We need some real functions to pass to the PCRE regular expression library for store allocation via Exim's store manager. The normal calls are actually macros that pass over location information to make tracing easier. These functions just interface to the standard macro calls. A good compiler will optimize out the tail recursion and so not make them too expensive. There are two sets of functions; one for use when we want to retain the compiled regular expression for a long time; the other for short-term use. */ static void * function_store_get(size_t size) { return store_get((int)size); } static void function_dummy_free(void *block) { block = block; } static void * function_store_malloc(size_t size) { return store_malloc((int)size); } static void function_store_free(void *block) { store_free(block); } /************************************************* * Enums for cmdline interface * *************************************************/ enum commandline_info { CMDINFO_NONE=0, CMDINFO_HELP, CMDINFO_SIEVE, CMDINFO_DSCP }; /************************************************* * Compile regular expression and panic on fail * *************************************************/ /* This function is called when failure to compile a regular expression leads to a panic exit. In other cases, pcre_compile() is called directly. In many cases where this function is used, the results of the compilation are to be placed in long-lived store, so we temporarily reset the store management functions that PCRE uses if the use_malloc flag is set. Argument: pattern the pattern to compile caseless TRUE if caseless matching is required use_malloc TRUE if compile into malloc store Returns: pointer to the compiled pattern */ const pcre * regex_must_compile(const uschar *pattern, BOOL caseless, BOOL use_malloc) { int offset; int options = PCRE_COPT; const pcre *yield; const uschar *error; if (use_malloc) { pcre_malloc = function_store_malloc; pcre_free = function_store_free; } if (caseless) options |= PCRE_CASELESS; yield = pcre_compile(CCS pattern, options, (const char **)&error, &offset, NULL); pcre_malloc = function_store_get; pcre_free = function_dummy_free; if (yield == NULL) log_write(0, LOG_MAIN|LOG_PANIC_DIE, "regular expression error: " "%s at offset %d while compiling %s", error, offset, pattern); return yield; } /************************************************* * Execute regular expression and set strings * *************************************************/ /* This function runs a regular expression match, and sets up the pointers to the matched substrings. 
Arguments: re the compiled expression subject the subject string options additional PCRE options setup if < 0 do full setup if >= 0 setup from setup+1 onwards, excluding the full matched string Returns: TRUE or FALSE */ BOOL regex_match_and_setup(const pcre *re, const uschar *subject, int options, int setup) { int ovector[3*(EXPAND_MAXN+1)]; uschar * s = string_copy(subject); /* de-constifying */ int n = pcre_exec(re, NULL, CS s, Ustrlen(s), 0, PCRE_EOPT | options, ovector, sizeof(ovector)/sizeof(int)); BOOL yield = n >= 0; if (n == 0) n = EXPAND_MAXN + 1; if (yield) { int nn; expand_nmax = (setup < 0)? 0 : setup + 1; for (nn = (setup < 0)? 0 : 2; nn < n*2; nn += 2) { expand_nstring[expand_nmax] = s + ovector[nn]; expand_nlength[expand_nmax++] = ovector[nn+1] - ovector[nn]; } expand_nmax--; } return yield; } /************************************************* * Set up processing details * *************************************************/ /* Save a text string for dumping when SIGUSR1 is received. Do checks for overruns. Arguments: format and arguments, as for printf() Returns: nothing */ void set_process_info(const char *format, ...) { int len = sprintf(CS process_info, "%5d ", (int)getpid()); va_list ap; va_start(ap, format); if (!string_vformat(process_info + len, PROCESS_INFO_SIZE - len - 2, format, ap)) Ustrcpy(process_info + len, "**** string overflowed buffer ****"); len = Ustrlen(process_info); process_info[len+0] = '\n'; process_info[len+1] = '\0'; process_info_len = len + 1; DEBUG(D_process_info) debug_printf("set_process_info: %s", process_info); va_end(ap); } /************************************************* * Handler for SIGUSR1 * *************************************************/ /* SIGUSR1 causes any exim process to write to the process log details of what it is currently doing. It will only be used if the OS is capable of setting up a handler that causes automatic restarting of any system call that is in progress at the time. This function takes care to be signal-safe. Argument: the signal number (SIGUSR1) Returns: nothing */ static void usr1_handler(int sig) { int fd; os_restarting_signal(sig, usr1_handler); fd = Uopen(process_log_path, O_APPEND|O_WRONLY, LOG_MODE); if (fd < 0) { /* If we are already running as the Exim user, try to create it in the current process (assuming spool_directory exists). Otherwise, if we are root, do the creation in an exim:exim subprocess. */ int euid = geteuid(); if (euid == exim_uid) fd = Uopen(process_log_path, O_CREAT|O_APPEND|O_WRONLY, LOG_MODE); else if (euid == root_uid) fd = log_create_as_exim(process_log_path); } /* If we are neither exim nor root, or if we failed to create the log file, give up. There is not much useful we can do with errors, since we don't want to disrupt whatever is going on outside the signal handler. */ if (fd < 0) return; (void)write(fd, process_info, process_info_len); (void)close(fd); } /************************************************* * Timeout handler * *************************************************/ /* This handler is enabled most of the time that Exim is running. The handler doesn't actually get used unless alarm() has been called to set a timer, to place a time limit on a system call of some kind. When the handler is run, it re-enables itself. There are some other SIGALRM handlers that are used in special cases when more than just a flag setting is required; for example, when reading a message's input. 
These are normally set up in the code module that uses them, and the SIGALRM handler is reset to this one afterwards. Argument: the signal value (SIGALRM) Returns: nothing */ void sigalrm_handler(int sig) { sig = sig; /* Keep picky compilers happy */ sigalrm_seen = TRUE; os_non_restarting_signal(SIGALRM, sigalrm_handler); } /************************************************* * Sleep for a fractional time interval * *************************************************/ /* This function is called by millisleep() and exim_wait_tick() to wait for a period of time that may include a fraction of a second. The coding is somewhat tedious. We do not expect setitimer() ever to fail, but if it does, the process will wait for ever, so we panic in this instance. (There was a case of this when a bug in a function that calls milliwait() caused it to pass invalid data. That's when I added the check. :-) We assume it to be not worth sleeping for under 100us; this value will require revisiting as hardware advances. This avoids the issue of a zero-valued timer setting meaning "never fire". Argument: an itimerval structure containing the interval Returns: nothing */ static void milliwait(struct itimerval *itval) { sigset_t sigmask; sigset_t old_sigmask; if (itval->it_value.tv_usec < 100 && itval->it_value.tv_sec == 0) return; (void)sigemptyset(&sigmask); /* Empty mask */ (void)sigaddset(&sigmask, SIGALRM); /* Add SIGALRM */ (void)sigprocmask(SIG_BLOCK, &sigmask, &old_sigmask); /* Block SIGALRM */ if (setitimer(ITIMER_REAL, itval, NULL) < 0) /* Start timer */ log_write(0, LOG_MAIN|LOG_PANIC_DIE, "setitimer() failed: %s", strerror(errno)); (void)sigfillset(&sigmask); /* All signals */ (void)sigdelset(&sigmask, SIGALRM); /* Remove SIGALRM */ (void)sigsuspend(&sigmask); /* Until SIGALRM */ (void)sigprocmask(SIG_SETMASK, &old_sigmask, NULL); /* Restore mask */ } /************************************************* * Millisecond sleep function * *************************************************/ /* The basic sleep() function has a granularity of 1 second, which is too rough in some cases - for example, when using an increasing delay to slow down spammers. Argument: number of millseconds Returns: nothing */ void millisleep(int msec) { struct itimerval itval; itval.it_interval.tv_sec = 0; itval.it_interval.tv_usec = 0; itval.it_value.tv_sec = msec/1000; itval.it_value.tv_usec = (msec % 1000) * 1000; milliwait(&itval); } /************************************************* * Compare microsecond times * *************************************************/ /* Arguments: tv1 the first time tv2 the second time Returns: -1, 0, or +1 */ int exim_tvcmp(struct timeval *t1, struct timeval *t2) { if (t1->tv_sec > t2->tv_sec) return +1; if (t1->tv_sec < t2->tv_sec) return -1; if (t1->tv_usec > t2->tv_usec) return +1; if (t1->tv_usec < t2->tv_usec) return -1; return 0; } /************************************************* * Clock tick wait function * *************************************************/ /* Exim uses a time + a pid to generate a unique identifier in two places: its message IDs, and in file names for maildir deliveries. Because some OS now re-use pids within the same second, sub-second times are now being used. However, for absolute certainty, we must ensure the clock has ticked before allowing the relevant process to complete. At the time of implementation of this code (February 2003), the speed of processors is such that the clock will invariably have ticked already by the time a process has done its job. 
This function prepares for the time when things are faster - and it also copes with clocks that go backwards. Arguments: then_tv A timeval which was used to create uniqueness; its usec field has been rounded down to the value of the resolution. We want to be sure the current time is greater than this. resolution The resolution that was used to divide the microseconds (1 for maildir, larger for message ids) Returns: nothing */ void exim_wait_tick(struct timeval *then_tv, int resolution) { struct timeval now_tv; long int now_true_usec; (void)gettimeofday(&now_tv, NULL); now_true_usec = now_tv.tv_usec; now_tv.tv_usec = (now_true_usec/resolution) * resolution; if (exim_tvcmp(&now_tv, then_tv) <= 0) { struct itimerval itval; itval.it_interval.tv_sec = 0; itval.it_interval.tv_usec = 0; itval.it_value.tv_sec = then_tv->tv_sec - now_tv.tv_sec; itval.it_value.tv_usec = then_tv->tv_usec + resolution - now_true_usec; /* We know that, overall, "now" is less than or equal to "then". Therefore, a negative value for the microseconds is possible only in the case when "now" is more than a second less than "then". That means that itval.it_value.tv_sec is greater than zero. The following correction is therefore safe. */ if (itval.it_value.tv_usec < 0) { itval.it_value.tv_usec += 1000000; itval.it_value.tv_sec -= 1; } DEBUG(D_transport|D_receive) { if (!running_in_test_harness) { debug_printf("tick check: " TIME_T_FMT ".%06lu " TIME_T_FMT ".%06lu\n", then_tv->tv_sec, (long) then_tv->tv_usec, now_tv.tv_sec, (long) now_tv.tv_usec); debug_printf("waiting " TIME_T_FMT ".%06lu\n", itval.it_value.tv_sec, (long) itval.it_value.tv_usec); } } milliwait(&itval); } } /************************************************* * Call fopen() with umask 777 and adjust mode * *************************************************/ /* Exim runs with umask(0) so that files created with open() have the mode that is specified in the open() call. However, there are some files, typically in the spool directory, that are created with fopen(). They end up world-writeable if no precautions are taken. Although the spool directory is not accessible to the world, this is an untidiness. So this is a wrapper function for fopen() that sorts out the mode of the created file. Arguments: filename the file name options the fopen() options mode the required mode Returns: the fopened FILE or NULL */ FILE * modefopen(const uschar *filename, const char *options, mode_t mode) { mode_t saved_umask = umask(0777); FILE *f = Ufopen(filename, options); (void)umask(saved_umask); if (f != NULL) (void)fchmod(fileno(f), mode); return f; } /************************************************* * Ensure stdin, stdout, and stderr exist * *************************************************/ /* Some operating systems grumble if an exec() happens without a standard input, output, and error (fds 0, 1, 2) being defined. The worry is that some file will be opened and will use these fd values, and then some other bit of code will assume, for example, that it can write error messages to stderr. This function ensures that fds 0, 1, and 2 are open if they do not already exist, by connecting them to /dev/null. This function is also used to ensure that std{in,out,err} exist at all times, so that if any library that Exim calls tries to use them, it doesn't crash. 
Arguments: None Returns: Nothing */ void exim_nullstd(void) { int i; int devnull = -1; struct stat statbuf; for (i = 0; i <= 2; i++) { if (fstat(i, &statbuf) < 0 && errno == EBADF) { if (devnull < 0) devnull = open("/dev/null", O_RDWR); if (devnull < 0) log_write(0, LOG_MAIN|LOG_PANIC_DIE, "%s", string_open_failed(errno, "/dev/null")); if (devnull != i) (void)dup2(devnull, i); } } if (devnull > 2) (void)close(devnull); } /************************************************* * Close unwanted file descriptors for delivery * *************************************************/ /* This function is called from a new process that has been forked to deliver an incoming message, either directly, or using exec. We want any smtp input streams to be closed in this new process. However, it has been observed that using fclose() here causes trouble. When reading in -bS input, duplicate copies of messages have been seen. The files will be sharing a file pointer with the parent process, and it seems that fclose() (at least on some systems - I saw this on Solaris 2.5.1) messes with that file pointer, at least sometimes. Hence we go for closing the underlying file descriptors. If TLS is active, we want to shut down the TLS library, but without molesting the parent's SSL connection. For delivery of a non-SMTP message, we want to close stdin and stdout (and stderr unless debugging) because the calling process might have set them up as pipes and be waiting for them to close before it waits for the submission process to terminate. If they aren't closed, they hold up the calling process until the initial delivery process finishes, which is not what we want. Exception: We do want it for synchronous delivery! And notwithstanding all the above, if D_resolver is set, implying resolver debugging, leave stdout open, because that's where the resolver writes its debugging output. When we close stderr (which implies we've also closed stdout), we also get rid of any controlling terminal. Arguments: None Returns: Nothing */ static void close_unwanted(void) { if (smtp_input) { #ifdef SUPPORT_TLS tls_close(TRUE, FALSE); /* Shut down the TLS library */ #endif (void)close(fileno(smtp_in)); (void)close(fileno(smtp_out)); smtp_in = NULL; } else { (void)close(0); /* stdin */ if ((debug_selector & D_resolver) == 0) (void)close(1); /* stdout */ if (debug_selector == 0) /* stderr */ { if (!synchronous_delivery) { (void)close(2); log_stderr = NULL; } (void)setsid(); } } } /************************************************* * Set uid and gid * *************************************************/ /* This function sets a new uid and gid permanently, optionally calling initgroups() to set auxiliary groups. There are some special cases when running Exim in unprivileged modes. In these situations the effective uid will not be root; if we already have the right effective uid/gid, and don't need to initialize any groups, leave things as they are. Arguments: uid the uid gid the gid igflag TRUE if initgroups() wanted msg text to use in debugging output and failure log Returns: nothing; bombs out on failure */ void exim_setugid(uid_t uid, gid_t gid, BOOL igflag, uschar *msg) { uid_t euid = geteuid(); gid_t egid = getegid(); if (euid == root_uid || euid != uid || egid != gid || igflag) { /* At least one OS returns +1 for initgroups failure, so just check for non-zero. 
*/ if (igflag) { struct passwd *pw = getpwuid(uid); if (pw != NULL) { if (initgroups(pw->pw_name, gid) != 0) log_write(0,LOG_MAIN|LOG_PANIC_DIE,"initgroups failed for uid=%ld: %s", (long int)uid, strerror(errno)); } else log_write(0, LOG_MAIN|LOG_PANIC_DIE, "cannot run initgroups(): " "no passwd entry for uid=%ld", (long int)uid); } if (setgid(gid) < 0 || setuid(uid) < 0) { log_write(0, LOG_MAIN|LOG_PANIC_DIE, "unable to set gid=%ld or uid=%ld " "(euid=%ld): %s", (long int)gid, (long int)uid, (long int)euid, msg); } } /* Debugging output included uid/gid and all groups */ DEBUG(D_uid) { int group_count, save_errno; gid_t group_list[NGROUPS_MAX]; debug_printf("changed uid/gid: %s\n uid=%ld gid=%ld pid=%ld\n", msg, (long int)geteuid(), (long int)getegid(), (long int)getpid()); group_count = getgroups(NGROUPS_MAX, group_list); save_errno = errno; debug_printf(" auxiliary group list:"); if (group_count > 0) { int i; for (i = 0; i < group_count; i++) debug_printf(" %d", (int)group_list[i]); } else if (group_count < 0) debug_printf(" <error: %s>", strerror(save_errno)); else debug_printf(" <none>"); debug_printf("\n"); } } /************************************************* * Exit point * *************************************************/ /* Exim exits via this function so that it always clears up any open databases. Arguments: rc return code Returns: does not return */ void exim_exit(int rc) { search_tidyup(); DEBUG(D_any) debug_printf(">>>>>>>>>>>>>>>> Exim pid=%d terminating with rc=%d " ">>>>>>>>>>>>>>>>\n", (int)getpid(), rc); exit(rc); } /************************************************* * Extract port from host address * *************************************************/ /* Called to extract the port from the values given to -oMa and -oMi. It also checks the syntax of the address, and terminates it before the port data when a port is extracted. Argument: address the address, with possible port on the end Returns: the port, or zero if there isn't one bombs out on a syntax error */ static int check_port(uschar *address) { int port = host_address_extract_port(address); if (string_is_ip_address(address, NULL) == 0) { fprintf(stderr, "exim abandoned: \"%s\" is not an IP address\n", address); exit(EXIT_FAILURE); } return port; } /************************************************* * Test/verify an address * *************************************************/ /* This function is called by the -bv and -bt code. It extracts a working address from a full RFC 822 address. This isn't really necessary per se, but it has the effect of collapsing source routes. Arguments: s the address string flags flag bits for verify_address() exit_value to be set for failures Returns: nothing */ static void test_address(uschar *s, int flags, int *exit_value) { int start, end, domain; uschar *parse_error = NULL; uschar *address = parse_extract_address(s, &parse_error, &start, &end, &domain, FALSE); if (address == NULL) { fprintf(stdout, "syntax error: %s\n", parse_error); *exit_value = 2; } else { int rc = verify_address(deliver_make_addr(address,TRUE), stdout, flags, -1, -1, -1, NULL, NULL, NULL); if (rc == FAIL) *exit_value = 2; else if (rc == DEFER && *exit_value == 0) *exit_value = 1; } } /************************************************* * Show supported features * *************************************************/ /* This function is called for -bV/--version and for -d to output the optional features of the current Exim binary. 
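For illustration only - the exact lines depend entirely on the build options
compiled in - a call such as

  show_whats_supported(stdout);

produces output along the lines of

  Support for: iconv() IPv6 OpenSSL
  Lookups (built-in): lsearch wildlsearch nwildlsearch iplsearch dbm dbmjz dbmnz
  Authenticators: cram_md5 plaintext
  Transports: appendfile/maildir pipe smtp

(The feature names shown are examples picked from the lists below, not a claim
about any particular binary.)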
Arguments: a FILE for printing Returns: nothing */ static void show_whats_supported(FILE *f) { auth_info *authi; #ifdef DB_VERSION_STRING fprintf(f, "Berkeley DB: %s\n", DB_VERSION_STRING); #elif defined(BTREEVERSION) && defined(HASHVERSION) #ifdef USE_DB fprintf(f, "Probably Berkeley DB version 1.8x (native mode)\n"); #else fprintf(f, "Probably Berkeley DB version 1.8x (compatibility mode)\n"); #endif #elif defined(_DBM_RDONLY) || defined(dbm_dirfno) fprintf(f, "Probably ndbm\n"); #elif defined(USE_TDB) fprintf(f, "Using tdb\n"); #else #ifdef USE_GDBM fprintf(f, "Probably GDBM (native mode)\n"); #else fprintf(f, "Probably GDBM (compatibility mode)\n"); #endif #endif fprintf(f, "Support for:"); #ifdef SUPPORT_CRYPTEQ fprintf(f, " crypteq"); #endif #if HAVE_ICONV fprintf(f, " iconv()"); #endif #if HAVE_IPV6 fprintf(f, " IPv6"); #endif #ifdef HAVE_SETCLASSRESOURCES fprintf(f, " use_setclassresources"); #endif #ifdef SUPPORT_PAM fprintf(f, " PAM"); #endif #ifdef EXIM_PERL fprintf(f, " Perl"); #endif #ifdef EXPAND_DLFUNC fprintf(f, " Expand_dlfunc"); #endif #ifdef USE_TCP_WRAPPERS fprintf(f, " TCPwrappers"); #endif #ifdef SUPPORT_TLS #ifdef USE_GNUTLS fprintf(f, " GnuTLS"); #else fprintf(f, " OpenSSL"); #endif #endif #ifdef SUPPORT_TRANSLATE_IP_ADDRESS fprintf(f, " translate_ip_address"); #endif #ifdef SUPPORT_MOVE_FROZEN_MESSAGES fprintf(f, " move_frozen_messages"); #endif #ifdef WITH_CONTENT_SCAN fprintf(f, " Content_Scanning"); #endif #ifndef DISABLE_DKIM fprintf(f, " DKIM"); #endif #ifndef DISABLE_DNSSEC fprintf(f, " DNSSEC"); #endif #ifndef DISABLE_EVENT fprintf(f, " Event"); #endif #ifdef SUPPORT_I18N fprintf(f, " I18N"); #endif #ifndef DISABLE_OCSP fprintf(f, " OCSP"); #endif #ifndef DISABLE_PRDR fprintf(f, " PRDR"); #endif #ifdef SUPPORT_PROXY fprintf(f, " PROXY"); #endif #ifdef SUPPORT_SOCKS fprintf(f, " SOCKS"); #endif #ifdef TCP_FASTOPEN fprintf(f, " TCP_Fast_Open"); #endif #ifdef EXPERIMENTAL_LMDB fprintf(f, " Experimental_LMDB"); #endif #ifdef EXPERIMENTAL_QUEUEFILE fprintf(f, " Experimental_QUEUEFILE"); #endif #ifdef EXPERIMENTAL_SPF fprintf(f, " Experimental_SPF"); #endif #ifdef EXPERIMENTAL_SRS fprintf(f, " Experimental_SRS"); #endif #ifdef EXPERIMENTAL_BRIGHTMAIL fprintf(f, " Experimental_Brightmail"); #endif #ifdef EXPERIMENTAL_DANE fprintf(f, " Experimental_DANE"); #endif #ifdef EXPERIMENTAL_DCC fprintf(f, " Experimental_DCC"); #endif #ifdef EXPERIMENTAL_DMARC fprintf(f, " Experimental_DMARC"); #endif #ifdef EXPERIMENTAL_DSN_INFO fprintf(f, " Experimental_DSN_info"); #endif fprintf(f, "\n"); fprintf(f, "Lookups (built-in):"); #if defined(LOOKUP_LSEARCH) && LOOKUP_LSEARCH!=2 fprintf(f, " lsearch wildlsearch nwildlsearch iplsearch"); #endif #if defined(LOOKUP_CDB) && LOOKUP_CDB!=2 fprintf(f, " cdb"); #endif #if defined(LOOKUP_DBM) && LOOKUP_DBM!=2 fprintf(f, " dbm dbmjz dbmnz"); #endif #if defined(LOOKUP_DNSDB) && LOOKUP_DNSDB!=2 fprintf(f, " dnsdb"); #endif #if defined(LOOKUP_DSEARCH) && LOOKUP_DSEARCH!=2 fprintf(f, " dsearch"); #endif #if defined(LOOKUP_IBASE) && LOOKUP_IBASE!=2 fprintf(f, " ibase"); #endif #if defined(LOOKUP_LDAP) && LOOKUP_LDAP!=2 fprintf(f, " ldap ldapdn ldapm"); #endif #ifdef EXPERIMENTAL_LMDB fprintf(f, " lmdb"); #endif #if defined(LOOKUP_MYSQL) && LOOKUP_MYSQL!=2 fprintf(f, " mysql"); #endif #if defined(LOOKUP_NIS) && LOOKUP_NIS!=2 fprintf(f, " nis nis0"); #endif #if defined(LOOKUP_NISPLUS) && LOOKUP_NISPLUS!=2 fprintf(f, " nisplus"); #endif #if defined(LOOKUP_ORACLE) && LOOKUP_ORACLE!=2 fprintf(f, " oracle"); #endif #if defined(LOOKUP_PASSWD) && 
LOOKUP_PASSWD!=2 fprintf(f, " passwd"); #endif #if defined(LOOKUP_PGSQL) && LOOKUP_PGSQL!=2 fprintf(f, " pgsql"); #endif #if defined(LOOKUP_REDIS) && LOOKUP_REDIS!=2 fprintf(f, " redis"); #endif #if defined(LOOKUP_SQLITE) && LOOKUP_SQLITE!=2 fprintf(f, " sqlite"); #endif #if defined(LOOKUP_TESTDB) && LOOKUP_TESTDB!=2 fprintf(f, " testdb"); #endif #if defined(LOOKUP_WHOSON) && LOOKUP_WHOSON!=2 fprintf(f, " whoson"); #endif fprintf(f, "\n"); fprintf(f, "Authenticators:"); #ifdef AUTH_CRAM_MD5 fprintf(f, " cram_md5"); #endif #ifdef AUTH_CYRUS_SASL fprintf(f, " cyrus_sasl"); #endif #ifdef AUTH_DOVECOT fprintf(f, " dovecot"); #endif #ifdef AUTH_GSASL fprintf(f, " gsasl"); #endif #ifdef AUTH_HEIMDAL_GSSAPI fprintf(f, " heimdal_gssapi"); #endif #ifdef AUTH_PLAINTEXT fprintf(f, " plaintext"); #endif #ifdef AUTH_SPA fprintf(f, " spa"); #endif #ifdef AUTH_TLS fprintf(f, " tls"); #endif fprintf(f, "\n"); fprintf(f, "Routers:"); #ifdef ROUTER_ACCEPT fprintf(f, " accept"); #endif #ifdef ROUTER_DNSLOOKUP fprintf(f, " dnslookup"); #endif #ifdef ROUTER_IPLITERAL fprintf(f, " ipliteral"); #endif #ifdef ROUTER_IPLOOKUP fprintf(f, " iplookup"); #endif #ifdef ROUTER_MANUALROUTE fprintf(f, " manualroute"); #endif #ifdef ROUTER_QUERYPROGRAM fprintf(f, " queryprogram"); #endif #ifdef ROUTER_REDIRECT fprintf(f, " redirect"); #endif fprintf(f, "\n"); fprintf(f, "Transports:"); #ifdef TRANSPORT_APPENDFILE fprintf(f, " appendfile"); #ifdef SUPPORT_MAILDIR fprintf(f, "/maildir"); #endif #ifdef SUPPORT_MAILSTORE fprintf(f, "/mailstore"); #endif #ifdef SUPPORT_MBX fprintf(f, "/mbx"); #endif #endif #ifdef TRANSPORT_AUTOREPLY fprintf(f, " autoreply"); #endif #ifdef TRANSPORT_LMTP fprintf(f, " lmtp"); #endif #ifdef TRANSPORT_PIPE fprintf(f, " pipe"); #endif #ifdef EXPERIMENTAL_QUEUEFILE fprintf(f, " queuefile"); #endif #ifdef TRANSPORT_SMTP fprintf(f, " smtp"); #endif fprintf(f, "\n"); if (fixed_never_users[0] > 0) { int i; fprintf(f, "Fixed never_users: "); for (i = 1; i <= (int)fixed_never_users[0] - 1; i++) fprintf(f, "%d:", (unsigned int)fixed_never_users[i]); fprintf(f, "%d\n", (unsigned int)fixed_never_users[i]); } fprintf(f, "Configure owner: %d:%d\n", config_uid, config_gid); fprintf(f, "Size of off_t: " SIZE_T_FMT "\n", sizeof(off_t)); /* Everything else is details which are only worth reporting when debugging. Perhaps the tls_version_report should move into this too. */ DEBUG(D_any) do { int i; /* clang defines __GNUC__ (at least, for me) so test for it first */ #if defined(__clang__) fprintf(f, "Compiler: CLang [%s]\n", __clang_version__); #elif defined(__GNUC__) fprintf(f, "Compiler: GCC [%s]\n", # ifdef __VERSION__ __VERSION__ # else "? unknown version ?" # endif ); #else fprintf(f, "Compiler: <unknown>\n"); #endif #if defined(__GLIBC__) && !defined(__UCLIBC__) fprintf(f, "Library version: Glibc: Compile: %d.%d\n", __GLIBC__, __GLIBC_MINOR__); if (__GLIBC_PREREQ(2, 1)) fprintf(f, " Runtime: %s\n", gnu_get_libc_version()); #endif #ifdef SUPPORT_TLS tls_version_report(f); #endif #ifdef SUPPORT_I18N utf8_version_report(f); #endif for (authi = auths_available; *authi->driver_name != '\0'; ++authi) if (authi->version_report) (*authi->version_report)(f); /* PCRE_PRERELEASE is either defined and empty or a bare sequence of characters; unless it's an ancient version of PCRE in which case it is not defined. 
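As a worked example (the version numbers are invented): if PCRE_MAJOR is 8,
PCRE_MINOR is 39 and PCRE_PRERELEASE is defined but empty, then

  EXPAND_AND_QUOTE(PCRE_PRERELEASE)

first macro-expands PCRE_PRERELEASE (to nothing) and only then stringizes it,
yielding "", so the report reads "Compile: 8.39". If PCRE_PRERELEASE had been
the bare tokens -RC1, the same two-step expansion would yield "-RC1" and the
report would read "Compile: 8.39-RC1". A single-step QUOTE(PCRE_PRERELEASE)
would instead produce the literal string "PCRE_PRERELEASE", which is why both
macros are needed.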
*/ #ifndef PCRE_PRERELEASE # define PCRE_PRERELEASE #endif #define QUOTE(X) #X #define EXPAND_AND_QUOTE(X) QUOTE(X) fprintf(f, "Library version: PCRE: Compile: %d.%d%s\n" " Runtime: %s\n", PCRE_MAJOR, PCRE_MINOR, EXPAND_AND_QUOTE(PCRE_PRERELEASE) "", pcre_version()); #undef QUOTE #undef EXPAND_AND_QUOTE init_lookup_list(); for (i = 0; i < lookup_list_count; i++) if (lookup_list[i]->version_report) lookup_list[i]->version_report(f); #ifdef WHITELIST_D_MACROS fprintf(f, "WHITELIST_D_MACROS: \"%s\"\n", WHITELIST_D_MACROS); #else fprintf(f, "WHITELIST_D_MACROS unset\n"); #endif #ifdef TRUSTED_CONFIG_LIST fprintf(f, "TRUSTED_CONFIG_LIST: \"%s\"\n", TRUSTED_CONFIG_LIST); #else fprintf(f, "TRUSTED_CONFIG_LIST unset\n"); #endif } while (0); } /************************************************* * Show auxiliary information about Exim * *************************************************/ static void show_exim_information(enum commandline_info request, FILE *stream) { const uschar **pp; switch(request) { case CMDINFO_NONE: fprintf(stream, "Oops, something went wrong.\n"); return; case CMDINFO_HELP: fprintf(stream, "The -bI: flag takes a string indicating which information to provide.\n" "If the string is not recognised, you'll get this help (on stderr).\n" "\n" " exim -bI:help this information\n" " exim -bI:dscp dscp value keywords known\n" " exim -bI:sieve list of supported sieve extensions, one per line.\n" ); return; case CMDINFO_SIEVE: for (pp = exim_sieve_extension_list; *pp; ++pp) fprintf(stream, "%s\n", *pp); return; case CMDINFO_DSCP: dscp_list_to_stream(stream); return; } } /************************************************* * Quote a local part * *************************************************/ /* This function is used when a sender address or a From: or Sender: header line is being created from the caller's login, or from an authenticated_id. It applies appropriate quoting rules for a local part. Argument: the local part Returns: the local part, quoted if necessary */ uschar * local_part_quote(uschar *lpart) { BOOL needs_quote = FALSE; int size, ptr; uschar *yield; uschar *t; for (t = lpart; !needs_quote && *t != 0; t++) { needs_quote = !isalnum(*t) && strchr("!#$%&'*+-/=?^_`{|}~", *t) == NULL && (*t != '.' || t == lpart || t[1] == 0); } if (!needs_quote) return lpart; size = ptr = 0; yield = string_catn(NULL, &size, &ptr, US"\"", 1); for (;;) { uschar *nq = US Ustrpbrk(lpart, "\\\""); if (nq == NULL) { yield = string_cat(yield, &size, &ptr, lpart); break; } yield = string_catn(yield, &size, &ptr, lpart, nq - lpart); yield = string_catn(yield, &size, &ptr, US"\\", 1); yield = string_catn(yield, &size, &ptr, nq, 1); lpart = nq + 1; } yield = string_catn(yield, &size, &ptr, US"\"", 1); yield[ptr] = 0; return yield; } #ifdef USE_READLINE /************************************************* * Load readline() functions * *************************************************/ /* This function is called from testing executions that read data from stdin, but only when running as the calling user. Currently, only -be does this. The function loads the readline() function library and passes back the functions. On some systems, it needs the curses library, so load that too, but try without it if loading fails. All this functionality has to be requested at build time. 
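A minimal usage sketch, with error handling elided (this mirrors how the
function is intended to be called; the variable names are illustrative):

  char *(*fn_readline)(const char *) = NULL;
  void (*fn_addhist)(const char *) = NULL;
  void *dlhandle = set_readline(&fn_readline, &fn_addhist);
  uschar *line = get_stdinput(fn_readline, fn_addhist);
  ...
  if (dlhandle) dlclose(dlhandle);

If the library cannot be loaded, dlhandle is NULL, both pointers stay NULL,
and get_stdinput() falls back to reading stdin with Ufgets(), printing a "> "
prompt itself.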
Arguments: fn_readline_ptr pointer to where to put the readline pointer fn_addhist_ptr pointer to where to put the addhistory function Returns: the dlopen handle or NULL on failure */ static void * set_readline(char * (**fn_readline_ptr)(const char *), void (**fn_addhist_ptr)(const char *)) { void *dlhandle; void *dlhandle_curses = dlopen("libcurses." DYNLIB_FN_EXT, RTLD_GLOBAL|RTLD_LAZY); dlhandle = dlopen("libreadline." DYNLIB_FN_EXT, RTLD_GLOBAL|RTLD_NOW); if (dlhandle_curses != NULL) dlclose(dlhandle_curses); if (dlhandle != NULL) { /* Checked manual pages; at least in GNU Readline 6.1, the prototypes are: * char * readline (const char *prompt); * void add_history (const char *string); */ *fn_readline_ptr = (char *(*)(const char*))dlsym(dlhandle, "readline"); *fn_addhist_ptr = (void(*)(const char*))dlsym(dlhandle, "add_history"); } else { DEBUG(D_any) debug_printf("failed to load readline: %s\n", dlerror()); } return dlhandle; } #endif /************************************************* * Get a line from stdin for testing things * *************************************************/ /* This function is called when running tests that can take a number of lines of input (for example, -be and -bt). It handles continuations and trailing spaces. And prompting and a blank line output on eof. If readline() is in use, the arguments are non-NULL and provide the relevant functions. Arguments: fn_readline readline function or NULL fn_addhist addhist function or NULL Returns: pointer to dynamic memory, or NULL at end of file */ static uschar * get_stdinput(char *(*fn_readline)(const char *), void(*fn_addhist)(const char *)) { int i; int size = 0; int ptr = 0; uschar *yield = NULL; if (fn_readline == NULL) { printf("> "); fflush(stdout); } for (i = 0;; i++) { uschar buffer[1024]; uschar *p, *ss; #ifdef USE_READLINE char *readline_line = NULL; if (fn_readline != NULL) { if ((readline_line = fn_readline((i > 0)? "":"> ")) == NULL) break; if (*readline_line != 0 && fn_addhist != NULL) fn_addhist(readline_line); p = US readline_line; } else #endif /* readline() not in use */ { if (Ufgets(buffer, sizeof(buffer), stdin) == NULL) break; p = buffer; } /* Handle the line */ ss = p + (int)Ustrlen(p); while (ss > p && isspace(ss[-1])) ss--; if (i > 0) { while (p < ss && isspace(*p)) p++; /* leading space after cont */ } yield = string_catn(yield, &size, &ptr, p, ss - p); #ifdef USE_READLINE if (fn_readline != NULL) free(readline_line); #endif /* yield can only be NULL if ss==p */ if (ss == p || yield[ptr-1] != '\\') { if (yield) yield[ptr] = 0; break; } yield[--ptr] = 0; } if (yield == NULL) printf("\n"); return yield; } /************************************************* * Output usage information for the program * *************************************************/ /* This function is called when there are no recipients or a specific --help argument was added. Arguments: progname information on what name we were called by Returns: DOES NOT RETURN */ static void exim_usage(uschar *progname) { /* Handle specific program invocation variants */ if (Ustrcmp(progname, US"-mailq") == 0) { fprintf(stderr, "mailq - list the contents of the mail queue\n\n" "For a list of options, see the Exim documentation.\n"); exit(EXIT_FAILURE); } /* Generic usage - we output this whatever happens */ fprintf(stderr, "Exim is a Mail Transfer Agent. It is normally called by Mail User Agents,\n" "not directly from a shell command line. Options and/or arguments control\n" "what it does when called. 
For a list of options, see the Exim documentation.\n"); exit(EXIT_FAILURE); } /************************************************* * Validate that the macros given are okay * *************************************************/ /* Typically, Exim will drop privileges if macros are supplied. In some cases, we want to not do so. Arguments: opt_D_used - true if the commandline had a "-D" option Returns: true if trusted, false otherwise */ static BOOL macros_trusted(BOOL opt_D_used) { #ifdef WHITELIST_D_MACROS macro_item *m; uschar *whitelisted, *end, *p, **whites, **w; int white_count, i, n; size_t len; BOOL prev_char_item, found; #endif if (!opt_D_used) return TRUE; #ifndef WHITELIST_D_MACROS return FALSE; #else /* We only trust -D overrides for some invoking users: root, the exim run-time user, the optional config owner user. I don't know why config-owner would be needed, but since they can own the config files anyway, there's no security risk to letting them override -D. */ if ( ! ((real_uid == root_uid) || (real_uid == exim_uid) #ifdef CONFIGURE_OWNER || (real_uid == config_uid) #endif )) { debug_printf("macros_trusted rejecting macros for uid %d\n", (int) real_uid); return FALSE; } /* Get a list of macros which are whitelisted */ whitelisted = string_copy_malloc(US WHITELIST_D_MACROS); prev_char_item = FALSE; white_count = 0; for (p = whitelisted; *p != '\0'; ++p) { if (*p == ':' || isspace(*p)) { *p = '\0'; if (prev_char_item) ++white_count; prev_char_item = FALSE; continue; } if (!prev_char_item) prev_char_item = TRUE; } end = p; if (prev_char_item) ++white_count; if (!white_count) return FALSE; whites = store_malloc(sizeof(uschar *) * (white_count+1)); for (p = whitelisted, i = 0; (p != end) && (i < white_count); ++p) { if (*p != '\0') { whites[i++] = p; if (i == white_count) break; while (*p != '\0' && p < end) ++p; } } whites[i] = NULL; /* The list of commandline macros should be very short. Accept the N*M complexity. */ for (m = macros; m; m = m->next) if (m->command_line) { found = FALSE; for (w = whites; *w; ++w) if (Ustrcmp(*w, m->name) == 0) { found = TRUE; break; } if (!found) return FALSE; if (m->replacement == NULL) continue; len = Ustrlen(m->replacement); if (len == 0) continue; n = pcre_exec(regex_whitelisted_macro, NULL, CS m->replacement, len, 0, PCRE_EOPT, NULL, 0); if (n < 0) { if (n != PCRE_ERROR_NOMATCH) debug_printf("macros_trusted checking %s returned %d\n", m->name, n); return FALSE; } } DEBUG(D_any) debug_printf("macros_trusted overridden to true by whitelisting\n"); return TRUE; #endif } /************************************************* * Entry point and high-level code * *************************************************/ /* Entry point for the Exim mailer. Analyse the arguments and arrange to take the appropriate action. All the necessary functions are present in the one binary. I originally thought one should split it up, but it turns out that so much of the apparatus is needed in each chunk that one might as well just have it all available all the time, which then makes the coding easier as well. 
Arguments: argc count of entries in argv argv argument strings, with argv[0] being the program name Returns: EXIT_SUCCESS if terminated successfully EXIT_FAILURE otherwise, except when a message has been sent to the sender, and -oee was given */ int main(int argc, char **cargv) { uschar **argv = USS cargv; int arg_receive_timeout = -1; int arg_smtp_receive_timeout = -1; int arg_error_handling = error_handling; int filter_sfd = -1; int filter_ufd = -1; int group_count; int i, rv; int list_queue_option = 0; int msg_action = 0; int msg_action_arg = -1; int namelen = (argv[0] == NULL)? 0 : Ustrlen(argv[0]); int queue_only_reason = 0; #ifdef EXIM_PERL int perl_start_option = 0; #endif int recipients_arg = argc; int sender_address_domain = 0; int test_retry_arg = -1; int test_rewrite_arg = -1; BOOL arg_queue_only = FALSE; BOOL bi_option = FALSE; BOOL checking = FALSE; BOOL count_queue = FALSE; BOOL expansion_test = FALSE; BOOL extract_recipients = FALSE; BOOL flag_G = FALSE; BOOL flag_n = FALSE; BOOL forced_delivery = FALSE; BOOL f_end_dot = FALSE; BOOL deliver_give_up = FALSE; BOOL list_queue = FALSE; BOOL list_options = FALSE; BOOL list_config = FALSE; BOOL local_queue_only; BOOL more = TRUE; BOOL one_msg_action = FALSE; BOOL opt_D_used = FALSE; BOOL queue_only_set = FALSE; BOOL receiving_message = TRUE; BOOL sender_ident_set = FALSE; BOOL session_local_queue_only; BOOL unprivileged; BOOL removed_privilege = FALSE; BOOL usage_wanted = FALSE; BOOL verify_address_mode = FALSE; BOOL verify_as_sender = FALSE; BOOL version_printed = FALSE; uschar *alias_arg = NULL; uschar *called_as = US""; uschar *cmdline_syslog_name = NULL; uschar *start_queue_run_id = NULL; uschar *stop_queue_run_id = NULL; uschar *expansion_test_message = NULL; uschar *ftest_domain = NULL; uschar *ftest_localpart = NULL; uschar *ftest_prefix = NULL; uschar *ftest_suffix = NULL; uschar *log_oneline = NULL; uschar *malware_test_file = NULL; uschar *real_sender_address; uschar *originator_home = US"/"; size_t sz; void *reset_point; struct passwd *pw; struct stat statbuf; pid_t passed_qr_pid = (pid_t)0; int passed_qr_pipe = -1; gid_t group_list[NGROUPS_MAX]; /* For the -bI: flag */ enum commandline_info info_flag = CMDINFO_NONE; BOOL info_stdout = FALSE; /* Possible options for -R and -S */ static uschar *rsopts[] = { US"f", US"ff", US"r", US"rf", US"rff" }; /* Need to define this in case we need to change the environment in order to get rid of a bogus time zone. We have to make it char rather than uschar because some OS define it in /usr/include/unistd.h. */ extern char **environ; /* If the Exim user and/or group and/or the configuration file owner/group were defined by ref:name at build time, we must now find the actual uid/gid values. This is a feature to make the lives of binary distributors easier. */ #ifdef EXIM_USERNAME if (route_finduser(US EXIM_USERNAME, &pw, &exim_uid)) { if (exim_uid == 0) { fprintf(stderr, "exim: refusing to run with uid 0 for \"%s\"\n", EXIM_USERNAME); exit(EXIT_FAILURE); } /* If ref:name uses a number as the name, route_finduser() returns TRUE with exim_uid set and pw coerced to NULL. 
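For example (the number is invented): building with EXIM_USERNAME set to
"ref:102" makes route_finduser() succeed with exim_uid = 102 but with pw left
as NULL, so the group cannot be derived from a passwd entry; that is why the
code below then insists on EXIM_GROUPNAME being supplied as well.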
*/ if (pw) exim_gid = pw->pw_gid; #ifndef EXIM_GROUPNAME else { fprintf(stderr, "exim: ref:name should specify a usercode, not a group.\n" "exim: can't let you get away with it unless you also specify a group.\n"); exit(EXIT_FAILURE); } #endif } else { fprintf(stderr, "exim: failed to find uid for user name \"%s\"\n", EXIM_USERNAME); exit(EXIT_FAILURE); } #endif #ifdef EXIM_GROUPNAME if (!route_findgroup(US EXIM_GROUPNAME, &exim_gid)) { fprintf(stderr, "exim: failed to find gid for group name \"%s\"\n", EXIM_GROUPNAME); exit(EXIT_FAILURE); } #endif #ifdef CONFIGURE_OWNERNAME if (!route_finduser(US CONFIGURE_OWNERNAME, NULL, &config_uid)) { fprintf(stderr, "exim: failed to find uid for user name \"%s\"\n", CONFIGURE_OWNERNAME); exit(EXIT_FAILURE); } #endif /* We default the system_filter_user to be the Exim run-time user, as a sane non-root value. */ system_filter_uid = exim_uid; #ifdef CONFIGURE_GROUPNAME if (!route_findgroup(US CONFIGURE_GROUPNAME, &config_gid)) { fprintf(stderr, "exim: failed to find gid for group name \"%s\"\n", CONFIGURE_GROUPNAME); exit(EXIT_FAILURE); } #endif /* In the Cygwin environment, some initialization used to need doing. It was fudged in by means of this macro; now no longer but we'll leave it in case of others. */ #ifdef OS_INIT OS_INIT #endif /* Check a field which is patched when we are running Exim within its testing harness; do a fast initial check, and then the whole thing. */ running_in_test_harness = *running_status == '<' && Ustrcmp(running_status, "<<<testing>>>") == 0; /* The C standard says that the equivalent of setlocale(LC_ALL, "C") is obeyed at the start of a program; however, it seems that some environments do not follow this. A "strange" locale can affect the formatting of timestamps, so we make quite sure. */ setlocale(LC_ALL, "C"); /* Set up the default handler for timing using alarm(). */ os_non_restarting_signal(SIGALRM, sigalrm_handler); /* Ensure we have a buffer for constructing log entries. Use malloc directly, because store_malloc writes a log entry on failure. */ if (!(log_buffer = US malloc(LOG_BUFFER_SIZE))) { fprintf(stderr, "exim: failed to get store for log buffer\n"); exit(EXIT_FAILURE); } /* Initialize the default log options. */ bits_set(log_selector, log_selector_size, log_default); /* Set log_stderr to stderr, provided that stderr exists. This gets reset to NULL when the daemon is run and the file is closed. We have to use this indirection, because some systems don't allow writing to the variable "stderr". */ if (fstat(fileno(stderr), &statbuf) >= 0) log_stderr = stderr; /* Arrange for the PCRE regex library to use our store functions. Note that the normal calls are actually macros that add additional arguments for debugging purposes so we have to assign specially constructed functions here. The default is to use store in the stacking pool, but this is overridden in the regex_must_compile() function. */ pcre_malloc = function_store_get; pcre_free = function_dummy_free; /* Ensure there is a big buffer for temporary use in several places. It is put in malloc store so that it can be freed for enlargement if necessary. */ big_buffer = store_malloc(big_buffer_size); /* Set up the handler for the data request signal, and set the initial descriptive text. */ set_process_info("initializing"); os_restarting_signal(SIGUSR1, usr1_handler); /* SIGHUP is used to get the daemon to reconfigure. It gets set as appropriate in the daemon code. For the rest of Exim's uses, we ignore it. 
*/ signal(SIGHUP, SIG_IGN); /* We don't want to die on pipe errors as the code is written to handle the write error instead. */ signal(SIGPIPE, SIG_IGN); /* Under some circumstance on some OS, Exim can get called with SIGCHLD set to SIG_IGN. This causes subprocesses that complete before the parent process waits for them not to hang around, so when Exim calls wait(), nothing is there. The wait() code has been made robust against this, but let's ensure that SIGCHLD is set to SIG_DFL, because it's tidier to wait and get a process ending status. We use sigaction rather than plain signal() on those OS where SA_NOCLDWAIT exists, because we want to be sure it is turned off. (There was a problem on AIX with this.) */ #ifdef SA_NOCLDWAIT { struct sigaction act; act.sa_handler = SIG_DFL; sigemptyset(&(act.sa_mask)); act.sa_flags = 0; sigaction(SIGCHLD, &act, NULL); } #else signal(SIGCHLD, SIG_DFL); #endif /* Save the arguments for use if we re-exec exim as a daemon after receiving SIGHUP. */ sighup_argv = argv; /* Set up the version number. Set up the leading 'E' for the external form of message ids, set the pointer to the internal form, and initialize it to indicate no message being processed. */ version_init(); message_id_option[0] = '-'; message_id_external = message_id_option + 1; message_id_external[0] = 'E'; message_id = message_id_external + 1; message_id[0] = 0; /* Set the umask to zero so that any files Exim creates using open() are created with the modes that it specifies. NOTE: Files created with fopen() have a problem, which was not recognized till rather late (February 2006). With this umask, such files will be world writeable. (They are all content scanning files in the spool directory, which isn't world-accessible, so this is not a disaster, but it's untidy.) I don't want to change this overall setting, however, because it will interact badly with the open() calls. Instead, there's now a function called modefopen() that fiddles with the umask while calling fopen(). */ (void)umask(0); /* Precompile the regular expression for matching a message id. Keep this in step with the code that generates ids in the accept.c module. We need to do this here, because the -M options check their arguments for syntactic validity using mac_ismsgid, which uses this. */ regex_ismsgid = regex_must_compile(US"^(?:[^\\W_]{6}-){2}[^\\W_]{2}$", FALSE, TRUE); /* Precompile the regular expression that is used for matching an SMTP error code, possibly extended, at the start of an error message. Note that the terminating whitespace character is included. */ regex_smtp_code = regex_must_compile(US"^\\d\\d\\d\\s(?:\\d\\.\\d\\d?\\d?\\.\\d\\d?\\d?\\s)?", FALSE, TRUE); #ifdef WHITELIST_D_MACROS /* Precompile the regular expression used to filter the content of macros given to -D for permissibility. */ regex_whitelisted_macro = regex_must_compile(US"^[A-Za-z0-9_/.-]*$", FALSE, TRUE); #endif for (i = 0; i < REGEX_VARS; i++) regex_vars[i] = NULL; /* If the program is called as "mailq" treat it as equivalent to "exim -bp"; this seems to be a generally accepted convention, since one finds symbolic links called "mailq" in standard OS configurations. */ if ((namelen == 5 && Ustrcmp(argv[0], "mailq") == 0) || (namelen > 5 && Ustrncmp(argv[0] + namelen - 6, "/mailq", 6) == 0)) { list_queue = TRUE; receiving_message = FALSE; called_as = US"-mailq"; } /* If the program is called as "rmail" treat it as equivalent to "exim -i -oee", thus allowing UUCP messages to be input using non-SMTP mode, i.e. 
preventing a single dot on a line from terminating the message, and returning with zero return code, even in cases of error (provided an error message has been sent). */ if ((namelen == 5 && Ustrcmp(argv[0], "rmail") == 0) || (namelen > 5 && Ustrncmp(argv[0] + namelen - 6, "/rmail", 6) == 0)) { dot_ends = FALSE; called_as = US"-rmail"; errors_sender_rc = EXIT_SUCCESS; } /* If the program is called as "rsmtp" treat it as equivalent to "exim -bS"; this is a smail convention. */ if ((namelen == 5 && Ustrcmp(argv[0], "rsmtp") == 0) || (namelen > 5 && Ustrncmp(argv[0] + namelen - 6, "/rsmtp", 6) == 0)) { smtp_input = smtp_batched_input = TRUE; called_as = US"-rsmtp"; } /* If the program is called as "runq" treat it as equivalent to "exim -q"; this is a smail convention. */ if ((namelen == 4 && Ustrcmp(argv[0], "runq") == 0) || (namelen > 4 && Ustrncmp(argv[0] + namelen - 5, "/runq", 5) == 0)) { queue_interval = 0; receiving_message = FALSE; called_as = US"-runq"; } /* If the program is called as "newaliases" treat it as equivalent to "exim -bi"; this is a sendmail convention. */ if ((namelen == 10 && Ustrcmp(argv[0], "newaliases") == 0) || (namelen > 10 && Ustrncmp(argv[0] + namelen - 11, "/newaliases", 11) == 0)) { bi_option = TRUE; receiving_message = FALSE; called_as = US"-newaliases"; } /* Save the original effective uid for a couple of uses later. It should normally be root, but in some esoteric environments it may not be. */ original_euid = geteuid(); /* Get the real uid and gid. If the caller is root, force the effective uid/gid to be the same as the real ones. This makes a difference only if Exim is setuid (or setgid) to something other than root, which could be the case in some special configurations. */ real_uid = getuid(); real_gid = getgid(); if (real_uid == root_uid) { rv = setgid(real_gid); if (rv) { fprintf(stderr, "exim: setgid(%ld) failed: %s\n", (long int)real_gid, strerror(errno)); exit(EXIT_FAILURE); } rv = setuid(real_uid); if (rv) { fprintf(stderr, "exim: setuid(%ld) failed: %s\n", (long int)real_uid, strerror(errno)); exit(EXIT_FAILURE); } } /* If neither the original real uid nor the original euid was root, Exim is running in an unprivileged state. */ unprivileged = (real_uid != root_uid && original_euid != root_uid); /* Scan the program's arguments. Some can be dealt with right away; others are simply recorded for checking and handling afterwards. Do a high-level switch on the second character (the one after '-'), to save some effort. */ for (i = 1; i < argc; i++) { BOOL badarg = FALSE; uschar *arg = argv[i]; uschar *argrest; int switchchar; /* An argument not starting with '-' is the start of a recipients list; break out of the options-scanning loop. */ if (arg[0] != '-') { recipients_arg = i; break; } /* An option consisting of -- terminates the options */ if (Ustrcmp(arg, "--") == 0) { recipients_arg = i + 1; break; } /* Handle flagged options */ switchchar = arg[1]; argrest = arg+2; /* Make all -ex options synonymous with -oex arguments, since that is assumed by various callers. Also make -qR options synonymous with -R options, as that seems to be required as well. Allow for -qqR too, and the same for -S options. 
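Concretely (the values are invented examples): a command line "-ee" is handled
exactly as "-oee" would be, "-qRabc" exactly as "-Rabc", and "-qqRabc" as
"-Rabc" with queue_2stage set; the -S forms behave the same way.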
*/ if (Ustrncmp(arg+1, "oe", 2) == 0 || Ustrncmp(arg+1, "qR", 2) == 0 || Ustrncmp(arg+1, "qS", 2) == 0) { switchchar = arg[2]; argrest++; } else if (Ustrncmp(arg+1, "qqR", 3) == 0 || Ustrncmp(arg+1, "qqS", 3) == 0) { switchchar = arg[3]; argrest += 2; queue_2stage = TRUE; } /* Make -r synonymous with -f, since it is a documented alias */ else if (arg[1] == 'r') switchchar = 'f'; /* Make -ov synonymous with -v */ else if (Ustrcmp(arg, "-ov") == 0) { switchchar = 'v'; argrest++; } /* deal with --option_aliases */ else if (switchchar == '-') { if (Ustrcmp(argrest, "help") == 0) { usage_wanted = TRUE; break; } else if (Ustrcmp(argrest, "version") == 0) { switchchar = 'b'; argrest = US"V"; } } /* High-level switch on active initial letter */ switch(switchchar) { /* sendmail uses -Ac and -Am to control which .cf file is used; we ignore them. */ case 'A': if (*argrest == '\0') { badarg = TRUE; break; } else { BOOL ignore = FALSE; switch (*argrest) { case 'c': case 'm': if (*(argrest + 1) == '\0') ignore = TRUE; break; } if (!ignore) { badarg = TRUE; break; } } break; /* -Btype is a sendmail option for 7bit/8bit setting. Exim is 8-bit clean so has no need of it. */ case 'B': if (*argrest == 0) i++; /* Skip over the type */ break; case 'b': receiving_message = FALSE; /* Reset TRUE for -bm, -bS, -bs below */ /* -bd: Run in daemon mode, awaiting SMTP connections. -bdf: Ditto, but in the foreground. */ if (*argrest == 'd') { daemon_listen = TRUE; if (*(++argrest) == 'f') background_daemon = FALSE; else if (*argrest != 0) { badarg = TRUE; break; } } /* -be: Run in expansion test mode -bem: Ditto, but read a message from a file first */ else if (*argrest == 'e') { expansion_test = checking = TRUE; if (argrest[1] == 'm') { if (++i >= argc) { badarg = TRUE; break; } expansion_test_message = argv[i]; argrest++; } if (argrest[1] != 0) { badarg = TRUE; break; } } /* -bF: Run system filter test */ else if (*argrest == 'F') { filter_test |= checking = FTEST_SYSTEM; if (*(++argrest) != 0) { badarg = TRUE; break; } if (++i < argc) filter_test_sfile = argv[i]; else { fprintf(stderr, "exim: file name expected after %s\n", argv[i-1]); exit(EXIT_FAILURE); } } /* -bf: Run user filter test -bfd: Set domain for filter testing -bfl: Set local part for filter testing -bfp: Set prefix for filter testing -bfs: Set suffix for filter testing */ else if (*argrest == 'f') { if (*(++argrest) == 0) { filter_test |= checking = FTEST_USER; if (++i < argc) filter_test_ufile = argv[i]; else { fprintf(stderr, "exim: file name expected after %s\n", argv[i-1]); exit(EXIT_FAILURE); } } else { if (++i >= argc) { fprintf(stderr, "exim: string expected after %s\n", arg); exit(EXIT_FAILURE); } if (Ustrcmp(argrest, "d") == 0) ftest_domain = argv[i]; else if (Ustrcmp(argrest, "l") == 0) ftest_localpart = argv[i]; else if (Ustrcmp(argrest, "p") == 0) ftest_prefix = argv[i]; else if (Ustrcmp(argrest, "s") == 0) ftest_suffix = argv[i]; else { badarg = TRUE; break; } } } /* -bh: Host checking - an IP address must follow. */ else if (Ustrcmp(argrest, "h") == 0 || Ustrcmp(argrest, "hc") == 0) { if (++i >= argc) { badarg = TRUE; break; } sender_host_address = argv[i]; host_checking = checking = log_testing_mode = TRUE; host_checking_callout = argrest[1] == 'c'; message_logs = FALSE; } /* -bi: This option is used by sendmail to initialize *the* alias file, though it has the -oA option to specify a different file. Exim has no concept of *the* alias file, but since Sun's YP make script calls sendmail this way, some support must be provided. 
*/ else if (Ustrcmp(argrest, "i") == 0) bi_option = TRUE; /* -bI: provide information, of the type to follow after a colon. This is an Exim flag. */ else if (argrest[0] == 'I' && Ustrlen(argrest) >= 2 && argrest[1] == ':') { uschar *p = &argrest[2]; info_flag = CMDINFO_HELP; if (Ustrlen(p)) { if (strcmpic(p, CUS"sieve") == 0) { info_flag = CMDINFO_SIEVE; info_stdout = TRUE; } else if (strcmpic(p, CUS"dscp") == 0) { info_flag = CMDINFO_DSCP; info_stdout = TRUE; } else if (strcmpic(p, CUS"help") == 0) { info_stdout = TRUE; } } } /* -bm: Accept and deliver message - the default option. Reinstate receiving_message, which got turned off for all -b options. */ else if (Ustrcmp(argrest, "m") == 0) receiving_message = TRUE; /* -bmalware: test the filename given for malware */ else if (Ustrcmp(argrest, "malware") == 0) { if (++i >= argc) { badarg = TRUE; break; } checking = TRUE; malware_test_file = argv[i]; } /* -bnq: For locally originating messages, do not qualify unqualified addresses. In the envelope, this causes errors; in header lines they just get left. */ else if (Ustrcmp(argrest, "nq") == 0) { allow_unqualified_sender = FALSE; allow_unqualified_recipient = FALSE; } /* -bpxx: List the contents of the mail queue, in various forms. If the option is -bpc, just a queue count is needed. Otherwise, if the first letter after p is r, then order is random. */ else if (*argrest == 'p') { if (*(++argrest) == 'c') { count_queue = TRUE; if (*(++argrest) != 0) badarg = TRUE; break; } if (*argrest == 'r') { list_queue_option = 8; argrest++; } else list_queue_option = 0; list_queue = TRUE; /* -bp: List the contents of the mail queue, top-level only */ if (*argrest == 0) {} /* -bpu: List the contents of the mail queue, top-level undelivered */ else if (Ustrcmp(argrest, "u") == 0) list_queue_option += 1; /* -bpa: List the contents of the mail queue, including all delivered */ else if (Ustrcmp(argrest, "a") == 0) list_queue_option += 2; /* Unknown after -bp[r] */ else { badarg = TRUE; break; } } /* -bP: List the configuration variables given as the address list. Force -v, so configuration errors get displayed. */ else if (Ustrcmp(argrest, "P") == 0) { /* -bP config: we need to setup here, because later, * when list_options is checked, the config is read already */ if (argv[i+1] && Ustrcmp(argv[i+1], "config") == 0) { list_config = TRUE; readconf_save_config(version_string); } else { list_options = TRUE; debug_selector |= D_v; debug_file = stderr; } } /* -brt: Test retry configuration lookup */ else if (Ustrcmp(argrest, "rt") == 0) { checking = TRUE; test_retry_arg = i + 1; goto END_ARG; } /* -brw: Test rewrite configuration */ else if (Ustrcmp(argrest, "rw") == 0) { checking = TRUE; test_rewrite_arg = i + 1; goto END_ARG; } /* -bS: Read SMTP commands on standard input, but produce no replies - all errors are reported by sending messages. */ else if (Ustrcmp(argrest, "S") == 0) smtp_input = smtp_batched_input = receiving_message = TRUE; /* -bs: Read SMTP commands on standard input and produce SMTP replies on standard output. 
*/ else if (Ustrcmp(argrest, "s") == 0) smtp_input = receiving_message = TRUE; /* -bt: address testing mode */ else if (Ustrcmp(argrest, "t") == 0) address_test_mode = checking = log_testing_mode = TRUE; /* -bv: verify addresses */ else if (Ustrcmp(argrest, "v") == 0) verify_address_mode = checking = log_testing_mode = TRUE; /* -bvs: verify sender addresses */ else if (Ustrcmp(argrest, "vs") == 0) { verify_address_mode = checking = log_testing_mode = TRUE; verify_as_sender = TRUE; } /* -bV: Print version string and support details */ else if (Ustrcmp(argrest, "V") == 0) { printf("Exim version %s #%s built %s\n", version_string, version_cnumber, version_date); printf("%s\n", CS version_copyright); version_printed = TRUE; show_whats_supported(stdout); log_testing_mode = TRUE; } /* -bw: inetd wait mode, accept a listening socket as stdin */ else if (*argrest == 'w') { inetd_wait_mode = TRUE; background_daemon = FALSE; daemon_listen = TRUE; if (*(++argrest) != '\0') { inetd_wait_timeout = readconf_readtime(argrest, 0, FALSE); if (inetd_wait_timeout <= 0) { fprintf(stderr, "exim: bad time value %s: abandoned\n", argv[i]); exit(EXIT_FAILURE); } } } else badarg = TRUE; break; /* -C: change configuration file list; ignore if it isn't really a change! Enforce a prefix check if required. */ case 'C': if (*argrest == 0) { if(++i < argc) argrest = argv[i]; else { badarg = TRUE; break; } } if (Ustrcmp(config_main_filelist, argrest) != 0) { #ifdef ALT_CONFIG_PREFIX int sep = 0; int len = Ustrlen(ALT_CONFIG_PREFIX); const uschar *list = argrest; uschar *filename; while((filename = string_nextinlist(&list, &sep, big_buffer, big_buffer_size)) != NULL) { if ((Ustrlen(filename) < len || Ustrncmp(filename, ALT_CONFIG_PREFIX, len) != 0 || Ustrstr(filename, "/../") != NULL) && (Ustrcmp(filename, "/dev/null") != 0 || real_uid != root_uid)) { fprintf(stderr, "-C Permission denied\n"); exit(EXIT_FAILURE); } } #endif if (real_uid != root_uid) { #ifdef TRUSTED_CONFIG_LIST if (real_uid != exim_uid #ifdef CONFIGURE_OWNER && real_uid != config_uid #endif ) trusted_config = FALSE; else { FILE *trust_list = Ufopen(TRUSTED_CONFIG_LIST, "rb"); if (trust_list) { struct stat statbuf; if (fstat(fileno(trust_list), &statbuf) != 0 || (statbuf.st_uid != root_uid /* owner not root */ #ifdef CONFIGURE_OWNER && statbuf.st_uid != config_uid /* owner not the special one */ #endif ) || /* or */ (statbuf.st_gid != root_gid /* group not root */ #ifdef CONFIGURE_GROUP && statbuf.st_gid != config_gid /* group not the special one */ #endif && (statbuf.st_mode & 020) != 0 /* group writeable */ ) || /* or */ (statbuf.st_mode & 2) != 0) /* world writeable */ { trusted_config = FALSE; fclose(trust_list); } else { /* Well, the trust list at least is up to scratch... 
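As an illustration (the paths are invented), the trust list file holds one
absolute path per line, for example

  /etc/exim/exim-testing.conf
  /etc/exim/exim-secondary.conf

and only if every file named by -C matches one of those entries exactly does
trusted_config remain TRUE; otherwise the configuration is treated as
untrusted.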
*/ void *reset_point = store_get(0); uschar *trusted_configs[32]; int nr_configs = 0; int i = 0; while (Ufgets(big_buffer, big_buffer_size, trust_list)) { uschar *start = big_buffer, *nl; while (*start && isspace(*start)) start++; if (*start != '/') continue; nl = Ustrchr(start, '\n'); if (nl) *nl = 0; trusted_configs[nr_configs++] = string_copy(start); if (nr_configs == 32) break; } fclose(trust_list); if (nr_configs) { int sep = 0; const uschar *list = argrest; uschar *filename; while (trusted_config && (filename = string_nextinlist(&list, &sep, big_buffer, big_buffer_size)) != NULL) { for (i=0; i < nr_configs; i++) { if (Ustrcmp(filename, trusted_configs[i]) == 0) break; } if (i == nr_configs) { trusted_config = FALSE; break; } } store_reset(reset_point); } else { /* No valid prefixes found in trust_list file. */ trusted_config = FALSE; } } } else { /* Could not open trust_list file. */ trusted_config = FALSE; } } #else /* Not root; don't trust config */ trusted_config = FALSE; #endif } config_main_filelist = argrest; config_changed = TRUE; } break; /* -D: set up a macro definition */ case 'D': #ifdef DISABLE_D_OPTION fprintf(stderr, "exim: -D is not available in this Exim binary\n"); exit(EXIT_FAILURE); #else { int ptr = 0; macro_item *m; uschar name[24]; uschar *s = argrest; opt_D_used = TRUE; while (isspace(*s)) s++; if (*s < 'A' || *s > 'Z') { fprintf(stderr, "exim: macro name set by -D must start with " "an upper case letter\n"); exit(EXIT_FAILURE); } while (isalnum(*s) || *s == '_') { if (ptr < sizeof(name)-1) name[ptr++] = *s; s++; } name[ptr] = 0; if (ptr == 0) { badarg = TRUE; break; } while (isspace(*s)) s++; if (*s != 0) { if (*s++ != '=') { badarg = TRUE; break; } while (isspace(*s)) s++; } for (m = macros; m; m = m->next) if (Ustrcmp(m->name, name) == 0) { fprintf(stderr, "exim: duplicated -D in command line\n"); exit(EXIT_FAILURE); } m = macro_create(name, s, TRUE, FALSE); if (clmacro_count >= MAX_CLMACROS) { fprintf(stderr, "exim: too many -D options on command line\n"); exit(EXIT_FAILURE); } clmacros[clmacro_count++] = string_sprintf("-D%s=%s", m->name, m->replacement); } #endif break; /* -d: Set debug level (see also -v below) or set the drop_cr option. The latter is now a no-op, retained for compatibility only. If -dd is used, debugging subprocesses of the daemon is disabled. */ case 'd': if (Ustrcmp(argrest, "ropcr") == 0) { /* drop_cr = TRUE; */ } /* Use an intermediate variable so that we don't set debugging while decoding the debugging bits. */ else { unsigned int selector = D_default; debug_selector = 0; debug_file = NULL; if (*argrest == 'd') { debug_daemon = TRUE; argrest++; } if (*argrest != 0) decode_bits(&selector, 1, debug_notall, argrest, debug_options, debug_options_count, US"debug", 0); debug_selector = selector; } break; /* -E: This is a local error message. This option is not intended for external use at all, but is not restricted to trusted callers because it does no harm (just suppresses certain error messages) and if Exim is run not setuid root it won't always be trusted when it generates error messages using this option. If there is a message id following -E, point message_reference at it, for logging. */ case 'E': local_error_message = TRUE; if (mac_ismsgid(argrest)) message_reference = argrest; break; /* -ex: The vacation program calls sendmail with the undocumented "-eq" option, so it looks as if historically the -oex options are also callable without the leading -o. So we have to accept them. 
Before the switch, anything starting -oe has been converted to -e. Exim does not support all of the sendmail error options. */ case 'e': if (Ustrcmp(argrest, "e") == 0) { arg_error_handling = ERRORS_SENDER; errors_sender_rc = EXIT_SUCCESS; } else if (Ustrcmp(argrest, "m") == 0) arg_error_handling = ERRORS_SENDER; else if (Ustrcmp(argrest, "p") == 0) arg_error_handling = ERRORS_STDERR; else if (Ustrcmp(argrest, "q") == 0) arg_error_handling = ERRORS_STDERR; else if (Ustrcmp(argrest, "w") == 0) arg_error_handling = ERRORS_SENDER; else badarg = TRUE; break; /* -F: Set sender's full name, used instead of the gecos entry from the password file. Since users can usually alter their gecos entries, there's no security involved in using this instead. The data can follow the -F or be in the next argument. */ case 'F': if (*argrest == 0) { if(++i < argc) argrest = argv[i]; else { badarg = TRUE; break; } } originator_name = argrest; sender_name_forced = TRUE; break; /* -f: Set sender's address - this value is only actually used if Exim is run by a trusted user, or if untrusted_set_sender is set and matches the address, except that the null address can always be set by any user. The test for this happens later, when the value given here is ignored when not permitted. For an untrusted user, the actual sender is still put in Sender: if it doesn't match the From: header (unless no_local_from_check is set). The data can follow the -f or be in the next argument. The -r switch is an obsolete form of -f but since there appear to be programs out there that use anything that sendmail has ever supported, better accept it - the synonymizing is done before the switch above. At this stage, we must allow domain literal addresses, because we don't know what the setting of allow_domain_literals is yet. Ditto for trailing dots and strip_trailing_dot. */ case 'f': { int dummy_start, dummy_end; uschar *errmess; if (*argrest == 0) { if (i+1 < argc) argrest = argv[++i]; else { badarg = TRUE; break; } } if (*argrest == 0) sender_address = string_sprintf(""); /* Ensure writeable memory */ else { uschar *temp = argrest + Ustrlen(argrest) - 1; while (temp >= argrest && isspace(*temp)) temp--; if (temp >= argrest && *temp == '.') f_end_dot = TRUE; allow_domain_literals = TRUE; strip_trailing_dot = TRUE; #ifdef SUPPORT_I18N allow_utf8_domains = TRUE; #endif sender_address = parse_extract_address(argrest, &errmess, &dummy_start, &dummy_end, &sender_address_domain, TRUE); #ifdef SUPPORT_I18N message_smtputf8 = string_is_utf8(sender_address); allow_utf8_domains = FALSE; #endif allow_domain_literals = FALSE; strip_trailing_dot = FALSE; if (sender_address == NULL) { fprintf(stderr, "exim: bad -f address \"%s\": %s\n", argrest, errmess); return EXIT_FAILURE; } } sender_address_forced = TRUE; } break; /* -G: sendmail invocation to specify that it's a gateway submission and sendmail may complain about problems instead of fixing them. We make it equivalent to an ACL "control = suppress_local_fixups" and do not at this time complain about problems. */ case 'G': flag_G = TRUE; break; /* -h: Set the hop count for an incoming message. Exim does not currently support this; it always computes it by counting the Received: headers. To put it in will require a change to the spool header file format. 
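For example, a sendmail-compatible caller may pass "-h 5" or "-h5"; the value
must be numeric but is otherwise ignored.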
*/ case 'h': if (*argrest == 0) { if(++i < argc) argrest = argv[i]; else { badarg = TRUE; break; } } if (!isdigit(*argrest)) badarg = TRUE; break; /* -i: Set flag so dot doesn't end non-SMTP input (same as -oi, seems not to be documented for sendmail but mailx (at least) uses it) */ case 'i': if (*argrest == 0) dot_ends = FALSE; else badarg = TRUE; break; /* -L: set the identifier used for syslog; equivalent to setting syslog_processname in the config file, but needs to be an admin option. */ case 'L': if (*argrest == '\0') { if(++i < argc) argrest = argv[i]; else { badarg = TRUE; break; } } sz = Ustrlen(argrest); if (sz > 32) { fprintf(stderr, "exim: the -L syslog name is too long: \"%s\"\n", argrest); return EXIT_FAILURE; } if (sz < 1) { fprintf(stderr, "exim: the -L syslog name is too short\n"); return EXIT_FAILURE; } cmdline_syslog_name = argrest; break; case 'M': receiving_message = FALSE; /* -MC: continue delivery of another message via an existing open file descriptor. This option is used for an internal call by the smtp transport when there is a pending message waiting to go to an address to which it has got a connection. Five subsequent arguments are required: transport name, host name, IP address, sequence number, and message_id. Transports may decline to create new processes if the sequence number gets too big. The channel is stdin. This (-MC) must be the last argument. There's a subsequent check that the real-uid is privileged. If we are running in the test harness. delay for a bit, to let the process that set this one up complete. This makes for repeatability of the logging, etc. output. */ if (Ustrcmp(argrest, "C") == 0) { union sockaddr_46 interface_sock; EXIM_SOCKLEN_T size = sizeof(interface_sock); if (argc != i + 6) { fprintf(stderr, "exim: too many or too few arguments after -MC\n"); return EXIT_FAILURE; } if (msg_action_arg >= 0) { fprintf(stderr, "exim: incompatible arguments\n"); return EXIT_FAILURE; } continue_transport = argv[++i]; continue_hostname = argv[++i]; continue_host_address = argv[++i]; continue_sequence = Uatoi(argv[++i]); msg_action = MSG_DELIVER; msg_action_arg = ++i; forced_delivery = TRUE; queue_run_pid = passed_qr_pid; queue_run_pipe = passed_qr_pipe; if (!mac_ismsgid(argv[i])) { fprintf(stderr, "exim: malformed message id %s after -MC option\n", argv[i]); return EXIT_FAILURE; } /* Set up $sending_ip_address and $sending_port, unless proxied */ if (!continue_proxy_cipher) if (getsockname(fileno(stdin), (struct sockaddr *)(&interface_sock), &size) == 0) sending_ip_address = host_ntoa(-1, &interface_sock, NULL, &sending_port); else { fprintf(stderr, "exim: getsockname() failed after -MC option: %s\n", strerror(errno)); return EXIT_FAILURE; } if (running_in_test_harness) millisleep(500); break; } else if (*argrest == 'C' && argrest[1] && !argrest[2]) { switch(argrest[1]) { /* -MCA: set the smtp_authenticated flag; this is useful only when it precedes -MC (see above). The flag indicates that the host to which Exim is connected has accepted an AUTH sequence. */ case 'A': smtp_authenticated = TRUE; break; /* -MCD: set the smtp_use_dsn flag; this indicates that the host that exim is connected to supports the esmtp extension DSN */ case 'D': smtp_peer_options |= PEER_OFFERED_DSN; break; /* -MCG: set the queue name, to a non-default value */ case 'G': if (++i < argc) queue_name = string_copy(argv[i]); else badarg = TRUE; break; /* -MCK: the peer offered CHUNKING. 
Must precede -MC */

          case 'K': smtp_peer_options |= PEER_OFFERED_CHUNKING; break;

          /* -MCP: set the smtp_use_pipelining flag; this is useful only when
          it precedes -MC (see above) */

          case 'P': smtp_peer_options |= PEER_OFFERED_PIPE; break;

          /* -MCQ: pass on the pid of the queue-running process that started
          this chain of deliveries and the fd of its synchronizing pipe; this
          is useful only when it precedes -MC (see above) */

          case 'Q': if (++i < argc) passed_qr_pid = (pid_t)(Uatol(argv[i]));
                    else badarg = TRUE;
                    if (++i < argc) passed_qr_pipe = (int)(Uatol(argv[i]));
                    else badarg = TRUE;
                    break;

          /* -MCS: set the smtp_use_size flag; this is useful only when it
          precedes -MC (see above) */

          case 'S': smtp_peer_options |= PEER_OFFERED_SIZE; break;

#ifdef SUPPORT_TLS
          /* -MCt: similar to -MCT below but the connection is still open via
          a proxy process which handles the TLS context and coding. Require
          three arguments for the proxied local address and port, and the TLS
          cipher. */

          case 't': if (++i < argc) sending_ip_address = argv[i];
                    else badarg = TRUE;
                    if (++i < argc) sending_port = (int)(Uatol(argv[i]));
                    else badarg = TRUE;
                    if (++i < argc) continue_proxy_cipher = argv[i];
                    else badarg = TRUE;
                    /*FALLTHROUGH*/

          /* -MCT: set the tls_offered flag; this is useful only when it
          precedes -MC (see above). The flag indicates that the host to which
          Exim is connected has offered TLS support. */

          case 'T': smtp_peer_options |= PEER_OFFERED_TLS; break;
#endif

          default:  badarg = TRUE; break;
          }
        break;
        }

    /* -M[x]: various operations on the following list of message ids:
         -M    deliver the messages, ignoring next retry times and thawing
         -Mc   deliver the messages, checking next retry times, no thawing
         -Mf   freeze the messages
         -Mg   give up on the messages
         -Mt   thaw the messages
         -Mrm  remove the messages

       In the above cases, this must be the last option. There are also the
       following options which are followed by a single message id, and which
       act on that message. Some of them use the "recipient" addresses as
       well.
-Mar add recipient(s) -Mmad mark all recipients delivered -Mmd mark recipients(s) delivered -Mes edit sender -Mset load a message for use with -be -Mvb show body -Mvc show copy (of whole message, in RFC 2822 format) -Mvh show header -Mvl show log */ else if (*argrest == 0) { msg_action = MSG_DELIVER; forced_delivery = deliver_force_thaw = TRUE; } else if (Ustrcmp(argrest, "ar") == 0) { msg_action = MSG_ADD_RECIPIENT; one_msg_action = TRUE; } else if (Ustrcmp(argrest, "c") == 0) msg_action = MSG_DELIVER; else if (Ustrcmp(argrest, "es") == 0) { msg_action = MSG_EDIT_SENDER; one_msg_action = TRUE; } else if (Ustrcmp(argrest, "f") == 0) msg_action = MSG_FREEZE; else if (Ustrcmp(argrest, "g") == 0) { msg_action = MSG_DELIVER; deliver_give_up = TRUE; } else if (Ustrcmp(argrest, "mad") == 0) { msg_action = MSG_MARK_ALL_DELIVERED; } else if (Ustrcmp(argrest, "md") == 0) { msg_action = MSG_MARK_DELIVERED; one_msg_action = TRUE; } else if (Ustrcmp(argrest, "rm") == 0) msg_action = MSG_REMOVE; else if (Ustrcmp(argrest, "set") == 0) { msg_action = MSG_LOAD; one_msg_action = TRUE; } else if (Ustrcmp(argrest, "t") == 0) msg_action = MSG_THAW; else if (Ustrcmp(argrest, "vb") == 0) { msg_action = MSG_SHOW_BODY; one_msg_action = TRUE; } else if (Ustrcmp(argrest, "vc") == 0) { msg_action = MSG_SHOW_COPY; one_msg_action = TRUE; } else if (Ustrcmp(argrest, "vh") == 0) { msg_action = MSG_SHOW_HEADER; one_msg_action = TRUE; } else if (Ustrcmp(argrest, "vl") == 0) { msg_action = MSG_SHOW_LOG; one_msg_action = TRUE; } else { badarg = TRUE; break; } /* All the -Mxx options require at least one message id. */ msg_action_arg = i + 1; if (msg_action_arg >= argc) { fprintf(stderr, "exim: no message ids given after %s option\n", arg); return EXIT_FAILURE; } /* Some require only message ids to follow */ if (!one_msg_action) { int j; for (j = msg_action_arg; j < argc; j++) if (!mac_ismsgid(argv[j])) { fprintf(stderr, "exim: malformed message id %s after %s option\n", argv[j], arg); return EXIT_FAILURE; } goto END_ARG; /* Remaining args are ids */ } /* Others require only one message id, possibly followed by addresses, which will be handled as normal arguments. */ else { if (!mac_ismsgid(argv[msg_action_arg])) { fprintf(stderr, "exim: malformed message id %s after %s option\n", argv[msg_action_arg], arg); return EXIT_FAILURE; } i++; } break; /* Some programs seem to call the -om option without the leading o; for sendmail it askes for "me too". Exim always does this. */ case 'm': if (*argrest != 0) badarg = TRUE; break; /* -N: don't do delivery - a debugging option that stops transports doing their thing. It implies debugging at the D_v level. */ case 'N': if (*argrest == 0) { dont_deliver = TRUE; debug_selector |= D_v; debug_file = stderr; } else badarg = TRUE; break; /* -n: This means "don't alias" in sendmail, apparently. For normal invocations, it has no effect. It may affect some other options. */ case 'n': flag_n = TRUE; break; /* -O: Just ignore it. In sendmail, apparently -O option=value means set option to the specified value. This form uses long names. We need to handle -O option=value and -Ooption=value. */ case 'O': if (*argrest == 0) { if (++i >= argc) { fprintf(stderr, "exim: string expected after -O\n"); exit(EXIT_FAILURE); } } break; case 'o': /* -oA: Set an argument for the bi command (sendmail's "alternate alias file" option). 
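
The -M<suffix> decoding above is a hand-written if/else chain. Purely as an illustration of an alternative shape (the names here are hypothetical, not how Exim is actually written), the same dispatch could be table-driven, with the "one message id plus recipient addresses" property carried alongside the action code:

#include <string.h>

enum msg_act { ACT_ADD_RECIPIENT, ACT_EDIT_SENDER, ACT_FREEZE, ACT_GIVE_UP, ACT_THAW };

struct m_opt
{
const char *suffix;   // text following "-M"
enum msg_act action;  // what to do with the message(s)
int one_msg;          // 1 => exactly one id, then recipient addresses
};

static const struct m_opt m_opts[] =
{
{ "ar", ACT_ADD_RECIPIENT, 1 },
{ "es", ACT_EDIT_SENDER,   1 },
{ "f",  ACT_FREEZE,        0 },
{ "g",  ACT_GIVE_UP,       0 },
{ "t",  ACT_THAW,          0 },
};

// Returns the table entry for a given suffix, or NULL if unknown.
static const struct m_opt *lookup_m_opt(const char *suffix)
{
size_t n;
for (n = 0; n < sizeof(m_opts) / sizeof(m_opts[0]); n++)
  if (strcmp(suffix, m_opts[n].suffix) == 0) return &m_opts[n];
return NULL;
}
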
*/ if (*argrest == 'A') { alias_arg = argrest + 1; if (alias_arg[0] == 0) { if (i+1 < argc) alias_arg = argv[++i]; else { fprintf(stderr, "exim: string expected after -oA\n"); exit(EXIT_FAILURE); } } } /* -oB: Set a connection message max value for remote deliveries */ else if (*argrest == 'B') { uschar *p = argrest + 1; if (p[0] == 0) { if (i+1 < argc && isdigit((argv[i+1][0]))) p = argv[++i]; else { connection_max_messages = 1; p = NULL; } } if (p != NULL) { if (!isdigit(*p)) { fprintf(stderr, "exim: number expected after -oB\n"); exit(EXIT_FAILURE); } connection_max_messages = Uatoi(p); } } /* -odb: background delivery */ else if (Ustrcmp(argrest, "db") == 0) { synchronous_delivery = FALSE; arg_queue_only = FALSE; queue_only_set = TRUE; } /* -odf: foreground delivery (smail-compatible option); same effect as -odi: interactive (synchronous) delivery (sendmail-compatible option) */ else if (Ustrcmp(argrest, "df") == 0 || Ustrcmp(argrest, "di") == 0) { synchronous_delivery = TRUE; arg_queue_only = FALSE; queue_only_set = TRUE; } /* -odq: queue only */ else if (Ustrcmp(argrest, "dq") == 0) { synchronous_delivery = FALSE; arg_queue_only = TRUE; queue_only_set = TRUE; } /* -odqs: queue SMTP only - do local deliveries and remote routing, but no remote delivery */ else if (Ustrcmp(argrest, "dqs") == 0) { queue_smtp = TRUE; arg_queue_only = FALSE; queue_only_set = TRUE; } /* -oex: Sendmail error flags. As these are also accepted without the leading -o prefix, for compatibility with vacation and other callers, they are handled with -e above. */ /* -oi: Set flag so dot doesn't end non-SMTP input (same as -i) -oitrue: Another sendmail syntax for the same */ else if (Ustrcmp(argrest, "i") == 0 || Ustrcmp(argrest, "itrue") == 0) dot_ends = FALSE; /* -oM*: Set various characteristics for an incoming message; actually acted on for trusted callers only. */ else if (*argrest == 'M') { if (i+1 >= argc) { fprintf(stderr, "exim: data expected after -o%s\n", argrest); exit(EXIT_FAILURE); } /* -oMa: Set sender host address */ if (Ustrcmp(argrest, "Ma") == 0) sender_host_address = argv[++i]; /* -oMaa: Set authenticator name */ else if (Ustrcmp(argrest, "Maa") == 0) sender_host_authenticated = argv[++i]; /* -oMas: setting authenticated sender */ else if (Ustrcmp(argrest, "Mas") == 0) authenticated_sender = argv[++i]; /* -oMai: setting authenticated id */ else if (Ustrcmp(argrest, "Mai") == 0) authenticated_id = argv[++i]; /* -oMi: Set incoming interface address */ else if (Ustrcmp(argrest, "Mi") == 0) interface_address = argv[++i]; /* -oMm: Message reference */ else if (Ustrcmp(argrest, "Mm") == 0) { if (!mac_ismsgid(argv[i+1])) { fprintf(stderr,"-oMm must be a valid message ID\n"); exit(EXIT_FAILURE); } if (!trusted_config) { fprintf(stderr,"-oMm must be called by a trusted user/config\n"); exit(EXIT_FAILURE); } message_reference = argv[++i]; } /* -oMr: Received protocol */ else if (Ustrcmp(argrest, "Mr") == 0) if (received_protocol) { fprintf(stderr, "received_protocol is set already\n"); exit(EXIT_FAILURE); } else received_protocol = argv[++i]; /* -oMs: Set sender host name */ else if (Ustrcmp(argrest, "Ms") == 0) sender_host_name = argv[++i]; /* -oMt: Set sender ident */ else if (Ustrcmp(argrest, "Mt") == 0) { sender_ident_set = TRUE; sender_ident = argv[++i]; } /* Else a bad argument */ else { badarg = TRUE; break; } } /* -om: Me-too flag for aliases. Exim always does this. Some programs seem to call this as -m (undocumented), so that is also accepted (see above). 
*/ else if (Ustrcmp(argrest, "m") == 0) {} /* -oo: An ancient flag for old-style addresses which still seems to crop up in some calls (see in SCO). */ else if (Ustrcmp(argrest, "o") == 0) {} /* -oP <name>: set pid file path for daemon */ else if (Ustrcmp(argrest, "P") == 0) override_pid_file_path = argv[++i]; /* -or <n>: set timeout for non-SMTP acceptance -os <n>: set timeout for SMTP acceptance */ else if (*argrest == 'r' || *argrest == 's') { int *tp = (*argrest == 'r')? &arg_receive_timeout : &arg_smtp_receive_timeout; if (argrest[1] == 0) { if (i+1 < argc) *tp= readconf_readtime(argv[++i], 0, FALSE); } else *tp = readconf_readtime(argrest + 1, 0, FALSE); if (*tp < 0) { fprintf(stderr, "exim: bad time value %s: abandoned\n", argv[i]); exit(EXIT_FAILURE); } } /* -oX <list>: Override local_interfaces and/or default daemon ports */ else if (Ustrcmp(argrest, "X") == 0) override_local_interfaces = argv[++i]; /* Unknown -o argument */ else badarg = TRUE; break; /* -ps: force Perl startup; -pd force delayed Perl startup */ case 'p': #ifdef EXIM_PERL if (*argrest == 's' && argrest[1] == 0) { perl_start_option = 1; break; } if (*argrest == 'd' && argrest[1] == 0) { perl_start_option = -1; break; } #endif /* -panythingelse is taken as the Sendmail-compatible argument -prval:sval, which sets the host protocol and host name */ if (*argrest == 0) { if (i+1 < argc) argrest = argv[++i]; else { badarg = TRUE; break; } } if (*argrest != 0) { uschar *hn; if (received_protocol) { fprintf(stderr, "received_protocol is set already\n"); exit(EXIT_FAILURE); } hn = Ustrchr(argrest, ':'); if (hn == NULL) { received_protocol = argrest; } else { int old_pool = store_pool; store_pool = POOL_PERM; received_protocol = string_copyn(argrest, hn - argrest); store_pool = old_pool; sender_host_name = hn + 1; } } break; case 'q': receiving_message = FALSE; if (queue_interval >= 0) { fprintf(stderr, "exim: -q specified more than once\n"); exit(EXIT_FAILURE); } /* -qq...: Do queue runs in a 2-stage manner */ if (*argrest == 'q') { queue_2stage = TRUE; argrest++; } /* -qi...: Do only first (initial) deliveries */ if (*argrest == 'i') { queue_run_first_delivery = TRUE; argrest++; } /* -qf...: Run the queue, forcing deliveries -qff..: Ditto, forcing thawing as well */ if (*argrest == 'f') { queue_run_force = TRUE; if (*++argrest == 'f') { deliver_force_thaw = TRUE; argrest++; } } /* -q[f][f]l...: Run the queue only on local deliveries */ if (*argrest == 'l') { queue_run_local = TRUE; argrest++; } /* -q[f][f][l][G<name>]... Work on the named queue */ if (*argrest == 'G') { int i; for (argrest++, i = 0; argrest[i] && argrest[i] != '/'; ) i++; queue_name = string_copyn(argrest, i); argrest += i; if (*argrest == '/') argrest++; } /* -q[f][f][l][G<name>]: Run the queue, optionally forced, optionally local only, optionally named, optionally starting from a given message id. */ if (*argrest == 0 && (i + 1 >= argc || argv[i+1][0] == '-' || mac_ismsgid(argv[i+1]))) { queue_interval = 0; if (i+1 < argc && mac_ismsgid(argv[i+1])) start_queue_run_id = argv[++i]; if (i+1 < argc && mac_ismsgid(argv[i+1])) stop_queue_run_id = argv[++i]; } /* -q[f][f][l][G<name>/]<n>: Run the queue at regular intervals, optionally forced, optionally local only, optionally named. */ else if ((queue_interval = readconf_readtime(*argrest ? argrest : argv[++i], 0, FALSE)) <= 0) { fprintf(stderr, "exim: bad time value %s: abandoned\n", argv[i]); exit(EXIT_FAILURE); } break; case 'R': /* Synonymous with -qR... 
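
A standalone sketch of the cursor-style scan used for the composed -q modifiers above; the string passed in is assumed to be whatever follows the leading "-q", and the structure and helper names are invented for illustration only:

#include <string.h>

struct q_flags
{
int two_stage, first_only, force, thaw, local_only;
char queue[32];
};

// Consumes the single-letter modifiers from the front of 's' and returns a
// pointer to whatever is left (an interval such as "30m", or an empty string).
static const char *parse_q_modifiers(const char *s, struct q_flags *f)
{
memset(f, 0, sizeof(*f));
if (*s == 'q') { f->two_stage = 1; s++; }                 // -qq : two-stage run
if (*s == 'i') { f->first_only = 1; s++; }                // -qi : initial deliveries only
if (*s == 'f') { f->force = 1; s++;                       // -qf : force deliveries
                 if (*s == 'f') { f->thaw = 1; s++; } }   // -qff: force and thaw
if (*s == 'l') { f->local_only = 1; s++; }                // -ql : local deliveries only
if (*s == 'G')                                            // -qG<name>[/] : named queue
  {
  size_t n = strcspn(++s, "/");
  size_t keep = n < sizeof(f->queue) ? n : sizeof(f->queue) - 1;
  memcpy(f->queue, s, keep);
  f->queue[keep] = '\0';
  s += n;
  if (*s == '/') s++;
  }
return s;
}
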
*/ receiving_message = FALSE; /* -Rf: As -R (below) but force all deliveries, -Rff: Ditto, but also thaw all frozen messages, -Rr: String is regex -Rrf: Regex and force -Rrff: Regex and force and thaw in all cases provided there are no further characters in this argument. */ if (*argrest != 0) { int i; for (i = 0; i < nelem(rsopts); i++) if (Ustrcmp(argrest, rsopts[i]) == 0) { if (i != 2) queue_run_force = TRUE; if (i >= 2) deliver_selectstring_regex = TRUE; if (i == 1 || i == 4) deliver_force_thaw = TRUE; argrest += Ustrlen(rsopts[i]); } } /* -R: Set string to match in addresses for forced queue run to pick out particular messages. */ if (*argrest) deliver_selectstring = argrest; else if (i+1 < argc) deliver_selectstring = argv[++i]; else { fprintf(stderr, "exim: string expected after -R\n"); exit(EXIT_FAILURE); } break; /* -r: an obsolete synonym for -f (see above) */ /* -S: Like -R but works on sender. */ case 'S': /* Synonymous with -qS... */ receiving_message = FALSE; /* -Sf: As -S (below) but force all deliveries, -Sff: Ditto, but also thaw all frozen messages, -Sr: String is regex -Srf: Regex and force -Srff: Regex and force and thaw in all cases provided there are no further characters in this argument. */ if (*argrest) { int i; for (i = 0; i < nelem(rsopts); i++) if (Ustrcmp(argrest, rsopts[i]) == 0) { if (i != 2) queue_run_force = TRUE; if (i >= 2) deliver_selectstring_sender_regex = TRUE; if (i == 1 || i == 4) deliver_force_thaw = TRUE; argrest += Ustrlen(rsopts[i]); } } /* -S: Set string to match in addresses for forced queue run to pick out particular messages. */ if (*argrest) deliver_selectstring_sender = argrest; else if (i+1 < argc) deliver_selectstring_sender = argv[++i]; else { fprintf(stderr, "exim: string expected after -S\n"); exit(EXIT_FAILURE); } break; /* -Tqt is an option that is exclusively for use by the testing suite. It is not recognized in other circumstances. It allows for the setting up of explicit "queue times" so that various warning/retry things can be tested. Otherwise variability of clock ticks etc. cause problems. */ case 'T': if (running_in_test_harness && Ustrcmp(argrest, "qt") == 0) fudged_queue_times = argv[++i]; else badarg = TRUE; break; /* -t: Set flag to extract recipients from body of message. */ case 't': if (*argrest == 0) extract_recipients = TRUE; /* -ti: Set flag to extract recipients from body of message, and also specify that dot does not end the message. */ else if (Ustrcmp(argrest, "i") == 0) { extract_recipients = TRUE; dot_ends = FALSE; } /* -tls-on-connect: don't wait for STARTTLS (for old clients) */ #ifdef SUPPORT_TLS else if (Ustrcmp(argrest, "ls-on-connect") == 0) tls_in.on_connect = TRUE; #endif else badarg = TRUE; break; /* -U: This means "initial user submission" in sendmail, apparently. The doc claims that in future sendmail may refuse syntactically invalid messages instead of fixing them. For the moment, we just ignore it. */ case 'U': break; /* -v: verify things - this is a very low-level debugging */ case 'v': if (*argrest == 0) { debug_selector |= D_v; debug_file = stderr; } else badarg = TRUE; break; /* -x: AIX uses this to indicate some fancy 8-bit character stuff: The -x flag tells the sendmail command that mail from a local mail program has National Language Support (NLS) extended characters in the body of the mail item. The sendmail command can send mail with extended NLS characters across networks that normally corrupts these 8-bit characters. As Exim is 8-bit clean, it just ignores this flag. 
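
For reference, the -R/-S suffix scan above walks a small table of suffixes; from the decoding logic its entries are evidently "f", "ff", "r", "rf" and "rff" (the real rsopts[] array is defined elsewhere in this file, so the table below is a reconstruction, not a copy). A sketch of how the index maps onto the three flags:

static const char *const rs_suffixes[] = { "f", "ff", "r", "rf", "rff" };

static void decode_rs_suffix(int idx, int *force, int *regex, int *thaw)
{
*force = (idx != 2);             // every form except plain "r" forces delivery
*regex = (idx >= 2);             // "r", "rf", "rff" treat the string as a regex
*thaw  = (idx == 1 || idx == 4); // the "ff" forms also thaw frozen messages
}
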
*/ case 'x': if (*argrest != 0) badarg = TRUE; break; /* -X: in sendmail: takes one parameter, logfile, and sends debugging logs to that file. We swallow the parameter and otherwise ignore it. */ case 'X': if (*argrest == '\0') if (++i >= argc) { fprintf(stderr, "exim: string expected after -X\n"); exit(EXIT_FAILURE); } break; case 'z': if (*argrest == '\0') if (++i < argc) log_oneline = argv[i]; else { fprintf(stderr, "exim: file name expected after %s\n", argv[i-1]); exit(EXIT_FAILURE); } break; /* All other initial characters are errors */ default: badarg = TRUE; break; } /* End of high-level switch statement */ /* Failed to recognize the option, or syntax error */ if (badarg) { fprintf(stderr, "exim abandoned: unknown, malformed, or incomplete " "option %s\n", arg); exit(EXIT_FAILURE); } } /* If -R or -S have been specified without -q, assume a single queue run. */ if ( (deliver_selectstring || deliver_selectstring_sender) && queue_interval < 0) queue_interval = 0; END_ARG: /* If usage_wanted is set we call the usage function - which never returns */ if (usage_wanted) exim_usage(called_as); /* Arguments have been processed. Check for incompatibilities. */ if (( (smtp_input || extract_recipients || recipients_arg < argc) && (daemon_listen || queue_interval >= 0 || bi_option || test_retry_arg >= 0 || test_rewrite_arg >= 0 || filter_test != FTEST_NONE || (msg_action_arg > 0 && !one_msg_action)) ) || ( msg_action_arg > 0 && (daemon_listen || queue_interval > 0 || list_options || (checking && msg_action != MSG_LOAD) || bi_option || test_retry_arg >= 0 || test_rewrite_arg >= 0) ) || ( (daemon_listen || queue_interval > 0) && (sender_address != NULL || list_options || list_queue || checking || bi_option) ) || ( daemon_listen && queue_interval == 0 ) || ( inetd_wait_mode && queue_interval >= 0 ) || ( list_options && (checking || smtp_input || extract_recipients || filter_test != FTEST_NONE || bi_option) ) || ( verify_address_mode && (address_test_mode || smtp_input || extract_recipients || filter_test != FTEST_NONE || bi_option) ) || ( address_test_mode && (smtp_input || extract_recipients || filter_test != FTEST_NONE || bi_option) ) || ( smtp_input && (sender_address != NULL || filter_test != FTEST_NONE || extract_recipients) ) || ( deliver_selectstring != NULL && queue_interval < 0 ) || ( msg_action == MSG_LOAD && (!expansion_test || expansion_test_message != NULL) ) ) { fprintf(stderr, "exim: incompatible command-line options or arguments\n"); exit(EXIT_FAILURE); } /* If debugging is set up, set the file and the file descriptor to pass on to child processes. It should, of course, be 2 for stderr. Also, force the daemon to run in the foreground. */ if (debug_selector != 0) { debug_file = stderr; debug_fd = fileno(debug_file); background_daemon = FALSE; if (running_in_test_harness) millisleep(100); /* lets caller finish */ if (debug_selector != D_v) /* -v only doesn't show this */ { debug_printf("Exim version %s uid=%ld gid=%ld pid=%d D=%x\n", version_string, (long int)real_uid, (long int)real_gid, (int)getpid(), debug_selector); if (!version_printed) show_whats_supported(stderr); } } /* When started with root privilege, ensure that the limits on the number of open files and the number of processes (where that is accessible) are sufficiently large, or are unset, in case Exim has been called from an environment where the limits are screwed down. Not all OS have the ability to change some of these limits. 
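
A distilled standalone version of the descriptor-limit adjustment performed below, assuming only the standard getrlimit()/setrlimit() interface and covering RLIMIT_NOFILE only (the process limit is handled the same way):

#include <sys/resource.h>

// Try to raise the open-file limit to 'want'; if the OS refuses (some older
// systems have a fixed ceiling), fall back to the smaller value instead.
static void ensure_nofile(rlim_t want, rlim_t fallback)
{
struct rlimit rl;
if (getrlimit(RLIMIT_NOFILE, &rl) != 0) return;
if (rl.rlim_cur >= want) return;
rl.rlim_cur = rl.rlim_max = want;
if (setrlimit(RLIMIT_NOFILE, &rl) != 0)
  {
  rl.rlim_cur = rl.rlim_max = fallback;
  (void)setrlimit(RLIMIT_NOFILE, &rl);
  }
}
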
*/ if (unprivileged) { DEBUG(D_any) debug_print_ids(US"Exim has no root privilege:"); } else { struct rlimit rlp; #ifdef RLIMIT_NOFILE if (getrlimit(RLIMIT_NOFILE, &rlp) < 0) { log_write(0, LOG_MAIN|LOG_PANIC, "getrlimit(RLIMIT_NOFILE) failed: %s", strerror(errno)); rlp.rlim_cur = rlp.rlim_max = 0; } /* I originally chose 1000 as a nice big number that was unlikely to be exceeded. It turns out that some older OS have a fixed upper limit of 256. */ if (rlp.rlim_cur < 1000) { rlp.rlim_cur = rlp.rlim_max = 1000; if (setrlimit(RLIMIT_NOFILE, &rlp) < 0) { rlp.rlim_cur = rlp.rlim_max = 256; if (setrlimit(RLIMIT_NOFILE, &rlp) < 0) log_write(0, LOG_MAIN|LOG_PANIC, "setrlimit(RLIMIT_NOFILE) failed: %s", strerror(errno)); } } #endif #ifdef RLIMIT_NPROC if (getrlimit(RLIMIT_NPROC, &rlp) < 0) { log_write(0, LOG_MAIN|LOG_PANIC, "getrlimit(RLIMIT_NPROC) failed: %s", strerror(errno)); rlp.rlim_cur = rlp.rlim_max = 0; } #ifdef RLIM_INFINITY if (rlp.rlim_cur != RLIM_INFINITY && rlp.rlim_cur < 1000) { rlp.rlim_cur = rlp.rlim_max = RLIM_INFINITY; #else if (rlp.rlim_cur < 1000) { rlp.rlim_cur = rlp.rlim_max = 1000; #endif if (setrlimit(RLIMIT_NPROC, &rlp) < 0) log_write(0, LOG_MAIN|LOG_PANIC, "setrlimit(RLIMIT_NPROC) failed: %s", strerror(errno)); } #endif } /* Exim is normally entered as root (but some special configurations are possible that don't do this). However, it always spins off sub-processes that set their uid and gid as required for local delivery. We don't want to pass on any extra groups that root may belong to, so we want to get rid of them all at this point. We need to obey setgroups() at this stage, before possibly giving up root privilege for a changed configuration file, but later on we might need to check on the additional groups for the admin user privilege - can't do that till after reading the config, which might specify the exim gid. Therefore, save the group list here first. */ group_count = getgroups(NGROUPS_MAX, group_list); if (group_count < 0) { fprintf(stderr, "exim: getgroups() failed: %s\n", strerror(errno)); exit(EXIT_FAILURE); } /* There is a fundamental difference in some BSD systems in the matter of groups. FreeBSD and BSDI are known to be different; NetBSD and OpenBSD are known not to be different. On the "different" systems there is a single group list, and the first entry in it is the current group. On all other versions of Unix there is a supplementary group list, which is in *addition* to the current group. Consequently, to get rid of all extraneous groups on a "standard" system you pass over 0 groups to setgroups(), while on a "different" system you pass over a single group - the current group, which is always the first group in the list. Calling setgroups() with zero groups on a "different" system results in an error return. The following code should cope with both types of system. However, if this process isn't running as root, setgroups() can't be used since you have to be root to run it, even if throwing away groups. Not being root here happens only in some unusual configurations. We just ignore the error. */ if (setgroups(0, NULL) != 0) { if (setgroups(1, group_list) != 0 && !unprivileged) { fprintf(stderr, "exim: setgroups() failed: %s\n", strerror(errno)); exit(EXIT_FAILURE); } } /* If the configuration file name has been altered by an argument on the command line (either a new file name or a macro definition) and the caller is not root, or if this is a filter testing run, remove any setuid privilege the program has and run as the underlying user. 
The exim user is locked out of this, which severely restricts the use of -C for some purposes. Otherwise, set the real ids to the effective values (should be root unless run from inetd, in which case it can be either root or the exim uid, if one is configured). There is a private mechanism for bypassing some of this, in order to make it possible to test lots of configurations automatically, without having either to recompile each time, or to patch in an actual configuration file name and other values (such as the path name). If running in the test harness, pretend that configuration file changes and macro definitions haven't happened. */ if (( /* EITHER */ (!trusted_config || /* Config changed, or */ !macros_trusted(opt_D_used)) && /* impermissible macros and */ real_uid != root_uid && /* Not root, and */ !running_in_test_harness /* Not fudged */ ) || /* OR */ expansion_test /* expansion testing */ || /* OR */ filter_test != FTEST_NONE) /* Filter testing */ { setgroups(group_count, group_list); exim_setugid(real_uid, real_gid, FALSE, US"-C, -D, -be or -bf forces real uid"); removed_privilege = TRUE; /* In the normal case when Exim is called like this, stderr is available and should be used for any logging information because attempts to write to the log will usually fail. To arrange this, we unset really_exim. However, if no stderr is available there is no point - we might as well have a go at the log (if it fails, syslog will be written). Note that if the invoker is Exim, the logs remain available. Messing with this causes unlogged successful deliveries. */ if ((log_stderr != NULL) && (real_uid != exim_uid)) really_exim = FALSE; } /* Privilege is to be retained for the moment. It may be dropped later, depending on the job that this Exim process has been asked to do. For now, set the real uid to the effective so that subsequent re-execs of Exim are done by a privileged user. */ else exim_setugid(geteuid(), getegid(), FALSE, US"forcing real = effective"); /* If testing a filter, open the file(s) now, before wasting time doing other setups and reading the message. */ if ((filter_test & FTEST_SYSTEM) != 0) { filter_sfd = Uopen(filter_test_sfile, O_RDONLY, 0); if (filter_sfd < 0) { fprintf(stderr, "exim: failed to open %s: %s\n", filter_test_sfile, strerror(errno)); return EXIT_FAILURE; } } if ((filter_test & FTEST_USER) != 0) { filter_ufd = Uopen(filter_test_ufile, O_RDONLY, 0); if (filter_ufd < 0) { fprintf(stderr, "exim: failed to open %s: %s\n", filter_test_ufile, strerror(errno)); return EXIT_FAILURE; } } /* Initialise lookup_list If debugging, already called above via version reporting. In either case, we initialise the list of available lookups while running as root. All dynamically loaded modules are loaded from a directory which is hard-coded into the binary and is code which, if not a module, would be part of Exim already. Ability to modify the content of the directory is equivalent to the ability to modify a setuid binary! This needs to happen before we read the main configuration. */ init_lookup_list(); #ifdef SUPPORT_I18N if (running_in_test_harness) smtputf8_advertise_hosts = NULL; #endif /* Read the main runtime configuration data; this gives up if there is a failure. It leaves the configuration file open so that the subsequent configuration data for delivery can be read if needed. NOTE: immediately after opening the configuration file we change the working directory to "/"! Later we change to $spool_directory. We do it there, because during readconf_main() some expansion takes place already.
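
exim_setugid() above is Exim's own helper (it can also run initgroups()); shown here only as a sketch with plain POSIX calls, the general shape of an irreversible privilege drop is: clear the supplementary groups, set the gid while still root, set the uid, then confirm that root cannot be regained. Passing a single group to setgroups() is the simple portable choice, given the BSD difference discussed earlier.

#include <sys/types.h>
#include <unistd.h>
#include <grp.h>

static int drop_privilege(uid_t uid, gid_t gid)
{
if (setgroups(1, &gid) != 0) return -1;   // clear supplementary groups (needs root)
if (setgid(gid) != 0) return -1;          // group first, while we can still do it
if (setuid(uid) != 0) return -1;          // then user; after this there is no way back
if (setuid(0) == 0) return -1;            // paranoia: regaining root must now fail
return 0;
}
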
*/ /* Store the initial cwd before we change directories */ if ((initial_cwd = os_getcwd(NULL, 0)) == NULL) { perror("exim: can't get the current working directory"); exit(EXIT_FAILURE); } /* checking: -be[m] expansion test - -b[fF] filter test new -bh[c] host test - -bmalware malware_test_file new -brt retry test new -brw rewrite test new -bt address test - -bv[s] address verify - list_options: -bP <option> (except -bP config, which sets list_config) If any of these options is set, we suppress warnings about configuration issues (currently about tls_advertise_hosts and keep_environment not being defined) */ readconf_main(checking || list_options); if (builtin_macros_create_trigger) DEBUG(D_any) debug_printf("Builtin macros created (expensive) due to config line '%.*s'\n", Ustrlen(builtin_macros_create_trigger)-1, builtin_macros_create_trigger); /* Now in directory "/" */ if (cleanup_environment() == FALSE) log_write(0, LOG_PANIC_DIE, "Can't cleanup environment"); /* If an action on specific messages is requested, or if a daemon or queue runner is being started, we need to know if Exim was called by an admin user. This is the case if the real user is root or exim, or if the real group is exim, or if one of the supplementary groups is exim or a group listed in admin_groups. We don't fail all message actions immediately if not admin_user, since some actions can be performed by non-admin users. Instead, set admin_user for later interrogation. */ if (real_uid == root_uid || real_uid == exim_uid || real_gid == exim_gid) admin_user = TRUE; else { int i, j; for (i = 0; i < group_count && !admin_user; i++) if (group_list[i] == exim_gid) admin_user = TRUE; else if (admin_groups) for (j = 1; j <= (int)admin_groups[0] && !admin_user; j++) if (admin_groups[j] == group_list[i]) admin_user = TRUE; } /* Another group of privileged users are the trusted users. These are root, exim, and any caller matching trusted_users or trusted_groups. Trusted callers are permitted to specify sender_addresses with -f on the command line, and other message parameters as well. */ if (real_uid == root_uid || real_uid == exim_uid) trusted_caller = TRUE; else { int i, j; if (trusted_users) for (i = 1; i <= (int)trusted_users[0] && !trusted_caller; i++) if (trusted_users[i] == real_uid) trusted_caller = TRUE; if (trusted_groups) for (i = 1; i <= (int)trusted_groups[0] && !trusted_caller; i++) if (trusted_groups[i] == real_gid) trusted_caller = TRUE; else for (j = 0; j < group_count && !trusted_caller; j++) if (trusted_groups[i] == group_list[j]) trusted_caller = TRUE; } /* At this point, we know if the user is privileged and some command-line options become possibly imperssible, depending upon the configuration file. */ if (checking && commandline_checks_require_admin && !admin_user) { fprintf(stderr, "exim: those command-line flags are set to require admin\n"); exit(EXIT_FAILURE); } /* Handle the decoding of logging options. */ decode_bits(log_selector, log_selector_size, log_notall, log_selector_string, log_options, log_options_count, US"log", 0); DEBUG(D_any) { int i; debug_printf("configuration file is %s\n", config_main_filename); debug_printf("log selectors ="); for (i = 0; i < log_selector_size; i++) debug_printf(" %08x", log_selector[i]); debug_printf("\n"); } /* If domain literals are not allowed, check the sender address that was supplied with -f. Ditto for a stripped trailing dot. 
*/ if (sender_address != NULL) { if (sender_address[sender_address_domain] == '[' && !allow_domain_literals) { fprintf(stderr, "exim: bad -f address \"%s\": domain literals not " "allowed\n", sender_address); return EXIT_FAILURE; } if (f_end_dot && !strip_trailing_dot) { fprintf(stderr, "exim: bad -f address \"%s.\": domain is malformed " "(trailing dot not allowed)\n", sender_address); return EXIT_FAILURE; } } /* See if an admin user overrode our logging. */ if (cmdline_syslog_name != NULL) { if (admin_user) { syslog_processname = cmdline_syslog_name; log_file_path = string_copy(CUS"syslog"); } else { /* not a panic, non-privileged users should not be able to spam paniclog */ fprintf(stderr, "exim: you lack sufficient privilege to specify syslog process name\n"); return EXIT_FAILURE; } } /* Paranoia check of maximum lengths of certain strings. There is a check on the length of the log file path in log.c, which will come into effect if there are any calls to write the log earlier than this. However, if we get this far but the string is very long, it is better to stop now than to carry on and (e.g.) receive a message and then have to collapse. The call to log_write() from here will cause the ultimate panic collapse if the complete file name exceeds the buffer length. */ if (Ustrlen(log_file_path) > 200) log_write(0, LOG_MAIN|LOG_PANIC_DIE, "log_file_path is longer than 200 chars: aborting"); if (Ustrlen(pid_file_path) > 200) log_write(0, LOG_MAIN|LOG_PANIC_DIE, "pid_file_path is longer than 200 chars: aborting"); if (Ustrlen(spool_directory) > 200) log_write(0, LOG_MAIN|LOG_PANIC_DIE, "spool_directory is longer than 200 chars: aborting"); /* Length check on the process name given to syslog for its TAG field, which is only permitted to be 32 characters or less. See RFC 3164. */ if (Ustrlen(syslog_processname) > 32) log_write(0, LOG_MAIN|LOG_PANIC_DIE, "syslog_processname is longer than 32 chars: aborting"); if (log_oneline) if (admin_user) { log_write(0, LOG_MAIN, "%s", log_oneline); return EXIT_SUCCESS; } else return EXIT_FAILURE; /* In some operating systems, the environment variable TMPDIR controls where temporary files are created; Exim doesn't use these (apart from when delivering to MBX mailboxes), but called libraries such as DBM libraries may require them. If TMPDIR is found in the environment, reset it to the value defined in the EXIM_TMPDIR macro, if this macro is defined. For backward compatibility this macro may be called TMPDIR in old "Local/Makefile"s. It's converted to EXIM_TMPDIR by the build scripts. */ #ifdef EXIM_TMPDIR { uschar **p; if (environ) for (p = USS environ; *p; p++) if (Ustrncmp(*p, "TMPDIR=", 7) == 0 && Ustrcmp(*p+7, EXIM_TMPDIR) != 0) { uschar * newp = store_malloc(Ustrlen(EXIM_TMPDIR) + 8); sprintf(CS newp, "TMPDIR=%s", EXIM_TMPDIR); *p = newp; DEBUG(D_any) debug_printf("reset TMPDIR=%s in environment\n", EXIM_TMPDIR); } } #endif /* Timezone handling. If timezone_string is "utc", set a flag to cause all timestamps to be in UTC (gmtime() is used instead of localtime()). Otherwise, we may need to get rid of a bogus timezone setting. This can arise when Exim is called by a user who has set the TZ variable. This then affects the timestamps in log files and in Received: headers, and any created Date: header lines. The required timezone is settable in the configuration file, so nothing can be done about this earlier - but hopefully nothing will normally be logged earlier than this. 
We have to make a new environment if TZ is wrong, but don't bother if timestamps_utc is set, because then all times are in UTC anyway. */ if (timezone_string && strcmpic(timezone_string, US"UTC") == 0) timestamps_utc = TRUE; else { uschar *envtz = US getenv("TZ"); if (envtz ? !timezone_string || Ustrcmp(timezone_string, envtz) != 0 : timezone_string != NULL ) { uschar **p = USS environ; uschar **new; uschar **newp; int count = 0; if (environ) while (*p++) count++; if (!envtz) count++; newp = new = store_malloc(sizeof(uschar *) * (count + 1)); if (environ) for (p = USS environ; *p; p++) if (Ustrncmp(*p, "TZ=", 3) != 0) *newp++ = *p; if (timezone_string) { *newp = store_malloc(Ustrlen(timezone_string) + 4); sprintf(CS *newp++, "TZ=%s", timezone_string); } *newp = NULL; environ = CSS new; tzset(); DEBUG(D_any) debug_printf("Reset TZ to %s: time is %s\n", timezone_string, tod_stamp(tod_log)); } } /* Handle the case when we have removed the setuid privilege because of -C or -D. This means that the caller of Exim was not root. There is a problem if we were running as the Exim user. The sysadmin may expect this case to retain privilege because "the binary was called by the Exim user", but it hasn't, because either the -D option set macros, or the -C option set a non-trusted configuration file. There are two possibilities: (1) If deliver_drop_privilege is set, Exim is not going to re-exec in order to do message deliveries. Thus, the fact that it is running as a non-privileged user is plausible, and might be wanted in some special configurations. However, really_exim will have been set false when privilege was dropped, to stop Exim trying to write to its normal log files. Therefore, re-enable normal log processing, assuming the sysadmin has set up the log directory correctly. (2) If deliver_drop_privilege is not set, the configuration won't work as apparently intended, and so we log a panic message. In order to retain root for -C or -D, the caller must either be root or be invoking a trusted configuration file (when deliver_drop_privilege is false). */ if ( removed_privilege && (!trusted_config || opt_D_used) && real_uid == exim_uid) if (deliver_drop_privilege) really_exim = TRUE; /* let logging work normally */ else log_write(0, LOG_MAIN|LOG_PANIC, "exim user lost privilege for using %s option", trusted_config? "-D" : "-C"); /* Start up Perl interpreter if Perl support is configured and there is a perl_startup option, and the configuration or the command line specifies initializing starting. Note that the global variables are actually called opt_perl_xxx to avoid clashing with perl's namespace (perl_*). */ #ifdef EXIM_PERL if (perl_start_option != 0) opt_perl_at_start = (perl_start_option > 0); if (opt_perl_at_start && opt_perl_startup != NULL) { uschar *errstr; DEBUG(D_any) debug_printf("Starting Perl interpreter\n"); errstr = init_perl(opt_perl_startup); if (errstr != NULL) { fprintf(stderr, "exim: error in perl_startup code: %s\n", errstr); return EXIT_FAILURE; } opt_perl_started = TRUE; } #endif /* EXIM_PERL */ /* Log the arguments of the call if the configuration file said so. This is a debugging feature for finding out what arguments certain MUAs actually use. Don't attempt it if logging is disabled, or if listing variables or if verifying/testing addresses or expansions. 
*/ if (((debug_selector & D_any) != 0 || LOGGING(arguments)) && really_exim && !list_options && !checking) { int i; uschar *p = big_buffer; Ustrcpy(p, "cwd= (failed)"); Ustrncpy(p + 4, initial_cwd, big_buffer_size-5); while (*p) p++; (void)string_format(p, big_buffer_size - (p - big_buffer), " %d args:", argc); while (*p) p++; for (i = 0; i < argc; i++) { int len = Ustrlen(argv[i]); const uschar *printing; uschar *quote; if (p + len + 8 >= big_buffer + big_buffer_size) { Ustrcpy(p, " ..."); log_write(0, LOG_MAIN, "%s", big_buffer); Ustrcpy(big_buffer, "..."); p = big_buffer + 3; } printing = string_printing(argv[i]); if (printing[0] == 0) quote = US"\""; else { const uschar *pp = printing; quote = US""; while (*pp != 0) if (isspace(*pp++)) { quote = US"\""; break; } } sprintf(CS p, " %s%.*s%s", quote, (int)(big_buffer_size - (p - big_buffer) - 4), printing, quote); while (*p) p++; } if (LOGGING(arguments)) log_write(0, LOG_MAIN, "%s", big_buffer); else debug_printf("%s\n", big_buffer); } /* Set the working directory to be the top-level spool directory. We don't rely on this in the code, which always uses fully qualified names, but it's useful for core dumps etc. Don't complain if it fails - the spool directory might not be generally accessible and calls with the -C option (and others) have lost privilege by now. Before the chdir, we try to ensure that the directory exists. */ if (Uchdir(spool_directory) != 0) { int dummy; (void)directory_make(spool_directory, US"", SPOOL_DIRECTORY_MODE, FALSE); dummy = /* quieten compiler */ Uchdir(spool_directory); } /* Handle calls with the -bi option. This is a sendmail option to rebuild *the* alias file. Exim doesn't have such a concept, but this call is screwed into Sun's YP makefiles. Handle this by calling a configured script, as the real user who called Exim. The -oA option can be used to pass an argument to the script. */ if (bi_option) { (void)fclose(config_file); if (bi_command != NULL) { int i = 0; uschar *argv[3]; argv[i++] = bi_command; if (alias_arg != NULL) argv[i++] = alias_arg; argv[i++] = NULL; setgroups(group_count, group_list); exim_setugid(real_uid, real_gid, FALSE, US"running bi_command"); DEBUG(D_exec) debug_printf("exec %.256s %.256s\n", argv[0], (argv[1] == NULL)? US"" : argv[1]); execv(CS argv[0], (char *const *)argv); fprintf(stderr, "exim: exec failed: %s\n", strerror(errno)); exit(EXIT_FAILURE); } else { DEBUG(D_any) debug_printf("-bi used but bi_command not set; exiting\n"); exit(EXIT_SUCCESS); } } /* We moved the admin/trusted check to be immediately after reading the configuration file. We leave these prints here to ensure that syslog setup, logfile setup, and so on has already happened. */ if (trusted_caller) DEBUG(D_any) debug_printf("trusted user\n"); if (admin_user) DEBUG(D_any) debug_printf("admin user\n"); /* Only an admin user may start the daemon or force a queue run in the default configuration, but the queue run restriction can be relaxed. Only an admin user may request that a message be returned to its sender forthwith. Only an admin user may specify a debug level greater than D_v (because it might show passwords, etc. in lookup queries). Only an admin user may request a queue count. Only an admin user can use the test interface to scan for email (because Exim will be in the spool dir and able to look at mails). 
*/ if (!admin_user) { BOOL debugset = (debug_selector & ~D_v) != 0; if (deliver_give_up || daemon_listen || malware_test_file || (count_queue && queue_list_requires_admin) || (list_queue && queue_list_requires_admin) || (queue_interval >= 0 && prod_requires_admin) || (debugset && !running_in_test_harness)) { fprintf(stderr, "exim:%s permission denied\n", debugset? " debugging" : ""); exit(EXIT_FAILURE); } } /* If the real user is not root or the exim uid, the argument for passing in an open TCP/IP connection for another message is not permitted, nor is running with the -N option for any delivery action, unless this call to exim is one that supplied an input message, or we are using a patched exim for regression testing. */ if (real_uid != root_uid && real_uid != exim_uid && (continue_hostname != NULL || (dont_deliver && (queue_interval >= 0 || daemon_listen || msg_action_arg > 0) )) && !running_in_test_harness) { fprintf(stderr, "exim: Permission denied\n"); return EXIT_FAILURE; } /* If the caller is not trusted, certain arguments are ignored when running for real, but are permitted when checking things (-be, -bv, -bt, -bh, -bf, -bF). Note that authority for performing certain actions on messages is tested in the queue_action() function. */ if (!trusted_caller && !checking) { sender_host_name = sender_host_address = interface_address = sender_ident = received_protocol = NULL; sender_host_port = interface_port = 0; sender_host_authenticated = authenticated_sender = authenticated_id = NULL; } /* If a sender host address is set, extract the optional port number off the end of it and check its syntax. Do the same thing for the interface address. Exim exits if the syntax is bad. */ else { if (sender_host_address != NULL) sender_host_port = check_port(sender_host_address); if (interface_address != NULL) interface_port = check_port(interface_address); } /* If the caller is trusted, then they can use -G to suppress_local_fixups. */ if (flag_G) { if (trusted_caller) { suppress_local_fixups = suppress_local_fixups_default = TRUE; DEBUG(D_acl) debug_printf("suppress_local_fixups forced on by -G\n"); } else { fprintf(stderr, "exim: permission denied (-G requires a trusted user)\n"); return EXIT_FAILURE; } } /* If an SMTP message is being received check to see if the standard input is a TCP/IP socket. If it is, we assume that Exim was called from inetd if the caller is root or the Exim user, or if the port is a privileged one. Otherwise, barf. 
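
The check that follows uses getpeername() on fd 0: if it succeeds and reports an Internet-family address, stdin is a TCP socket and the call is assumed to have come from inetd. A minimal standalone sketch of that test, using plain POSIX calls rather than the Exim helpers:

#include <sys/socket.h>
#include <netdb.h>

// Returns 1 and fills 'peer' with the numeric remote address if 'fd' is a
// connected AF_INET or AF_INET6 socket; returns 0 otherwise.
static int fd_is_inet_socket(int fd, char *peer, size_t peerlen)
{
struct sockaddr_storage ss;
socklen_t len = sizeof(ss);
if (getpeername(fd, (struct sockaddr *)&ss, &len) != 0) return 0;
if (ss.ss_family != AF_INET && ss.ss_family != AF_INET6) return 0;
return getnameinfo((struct sockaddr *)&ss, len, peer, peerlen,
                   NULL, 0, NI_NUMERICHOST) == 0;
}

The real code goes on to call getsockname() for the local interface and port, and only trusts the result for root, the exim user, or a privileged port.
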
*/ if (smtp_input) { union sockaddr_46 inetd_sock; EXIM_SOCKLEN_T size = sizeof(inetd_sock); if (getpeername(0, (struct sockaddr *)(&inetd_sock), &size) == 0) { int family = ((struct sockaddr *)(&inetd_sock))->sa_family; if (family == AF_INET || family == AF_INET6) { union sockaddr_46 interface_sock; size = sizeof(interface_sock); if (getsockname(0, (struct sockaddr *)(&interface_sock), &size) == 0) interface_address = host_ntoa(-1, &interface_sock, NULL, &interface_port); if (host_is_tls_on_connect_port(interface_port)) tls_in.on_connect = TRUE; if (real_uid == root_uid || real_uid == exim_uid || interface_port < 1024) { is_inetd = TRUE; sender_host_address = host_ntoa(-1, (struct sockaddr *)(&inetd_sock), NULL, &sender_host_port); if (mua_wrapper) log_write(0, LOG_MAIN|LOG_PANIC_DIE, "Input from " "inetd is not supported when mua_wrapper is set"); } else { fprintf(stderr, "exim: Permission denied (unprivileged user, unprivileged port)\n"); return EXIT_FAILURE; } } } } /* If the load average is going to be needed while receiving a message, get it now for those OS that require the first call to os_getloadavg() to be done as root. There will be further calls later for each message received. */ #ifdef LOAD_AVG_NEEDS_ROOT if (receiving_message && (queue_only_load >= 0 || (is_inetd && smtp_load_reserve >= 0) )) { load_average = OS_GETLOADAVG(); } #endif /* The queue_only configuration option can be overridden by -odx on the command line, except that if queue_only_override is false, queue_only cannot be unset from the command line. */ if (queue_only_set && (queue_only_override || arg_queue_only)) queue_only = arg_queue_only; /* The receive_timeout and smtp_receive_timeout options can be overridden by -or and -os. */ if (arg_receive_timeout >= 0) receive_timeout = arg_receive_timeout; if (arg_smtp_receive_timeout >= 0) smtp_receive_timeout = arg_smtp_receive_timeout; /* If Exim was started with root privilege, unless we have already removed the root privilege above as a result of -C, -D, -be, -bf or -bF, remove it now except when starting the daemon or doing some kind of delivery or address testing (-bt). These are the only cases when root need to be retained. We run as exim for -bv and -bh. However, if deliver_drop_privilege is set, root is retained only for starting the daemon. We always do the initgroups() in this situation (controlled by the TRUE below), in order to be as close as possible to the state Exim usually runs in. */ if (!unprivileged && /* originally had root AND */ !removed_privilege && /* still got root AND */ !daemon_listen && /* not starting the daemon */ queue_interval <= 0 && /* (either kind of daemon) */ ( /* AND EITHER */ deliver_drop_privilege || /* requested unprivileged */ ( /* OR */ queue_interval < 0 && /* not running the queue */ (msg_action_arg < 0 || /* and */ msg_action != MSG_DELIVER) && /* not delivering and */ (!checking || !address_test_mode) /* not address checking */ ) ) ) exim_setugid(exim_uid, exim_gid, TRUE, US"privilege not needed"); /* When we are retaining a privileged uid, we still change to the exim gid. */ else { int rv; rv = setgid(exim_gid); /* Impact of failure is that some stuff might end up with an incorrect group. We track this for failures from root, since any attempt to change privilege by root should succeed and failures should be examined. For non-root, there's no security risk. For me, it's { exim -bV } on a just-built binary, no need to complain then. 
*/ if (rv == -1) if (!(unprivileged || removed_privilege)) { fprintf(stderr, "exim: changing group failed: %s\n", strerror(errno)); exit(EXIT_FAILURE); } else DEBUG(D_any) debug_printf("changing group to %ld failed: %s\n", (long int)exim_gid, strerror(errno)); } /* Handle a request to scan a file for malware */ if (malware_test_file) { #ifdef WITH_CONTENT_SCAN int result; set_process_info("scanning file for malware"); result = malware_in_file(malware_test_file); if (result == FAIL) { printf("No malware found.\n"); exit(EXIT_SUCCESS); } if (result != OK) { printf("Malware lookup returned non-okay/fail: %d\n", result); exit(EXIT_FAILURE); } if (malware_name) printf("Malware found: %s\n", malware_name); else printf("Malware scan detected malware of unknown name.\n"); #else printf("Malware scanning not enabled at compile time.\n"); #endif exit(EXIT_FAILURE); } /* Handle a request to list the delivery queue */ if (list_queue) { set_process_info("listing the queue"); queue_list(list_queue_option, argv + recipients_arg, argc - recipients_arg); exit(EXIT_SUCCESS); } /* Handle a request to count the delivery queue */ if (count_queue) { set_process_info("counting the queue"); queue_count(); exit(EXIT_SUCCESS); } /* Handle actions on specific messages, except for the force delivery and message load actions, which are done below. Some actions take a whole list of message ids, which are known to continue up to the end of the arguments. Others take a single message id and then operate on the recipients list. */ if (msg_action_arg > 0 && msg_action != MSG_DELIVER && msg_action != MSG_LOAD) { int yield = EXIT_SUCCESS; set_process_info("acting on specified messages"); if (!one_msg_action) { for (i = msg_action_arg; i < argc; i++) if (!queue_action(argv[i], msg_action, NULL, 0, 0)) yield = EXIT_FAILURE; } else if (!queue_action(argv[msg_action_arg], msg_action, argv, argc, recipients_arg)) yield = EXIT_FAILURE; exit(yield); } /* We used to set up here to skip reading the ACL section, on (msg_action_arg > 0 || (queue_interval == 0 && !daemon_listen) Now, since the intro of the ${acl } expansion, ACL definitions may be needed in transports so we lost the optimisation. */ readconf_rest(); /* The configuration data will have been read into POOL_PERM because we won't ever want to reset back past it. Change the current pool to POOL_MAIN. In fact, this is just a bit of pedantic tidiness. It wouldn't really matter if the configuration were read into POOL_MAIN, because we don't do any resets till later on. However, it seems right, and it does ensure that both pools get used. */ store_pool = POOL_MAIN; /* Handle the -brt option. This is for checking out retry configurations. The next three arguments are a domain name or a complete address, and optionally two error numbers. All it does is to call the function that scans the retry configuration data. */ if (test_retry_arg >= 0) { retry_config *yield; int basic_errno = 0; int more_errno = 0; uschar *s1, *s2; if (test_retry_arg >= argc) { printf("-brt needs a domain or address argument\n"); exim_exit(EXIT_FAILURE); } s1 = argv[test_retry_arg++]; s2 = NULL; /* If the first argument contains no @ and no . it might be a local user or it might be a single-component name. Treat as a domain. */ if (Ustrchr(s1, '@') == NULL && Ustrchr(s1, '.') == NULL) { printf("Warning: \"%s\" contains no '@' and no '.' characters. It is " "being \ntreated as a one-component domain, not as a local part.\n\n", s1); } /* There may be an optional second domain arg. 
*/ if (test_retry_arg < argc && Ustrchr(argv[test_retry_arg], '.') != NULL) s2 = argv[test_retry_arg++]; /* The final arg is an error name */ if (test_retry_arg < argc) { uschar *ss = argv[test_retry_arg]; uschar *error = readconf_retry_error(ss, ss + Ustrlen(ss), &basic_errno, &more_errno); if (error != NULL) { printf("%s\n", CS error); return EXIT_FAILURE; } /* For the {MAIL,RCPT,DATA}_4xx errors, a value of 255 means "any", and a code > 100 as an error is for matching codes to the decade. Turn them into a real error code, off the decade. */ if (basic_errno == ERRNO_MAIL4XX || basic_errno == ERRNO_RCPT4XX || basic_errno == ERRNO_DATA4XX) { int code = (more_errno >> 8) & 255; if (code == 255) more_errno = (more_errno & 0xffff00ff) | (21 << 8); else if (code > 100) more_errno = (more_errno & 0xffff00ff) | ((code - 96) << 8); } } yield = retry_find_config(s1, s2, basic_errno, more_errno); if (yield == NULL) printf("No retry information found\n"); else { retry_rule *r; more_errno = yield->more_errno; printf("Retry rule: %s ", yield->pattern); if (yield->basic_errno == ERRNO_EXIMQUOTA) { printf("quota%s%s ", (more_errno > 0)? "_" : "", (more_errno > 0)? readconf_printtime(more_errno) : US""); } else if (yield->basic_errno == ECONNREFUSED) { printf("refused%s%s ", (more_errno > 0)? "_" : "", (more_errno == 'M')? "MX" : (more_errno == 'A')? "A" : ""); } else if (yield->basic_errno == ETIMEDOUT) { printf("timeout"); if ((more_errno & RTEF_CTOUT) != 0) printf("_connect"); more_errno &= 255; if (more_errno != 0) printf("_%s", (more_errno == 'M')? "MX" : "A"); printf(" "); } else if (yield->basic_errno == ERRNO_AUTHFAIL) printf("auth_failed "); else printf("* "); for (r = yield->rules; r != NULL; r = r->next) { printf("%c,%s", r->rule, readconf_printtime(r->timeout)); /* Do not */ printf(",%s", readconf_printtime(r->p1)); /* amalgamate */ if (r->rule == 'G') { int x = r->p2; int f = x % 1000; int d = 100; printf(",%d.", x/1000); do { printf("%d", f/d); f %= d; d /= 10; } while (f != 0); } printf("; "); } printf("\n"); } exim_exit(EXIT_SUCCESS); } /* Handle a request to list one or more configuration options */ /* If -n was set, we suppress some information */ if (list_options) { set_process_info("listing variables"); if (recipients_arg >= argc) readconf_print(US"all", NULL, flag_n); else for (i = recipients_arg; i < argc; i++) { if (i < argc - 1 && (Ustrcmp(argv[i], "router") == 0 || Ustrcmp(argv[i], "transport") == 0 || Ustrcmp(argv[i], "authenticator") == 0 || Ustrcmp(argv[i], "macro") == 0 || Ustrcmp(argv[i], "environment") == 0)) { readconf_print(argv[i+1], argv[i], flag_n); i++; } else readconf_print(argv[i], NULL, flag_n); } exim_exit(EXIT_SUCCESS); } if (list_config) { set_process_info("listing config"); readconf_print(US"config", NULL, flag_n); exim_exit(EXIT_SUCCESS); } /* Initialise subsystems as required */ #ifndef DISABLE_DKIM dkim_exim_init(); #endif deliver_init(); /* Handle a request to deliver one or more messages that are already on the queue. Values of msg_action other than MSG_DELIVER and MSG_LOAD are dealt with above. MSG_LOAD is handled with -be (which is the only time it applies) below. Delivery of specific messages is typically used for a small number when prodding by hand (when the option forced_delivery will be set) or when re-execing to regain root privilege. 
Each message delivery must happen in a separate process, so we fork a process for each one, and run them sequentially so that debugging output doesn't get intertwined, and to avoid spawning too many processes if a long list is given. However, don't fork for the last one; this saves a process in the common case when Exim is called to deliver just one message. */ if (msg_action_arg > 0 && msg_action != MSG_LOAD) { if (prod_requires_admin && !admin_user) { fprintf(stderr, "exim: Permission denied\n"); exim_exit(EXIT_FAILURE); } set_process_info("delivering specified messages"); if (deliver_give_up) forced_delivery = deliver_force_thaw = TRUE; for (i = msg_action_arg; i < argc; i++) { int status; pid_t pid; if (i == argc - 1) (void)deliver_message(argv[i], forced_delivery, deliver_give_up); else if ((pid = fork()) == 0) { (void)deliver_message(argv[i], forced_delivery, deliver_give_up); _exit(EXIT_SUCCESS); } else if (pid < 0) { fprintf(stderr, "failed to fork delivery process for %s: %s\n", argv[i], strerror(errno)); exim_exit(EXIT_FAILURE); } else wait(&status); } exim_exit(EXIT_SUCCESS); } /* If only a single queue run is requested, without SMTP listening, we can just turn into a queue runner, with an optional starting message id. */ if (queue_interval == 0 && !daemon_listen) { DEBUG(D_queue_run) debug_printf("Single queue run%s%s%s%s\n", (start_queue_run_id == NULL)? US"" : US" starting at ", (start_queue_run_id == NULL)? US"" : start_queue_run_id, (stop_queue_run_id == NULL)? US"" : US" stopping at ", (stop_queue_run_id == NULL)? US"" : stop_queue_run_id); if (*queue_name) set_process_info("running the '%s' queue (single queue run)", queue_name); else set_process_info("running the queue (single queue run)"); queue_run(start_queue_run_id, stop_queue_run_id, FALSE); exim_exit(EXIT_SUCCESS); } /* Find the login name of the real user running this process. This is always needed when receiving a message, because it is written into the spool file. It may also be used to construct a from: or a sender: header, and in this case we need the user's full name as well, so save a copy of it, checked for RFC822 syntax and munged if necessary, if it hasn't previously been set by the -F argument. We may try to get the passwd entry more than once, in case NIS or other delays are in evidence. Save the home directory for use in filter testing (only). */ for (i = 0;;) { if ((pw = getpwuid(real_uid)) != NULL) { originator_login = string_copy(US pw->pw_name); originator_home = string_copy(US pw->pw_dir); /* If user name has not been set by -F, set it from the passwd entry unless -f has been used to set the sender address by a trusted user. */ if (originator_name == NULL) { if (sender_address == NULL || (!trusted_caller && filter_test == FTEST_NONE)) { uschar *name = US pw->pw_gecos; uschar *amp = Ustrchr(name, '&'); uschar buffer[256]; /* Most Unix specify that a '&' character in the gecos field is replaced by a copy of the login name, and some even specify that the first character should be upper cased, so that's what we do. */ if (amp != NULL) { int loffset; string_format(buffer, sizeof(buffer), "%.*s%n%s%s", amp - name, name, &loffset, originator_login, amp + 1); buffer[loffset] = toupper(buffer[loffset]); name = buffer; } /* If a pattern for matching the gecos field was supplied, apply it and then expand the name string. 
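
A tiny standalone illustration of the gecos "&" convention described above (not Exim's code, which also applies gecos_pattern and gecos_name): "&" is replaced by the login name with its first letter upper-cased, so a gecos of "& Jones" with login "fred" yields "Fred Jones".

#include <ctype.h>
#include <stdio.h>
#include <string.h>

static void expand_gecos_amp(const char *gecos, const char *login,
  char *out, size_t outlen)
{
const char *amp = strchr(gecos, '&');
if (!amp || strlen(gecos) + strlen(login) >= outlen)
  { snprintf(out, outlen, "%s", gecos); return; }       // no '&', or no room
snprintf(out, outlen, "%.*s%s%s", (int)(amp - gecos), gecos, login, amp + 1);
out[amp - gecos] = (char)toupper((unsigned char)out[amp - gecos]);
}
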
*/ if (gecos_pattern != NULL && gecos_name != NULL) { const pcre *re; re = regex_must_compile(gecos_pattern, FALSE, TRUE); /* Use malloc */ if (regex_match_and_setup(re, name, 0, -1)) { uschar *new_name = expand_string(gecos_name); expand_nmax = -1; if (new_name != NULL) { DEBUG(D_receive) debug_printf("user name \"%s\" extracted from " "gecos field \"%s\"\n", new_name, name); name = new_name; } else DEBUG(D_receive) debug_printf("failed to expand gecos_name string " "\"%s\": %s\n", gecos_name, expand_string_message); } else DEBUG(D_receive) debug_printf("gecos_pattern \"%s\" did not match " "gecos field \"%s\"\n", gecos_pattern, name); store_free((void *)re); } originator_name = string_copy(name); } /* A trusted caller has used -f but not -F */ else originator_name = US""; } /* Break the retry loop */ break; } if (++i > finduser_retries) break; sleep(1); } /* If we cannot get a user login, log the incident and give up, unless the configuration specifies something to use. When running in the test harness, any setting of unknown_login overrides the actual name. */ if (originator_login == NULL || running_in_test_harness) { if (unknown_login != NULL) { originator_login = expand_string(unknown_login); if (originator_name == NULL && unknown_username != NULL) originator_name = expand_string(unknown_username); if (originator_name == NULL) originator_name = US""; } if (originator_login == NULL) log_write(0, LOG_MAIN|LOG_PANIC_DIE, "Failed to get user name for uid %d", (int)real_uid); } /* Ensure that the user name is in a suitable form for use as a "phrase" in an RFC822 address.*/ originator_name = string_copy(parse_fix_phrase(originator_name, Ustrlen(originator_name), big_buffer, big_buffer_size)); /* If a message is created by this call of Exim, the uid/gid of its originator are those of the caller. These values are overridden if an existing message is read in from the spool. */ originator_uid = real_uid; originator_gid = real_gid; DEBUG(D_receive) debug_printf("originator: uid=%d gid=%d login=%s name=%s\n", (int)originator_uid, (int)originator_gid, originator_login, originator_name); /* Run in daemon and/or queue-running mode. The function daemon_go() never returns. We leave this till here so that the originator_ fields are available for incoming messages via the daemon. The daemon cannot be run in mua_wrapper mode. */ if (daemon_listen || inetd_wait_mode || queue_interval > 0) { if (mua_wrapper) { fprintf(stderr, "Daemon cannot be run when mua_wrapper is set\n"); log_write(0, LOG_MAIN|LOG_PANIC_DIE, "Daemon cannot be run when " "mua_wrapper is set"); } daemon_go(); } /* If the sender ident has not been set (by a trusted caller) set it to the caller. This will get overwritten below for an inetd call. If a trusted caller has set it empty, unset it. */ if (sender_ident == NULL) sender_ident = originator_login; else if (sender_ident[0] == 0) sender_ident = NULL; /* Handle the -brw option, which is for checking out rewriting rules. Cause log writes (on errors) to go to stderr instead. Can't do this earlier, as want the originator_* variables set. */ if (test_rewrite_arg >= 0) { really_exim = FALSE; if (test_rewrite_arg >= argc) { printf("-brw needs an address argument\n"); exim_exit(EXIT_FAILURE); } rewrite_test(argv[test_rewrite_arg]); exim_exit(EXIT_SUCCESS); } /* A locally-supplied message is considered to be coming from a local user unless a trusted caller supplies a sender address with -f, or is passing in the message via SMTP (inetd invocation or otherwise). 
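
As an aside on the per-message delivery loop earlier in this function: it forks one child per message id, reaps each child before starting the next so that debugging output stays in order, and skips the fork for the final id. Stripped of the Exim specifics (deliver_one() below is a hypothetical stand-in for deliver_message()), the pattern is:

#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>
#include <unistd.h>

static void deliver_one(const char *id) { printf("delivering %s\n", id); }

static void deliver_all(char **ids, int n)
{
int i;
for (i = 0; i < n; i++)
  {
  pid_t pid;
  if (i == n - 1) { deliver_one(ids[i]); break; }            // last one: no fork needed
  if ((pid = fork()) == 0) { deliver_one(ids[i]); _exit(EXIT_SUCCESS); }
  if (pid < 0) { perror("fork"); exit(EXIT_FAILURE); }
  (void)waitpid(pid, NULL, 0);                               // one child at a time, in order
  }
}
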
*/ if ((sender_address == NULL && !smtp_input) || (!trusted_caller && filter_test == FTEST_NONE)) { sender_local = TRUE; /* A trusted caller can supply authenticated_sender and authenticated_id via -oMas and -oMai and if so, they will already be set. Otherwise, force defaults except when host checking. */ if (authenticated_sender == NULL && !host_checking) authenticated_sender = string_sprintf("%s@%s", originator_login, qualify_domain_sender); if (authenticated_id == NULL && !host_checking) authenticated_id = originator_login; } /* Trusted callers are always permitted to specify the sender address. Untrusted callers may specify it if it matches untrusted_set_sender, or if what is specified is the empty address. However, if a trusted caller does not specify a sender address for SMTP input, we leave sender_address unset. This causes the MAIL commands to be honoured. */ if ((!smtp_input && sender_address == NULL) || !receive_check_set_sender(sender_address)) { /* Either the caller is not permitted to set a general sender, or this is non-SMTP input and the trusted caller has not set a sender. If there is no sender, or if a sender other than <> is set, override with the originator's login (which will get qualified below), except when checking things. */ if (sender_address == NULL /* No sender_address set */ || /* OR */ (sender_address[0] != 0 && /* Non-empty sender address, AND */ !checking)) /* Not running tests, including filter tests */ { sender_address = originator_login; sender_address_forced = FALSE; sender_address_domain = 0; } } /* Remember whether an untrusted caller set the sender address */ sender_set_untrusted = sender_address != originator_login && !trusted_caller; /* Ensure that the sender address is fully qualified unless it is the empty address, which indicates an error message, or doesn't exist (root caller, smtp interface, no -f argument). */ if (sender_address != NULL && sender_address[0] != 0 && sender_address_domain == 0) sender_address = string_sprintf("%s@%s", local_part_quote(sender_address), qualify_domain_sender); DEBUG(D_receive) debug_printf("sender address = %s\n", sender_address); /* Handle a request to verify a list of addresses, or test them for delivery. This must follow the setting of the sender address, since routers can be predicated upon the sender. If no arguments are given, read addresses from stdin. Set debug_level to at least D_v to get full output for address testing. */ if (verify_address_mode || address_test_mode) { int exit_value = 0; int flags = vopt_qualify; if (verify_address_mode) { if (!verify_as_sender) flags |= vopt_is_recipient; DEBUG(D_verify) debug_print_ids(US"Verifying:"); } else { flags |= vopt_is_recipient; debug_selector |= D_v; debug_file = stderr; debug_fd = fileno(debug_file); DEBUG(D_verify) debug_print_ids(US"Address testing:"); } if (recipients_arg < argc) { while (recipients_arg < argc) { uschar *s = argv[recipients_arg++]; while (*s != 0) { BOOL finished = FALSE; uschar *ss = parse_find_address_end(s, FALSE); if (*ss == ',') *ss = 0; else finished = TRUE; test_address(s, flags, &exit_value); s = ss; if (!finished) while (*(++s) != 0 && (*s == ',' || isspace(*s))); } } } else for (;;) { uschar *s = get_stdinput(NULL, NULL); if (s == NULL) break; test_address(s, flags, &exit_value); } route_tidyup(); exim_exit(exit_value); } /* Handle expansion checking. Either expand items on the command line, or read from stdin if there aren't any. 
If -Mset was specified, load the message so that its variables can be used, but restrict this facility to admin users. Otherwise, if -bem was used, read a message from stdin. */ if (expansion_test) { dns_init(FALSE, FALSE, FALSE); if (msg_action_arg > 0 && msg_action == MSG_LOAD) { uschar spoolname[256]; /* Not big_buffer; used in spool_read_header() */ if (!admin_user) { fprintf(stderr, "exim: permission denied\n"); exit(EXIT_FAILURE); } message_id = argv[msg_action_arg]; (void)string_format(spoolname, sizeof(spoolname), "%s-H", message_id); if ((deliver_datafile = spool_open_datafile(message_id)) < 0) printf ("Failed to load message datafile %s\n", message_id); if (spool_read_header(spoolname, TRUE, FALSE) != spool_read_OK) printf ("Failed to load message %s\n", message_id); } /* Read a test message from a file. We fudge it up to be on stdin, saving stdin itself for later reading of expansion strings. */ else if (expansion_test_message != NULL) { int save_stdin = dup(0); int fd = Uopen(expansion_test_message, O_RDONLY, 0); if (fd < 0) { fprintf(stderr, "exim: failed to open %s: %s\n", expansion_test_message, strerror(errno)); return EXIT_FAILURE; } (void) dup2(fd, 0); filter_test = FTEST_USER; /* Fudge to make it look like filter test */ message_ended = END_NOTENDED; read_message_body(receive_msg(extract_recipients)); message_linecount += body_linecount; (void)dup2(save_stdin, 0); (void)close(save_stdin); clearerr(stdin); /* Required by Darwin */ } /* Allow $recipients for this testing */ enable_dollar_recipients = TRUE; /* Expand command line items */ if (recipients_arg < argc) { while (recipients_arg < argc) { uschar *s = argv[recipients_arg++]; uschar *ss = expand_string(s); if (ss == NULL) printf ("Failed: %s\n", expand_string_message); else printf("%s\n", CS ss); } } /* Read stdin */ else { char *(*fn_readline)(const char *) = NULL; void (*fn_addhist)(const char *) = NULL; #ifdef USE_READLINE void *dlhandle = set_readline(&fn_readline, &fn_addhist); #endif for (;;) { uschar *ss; uschar *source = get_stdinput(fn_readline, fn_addhist); if (source == NULL) break; ss = expand_string(source); if (ss == NULL) printf ("Failed: %s\n", expand_string_message); else printf("%s\n", CS ss); } #ifdef USE_READLINE if (dlhandle != NULL) dlclose(dlhandle); #endif } /* The data file will be open after -Mset */ if (deliver_datafile >= 0) { (void)close(deliver_datafile); deliver_datafile = -1; } exim_exit(EXIT_SUCCESS); } /* The active host name is normally the primary host name, but it can be varied for hosts that want to play several parts at once. We need to ensure that it is set for host checking, and for receiving messages. */ smtp_active_hostname = primary_hostname; if (raw_active_hostname != NULL) { uschar *nah = expand_string(raw_active_hostname); if (nah == NULL) { if (!expand_string_forcedfail) log_write(0, LOG_MAIN|LOG_PANIC_DIE, "failed to expand \"%s\" " "(smtp_active_hostname): %s", raw_active_hostname, expand_string_message); } else if (nah[0] != 0) smtp_active_hostname = nah; } /* Handle host checking: this facility mocks up an incoming SMTP call from a given IP address so that the blocking and relay configuration can be tested. Unless a sender_ident was set by -oMt, we discard it (the default is the caller's login name). An RFC 1413 call is made only if we are running in the test harness and an incoming interface and both ports are specified, because there is no TCP/IP call to find the ident for. 
*/ if (host_checking) { int x[4]; int size; if (!sender_ident_set) { sender_ident = NULL; if (running_in_test_harness && sender_host_port != 0 && interface_address != NULL && interface_port != 0) verify_get_ident(1413); } /* In case the given address is a non-canonical IPv6 address, canonicalize it. The code works for both IPv4 and IPv6, as it happens. */ size = host_aton(sender_host_address, x); sender_host_address = store_get(48); /* large enough for full IPv6 */ (void)host_nmtoa(size, x, -1, sender_host_address, ':'); /* Now set up for testing */ host_build_sender_fullhost(); smtp_input = TRUE; smtp_in = stdin; smtp_out = stdout; sender_local = FALSE; sender_host_notsocket = TRUE; debug_file = stderr; debug_fd = fileno(debug_file); fprintf(stdout, "\n**** SMTP testing session as if from host %s\n" "**** but without any ident (RFC 1413) callback.\n" "**** This is not for real!\n\n", sender_host_address); memset(sender_host_cache, 0, sizeof(sender_host_cache)); if (verify_check_host(&hosts_connection_nolog) == OK) BIT_CLEAR(log_selector, log_selector_size, Li_smtp_connection); log_write(L_smtp_connection, LOG_MAIN, "%s", smtp_get_connection_info()); /* NOTE: We do *not* call smtp_log_no_mail() if smtp_start_session() fails, because a log line has already been written for all its failure exists (usually "connection refused: <reason>") and writing another one is unnecessary clutter. */ if (smtp_start_session()) { for (reset_point = store_get(0); ; store_reset(reset_point)) { if (smtp_setup_msg() <= 0) break; if (!receive_msg(FALSE)) break; return_path = sender_address = NULL; dnslist_domain = dnslist_matched = NULL; #ifndef DISABLE_DKIM dkim_cur_signer = NULL; #endif acl_var_m = NULL; deliver_localpart_orig = NULL; deliver_domain_orig = NULL; callout_address = sending_ip_address = NULL; sender_rate = sender_rate_limit = sender_rate_period = NULL; } smtp_log_no_mail(); } exim_exit(EXIT_SUCCESS); } /* Arrange for message reception if recipients or SMTP were specified; otherwise complain unless a version print (-bV) happened or this is a filter verification test or info dump. In the former case, show the configuration file name. */ if (recipients_arg >= argc && !extract_recipients && !smtp_input) { if (version_printed) { printf("Configuration file is %s\n", config_main_filename); return EXIT_SUCCESS; } if (info_flag != CMDINFO_NONE) { show_exim_information(info_flag, info_stdout ? stdout : stderr); return info_stdout ? EXIT_SUCCESS : EXIT_FAILURE; } if (filter_test == FTEST_NONE) exim_usage(called_as); } /* If mua_wrapper is set, Exim is being used to turn an MUA that submits on the standard input into an MUA that submits to a smarthost over TCP/IP. We know that we are not called from inetd, because that is rejected above. The following configuration settings are forced here: (1) Synchronous delivery (-odi) (2) Errors to stderr (-oep == -oeq) (3) No parallel remote delivery (4) Unprivileged delivery We don't force overall queueing options because there are several of them; instead, queueing is avoided below when mua_wrapper is set. However, we do need to override any SMTP queueing. */ if (mua_wrapper) { synchronous_delivery = TRUE; arg_error_handling = ERRORS_STDERR; remote_max_parallel = 1; deliver_drop_privilege = TRUE; queue_smtp = FALSE; queue_smtp_domains = NULL; #ifdef SUPPORT_I18N message_utf8_downconvert = -1; /* convert-if-needed */ #endif } /* Prepare to accept one or more new messages on the standard input. When a message has been read, its id is returned in message_id[]. 
If doing immediate delivery, we fork a delivery process for each received message, except for the last one, where we can save a process switch. It is only in non-smtp mode that error_handling is allowed to be changed from its default of ERRORS_SENDER by argument. (Idle thought: are any of the sendmail error modes other than -oem ever actually used? Later: yes.) */ if (!smtp_input) error_handling = arg_error_handling; /* If this is an inetd call, ensure that stderr is closed to prevent panic logging being sent down the socket and make an identd call to get the sender_ident. */ else if (is_inetd) { (void)fclose(stderr); exim_nullstd(); /* Re-open to /dev/null */ verify_get_ident(IDENT_PORT); host_build_sender_fullhost(); set_process_info("handling incoming connection from %s via inetd", sender_fullhost); } /* If the sender host address has been set, build sender_fullhost if it hasn't already been done (which it will have been for inetd). This caters for the case when it is forced by -oMa. However, we must flag that it isn't a socket, so that the test for IP options is skipped for -bs input. */ if (sender_host_address != NULL && sender_fullhost == NULL) { host_build_sender_fullhost(); set_process_info("handling incoming connection from %s via -oMa", sender_fullhost); sender_host_notsocket = TRUE; } /* Otherwise, set the sender host as unknown except for inetd calls. This prevents host checking in the case of -bs not from inetd and also for -bS. */ else if (!is_inetd) sender_host_unknown = TRUE; /* If stdout does not exist, then dup stdin to stdout. This can happen if exim is started from inetd. In this case fd 0 will be set to the socket, but fd 1 will not be set. This also happens for passed SMTP channels. */ if (fstat(1, &statbuf) < 0) (void)dup2(0, 1); /* Set up the incoming protocol name and the state of the program. Root is allowed to force received protocol via the -oMr option above. If we have come via inetd, the process info has already been set up. We don't set received_protocol here for smtp input, as it varies according to batch/HELO/EHLO/AUTH/TLS. */ if (smtp_input) { if (!is_inetd) set_process_info("accepting a local %sSMTP message from <%s>", smtp_batched_input? "batched " : "", (sender_address!= NULL)? sender_address : originator_login); } else { int old_pool = store_pool; store_pool = POOL_PERM; if (!received_protocol) received_protocol = string_sprintf("local%s", called_as); store_pool = old_pool; set_process_info("accepting a local non-SMTP message from <%s>", sender_address); } /* Initialize the session_local_queue-only flag (this will be ignored if mua_wrapper is set) */ queue_check_only(); session_local_queue_only = queue_only; /* For non-SMTP and for batched SMTP input, check that there is enough space on the spool if so configured. On failure, we must not attempt to send an error message! (For interactive SMTP, the check happens at MAIL FROM and an SMTP error code is given.) */ if ((!smtp_input || smtp_batched_input) && !receive_check_fs(0)) { fprintf(stderr, "exim: insufficient disk space\n"); return EXIT_FAILURE; } /* If this is smtp input of any kind, real or batched, handle the start of the SMTP session. NOTE: We do *not* call smtp_log_no_mail() if smtp_start_session() fails, because a log line has already been written for all its failure exists (usually "connection refused: <reason>") and writing another one is unnecessary clutter. 
*/ if (smtp_input) { smtp_in = stdin; smtp_out = stdout; memset(sender_host_cache, 0, sizeof(sender_host_cache)); if (verify_check_host(&hosts_connection_nolog) == OK) BIT_CLEAR(log_selector, log_selector_size, Li_smtp_connection); log_write(L_smtp_connection, LOG_MAIN, "%s", smtp_get_connection_info()); if (!smtp_start_session()) { mac_smtp_fflush(); exim_exit(EXIT_SUCCESS); } } /* Otherwise, set up the input size limit here. */ else { thismessage_size_limit = expand_string_integer(message_size_limit, TRUE); if (expand_string_message) if (thismessage_size_limit == -1) log_write(0, LOG_MAIN|LOG_PANIC_DIE, "failed to expand " "message_size_limit: %s", expand_string_message); else log_write(0, LOG_MAIN|LOG_PANIC_DIE, "invalid value for " "message_size_limit: %s", expand_string_message); } /* Loop for several messages when reading SMTP input. If we fork any child processes, we don't want to wait for them unless synchronous delivery is requested, so set SIGCHLD to SIG_IGN in that case. This is not necessarily the same as SIG_DFL, despite the fact that documentation often lists the default as "ignore". This is a confusing area. This is what I know: At least on some systems (e.g. Solaris), just setting SIG_IGN causes child processes that complete simply to go away without ever becoming defunct. You can't then wait for them - but we don't want to wait for them in the non-synchronous delivery case. However, this behaviour of SIG_IGN doesn't happen for all OS (e.g. *BSD is different). But that's not the end of the story. Some (many? all?) systems have the SA_NOCLDWAIT option for sigaction(). This requests the behaviour that Solaris has by default, so it seems that the difference is merely one of default (compare restarting vs non-restarting signals). To cover all cases, Exim sets SIG_IGN with SA_NOCLDWAIT here if it can. If not, it just sets SIG_IGN. To be on the safe side it also calls waitpid() at the end of the loop below. Paranoia rules. February 2003: That's *still* not the end of the story. There are now versions of Linux (where SIG_IGN does work) that are picky. If, having set SIG_IGN, a process then calls waitpid(), a grumble is written to the system log, because this is logically inconsistent. In other words, it doesn't like the paranoia. As a consequence of this, the waitpid() below is now excluded if we are sure that SIG_IGN works. */ if (!synchronous_delivery) { #ifdef SA_NOCLDWAIT struct sigaction act; act.sa_handler = SIG_IGN; sigemptyset(&(act.sa_mask)); act.sa_flags = SA_NOCLDWAIT; sigaction(SIGCHLD, &act, NULL); #else signal(SIGCHLD, SIG_IGN); #endif } /* Save the current store pool point, for resetting at the start of each message, and save the real sender address, if any. */ reset_point = store_get(0); real_sender_address = sender_address; /* Loop to receive messages; receive_msg() returns TRUE if there are more messages to be read (SMTP input), or FALSE otherwise (not SMTP, or SMTP channel collapsed). */ while (more) { message_id[0] = 0; /* Handle the SMTP case; call smtp_setup_mst() to deal with the initial SMTP input and build the recipients list, before calling receive_msg() to read the message proper. Whatever sender address is given in the SMTP transaction is often ignored for local senders - we use the actual sender, which is normally either the underlying user running this process or a -f argument provided by a trusted caller. It is saved in real_sender_address. The test for whether to accept the SMTP sender is encapsulated in receive_check_set_sender(). 
*/ if (smtp_input) { int rc; if ((rc = smtp_setup_msg()) > 0) { if (real_sender_address != NULL && !receive_check_set_sender(sender_address)) { sender_address = raw_sender = real_sender_address; sender_address_unrewritten = NULL; } /* For batched SMTP, we have to run the acl_not_smtp_start ACL, since it isn't really SMTP, so no other ACL will run until the acl_not_smtp one at the very end. The result of the ACL is ignored (as for other non-SMTP messages). It is run for its potential side effects. */ if (smtp_batched_input && acl_not_smtp_start != NULL) { uschar *user_msg, *log_msg; enable_dollar_recipients = TRUE; (void)acl_check(ACL_WHERE_NOTSMTP_START, NULL, acl_not_smtp_start, &user_msg, &log_msg); enable_dollar_recipients = FALSE; } /* Now get the data for the message */ more = receive_msg(extract_recipients); if (message_id[0] == 0) { cancel_cutthrough_connection(TRUE, US"receive dropped"); if (more) goto moreloop; smtp_log_no_mail(); /* Log no mail if configured */ exim_exit(EXIT_FAILURE); } } else { cancel_cutthrough_connection(TRUE, US"message setup dropped"); smtp_log_no_mail(); /* Log no mail if configured */ exim_exit((rc == 0)? EXIT_SUCCESS : EXIT_FAILURE); } } /* In the non-SMTP case, we have all the information from the command line, but must process it in case it is in the more general RFC822 format, and in any case, to detect syntax errors. Also, it appears that the use of comma-separated lists as single arguments is common, so we had better support them. */ else { int i; int rcount = 0; int count = argc - recipients_arg; uschar **list = argv + recipients_arg; /* These options cannot be changed dynamically for non-SMTP messages */ active_local_sender_retain = local_sender_retain; active_local_from_check = local_from_check; /* Save before any rewriting */ raw_sender = string_copy(sender_address); /* Loop for each argument */ for (i = 0; i < count; i++) { int start, end, domain; uschar *errmess; uschar *s = list[i]; /* Loop for each comma-separated address */ while (*s != 0) { BOOL finished = FALSE; uschar *recipient; uschar *ss = parse_find_address_end(s, FALSE); if (*ss == ',') *ss = 0; else finished = TRUE; /* Check max recipients - if -t was used, these aren't recipients */ if (recipients_max > 0 && ++rcount > recipients_max && !extract_recipients) if (error_handling == ERRORS_STDERR) { fprintf(stderr, "exim: too many recipients\n"); exim_exit(EXIT_FAILURE); } else { return moan_to_sender(ERRMESS_TOOMANYRECIP, NULL, NULL, stdin, TRUE)? errors_sender_rc : EXIT_FAILURE; } #ifdef SUPPORT_I18N { BOOL b = allow_utf8_domains; allow_utf8_domains = TRUE; #endif recipient = parse_extract_address(s, &errmess, &start, &end, &domain, FALSE); #ifdef SUPPORT_I18N if (string_is_utf8(recipient)) message_smtputf8 = TRUE; else allow_utf8_domains = b; } #endif if (domain == 0 && !allow_unqualified_recipient) { recipient = NULL; errmess = US"unqualified recipient address not allowed"; } if (recipient == NULL) { if (error_handling == ERRORS_STDERR) { fprintf(stderr, "exim: bad recipient address \"%s\": %s\n", string_printing(list[i]), errmess); exim_exit(EXIT_FAILURE); } else { error_block eblock; eblock.next = NULL; eblock.text1 = string_printing(list[i]); eblock.text2 = errmess; return moan_to_sender(ERRMESS_BADARGADDRESS, &eblock, NULL, stdin, TRUE)? 
errors_sender_rc : EXIT_FAILURE; } } receive_add_recipient(recipient, -1); s = ss; if (!finished) while (*(++s) != 0 && (*s == ',' || isspace(*s))); } } /* Show the recipients when debugging */ DEBUG(D_receive) { int i; if (sender_address != NULL) debug_printf("Sender: %s\n", sender_address); if (recipients_list != NULL) { debug_printf("Recipients:\n"); for (i = 0; i < recipients_count; i++) debug_printf(" %s\n", recipients_list[i].address); } } /* Run the acl_not_smtp_start ACL if required. The result of the ACL is ignored; rejecting here would just add complication, and it can just as well be done later. Allow $recipients to be visible in the ACL. */ if (acl_not_smtp_start) { uschar *user_msg, *log_msg; enable_dollar_recipients = TRUE; (void)acl_check(ACL_WHERE_NOTSMTP_START, NULL, acl_not_smtp_start, &user_msg, &log_msg); enable_dollar_recipients = FALSE; } /* Pause for a while waiting for input. If none received in that time, close the logfile, if we had one open; then if we wait for a long-running datasource (months, in one use-case) log rotation will not leave us holding the file copy. */ if (!receive_timeout) { struct timeval t = { 30*60, 0 }; /* 30 minutes */ fd_set r; FD_ZERO(&r); FD_SET(0, &r); if (select(1, &r, NULL, NULL, &t) == 0) mainlog_close(); } /* Read the data for the message. If filter_test is not FTEST_NONE, this will just read the headers for the message, and not write anything onto the spool. */ message_ended = END_NOTENDED; more = receive_msg(extract_recipients); /* more is always FALSE here (not SMTP message) when reading a message for real; when reading the headers of a message for filter testing, it is TRUE if the headers were terminated by '.' and FALSE otherwise. */ if (message_id[0] == 0) exim_exit(EXIT_FAILURE); } /* Non-SMTP message reception */ /* If this is a filter testing run, there are headers in store, but no message on the spool. Run the filtering code in testing mode, setting the domain to the qualify domain and the local part to the current user, unless they have been set by options. The prefix and suffix are left unset unless specified. The the return path is set to to the sender unless it has already been set from a return-path header in the message. */ if (filter_test != FTEST_NONE) { deliver_domain = (ftest_domain != NULL)? ftest_domain : qualify_domain_recipient; deliver_domain_orig = deliver_domain; deliver_localpart = (ftest_localpart != NULL)? ftest_localpart : originator_login; deliver_localpart_orig = deliver_localpart; deliver_localpart_prefix = ftest_prefix; deliver_localpart_suffix = ftest_suffix; deliver_home = originator_home; if (return_path == NULL) { printf("Return-path copied from sender\n"); return_path = string_copy(sender_address); } else printf("Return-path = %s\n", (return_path[0] == 0)? US"<>" : return_path); printf("Sender = %s\n", (sender_address[0] == 0)? US"<>" : sender_address); receive_add_recipient( string_sprintf("%s%s%s@%s", (ftest_prefix == NULL)? US"" : ftest_prefix, deliver_localpart, (ftest_suffix == NULL)? US"" : ftest_suffix, deliver_domain), -1); printf("Recipient = %s\n", recipients_list[0].address); if (ftest_prefix != NULL) printf("Prefix = %s\n", ftest_prefix); if (ftest_suffix != NULL) printf("Suffix = %s\n", ftest_suffix); if (chdir("/")) /* Get away from wherever the user is running this from */ { DEBUG(D_receive) debug_printf("chdir(\"/\") failed\n"); exim_exit(EXIT_FAILURE); } /* Now we run either a system filter test, or a user filter test, or both. 
In the latter case, headers added by the system filter will persist and be available to the user filter. We need to copy the filter variables explicitly. */ if ((filter_test & FTEST_SYSTEM) != 0) { if (!filter_runtest(filter_sfd, filter_test_sfile, TRUE, more)) exim_exit(EXIT_FAILURE); } memcpy(filter_sn, filter_n, sizeof(filter_sn)); if ((filter_test & FTEST_USER) != 0) { if (!filter_runtest(filter_ufd, filter_test_ufile, FALSE, more)) exim_exit(EXIT_FAILURE); } exim_exit(EXIT_SUCCESS); } /* Else act on the result of message reception. We should not get here unless message_id[0] is non-zero. If queue_only is set, session_local_queue_only will be TRUE. If it is not, check on the number of messages received in this connection. */ if (!session_local_queue_only && smtp_accept_queue_per_connection > 0 && receive_messagecount > smtp_accept_queue_per_connection) { session_local_queue_only = TRUE; queue_only_reason = 2; } /* Initialize local_queue_only from session_local_queue_only. If it is false, and queue_only_load is set, check that the load average is below it. If it is not, set local_queue_only TRUE. If queue_only_load_latch is true (the default), we put the whole session into queue_only mode. It then remains this way for any subsequent messages on the same SMTP connection. This is a deliberate choice; even though the load average may fall, it doesn't seem right to deliver later messages on the same call when not delivering earlier ones. However, there are odd cases where this is not wanted, so this can be changed by setting queue_only_load_latch false. */ local_queue_only = session_local_queue_only; if (!local_queue_only && queue_only_load >= 0) { local_queue_only = (load_average = OS_GETLOADAVG()) > queue_only_load; if (local_queue_only) { queue_only_reason = 3; if (queue_only_load_latch) session_local_queue_only = TRUE; } } /* If running as an MUA wrapper, all queueing options and freezing options are ignored. */ if (mua_wrapper) local_queue_only = queue_only_policy = deliver_freeze = FALSE; /* Log the queueing here, when it will get a message id attached, but not if queue_only is set (case 0). Case 1 doesn't happen here (too many connections). */ if (local_queue_only) { cancel_cutthrough_connection(TRUE, US"no delivery; queueing"); switch(queue_only_reason) { case 2: log_write(L_delay_delivery, LOG_MAIN, "no immediate delivery: more than %d messages " "received in one connection", smtp_accept_queue_per_connection); break; case 3: log_write(L_delay_delivery, LOG_MAIN, "no immediate delivery: load average %.2f", (double)load_average/1000.0); break; } } else if (queue_only_policy || deliver_freeze) cancel_cutthrough_connection(TRUE, US"no delivery; queueing"); /* Else do the delivery unless the ACL or local_scan() called for queue only or froze the message. Always deliver in a separate process. A fork failure is not a disaster, as the delivery will eventually happen on a subsequent queue run. The search cache must be tidied before the fork, as the parent will do it before exiting. The child will trigger a lookup failure and thereby defer the delivery if it tries to use (for example) a cached ldap connection that the parent has called unbind on. */ else { pid_t pid; search_tidyup(); if ((pid = fork()) == 0) { int rc; close_unwanted(); /* Close unwanted file descriptors and TLS */ exim_nullstd(); /* Ensure std{in,out,err} exist */ /* Re-exec Exim if we need to regain privilege (note: in mua_wrapper mode, deliver_drop_privilege is forced TRUE). 
*/ if (geteuid() != root_uid && !deliver_drop_privilege && !unprivileged) { delivery_re_exec(CEE_EXEC_EXIT); /* Control does not return here. */ } /* No need to re-exec */ rc = deliver_message(message_id, FALSE, FALSE); search_tidyup(); _exit((!mua_wrapper || rc == DELIVER_MUA_SUCCEEDED)? EXIT_SUCCESS : EXIT_FAILURE); } if (pid < 0) { cancel_cutthrough_connection(TRUE, US"delivery fork failed"); log_write(0, LOG_MAIN|LOG_PANIC, "failed to fork automatic delivery " "process: %s", strerror(errno)); } else { release_cutthrough_connection(US"msg passed for delivery"); /* In the parent, wait if synchronous delivery is required. This will always be the case in MUA wrapper mode. */ if (synchronous_delivery) { int status; while (wait(&status) != pid); if ((status & 0x00ff) != 0) log_write(0, LOG_MAIN|LOG_PANIC, "process %d crashed with signal %d while delivering %s", (int)pid, status & 0x00ff, message_id); if (mua_wrapper && (status & 0xffff) != 0) exim_exit(EXIT_FAILURE); } } } /* The loop will repeat if more is TRUE. If we do not know know that the OS automatically reaps children (see comments above the loop), clear away any finished subprocesses here, in case there are lots of messages coming in from the same source. */ #ifndef SIG_IGN_WORKS while (waitpid(-1, NULL, WNOHANG) > 0); #endif moreloop: return_path = sender_address = NULL; authenticated_sender = NULL; deliver_localpart_orig = NULL; deliver_domain_orig = NULL; deliver_host = deliver_host_address = NULL; dnslist_domain = dnslist_matched = NULL; #ifdef WITH_CONTENT_SCAN malware_name = NULL; #endif callout_address = NULL; sending_ip_address = NULL; acl_var_m = NULL; { int i; for(i=0; i<REGEX_VARS; i++) regex_vars[i] = NULL; } store_reset(reset_point); } exim_exit(EXIT_SUCCESS); /* Never returns */ return 0; /* To stop compiler warning */ } /* End of exim.c */
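/* Editor's note: a minimal, self-contained sketch (not part of exim.c) of the
   child-reaping approach described in the long SIGCHLD comment above: prefer
   SIG_IGN with SA_NOCLDWAIT where sigaction() supports it, otherwise fall back
   to plain SIG_IGN and reap any stragglers with a non-blocking waitpid() loop.
   All names below are illustrative assumptions, not Exim's own code. */

#include <signal.h>
#include <stddef.h>
#include <sys/wait.h>

static void ignore_children(void)
{
#ifdef SA_NOCLDWAIT
  struct sigaction act;
  act.sa_handler = SIG_IGN;        /* discard SIGCHLD */
  sigemptyset(&act.sa_mask);
  act.sa_flags = SA_NOCLDWAIT;     /* ask the kernel not to leave zombies */
  sigaction(SIGCHLD, &act, NULL);
#else
  signal(SIGCHLD, SIG_IGN);        /* older systems: behaviour varies */
#endif
}

static void reap_finished_children(void)
{
  /* Belt-and-braces reap; harmless on systems that already auto-reap. */
  while (waitpid(-1, NULL, WNOHANG) > 0) ;
}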
./CrossVul/dataset_final_sorted/CWE-404/c/good_2525_1
crossvul-cpp_data_good_3267_0
/* Userspace key control operations * * Copyright (C) 2004-5 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/module.h> #include <linux/init.h> #include <linux/sched.h> #include <linux/sched/task.h> #include <linux/slab.h> #include <linux/syscalls.h> #include <linux/key.h> #include <linux/keyctl.h> #include <linux/fs.h> #include <linux/capability.h> #include <linux/cred.h> #include <linux/string.h> #include <linux/err.h> #include <linux/vmalloc.h> #include <linux/security.h> #include <linux/uio.h> #include <linux/uaccess.h> #include "internal.h" #define KEY_MAX_DESC_SIZE 4096 static int key_get_type_from_user(char *type, const char __user *_type, unsigned len) { int ret; ret = strncpy_from_user(type, _type, len); if (ret < 0) return ret; if (ret == 0 || ret >= len) return -EINVAL; if (type[0] == '.') return -EPERM; type[len - 1] = '\0'; return 0; } /* * Extract the description of a new key from userspace and either add it as a * new key to the specified keyring or update a matching key in that keyring. * * If the description is NULL or an empty string, the key type is asked to * generate one from the payload. * * The keyring must be writable so that we can attach the key to it. * * If successful, the new key's serial number is returned, otherwise an error * code is returned. */ SYSCALL_DEFINE5(add_key, const char __user *, _type, const char __user *, _description, const void __user *, _payload, size_t, plen, key_serial_t, ringid) { key_ref_t keyring_ref, key_ref; char type[32], *description; void *payload; long ret; ret = -EINVAL; if (plen > 1024 * 1024 - 1) goto error; /* draw all the data into kernel space */ ret = key_get_type_from_user(type, _type, sizeof(type)); if (ret < 0) goto error; description = NULL; if (_description) { description = strndup_user(_description, KEY_MAX_DESC_SIZE); if (IS_ERR(description)) { ret = PTR_ERR(description); goto error; } if (!*description) { kfree(description); description = NULL; } else if ((description[0] == '.') && (strncmp(type, "keyring", 7) == 0)) { ret = -EPERM; goto error2; } } /* pull the payload in if one was supplied */ payload = NULL; if (_payload) { ret = -ENOMEM; payload = kmalloc(plen, GFP_KERNEL | __GFP_NOWARN); if (!payload) { if (plen <= PAGE_SIZE) goto error2; payload = vmalloc(plen); if (!payload) goto error2; } ret = -EFAULT; if (copy_from_user(payload, _payload, plen) != 0) goto error3; } /* find the target keyring (which must be writable) */ keyring_ref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE); if (IS_ERR(keyring_ref)) { ret = PTR_ERR(keyring_ref); goto error3; } /* create or update the requested key and add it to the target * keyring */ key_ref = key_create_or_update(keyring_ref, type, description, payload, plen, KEY_PERM_UNDEF, KEY_ALLOC_IN_QUOTA); if (!IS_ERR(key_ref)) { ret = key_ref_to_ptr(key_ref)->serial; key_ref_put(key_ref); } else { ret = PTR_ERR(key_ref); } key_ref_put(keyring_ref); error3: kvfree(payload); error2: kfree(description); error: return ret; } /* * Search the process keyrings and keyring trees linked from those for a * matching key. Keyrings must have appropriate Search permission to be * searched. 
* * If a key is found, it will be attached to the destination keyring if there's * one specified and the serial number of the key will be returned. * * If no key is found, /sbin/request-key will be invoked if _callout_info is * non-NULL in an attempt to create a key. The _callout_info string will be * passed to /sbin/request-key to aid with completing the request. If the * _callout_info string is "" then it will be changed to "-". */ SYSCALL_DEFINE4(request_key, const char __user *, _type, const char __user *, _description, const char __user *, _callout_info, key_serial_t, destringid) { struct key_type *ktype; struct key *key; key_ref_t dest_ref; size_t callout_len; char type[32], *description, *callout_info; long ret; /* pull the type into kernel space */ ret = key_get_type_from_user(type, _type, sizeof(type)); if (ret < 0) goto error; /* pull the description into kernel space */ description = strndup_user(_description, KEY_MAX_DESC_SIZE); if (IS_ERR(description)) { ret = PTR_ERR(description); goto error; } /* pull the callout info into kernel space */ callout_info = NULL; callout_len = 0; if (_callout_info) { callout_info = strndup_user(_callout_info, PAGE_SIZE); if (IS_ERR(callout_info)) { ret = PTR_ERR(callout_info); goto error2; } callout_len = strlen(callout_info); } /* get the destination keyring if specified */ dest_ref = NULL; if (destringid) { dest_ref = lookup_user_key(destringid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE); if (IS_ERR(dest_ref)) { ret = PTR_ERR(dest_ref); goto error3; } } /* find the key type */ ktype = key_type_lookup(type); if (IS_ERR(ktype)) { ret = PTR_ERR(ktype); goto error4; } /* do the search */ key = request_key_and_link(ktype, description, callout_info, callout_len, NULL, key_ref_to_ptr(dest_ref), KEY_ALLOC_IN_QUOTA); if (IS_ERR(key)) { ret = PTR_ERR(key); goto error5; } /* wait for the key to finish being constructed */ ret = wait_for_key_construction(key, 1); if (ret < 0) goto error6; ret = key->serial; error6: key_put(key); error5: key_type_put(ktype); error4: key_ref_put(dest_ref); error3: kfree(callout_info); error2: kfree(description); error: return ret; } /* * Get the ID of the specified process keyring. * * The requested keyring must have search permission to be found. * * If successful, the ID of the requested keyring will be returned. */ long keyctl_get_keyring_ID(key_serial_t id, int create) { key_ref_t key_ref; unsigned long lflags; long ret; lflags = create ? KEY_LOOKUP_CREATE : 0; key_ref = lookup_user_key(id, lflags, KEY_NEED_SEARCH); if (IS_ERR(key_ref)) { ret = PTR_ERR(key_ref); goto error; } ret = key_ref_to_ptr(key_ref)->serial; key_ref_put(key_ref); error: return ret; } /* * Join a (named) session keyring. * * Create and join an anonymous session keyring or join a named session * keyring, creating it if necessary. A named session keyring must have Search * permission for it to be joined. Session keyrings without this permit will * be skipped over. It is not permitted for userspace to create or join * keyrings whose name begin with a dot. * * If successful, the ID of the joined session keyring will be returned. 
*/ long keyctl_join_session_keyring(const char __user *_name) { char *name; long ret; /* fetch the name from userspace */ name = NULL; if (_name) { name = strndup_user(_name, KEY_MAX_DESC_SIZE); if (IS_ERR(name)) { ret = PTR_ERR(name); goto error; } ret = -EPERM; if (name[0] == '.') goto error_name; } /* join the session */ ret = join_session_keyring(name); error_name: kfree(name); error: return ret; } /* * Update a key's data payload from the given data. * * The key must grant the caller Write permission and the key type must support * updating for this to work. A negative key can be positively instantiated * with this call. * * If successful, 0 will be returned. If the key type does not support * updating, then -EOPNOTSUPP will be returned. */ long keyctl_update_key(key_serial_t id, const void __user *_payload, size_t plen) { key_ref_t key_ref; void *payload; long ret; ret = -EINVAL; if (plen > PAGE_SIZE) goto error; /* pull the payload in if one was supplied */ payload = NULL; if (_payload) { ret = -ENOMEM; payload = kmalloc(plen, GFP_KERNEL); if (!payload) goto error; ret = -EFAULT; if (copy_from_user(payload, _payload, plen) != 0) goto error2; } /* find the target key (which must be writable) */ key_ref = lookup_user_key(id, 0, KEY_NEED_WRITE); if (IS_ERR(key_ref)) { ret = PTR_ERR(key_ref); goto error2; } /* update the key */ ret = key_update(key_ref, payload, plen); key_ref_put(key_ref); error2: kfree(payload); error: return ret; } /* * Revoke a key. * * The key must be grant the caller Write or Setattr permission for this to * work. The key type should give up its quota claim when revoked. The key * and any links to the key will be automatically garbage collected after a * certain amount of time (/proc/sys/kernel/keys/gc_delay). * * Keys with KEY_FLAG_KEEP set should not be revoked. * * If successful, 0 is returned. */ long keyctl_revoke_key(key_serial_t id) { key_ref_t key_ref; struct key *key; long ret; key_ref = lookup_user_key(id, 0, KEY_NEED_WRITE); if (IS_ERR(key_ref)) { ret = PTR_ERR(key_ref); if (ret != -EACCES) goto error; key_ref = lookup_user_key(id, 0, KEY_NEED_SETATTR); if (IS_ERR(key_ref)) { ret = PTR_ERR(key_ref); goto error; } } key = key_ref_to_ptr(key_ref); ret = 0; if (test_bit(KEY_FLAG_KEEP, &key->flags)) ret = -EPERM; else key_revoke(key); key_ref_put(key_ref); error: return ret; } /* * Invalidate a key. * * The key must be grant the caller Invalidate permission for this to work. * The key and any links to the key will be automatically garbage collected * immediately. * * Keys with KEY_FLAG_KEEP set should not be invalidated. * * If successful, 0 is returned. */ long keyctl_invalidate_key(key_serial_t id) { key_ref_t key_ref; struct key *key; long ret; kenter("%d", id); key_ref = lookup_user_key(id, 0, KEY_NEED_SEARCH); if (IS_ERR(key_ref)) { ret = PTR_ERR(key_ref); /* Root is permitted to invalidate certain special keys */ if (capable(CAP_SYS_ADMIN)) { key_ref = lookup_user_key(id, 0, 0); if (IS_ERR(key_ref)) goto error; if (test_bit(KEY_FLAG_ROOT_CAN_INVAL, &key_ref_to_ptr(key_ref)->flags)) goto invalidate; goto error_put; } goto error; } invalidate: key = key_ref_to_ptr(key_ref); ret = 0; if (test_bit(KEY_FLAG_KEEP, &key->flags)) ret = -EPERM; else key_invalidate(key); error_put: key_ref_put(key_ref); error: kleave(" = %ld", ret); return ret; } /* * Clear the specified keyring, creating an empty process keyring if one of the * special keyring IDs is used. * * The keyring must grant the caller Write permission and not have * KEY_FLAG_KEEP set for this to work. 
If successful, 0 will be returned. */ long keyctl_keyring_clear(key_serial_t ringid) { key_ref_t keyring_ref; struct key *keyring; long ret; keyring_ref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE); if (IS_ERR(keyring_ref)) { ret = PTR_ERR(keyring_ref); /* Root is permitted to invalidate certain special keyrings */ if (capable(CAP_SYS_ADMIN)) { keyring_ref = lookup_user_key(ringid, 0, 0); if (IS_ERR(keyring_ref)) goto error; if (test_bit(KEY_FLAG_ROOT_CAN_CLEAR, &key_ref_to_ptr(keyring_ref)->flags)) goto clear; goto error_put; } goto error; } clear: keyring = key_ref_to_ptr(keyring_ref); if (test_bit(KEY_FLAG_KEEP, &keyring->flags)) ret = -EPERM; else ret = keyring_clear(keyring); error_put: key_ref_put(keyring_ref); error: return ret; } /* * Create a link from a keyring to a key if there's no matching key in the * keyring, otherwise replace the link to the matching key with a link to the * new key. * * The key must grant the caller Link permission and the the keyring must grant * the caller Write permission. Furthermore, if an additional link is created, * the keyring's quota will be extended. * * If successful, 0 will be returned. */ long keyctl_keyring_link(key_serial_t id, key_serial_t ringid) { key_ref_t keyring_ref, key_ref; long ret; keyring_ref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE); if (IS_ERR(keyring_ref)) { ret = PTR_ERR(keyring_ref); goto error; } key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE, KEY_NEED_LINK); if (IS_ERR(key_ref)) { ret = PTR_ERR(key_ref); goto error2; } ret = key_link(key_ref_to_ptr(keyring_ref), key_ref_to_ptr(key_ref)); key_ref_put(key_ref); error2: key_ref_put(keyring_ref); error: return ret; } /* * Unlink a key from a keyring. * * The keyring must grant the caller Write permission for this to work; the key * itself need not grant the caller anything. If the last link to a key is * removed then that key will be scheduled for destruction. * * Keys or keyrings with KEY_FLAG_KEEP set should not be unlinked. * * If successful, 0 will be returned. */ long keyctl_keyring_unlink(key_serial_t id, key_serial_t ringid) { key_ref_t keyring_ref, key_ref; struct key *keyring, *key; long ret; keyring_ref = lookup_user_key(ringid, 0, KEY_NEED_WRITE); if (IS_ERR(keyring_ref)) { ret = PTR_ERR(keyring_ref); goto error; } key_ref = lookup_user_key(id, KEY_LOOKUP_FOR_UNLINK, 0); if (IS_ERR(key_ref)) { ret = PTR_ERR(key_ref); goto error2; } keyring = key_ref_to_ptr(keyring_ref); key = key_ref_to_ptr(key_ref); if (test_bit(KEY_FLAG_KEEP, &keyring->flags) && test_bit(KEY_FLAG_KEEP, &key->flags)) ret = -EPERM; else ret = key_unlink(keyring, key); key_ref_put(key_ref); error2: key_ref_put(keyring_ref); error: return ret; } /* * Return a description of a key to userspace. * * The key must grant the caller View permission for this to work. * * If there's a buffer, we place up to buflen bytes of data into it formatted * in the following way: * * type;uid;gid;perm;description<NUL> * * If successful, we return the amount of description available, irrespective * of how much we may have copied into the buffer. 
*/ long keyctl_describe_key(key_serial_t keyid, char __user *buffer, size_t buflen) { struct key *key, *instkey; key_ref_t key_ref; char *infobuf; long ret; int desclen, infolen; key_ref = lookup_user_key(keyid, KEY_LOOKUP_PARTIAL, KEY_NEED_VIEW); if (IS_ERR(key_ref)) { /* viewing a key under construction is permitted if we have the * authorisation token handy */ if (PTR_ERR(key_ref) == -EACCES) { instkey = key_get_instantiation_authkey(keyid); if (!IS_ERR(instkey)) { key_put(instkey); key_ref = lookup_user_key(keyid, KEY_LOOKUP_PARTIAL, 0); if (!IS_ERR(key_ref)) goto okay; } } ret = PTR_ERR(key_ref); goto error; } okay: key = key_ref_to_ptr(key_ref); desclen = strlen(key->description); /* calculate how much information we're going to return */ ret = -ENOMEM; infobuf = kasprintf(GFP_KERNEL, "%s;%d;%d;%08x;", key->type->name, from_kuid_munged(current_user_ns(), key->uid), from_kgid_munged(current_user_ns(), key->gid), key->perm); if (!infobuf) goto error2; infolen = strlen(infobuf); ret = infolen + desclen + 1; /* consider returning the data */ if (buffer && buflen >= ret) { if (copy_to_user(buffer, infobuf, infolen) != 0 || copy_to_user(buffer + infolen, key->description, desclen + 1) != 0) ret = -EFAULT; } kfree(infobuf); error2: key_ref_put(key_ref); error: return ret; } /* * Search the specified keyring and any keyrings it links to for a matching * key. Only keyrings that grant the caller Search permission will be searched * (this includes the starting keyring). Only keys with Search permission can * be found. * * If successful, the found key will be linked to the destination keyring if * supplied and the key has Link permission, and the found key ID will be * returned. */ long keyctl_keyring_search(key_serial_t ringid, const char __user *_type, const char __user *_description, key_serial_t destringid) { struct key_type *ktype; key_ref_t keyring_ref, key_ref, dest_ref; char type[32], *description; long ret; /* pull the type and description into kernel space */ ret = key_get_type_from_user(type, _type, sizeof(type)); if (ret < 0) goto error; description = strndup_user(_description, KEY_MAX_DESC_SIZE); if (IS_ERR(description)) { ret = PTR_ERR(description); goto error; } /* get the keyring at which to begin the search */ keyring_ref = lookup_user_key(ringid, 0, KEY_NEED_SEARCH); if (IS_ERR(keyring_ref)) { ret = PTR_ERR(keyring_ref); goto error2; } /* get the destination keyring if specified */ dest_ref = NULL; if (destringid) { dest_ref = lookup_user_key(destringid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE); if (IS_ERR(dest_ref)) { ret = PTR_ERR(dest_ref); goto error3; } } /* find the key type */ ktype = key_type_lookup(type); if (IS_ERR(ktype)) { ret = PTR_ERR(ktype); goto error4; } /* do the search */ key_ref = keyring_search(keyring_ref, ktype, description); if (IS_ERR(key_ref)) { ret = PTR_ERR(key_ref); /* treat lack or presence of a negative key the same */ if (ret == -EAGAIN) ret = -ENOKEY; goto error5; } /* link the resulting key to the destination keyring if we can */ if (dest_ref) { ret = key_permission(key_ref, KEY_NEED_LINK); if (ret < 0) goto error6; ret = key_link(key_ref_to_ptr(dest_ref), key_ref_to_ptr(key_ref)); if (ret < 0) goto error6; } ret = key_ref_to_ptr(key_ref)->serial; error6: key_ref_put(key_ref); error5: key_type_put(ktype); error4: key_ref_put(dest_ref); error3: key_ref_put(keyring_ref); error2: kfree(description); error: return ret; } /* * Read a key's payload. 
* * The key must either grant the caller Read permission, or it must grant the * caller Search permission when searched for from the process keyrings. * * If successful, we place up to buflen bytes of data into the buffer, if one * is provided, and return the amount of data that is available in the key, * irrespective of how much we copied into the buffer. */ long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen) { struct key *key; key_ref_t key_ref; long ret; /* find the key first */ key_ref = lookup_user_key(keyid, 0, 0); if (IS_ERR(key_ref)) { ret = -ENOKEY; goto error; } key = key_ref_to_ptr(key_ref); /* see if we can read it directly */ ret = key_permission(key_ref, KEY_NEED_READ); if (ret == 0) goto can_read_key; if (ret != -EACCES) goto error; /* we can't; see if it's searchable from this process's keyrings * - we automatically take account of the fact that it may be * dangling off an instantiation key */ if (!is_key_possessed(key_ref)) { ret = -EACCES; goto error2; } /* the key is probably readable - now try to read it */ can_read_key: ret = -EOPNOTSUPP; if (key->type->read) { /* Read the data with the semaphore held (since we might sleep) * to protect against the key being updated or revoked. */ down_read(&key->sem); ret = key_validate(key); if (ret == 0) ret = key->type->read(key, buffer, buflen); up_read(&key->sem); } error2: key_put(key); error: return ret; } /* * Change the ownership of a key * * The key must grant the caller Setattr permission for this to work, though * the key need not be fully instantiated yet. For the UID to be changed, or * for the GID to be changed to a group the caller is not a member of, the * caller must have sysadmin capability. If either uid or gid is -1 then that * attribute is not changed. * * If the UID is to be changed, the new user must have sufficient quota to * accept the key. The quota deduction will be removed from the old user to * the new user should the attribute be changed. * * If successful, 0 will be returned. */ long keyctl_chown_key(key_serial_t id, uid_t user, gid_t group) { struct key_user *newowner, *zapowner = NULL; struct key *key; key_ref_t key_ref; long ret; kuid_t uid; kgid_t gid; uid = make_kuid(current_user_ns(), user); gid = make_kgid(current_user_ns(), group); ret = -EINVAL; if ((user != (uid_t) -1) && !uid_valid(uid)) goto error; if ((group != (gid_t) -1) && !gid_valid(gid)) goto error; ret = 0; if (user == (uid_t) -1 && group == (gid_t) -1) goto error; key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE | KEY_LOOKUP_PARTIAL, KEY_NEED_SETATTR); if (IS_ERR(key_ref)) { ret = PTR_ERR(key_ref); goto error; } key = key_ref_to_ptr(key_ref); /* make the changes with the locks held to prevent chown/chown races */ ret = -EACCES; down_write(&key->sem); if (!capable(CAP_SYS_ADMIN)) { /* only the sysadmin can chown a key to some other UID */ if (user != (uid_t) -1 && !uid_eq(key->uid, uid)) goto error_put; /* only the sysadmin can set the key's GID to a group other * than one of those that the current process subscribes to */ if (group != (gid_t) -1 && !gid_eq(gid, key->gid) && !in_group_p(gid)) goto error_put; } /* change the UID */ if (user != (uid_t) -1 && !uid_eq(uid, key->uid)) { ret = -ENOMEM; newowner = key_user_lookup(uid); if (!newowner) goto error_put; /* transfer the quota burden to the new user */ if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) { unsigned maxkeys = uid_eq(uid, GLOBAL_ROOT_UID) ? key_quota_root_maxkeys : key_quota_maxkeys; unsigned maxbytes = uid_eq(uid, GLOBAL_ROOT_UID) ? 
key_quota_root_maxbytes : key_quota_maxbytes; spin_lock(&newowner->lock); if (newowner->qnkeys + 1 >= maxkeys || newowner->qnbytes + key->quotalen >= maxbytes || newowner->qnbytes + key->quotalen < newowner->qnbytes) goto quota_overrun; newowner->qnkeys++; newowner->qnbytes += key->quotalen; spin_unlock(&newowner->lock); spin_lock(&key->user->lock); key->user->qnkeys--; key->user->qnbytes -= key->quotalen; spin_unlock(&key->user->lock); } atomic_dec(&key->user->nkeys); atomic_inc(&newowner->nkeys); if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) { atomic_dec(&key->user->nikeys); atomic_inc(&newowner->nikeys); } zapowner = key->user; key->user = newowner; key->uid = uid; } /* change the GID */ if (group != (gid_t) -1) key->gid = gid; ret = 0; error_put: up_write(&key->sem); key_put(key); if (zapowner) key_user_put(zapowner); error: return ret; quota_overrun: spin_unlock(&newowner->lock); zapowner = newowner; ret = -EDQUOT; goto error_put; } /* * Change the permission mask on a key. * * The key must grant the caller Setattr permission for this to work, though * the key need not be fully instantiated yet. If the caller does not have * sysadmin capability, it may only change the permission on keys that it owns. */ long keyctl_setperm_key(key_serial_t id, key_perm_t perm) { struct key *key; key_ref_t key_ref; long ret; ret = -EINVAL; if (perm & ~(KEY_POS_ALL | KEY_USR_ALL | KEY_GRP_ALL | KEY_OTH_ALL)) goto error; key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE | KEY_LOOKUP_PARTIAL, KEY_NEED_SETATTR); if (IS_ERR(key_ref)) { ret = PTR_ERR(key_ref); goto error; } key = key_ref_to_ptr(key_ref); /* make the changes with the locks held to prevent chown/chmod races */ ret = -EACCES; down_write(&key->sem); /* if we're not the sysadmin, we can only change a key that we own */ if (capable(CAP_SYS_ADMIN) || uid_eq(key->uid, current_fsuid())) { key->perm = perm; ret = 0; } up_write(&key->sem); key_put(key); error: return ret; } /* * Get the destination keyring for instantiation and check that the caller has * Write permission on it. */ static long get_instantiation_keyring(key_serial_t ringid, struct request_key_auth *rka, struct key **_dest_keyring) { key_ref_t dkref; *_dest_keyring = NULL; /* just return a NULL pointer if we weren't asked to make a link */ if (ringid == 0) return 0; /* if a specific keyring is nominated by ID, then use that */ if (ringid > 0) { dkref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE); if (IS_ERR(dkref)) return PTR_ERR(dkref); *_dest_keyring = key_ref_to_ptr(dkref); return 0; } if (ringid == KEY_SPEC_REQKEY_AUTH_KEY) return -EINVAL; /* otherwise specify the destination keyring recorded in the * authorisation key (any KEY_SPEC_*_KEYRING) */ if (ringid >= KEY_SPEC_REQUESTOR_KEYRING) { *_dest_keyring = key_get(rka->dest_keyring); return 0; } return -ENOKEY; } /* * Change the request_key authorisation key on the current process. */ static int keyctl_change_reqkey_auth(struct key *key) { struct cred *new; new = prepare_creds(); if (!new) return -ENOMEM; key_put(new->request_key_auth); new->request_key_auth = key_get(key); return commit_creds(new); } /* * Instantiate a key with the specified payload and link the key into the * destination keyring if one is given. * * The caller must have the appropriate instantiation permit set for this to * work (see keyctl_assume_authority). No other permissions are required. * * If successful, 0 will be returned. 
*/ long keyctl_instantiate_key_common(key_serial_t id, struct iov_iter *from, key_serial_t ringid) { const struct cred *cred = current_cred(); struct request_key_auth *rka; struct key *instkey, *dest_keyring; size_t plen = from ? iov_iter_count(from) : 0; void *payload; long ret; kenter("%d,,%zu,%d", id, plen, ringid); if (!plen) from = NULL; ret = -EINVAL; if (plen > 1024 * 1024 - 1) goto error; /* the appropriate instantiation authorisation key must have been * assumed before calling this */ ret = -EPERM; instkey = cred->request_key_auth; if (!instkey) goto error; rka = instkey->payload.data[0]; if (rka->target_key->serial != id) goto error; /* pull the payload in if one was supplied */ payload = NULL; if (from) { ret = -ENOMEM; payload = kmalloc(plen, GFP_KERNEL); if (!payload) { if (plen <= PAGE_SIZE) goto error; payload = vmalloc(plen); if (!payload) goto error; } ret = -EFAULT; if (!copy_from_iter_full(payload, plen, from)) goto error2; } /* find the destination keyring amongst those belonging to the * requesting task */ ret = get_instantiation_keyring(ringid, rka, &dest_keyring); if (ret < 0) goto error2; /* instantiate the key and link it into a keyring */ ret = key_instantiate_and_link(rka->target_key, payload, plen, dest_keyring, instkey); key_put(dest_keyring); /* discard the assumed authority if it's just been disabled by * instantiation of the key */ if (ret == 0) keyctl_change_reqkey_auth(NULL); error2: kvfree(payload); error: return ret; } /* * Instantiate a key with the specified payload and link the key into the * destination keyring if one is given. * * The caller must have the appropriate instantiation permit set for this to * work (see keyctl_assume_authority). No other permissions are required. * * If successful, 0 will be returned. */ long keyctl_instantiate_key(key_serial_t id, const void __user *_payload, size_t plen, key_serial_t ringid) { if (_payload && plen) { struct iovec iov; struct iov_iter from; int ret; ret = import_single_range(WRITE, (void __user *)_payload, plen, &iov, &from); if (unlikely(ret)) return ret; return keyctl_instantiate_key_common(id, &from, ringid); } return keyctl_instantiate_key_common(id, NULL, ringid); } /* * Instantiate a key with the specified multipart payload and link the key into * the destination keyring if one is given. * * The caller must have the appropriate instantiation permit set for this to * work (see keyctl_assume_authority). No other permissions are required. * * If successful, 0 will be returned. */ long keyctl_instantiate_key_iov(key_serial_t id, const struct iovec __user *_payload_iov, unsigned ioc, key_serial_t ringid) { struct iovec iovstack[UIO_FASTIOV], *iov = iovstack; struct iov_iter from; long ret; if (!_payload_iov) ioc = 0; ret = import_iovec(WRITE, _payload_iov, ioc, ARRAY_SIZE(iovstack), &iov, &from); if (ret < 0) return ret; ret = keyctl_instantiate_key_common(id, &from, ringid); kfree(iov); return ret; } /* * Negatively instantiate the key with the given timeout (in seconds) and link * the key into the destination keyring if one is given. * * The caller must have the appropriate instantiation permit set for this to * work (see keyctl_assume_authority). No other permissions are required. * * The key and any links to the key will be automatically garbage collected * after the timeout expires. * * Negative keys are used to rate limit repeated request_key() calls by causing * them to return -ENOKEY until the negative key expires. * * If successful, 0 will be returned. 
*/ long keyctl_negate_key(key_serial_t id, unsigned timeout, key_serial_t ringid) { return keyctl_reject_key(id, timeout, ENOKEY, ringid); } /* * Negatively instantiate the key with the given timeout (in seconds) and error * code and link the key into the destination keyring if one is given. * * The caller must have the appropriate instantiation permit set for this to * work (see keyctl_assume_authority). No other permissions are required. * * The key and any links to the key will be automatically garbage collected * after the timeout expires. * * Negative keys are used to rate limit repeated request_key() calls by causing * them to return the specified error code until the negative key expires. * * If successful, 0 will be returned. */ long keyctl_reject_key(key_serial_t id, unsigned timeout, unsigned error, key_serial_t ringid) { const struct cred *cred = current_cred(); struct request_key_auth *rka; struct key *instkey, *dest_keyring; long ret; kenter("%d,%u,%u,%d", id, timeout, error, ringid); /* must be a valid error code and mustn't be a kernel special */ if (error <= 0 || error >= MAX_ERRNO || error == ERESTARTSYS || error == ERESTARTNOINTR || error == ERESTARTNOHAND || error == ERESTART_RESTARTBLOCK) return -EINVAL; /* the appropriate instantiation authorisation key must have been * assumed before calling this */ ret = -EPERM; instkey = cred->request_key_auth; if (!instkey) goto error; rka = instkey->payload.data[0]; if (rka->target_key->serial != id) goto error; /* find the destination keyring if present (which must also be * writable) */ ret = get_instantiation_keyring(ringid, rka, &dest_keyring); if (ret < 0) goto error; /* instantiate the key and link it into a keyring */ ret = key_reject_and_link(rka->target_key, timeout, error, dest_keyring, instkey); key_put(dest_keyring); /* discard the assumed authority if it's just been disabled by * instantiation of the key */ if (ret == 0) keyctl_change_reqkey_auth(NULL); error: return ret; } /* * Read or set the default keyring in which request_key() will cache keys and * return the old setting. * * If a thread or process keyring is specified then it will be created if it * doesn't yet exist. The old setting will be returned if successful. */ long keyctl_set_reqkey_keyring(int reqkey_defl) { struct cred *new; int ret, old_setting; old_setting = current_cred_xxx(jit_keyring); if (reqkey_defl == KEY_REQKEY_DEFL_NO_CHANGE) return old_setting; new = prepare_creds(); if (!new) return -ENOMEM; switch (reqkey_defl) { case KEY_REQKEY_DEFL_THREAD_KEYRING: ret = install_thread_keyring_to_cred(new); if (ret < 0) goto error; goto set; case KEY_REQKEY_DEFL_PROCESS_KEYRING: ret = install_process_keyring_to_cred(new); if (ret < 0) goto error; goto set; case KEY_REQKEY_DEFL_DEFAULT: case KEY_REQKEY_DEFL_SESSION_KEYRING: case KEY_REQKEY_DEFL_USER_KEYRING: case KEY_REQKEY_DEFL_USER_SESSION_KEYRING: case KEY_REQKEY_DEFL_REQUESTOR_KEYRING: goto set; case KEY_REQKEY_DEFL_NO_CHANGE: case KEY_REQKEY_DEFL_GROUP_KEYRING: default: ret = -EINVAL; goto error; } set: new->jit_keyring = reqkey_defl; commit_creds(new); return old_setting; error: abort_creds(new); return ret; } /* * Set or clear the timeout on a key. * * Either the key must grant the caller Setattr permission or else the caller * must hold an instantiation authorisation token for the key. * * The timeout is either 0 to clear the timeout, or a number of seconds from * the current time. The key and any links to the key will be automatically * garbage collected after the timeout expires. 
* * Keys with KEY_FLAG_KEEP set should not be timed out. * * If successful, 0 is returned. */ long keyctl_set_timeout(key_serial_t id, unsigned timeout) { struct key *key, *instkey; key_ref_t key_ref; long ret; key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE | KEY_LOOKUP_PARTIAL, KEY_NEED_SETATTR); if (IS_ERR(key_ref)) { /* setting the timeout on a key under construction is permitted * if we have the authorisation token handy */ if (PTR_ERR(key_ref) == -EACCES) { instkey = key_get_instantiation_authkey(id); if (!IS_ERR(instkey)) { key_put(instkey); key_ref = lookup_user_key(id, KEY_LOOKUP_PARTIAL, 0); if (!IS_ERR(key_ref)) goto okay; } } ret = PTR_ERR(key_ref); goto error; } okay: key = key_ref_to_ptr(key_ref); ret = 0; if (test_bit(KEY_FLAG_KEEP, &key->flags)) ret = -EPERM; else key_set_timeout(key, timeout); key_put(key); error: return ret; } /* * Assume (or clear) the authority to instantiate the specified key. * * This sets the authoritative token currently in force for key instantiation. * This must be done for a key to be instantiated. It has the effect of making * available all the keys from the caller of the request_key() that created a * key to request_key() calls made by the caller of this function. * * The caller must have the instantiation key in their process keyrings with a * Search permission grant available to the caller. * * If the ID given is 0, then the setting will be cleared and 0 returned. * * If the ID given has a matching an authorisation key, then that key will be * set and its ID will be returned. The authorisation key can be read to get * the callout information passed to request_key(). */ long keyctl_assume_authority(key_serial_t id) { struct key *authkey; long ret; /* special key IDs aren't permitted */ ret = -EINVAL; if (id < 0) goto error; /* we divest ourselves of authority if given an ID of 0 */ if (id == 0) { ret = keyctl_change_reqkey_auth(NULL); goto error; } /* attempt to assume the authority temporarily granted to us whilst we * instantiate the specified key * - the authorisation key must be in the current task's keyrings * somewhere */ authkey = key_get_instantiation_authkey(id); if (IS_ERR(authkey)) { ret = PTR_ERR(authkey); goto error; } ret = keyctl_change_reqkey_auth(authkey); if (ret < 0) goto error; key_put(authkey); ret = authkey->serial; error: return ret; } /* * Get a key's the LSM security label. * * The key must grant the caller View permission for this to work. * * If there's a buffer, then up to buflen bytes of data will be placed into it. * * If successful, the amount of information available will be returned, * irrespective of how much was copied (including the terminal NUL). 
*/ long keyctl_get_security(key_serial_t keyid, char __user *buffer, size_t buflen) { struct key *key, *instkey; key_ref_t key_ref; char *context; long ret; key_ref = lookup_user_key(keyid, KEY_LOOKUP_PARTIAL, KEY_NEED_VIEW); if (IS_ERR(key_ref)) { if (PTR_ERR(key_ref) != -EACCES) return PTR_ERR(key_ref); /* viewing a key under construction is also permitted if we * have the authorisation token handy */ instkey = key_get_instantiation_authkey(keyid); if (IS_ERR(instkey)) return PTR_ERR(instkey); key_put(instkey); key_ref = lookup_user_key(keyid, KEY_LOOKUP_PARTIAL, 0); if (IS_ERR(key_ref)) return PTR_ERR(key_ref); } key = key_ref_to_ptr(key_ref); ret = security_key_getsecurity(key, &context); if (ret == 0) { /* if no information was returned, give userspace an empty * string */ ret = 1; if (buffer && buflen > 0 && copy_to_user(buffer, "", 1) != 0) ret = -EFAULT; } else if (ret > 0) { /* return as much data as there's room for */ if (buffer && buflen > 0) { if (buflen > ret) buflen = ret; if (copy_to_user(buffer, context, buflen) != 0) ret = -EFAULT; } kfree(context); } key_ref_put(key_ref); return ret; } /* * Attempt to install the calling process's session keyring on the process's * parent process. * * The keyring must exist and must grant the caller LINK permission, and the * parent process must be single-threaded and must have the same effective * ownership as this process and mustn't be SUID/SGID. * * The keyring will be emplaced on the parent when it next resumes userspace. * * If successful, 0 will be returned. */ long keyctl_session_to_parent(void) { struct task_struct *me, *parent; const struct cred *mycred, *pcred; struct callback_head *newwork, *oldwork; key_ref_t keyring_r; struct cred *cred; int ret; keyring_r = lookup_user_key(KEY_SPEC_SESSION_KEYRING, 0, KEY_NEED_LINK); if (IS_ERR(keyring_r)) return PTR_ERR(keyring_r); ret = -ENOMEM; /* our parent is going to need a new cred struct, a new tgcred struct * and new security data, so we allocate them here to prevent ENOMEM in * our parent */ cred = cred_alloc_blank(); if (!cred) goto error_keyring; newwork = &cred->rcu; cred->session_keyring = key_ref_to_ptr(keyring_r); keyring_r = NULL; init_task_work(newwork, key_change_session_keyring); me = current; rcu_read_lock(); write_lock_irq(&tasklist_lock); ret = -EPERM; oldwork = NULL; parent = me->real_parent; /* the parent mustn't be init and mustn't be a kernel thread */ if (parent->pid <= 1 || !parent->mm) goto unlock; /* the parent must be single threaded */ if (!thread_group_empty(parent)) goto unlock; /* the parent and the child must have different session keyrings or * there's no point */ mycred = current_cred(); pcred = __task_cred(parent); if (mycred == pcred || mycred->session_keyring == pcred->session_keyring) { ret = 0; goto unlock; } /* the parent must have the same effective ownership and mustn't be * SUID/SGID */ if (!uid_eq(pcred->uid, mycred->euid) || !uid_eq(pcred->euid, mycred->euid) || !uid_eq(pcred->suid, mycred->euid) || !gid_eq(pcred->gid, mycred->egid) || !gid_eq(pcred->egid, mycred->egid) || !gid_eq(pcred->sgid, mycred->egid)) goto unlock; /* the keyrings must have the same UID */ if ((pcred->session_keyring && !uid_eq(pcred->session_keyring->uid, mycred->euid)) || !uid_eq(mycred->session_keyring->uid, mycred->euid)) goto unlock; /* cancel an already pending keyring replacement */ oldwork = task_work_cancel(parent, key_change_session_keyring); /* the replacement session keyring is applied just prior to userspace * restarting */ ret = task_work_add(parent, 
newwork, true); if (!ret) newwork = NULL; unlock: write_unlock_irq(&tasklist_lock); rcu_read_unlock(); if (oldwork) put_cred(container_of(oldwork, struct cred, rcu)); if (newwork) put_cred(cred); return ret; error_keyring: key_ref_put(keyring_r); return ret; } /* * The key control system call */ SYSCALL_DEFINE5(keyctl, int, option, unsigned long, arg2, unsigned long, arg3, unsigned long, arg4, unsigned long, arg5) { switch (option) { case KEYCTL_GET_KEYRING_ID: return keyctl_get_keyring_ID((key_serial_t) arg2, (int) arg3); case KEYCTL_JOIN_SESSION_KEYRING: return keyctl_join_session_keyring((const char __user *) arg2); case KEYCTL_UPDATE: return keyctl_update_key((key_serial_t) arg2, (const void __user *) arg3, (size_t) arg4); case KEYCTL_REVOKE: return keyctl_revoke_key((key_serial_t) arg2); case KEYCTL_DESCRIBE: return keyctl_describe_key((key_serial_t) arg2, (char __user *) arg3, (unsigned) arg4); case KEYCTL_CLEAR: return keyctl_keyring_clear((key_serial_t) arg2); case KEYCTL_LINK: return keyctl_keyring_link((key_serial_t) arg2, (key_serial_t) arg3); case KEYCTL_UNLINK: return keyctl_keyring_unlink((key_serial_t) arg2, (key_serial_t) arg3); case KEYCTL_SEARCH: return keyctl_keyring_search((key_serial_t) arg2, (const char __user *) arg3, (const char __user *) arg4, (key_serial_t) arg5); case KEYCTL_READ: return keyctl_read_key((key_serial_t) arg2, (char __user *) arg3, (size_t) arg4); case KEYCTL_CHOWN: return keyctl_chown_key((key_serial_t) arg2, (uid_t) arg3, (gid_t) arg4); case KEYCTL_SETPERM: return keyctl_setperm_key((key_serial_t) arg2, (key_perm_t) arg3); case KEYCTL_INSTANTIATE: return keyctl_instantiate_key((key_serial_t) arg2, (const void __user *) arg3, (size_t) arg4, (key_serial_t) arg5); case KEYCTL_NEGATE: return keyctl_negate_key((key_serial_t) arg2, (unsigned) arg3, (key_serial_t) arg4); case KEYCTL_SET_REQKEY_KEYRING: return keyctl_set_reqkey_keyring(arg2); case KEYCTL_SET_TIMEOUT: return keyctl_set_timeout((key_serial_t) arg2, (unsigned) arg3); case KEYCTL_ASSUME_AUTHORITY: return keyctl_assume_authority((key_serial_t) arg2); case KEYCTL_GET_SECURITY: return keyctl_get_security((key_serial_t) arg2, (char __user *) arg3, (size_t) arg4); case KEYCTL_SESSION_TO_PARENT: return keyctl_session_to_parent(); case KEYCTL_REJECT: return keyctl_reject_key((key_serial_t) arg2, (unsigned) arg3, (unsigned) arg4, (key_serial_t) arg5); case KEYCTL_INSTANTIATE_IOV: return keyctl_instantiate_key_iov( (key_serial_t) arg2, (const struct iovec __user *) arg3, (unsigned) arg4, (key_serial_t) arg5); case KEYCTL_INVALIDATE: return keyctl_invalidate_key((key_serial_t) arg2); case KEYCTL_GET_PERSISTENT: return keyctl_get_persistent((uid_t)arg2, (key_serial_t)arg3); case KEYCTL_DH_COMPUTE: return keyctl_dh_compute((struct keyctl_dh_params __user *) arg2, (char __user *) arg3, (size_t) arg4, (void __user *) arg5); default: return -EOPNOTSUPP; } }
./CrossVul/dataset_final_sorted/CWE-404/c/good_3267_0
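The keyctl.c excerpt above is the kernel side of key instantiation: a request-key helper assumes the instantiation authority for the key under construction, hands in a payload, and the kernel links the new key into the requestor's keyring and drops the assumed authority. The following is a minimal userspace sketch of that flow, assuming the libkeyutils wrappers keyctl_assume_authority(), keyctl_instantiate() and keyctl_negate() from <keyutils.h>; the key serial would normally be handed to the helper by /sbin/request-key, and the hard-coded payload is purely illustrative.

/* Hedged sketch, not part of the dataset file above.
 * Build with something like: cc helper.c -lkeyutils (toolchain-dependent). */
#include <stdio.h>
#include <stdlib.h>
#include <keyutils.h>

int main(int argc, char *argv[])
{
	key_serial_t key;
	const char payload[] = "example-secret";	/* placeholder payload */

	if (argc < 2) {
		fprintf(stderr, "usage: %s <key-serial>\n", argv[0]);
		return 1;
	}
	key = (key_serial_t)strtol(argv[1], NULL, 10);

	/* Assume the instantiation authority granted by request_key();
	 * this is the userspace counterpart of keyctl_assume_authority()
	 * in the excerpt above. */
	if (keyctl_assume_authority(key) < 0) {
		perror("keyctl_assume_authority");
		return 1;
	}

	/* Supply the payload; in the kernel this lands in
	 * keyctl_instantiate_key() and then keyctl_instantiate_key_common(). */
	if (keyctl_instantiate(key, payload, sizeof(payload) - 1,
			       KEY_SPEC_REQUESTOR_KEYRING) < 0) {
		perror("keyctl_instantiate");
		/* A real helper would typically negate the key instead, so
		 * that repeated request_key() calls are rate limited. */
		keyctl_negate(key, 30, KEY_SPEC_REQUESTOR_KEYRING);
		return 1;
	}
	return 0;
}

On the success path the excerpt shows the kernel calling key_instantiate_and_link() against the destination keyring returned by get_instantiation_keyring() and then discarding the assumed authority with keyctl_change_reqkey_auth(NULL).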
crossvul-cpp_data_bad_3351_3
/* * XDR support for nfsd/protocol version 3. * * Copyright (C) 1995, 1996, 1997 Olaf Kirch <okir@monad.swb.de> * * 2003-08-09 Jamie Lokier: Use htonl() for nanoseconds, not htons()! */ #include <linux/namei.h> #include <linux/sunrpc/svc_xprt.h> #include "xdr3.h" #include "auth.h" #include "netns.h" #include "vfs.h" #define NFSDDBG_FACILITY NFSDDBG_XDR /* * Mapping of S_IF* types to NFS file types */ static u32 nfs3_ftypes[] = { NF3NON, NF3FIFO, NF3CHR, NF3BAD, NF3DIR, NF3BAD, NF3BLK, NF3BAD, NF3REG, NF3BAD, NF3LNK, NF3BAD, NF3SOCK, NF3BAD, NF3LNK, NF3BAD, }; /* * XDR functions for basic NFS types */ static __be32 * encode_time3(__be32 *p, struct timespec *time) { *p++ = htonl((u32) time->tv_sec); *p++ = htonl(time->tv_nsec); return p; } static __be32 * decode_time3(__be32 *p, struct timespec *time) { time->tv_sec = ntohl(*p++); time->tv_nsec = ntohl(*p++); return p; } static __be32 * decode_fh(__be32 *p, struct svc_fh *fhp) { unsigned int size; fh_init(fhp, NFS3_FHSIZE); size = ntohl(*p++); if (size > NFS3_FHSIZE) return NULL; memcpy(&fhp->fh_handle.fh_base, p, size); fhp->fh_handle.fh_size = size; return p + XDR_QUADLEN(size); } /* Helper function for NFSv3 ACL code */ __be32 *nfs3svc_decode_fh(__be32 *p, struct svc_fh *fhp) { return decode_fh(p, fhp); } static __be32 * encode_fh(__be32 *p, struct svc_fh *fhp) { unsigned int size = fhp->fh_handle.fh_size; *p++ = htonl(size); if (size) p[XDR_QUADLEN(size)-1]=0; memcpy(p, &fhp->fh_handle.fh_base, size); return p + XDR_QUADLEN(size); } /* * Decode a file name and make sure that the path contains * no slashes or null bytes. */ static __be32 * decode_filename(__be32 *p, char **namp, unsigned int *lenp) { char *name; unsigned int i; if ((p = xdr_decode_string_inplace(p, namp, lenp, NFS3_MAXNAMLEN)) != NULL) { for (i = 0, name = *namp; i < *lenp; i++, name++) { if (*name == '\0' || *name == '/') return NULL; } } return p; } static __be32 * decode_sattr3(__be32 *p, struct iattr *iap) { u32 tmp; iap->ia_valid = 0; if (*p++) { iap->ia_valid |= ATTR_MODE; iap->ia_mode = ntohl(*p++); } if (*p++) { iap->ia_uid = make_kuid(&init_user_ns, ntohl(*p++)); if (uid_valid(iap->ia_uid)) iap->ia_valid |= ATTR_UID; } if (*p++) { iap->ia_gid = make_kgid(&init_user_ns, ntohl(*p++)); if (gid_valid(iap->ia_gid)) iap->ia_valid |= ATTR_GID; } if (*p++) { u64 newsize; iap->ia_valid |= ATTR_SIZE; p = xdr_decode_hyper(p, &newsize); iap->ia_size = min_t(u64, newsize, NFS_OFFSET_MAX); } if ((tmp = ntohl(*p++)) == 1) { /* set to server time */ iap->ia_valid |= ATTR_ATIME; } else if (tmp == 2) { /* set to client time */ iap->ia_valid |= ATTR_ATIME | ATTR_ATIME_SET; iap->ia_atime.tv_sec = ntohl(*p++); iap->ia_atime.tv_nsec = ntohl(*p++); } if ((tmp = ntohl(*p++)) == 1) { /* set to server time */ iap->ia_valid |= ATTR_MTIME; } else if (tmp == 2) { /* set to client time */ iap->ia_valid |= ATTR_MTIME | ATTR_MTIME_SET; iap->ia_mtime.tv_sec = ntohl(*p++); iap->ia_mtime.tv_nsec = ntohl(*p++); } return p; } static __be32 *encode_fsid(__be32 *p, struct svc_fh *fhp) { u64 f; switch(fsid_source(fhp)) { default: case FSIDSOURCE_DEV: p = xdr_encode_hyper(p, (u64)huge_encode_dev (fhp->fh_dentry->d_sb->s_dev)); break; case FSIDSOURCE_FSID: p = xdr_encode_hyper(p, (u64) fhp->fh_export->ex_fsid); break; case FSIDSOURCE_UUID: f = ((u64*)fhp->fh_export->ex_uuid)[0]; f ^= ((u64*)fhp->fh_export->ex_uuid)[1]; p = xdr_encode_hyper(p, f); break; } return p; } static __be32 * encode_fattr3(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp, struct kstat *stat) { *p++ = 
htonl(nfs3_ftypes[(stat->mode & S_IFMT) >> 12]); *p++ = htonl((u32) (stat->mode & S_IALLUGO)); *p++ = htonl((u32) stat->nlink); *p++ = htonl((u32) from_kuid(&init_user_ns, stat->uid)); *p++ = htonl((u32) from_kgid(&init_user_ns, stat->gid)); if (S_ISLNK(stat->mode) && stat->size > NFS3_MAXPATHLEN) { p = xdr_encode_hyper(p, (u64) NFS3_MAXPATHLEN); } else { p = xdr_encode_hyper(p, (u64) stat->size); } p = xdr_encode_hyper(p, ((u64)stat->blocks) << 9); *p++ = htonl((u32) MAJOR(stat->rdev)); *p++ = htonl((u32) MINOR(stat->rdev)); p = encode_fsid(p, fhp); p = xdr_encode_hyper(p, stat->ino); p = encode_time3(p, &stat->atime); p = encode_time3(p, &stat->mtime); p = encode_time3(p, &stat->ctime); return p; } static __be32 * encode_saved_post_attr(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp) { /* Attributes to follow */ *p++ = xdr_one; return encode_fattr3(rqstp, p, fhp, &fhp->fh_post_attr); } /* * Encode post-operation attributes. * The inode may be NULL if the call failed because of a stale file * handle. In this case, no attributes are returned. */ static __be32 * encode_post_op_attr(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp) { struct dentry *dentry = fhp->fh_dentry; if (dentry && d_really_is_positive(dentry)) { __be32 err; struct kstat stat; err = fh_getattr(fhp, &stat); if (!err) { *p++ = xdr_one; /* attributes follow */ lease_get_mtime(d_inode(dentry), &stat.mtime); return encode_fattr3(rqstp, p, fhp, &stat); } } *p++ = xdr_zero; return p; } /* Helper for NFSv3 ACLs */ __be32 * nfs3svc_encode_post_op_attr(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp) { return encode_post_op_attr(rqstp, p, fhp); } /* * Enocde weak cache consistency data */ static __be32 * encode_wcc_data(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp) { struct dentry *dentry = fhp->fh_dentry; if (dentry && d_really_is_positive(dentry) && fhp->fh_post_saved) { if (fhp->fh_pre_saved) { *p++ = xdr_one; p = xdr_encode_hyper(p, (u64) fhp->fh_pre_size); p = encode_time3(p, &fhp->fh_pre_mtime); p = encode_time3(p, &fhp->fh_pre_ctime); } else { *p++ = xdr_zero; } return encode_saved_post_attr(rqstp, p, fhp); } /* no pre- or post-attrs */ *p++ = xdr_zero; return encode_post_op_attr(rqstp, p, fhp); } /* * Fill in the post_op attr for the wcc data */ void fill_post_wcc(struct svc_fh *fhp) { __be32 err; if (fhp->fh_post_saved) printk("nfsd: inode locked twice during operation.\n"); err = fh_getattr(fhp, &fhp->fh_post_attr); fhp->fh_post_change = d_inode(fhp->fh_dentry)->i_version; if (err) { fhp->fh_post_saved = false; /* Grab the ctime anyway - set_change_info might use it */ fhp->fh_post_attr.ctime = d_inode(fhp->fh_dentry)->i_ctime; } else fhp->fh_post_saved = true; } /* * XDR decode functions */ int nfs3svc_decode_fhandle(struct svc_rqst *rqstp, __be32 *p, struct nfsd_fhandle *args) { p = decode_fh(p, &args->fh); if (!p) return 0; return xdr_argsize_check(rqstp, p); } int nfs3svc_decode_sattrargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_sattrargs *args) { p = decode_fh(p, &args->fh); if (!p) return 0; p = decode_sattr3(p, &args->attrs); if ((args->check_guard = ntohl(*p++)) != 0) { struct timespec time; p = decode_time3(p, &time); args->guardtime = time.tv_sec; } return xdr_argsize_check(rqstp, p); } int nfs3svc_decode_diropargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_diropargs *args) { if (!(p = decode_fh(p, &args->fh)) || !(p = decode_filename(p, &args->name, &args->len))) return 0; return xdr_argsize_check(rqstp, p); } int nfs3svc_decode_accessargs(struct svc_rqst *rqstp, __be32 
*p, struct nfsd3_accessargs *args) { p = decode_fh(p, &args->fh); if (!p) return 0; args->access = ntohl(*p++); return xdr_argsize_check(rqstp, p); } int nfs3svc_decode_readargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_readargs *args) { unsigned int len; int v; u32 max_blocksize = svc_max_payload(rqstp); p = decode_fh(p, &args->fh); if (!p) return 0; p = xdr_decode_hyper(p, &args->offset); args->count = ntohl(*p++); len = min(args->count, max_blocksize); /* set up the kvec */ v=0; while (len > 0) { struct page *p = *(rqstp->rq_next_page++); rqstp->rq_vec[v].iov_base = page_address(p); rqstp->rq_vec[v].iov_len = min_t(unsigned int, len, PAGE_SIZE); len -= rqstp->rq_vec[v].iov_len; v++; } args->vlen = v; return xdr_argsize_check(rqstp, p); } int nfs3svc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_writeargs *args) { unsigned int len, v, hdr, dlen; u32 max_blocksize = svc_max_payload(rqstp); struct kvec *head = rqstp->rq_arg.head; struct kvec *tail = rqstp->rq_arg.tail; p = decode_fh(p, &args->fh); if (!p) return 0; p = xdr_decode_hyper(p, &args->offset); args->count = ntohl(*p++); args->stable = ntohl(*p++); len = args->len = ntohl(*p++); if ((void *)p > head->iov_base + head->iov_len) return 0; /* * The count must equal the amount of data passed. */ if (args->count != args->len) return 0; /* * Check to make sure that we got the right number of * bytes. */ hdr = (void*)p - head->iov_base; dlen = head->iov_len + rqstp->rq_arg.page_len + tail->iov_len - hdr; /* * Round the length of the data which was specified up to * the next multiple of XDR units and then compare that * against the length which was actually received. * Note that when RPCSEC/GSS (for example) is used, the * data buffer can be padded so dlen might be larger * than required. It must never be smaller. */ if (dlen < XDR_QUADLEN(len)*4) return 0; if (args->count > max_blocksize) { args->count = max_blocksize; len = args->len = max_blocksize; } rqstp->rq_vec[0].iov_base = (void*)p; rqstp->rq_vec[0].iov_len = head->iov_len - hdr; v = 0; while (len > rqstp->rq_vec[v].iov_len) { len -= rqstp->rq_vec[v].iov_len; v++; rqstp->rq_vec[v].iov_base = page_address(rqstp->rq_pages[v]); rqstp->rq_vec[v].iov_len = PAGE_SIZE; } rqstp->rq_vec[v].iov_len = len; args->vlen = v + 1; return 1; } int nfs3svc_decode_createargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_createargs *args) { if (!(p = decode_fh(p, &args->fh)) || !(p = decode_filename(p, &args->name, &args->len))) return 0; switch (args->createmode = ntohl(*p++)) { case NFS3_CREATE_UNCHECKED: case NFS3_CREATE_GUARDED: p = decode_sattr3(p, &args->attrs); break; case NFS3_CREATE_EXCLUSIVE: args->verf = p; p += 2; break; default: return 0; } return xdr_argsize_check(rqstp, p); } int nfs3svc_decode_mkdirargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_createargs *args) { if (!(p = decode_fh(p, &args->fh)) || !(p = decode_filename(p, &args->name, &args->len))) return 0; p = decode_sattr3(p, &args->attrs); return xdr_argsize_check(rqstp, p); } int nfs3svc_decode_symlinkargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_symlinkargs *args) { unsigned int len, avail; char *old, *new; struct kvec *vec; if (!(p = decode_fh(p, &args->ffh)) || !(p = decode_filename(p, &args->fname, &args->flen)) ) return 0; p = decode_sattr3(p, &args->attrs); /* now decode the pathname, which might be larger than the first page. 
* As we have to check for nul's anyway, we copy it into a new page * This page appears in the rq_res.pages list, but as pages_len is always * 0, it won't get in the way */ len = ntohl(*p++); if (len == 0 || len > NFS3_MAXPATHLEN || len >= PAGE_SIZE) return 0; args->tname = new = page_address(*(rqstp->rq_next_page++)); args->tlen = len; /* first copy and check from the first page */ old = (char*)p; vec = &rqstp->rq_arg.head[0]; if ((void *)old > vec->iov_base + vec->iov_len) return 0; avail = vec->iov_len - (old - (char*)vec->iov_base); while (len && avail && *old) { *new++ = *old++; len--; avail--; } /* now copy next page if there is one */ if (len && !avail && rqstp->rq_arg.page_len) { avail = min_t(unsigned int, rqstp->rq_arg.page_len, PAGE_SIZE); old = page_address(rqstp->rq_arg.pages[0]); } while (len && avail && *old) { *new++ = *old++; len--; avail--; } *new = '\0'; if (len) return 0; return 1; } int nfs3svc_decode_mknodargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_mknodargs *args) { if (!(p = decode_fh(p, &args->fh)) || !(p = decode_filename(p, &args->name, &args->len))) return 0; args->ftype = ntohl(*p++); if (args->ftype == NF3BLK || args->ftype == NF3CHR || args->ftype == NF3SOCK || args->ftype == NF3FIFO) p = decode_sattr3(p, &args->attrs); if (args->ftype == NF3BLK || args->ftype == NF3CHR) { args->major = ntohl(*p++); args->minor = ntohl(*p++); } return xdr_argsize_check(rqstp, p); } int nfs3svc_decode_renameargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_renameargs *args) { if (!(p = decode_fh(p, &args->ffh)) || !(p = decode_filename(p, &args->fname, &args->flen)) || !(p = decode_fh(p, &args->tfh)) || !(p = decode_filename(p, &args->tname, &args->tlen))) return 0; return xdr_argsize_check(rqstp, p); } int nfs3svc_decode_readlinkargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_readlinkargs *args) { p = decode_fh(p, &args->fh); if (!p) return 0; args->buffer = page_address(*(rqstp->rq_next_page++)); return xdr_argsize_check(rqstp, p); } int nfs3svc_decode_linkargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_linkargs *args) { if (!(p = decode_fh(p, &args->ffh)) || !(p = decode_fh(p, &args->tfh)) || !(p = decode_filename(p, &args->tname, &args->tlen))) return 0; return xdr_argsize_check(rqstp, p); } int nfs3svc_decode_readdirargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_readdirargs *args) { p = decode_fh(p, &args->fh); if (!p) return 0; p = xdr_decode_hyper(p, &args->cookie); args->verf = p; p += 2; args->dircount = ~0; args->count = ntohl(*p++); args->count = min_t(u32, args->count, PAGE_SIZE); args->buffer = page_address(*(rqstp->rq_next_page++)); return xdr_argsize_check(rqstp, p); } int nfs3svc_decode_readdirplusargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_readdirargs *args) { int len; u32 max_blocksize = svc_max_payload(rqstp); p = decode_fh(p, &args->fh); if (!p) return 0; p = xdr_decode_hyper(p, &args->cookie); args->verf = p; p += 2; args->dircount = ntohl(*p++); args->count = ntohl(*p++); len = args->count = min(args->count, max_blocksize); while (len > 0) { struct page *p = *(rqstp->rq_next_page++); if (!args->buffer) args->buffer = page_address(p); len -= PAGE_SIZE; } return xdr_argsize_check(rqstp, p); } int nfs3svc_decode_commitargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_commitargs *args) { p = decode_fh(p, &args->fh); if (!p) return 0; p = xdr_decode_hyper(p, &args->offset); args->count = ntohl(*p++); return xdr_argsize_check(rqstp, p); } /* * XDR encode functions */ /* * There must be an encoding function for void results 
so svc_process * will work properly. */ int nfs3svc_encode_voidres(struct svc_rqst *rqstp, __be32 *p, void *dummy) { return xdr_ressize_check(rqstp, p); } /* GETATTR */ int nfs3svc_encode_attrstat(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_attrstat *resp) { if (resp->status == 0) { lease_get_mtime(d_inode(resp->fh.fh_dentry), &resp->stat.mtime); p = encode_fattr3(rqstp, p, &resp->fh, &resp->stat); } return xdr_ressize_check(rqstp, p); } /* SETATTR, REMOVE, RMDIR */ int nfs3svc_encode_wccstat(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_attrstat *resp) { p = encode_wcc_data(rqstp, p, &resp->fh); return xdr_ressize_check(rqstp, p); } /* LOOKUP */ int nfs3svc_encode_diropres(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_diropres *resp) { if (resp->status == 0) { p = encode_fh(p, &resp->fh); p = encode_post_op_attr(rqstp, p, &resp->fh); } p = encode_post_op_attr(rqstp, p, &resp->dirfh); return xdr_ressize_check(rqstp, p); } /* ACCESS */ int nfs3svc_encode_accessres(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_accessres *resp) { p = encode_post_op_attr(rqstp, p, &resp->fh); if (resp->status == 0) *p++ = htonl(resp->access); return xdr_ressize_check(rqstp, p); } /* READLINK */ int nfs3svc_encode_readlinkres(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_readlinkres *resp) { p = encode_post_op_attr(rqstp, p, &resp->fh); if (resp->status == 0) { *p++ = htonl(resp->len); xdr_ressize_check(rqstp, p); rqstp->rq_res.page_len = resp->len; if (resp->len & 3) { /* need to pad the tail */ rqstp->rq_res.tail[0].iov_base = p; *p = 0; rqstp->rq_res.tail[0].iov_len = 4 - (resp->len&3); } return 1; } else return xdr_ressize_check(rqstp, p); } /* READ */ int nfs3svc_encode_readres(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_readres *resp) { p = encode_post_op_attr(rqstp, p, &resp->fh); if (resp->status == 0) { *p++ = htonl(resp->count); *p++ = htonl(resp->eof); *p++ = htonl(resp->count); /* xdr opaque count */ xdr_ressize_check(rqstp, p); /* now update rqstp->rq_res to reflect data as well */ rqstp->rq_res.page_len = resp->count; if (resp->count & 3) { /* need to pad the tail */ rqstp->rq_res.tail[0].iov_base = p; *p = 0; rqstp->rq_res.tail[0].iov_len = 4 - (resp->count & 3); } return 1; } else return xdr_ressize_check(rqstp, p); } /* WRITE */ int nfs3svc_encode_writeres(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_writeres *resp) { struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); p = encode_wcc_data(rqstp, p, &resp->fh); if (resp->status == 0) { *p++ = htonl(resp->count); *p++ = htonl(resp->committed); *p++ = htonl(nn->nfssvc_boot.tv_sec); *p++ = htonl(nn->nfssvc_boot.tv_usec); } return xdr_ressize_check(rqstp, p); } /* CREATE, MKDIR, SYMLINK, MKNOD */ int nfs3svc_encode_createres(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_diropres *resp) { if (resp->status == 0) { *p++ = xdr_one; p = encode_fh(p, &resp->fh); p = encode_post_op_attr(rqstp, p, &resp->fh); } p = encode_wcc_data(rqstp, p, &resp->dirfh); return xdr_ressize_check(rqstp, p); } /* RENAME */ int nfs3svc_encode_renameres(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_renameres *resp) { p = encode_wcc_data(rqstp, p, &resp->ffh); p = encode_wcc_data(rqstp, p, &resp->tfh); return xdr_ressize_check(rqstp, p); } /* LINK */ int nfs3svc_encode_linkres(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_linkres *resp) { p = encode_post_op_attr(rqstp, p, &resp->fh); p = encode_wcc_data(rqstp, p, &resp->tfh); return xdr_ressize_check(rqstp, p); } /* READDIR */ int nfs3svc_encode_readdirres(struct svc_rqst *rqstp, __be32 *p, 
struct nfsd3_readdirres *resp) { p = encode_post_op_attr(rqstp, p, &resp->fh); if (resp->status == 0) { /* stupid readdir cookie */ memcpy(p, resp->verf, 8); p += 2; xdr_ressize_check(rqstp, p); if (rqstp->rq_res.head[0].iov_len + (2<<2) > PAGE_SIZE) return 1; /*No room for trailer */ rqstp->rq_res.page_len = (resp->count) << 2; /* add the 'tail' to the end of the 'head' page - page 0. */ rqstp->rq_res.tail[0].iov_base = p; *p++ = 0; /* no more entries */ *p++ = htonl(resp->common.err == nfserr_eof); rqstp->rq_res.tail[0].iov_len = 2<<2; return 1; } else return xdr_ressize_check(rqstp, p); } static __be32 * encode_entry_baggage(struct nfsd3_readdirres *cd, __be32 *p, const char *name, int namlen, u64 ino) { *p++ = xdr_one; /* mark entry present */ p = xdr_encode_hyper(p, ino); /* file id */ p = xdr_encode_array(p, name, namlen);/* name length & name */ cd->offset = p; /* remember pointer */ p = xdr_encode_hyper(p, NFS_OFFSET_MAX);/* offset of next entry */ return p; } static __be32 compose_entry_fh(struct nfsd3_readdirres *cd, struct svc_fh *fhp, const char *name, int namlen, u64 ino) { struct svc_export *exp; struct dentry *dparent, *dchild; __be32 rv = nfserr_noent; dparent = cd->fh.fh_dentry; exp = cd->fh.fh_export; if (isdotent(name, namlen)) { if (namlen == 2) { dchild = dget_parent(dparent); /* filesystem root - cannot return filehandle for ".." */ if (dchild == dparent) goto out; } else dchild = dget(dparent); } else dchild = lookup_one_len_unlocked(name, dparent, namlen); if (IS_ERR(dchild)) return rv; if (d_mountpoint(dchild)) goto out; if (d_really_is_negative(dchild)) goto out; if (dchild->d_inode->i_ino != ino) goto out; rv = fh_compose(fhp, exp, dchild, &cd->fh); out: dput(dchild); return rv; } static __be32 *encode_entryplus_baggage(struct nfsd3_readdirres *cd, __be32 *p, const char *name, int namlen, u64 ino) { struct svc_fh *fh = &cd->scratch; __be32 err; fh_init(fh, NFS3_FHSIZE); err = compose_entry_fh(cd, fh, name, namlen, ino); if (err) { *p++ = 0; *p++ = 0; goto out; } p = encode_post_op_attr(cd->rqstp, p, fh); *p++ = xdr_one; /* yes, a file handle follows */ p = encode_fh(p, fh); out: fh_put(fh); return p; } /* * Encode a directory entry. This one works for both normal readdir * and readdirplus. * The normal readdir reply requires 2 (fileid) + 1 (stringlen) * + string + 2 (cookie) + 1 (next) words, i.e. 6 + strlen. * * The readdirplus baggage is 1+21 words for post_op_attr, plus the * file handle. */ #define NFS3_ENTRY_BAGGAGE (2 + 1 + 2 + 1) #define NFS3_ENTRYPLUS_BAGGAGE (1 + 21 + 1 + (NFS3_FHSIZE >> 2)) static int encode_entry(struct readdir_cd *ccd, const char *name, int namlen, loff_t offset, u64 ino, unsigned int d_type, int plus) { struct nfsd3_readdirres *cd = container_of(ccd, struct nfsd3_readdirres, common); __be32 *p = cd->buffer; caddr_t curr_page_addr = NULL; struct page ** page; int slen; /* string (name) length */ int elen; /* estimated entry length in words */ int num_entry_words = 0; /* actual number of words */ if (cd->offset) { u64 offset64 = offset; if (unlikely(cd->offset1)) { /* we ended up with offset on a page boundary */ *cd->offset = htonl(offset64 >> 32); *cd->offset1 = htonl(offset64 & 0xffffffff); cd->offset1 = NULL; } else { xdr_encode_hyper(cd->offset, offset64); } } /* dprintk("encode_entry(%.*s @%ld%s)\n", namlen, name, (long) offset, plus? " plus" : ""); */ /* truncate filename if too long */ namlen = min(namlen, NFS3_MAXNAMLEN); slen = XDR_QUADLEN(namlen); elen = slen + NFS3_ENTRY_BAGGAGE + (plus? 
NFS3_ENTRYPLUS_BAGGAGE : 0); if (cd->buflen < elen) { cd->common.err = nfserr_toosmall; return -EINVAL; } /* determine which page in rq_respages[] we are currently filling */ for (page = cd->rqstp->rq_respages + 1; page < cd->rqstp->rq_next_page; page++) { curr_page_addr = page_address(*page); if (((caddr_t)cd->buffer >= curr_page_addr) && ((caddr_t)cd->buffer < curr_page_addr + PAGE_SIZE)) break; } if ((caddr_t)(cd->buffer + elen) < (curr_page_addr + PAGE_SIZE)) { /* encode entry in current page */ p = encode_entry_baggage(cd, p, name, namlen, ino); if (plus) p = encode_entryplus_baggage(cd, p, name, namlen, ino); num_entry_words = p - cd->buffer; } else if (*(page+1) != NULL) { /* temporarily encode entry into next page, then move back to * current and next page in rq_respages[] */ __be32 *p1, *tmp; int len1, len2; /* grab next page for temporary storage of entry */ p1 = tmp = page_address(*(page+1)); p1 = encode_entry_baggage(cd, p1, name, namlen, ino); if (plus) p1 = encode_entryplus_baggage(cd, p1, name, namlen, ino); /* determine entry word length and lengths to go in pages */ num_entry_words = p1 - tmp; len1 = curr_page_addr + PAGE_SIZE - (caddr_t)cd->buffer; if ((num_entry_words << 2) < len1) { /* the actual number of words in the entry is less * than elen and can still fit in the current page */ memmove(p, tmp, num_entry_words << 2); p += num_entry_words; /* update offset */ cd->offset = cd->buffer + (cd->offset - tmp); } else { unsigned int offset_r = (cd->offset - tmp) << 2; /* update pointer to offset location. * This is a 64bit quantity, so we need to * deal with 3 cases: * - entirely in first page * - entirely in second page * - 4 bytes in each page */ if (offset_r + 8 <= len1) { cd->offset = p + (cd->offset - tmp); } else if (offset_r >= len1) { cd->offset -= len1 >> 2; } else { /* sitting on the fence */ BUG_ON(offset_r != len1 - 4); cd->offset = p + (cd->offset - tmp); cd->offset1 = tmp; } len2 = (num_entry_words << 2) - len1; /* move from temp page to current and next pages */ memmove(p, tmp, len1); memmove(tmp, (caddr_t)tmp+len1, len2); p = tmp + (len2 >> 2); } } else { cd->common.err = nfserr_toosmall; return -EINVAL; } cd->buflen -= num_entry_words; cd->buffer = p; cd->common.err = nfs_ok; return 0; } int nfs3svc_encode_entry(void *cd, const char *name, int namlen, loff_t offset, u64 ino, unsigned int d_type) { return encode_entry(cd, name, namlen, offset, ino, d_type, 0); } int nfs3svc_encode_entry_plus(void *cd, const char *name, int namlen, loff_t offset, u64 ino, unsigned int d_type) { return encode_entry(cd, name, namlen, offset, ino, d_type, 1); } /* FSSTAT */ int nfs3svc_encode_fsstatres(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_fsstatres *resp) { struct kstatfs *s = &resp->stats; u64 bs = s->f_bsize; *p++ = xdr_zero; /* no post_op_attr */ if (resp->status == 0) { p = xdr_encode_hyper(p, bs * s->f_blocks); /* total bytes */ p = xdr_encode_hyper(p, bs * s->f_bfree); /* free bytes */ p = xdr_encode_hyper(p, bs * s->f_bavail); /* user available bytes */ p = xdr_encode_hyper(p, s->f_files); /* total inodes */ p = xdr_encode_hyper(p, s->f_ffree); /* free inodes */ p = xdr_encode_hyper(p, s->f_ffree); /* user available inodes */ *p++ = htonl(resp->invarsec); /* mean unchanged time */ } return xdr_ressize_check(rqstp, p); } /* FSINFO */ int nfs3svc_encode_fsinfores(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_fsinfores *resp) { *p++ = xdr_zero; /* no post_op_attr */ if (resp->status == 0) { *p++ = htonl(resp->f_rtmax); *p++ = htonl(resp->f_rtpref); *p++ = 
htonl(resp->f_rtmult); *p++ = htonl(resp->f_wtmax); *p++ = htonl(resp->f_wtpref); *p++ = htonl(resp->f_wtmult); *p++ = htonl(resp->f_dtpref); p = xdr_encode_hyper(p, resp->f_maxfilesize); *p++ = xdr_one; *p++ = xdr_zero; *p++ = htonl(resp->f_properties); } return xdr_ressize_check(rqstp, p); } /* PATHCONF */ int nfs3svc_encode_pathconfres(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_pathconfres *resp) { *p++ = xdr_zero; /* no post_op_attr */ if (resp->status == 0) { *p++ = htonl(resp->p_link_max); *p++ = htonl(resp->p_name_max); *p++ = htonl(resp->p_no_trunc); *p++ = htonl(resp->p_chown_restricted); *p++ = htonl(resp->p_case_insensitive); *p++ = htonl(resp->p_case_preserving); } return xdr_ressize_check(rqstp, p); } /* COMMIT */ int nfs3svc_encode_commitres(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_commitres *resp) { struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); p = encode_wcc_data(rqstp, p, &resp->fh); /* Write verifier */ if (resp->status == 0) { *p++ = htonl(nn->nfssvc_boot.tv_sec); *p++ = htonl(nn->nfssvc_boot.tv_usec); } return xdr_ressize_check(rqstp, p); } /* * XDR release functions */ int nfs3svc_release_fhandle(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_attrstat *resp) { fh_put(&resp->fh); return 1; } int nfs3svc_release_fhandle2(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_fhandle_pair *resp) { fh_put(&resp->fh1); fh_put(&resp->fh2); return 1; }
./CrossVul/dataset_final_sorted/CWE-404/c/bad_3351_3
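The nfs3xdr.c excerpt above leans on two XDR conventions throughout: a 64-bit "hyper" is carried as two network-order 32-bit words (xdr_encode_hyper()/xdr_decode_hyper()), and variable-length data is stepped over in 4-byte units (the XDR_QUADLEN() rounding used by decode_fh() and encode_fh()). A small, self-contained userspace sketch of both conventions, using htonl()/ntohl() from <arpa/inet.h> rather than the kernel helpers (illustrative only):

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

/* Round a byte count up to the number of 4-byte XDR words,
 * mirroring the kernel's XDR_QUADLEN() idiom. */
static unsigned int quadlen(unsigned int nbytes)
{
	return (nbytes + 3) >> 2;
}

/* Encode a 64-bit value as two big-endian 32-bit XDR words,
 * high word first, as xdr_encode_hyper() does. */
static uint32_t *encode_hyper(uint32_t *p, uint64_t val)
{
	*p++ = htonl((uint32_t)(val >> 32));
	*p++ = htonl((uint32_t)(val & 0xffffffff));
	return p;
}

int main(void)
{
	uint32_t buf[2];
	uint64_t size = 0x123456789abcdef0ULL;

	encode_hyper(buf, size);
	printf("hyper 0x%llx -> words 0x%08x 0x%08x\n",
	       (unsigned long long)size,
	       (unsigned)ntohl(buf[0]), (unsigned)ntohl(buf[1]));
	printf("a 5-byte filename occupies %u XDR words on the wire\n",
	       quadlen(5));
	return 0;
}

This is also why decode_fh() in the excerpt rejects any size greater than NFS3_FHSIZE before its memcpy() and then advances the cursor by XDR_QUADLEN(size) words rather than by the raw byte count.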
crossvul-cpp_data_bad_3320_0
/* * hid-cp2112.c - Silicon Labs HID USB to SMBus master bridge * Copyright (c) 2013,2014 Uplogix, Inc. * David Barksdale <dbarksdale@uplogix.com> * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ /* * The Silicon Labs CP2112 chip is a USB HID device which provides an * SMBus controller for talking to slave devices and 8 GPIO pins. The * host communicates with the CP2112 via raw HID reports. * * Data Sheet: * http://www.silabs.com/Support%20Documents/TechnicalDocs/CP2112.pdf * Programming Interface Specification: * http://www.silabs.com/Support%20Documents/TechnicalDocs/AN495.pdf */ #include <linux/gpio.h> #include <linux/gpio/driver.h> #include <linux/hid.h> #include <linux/i2c.h> #include <linux/module.h> #include <linux/nls.h> #include <linux/usb/ch9.h> #include "hid-ids.h" #define CP2112_REPORT_MAX_LENGTH 64 #define CP2112_GPIO_CONFIG_LENGTH 5 #define CP2112_GPIO_GET_LENGTH 2 #define CP2112_GPIO_SET_LENGTH 3 enum { CP2112_GPIO_CONFIG = 0x02, CP2112_GPIO_GET = 0x03, CP2112_GPIO_SET = 0x04, CP2112_GET_VERSION_INFO = 0x05, CP2112_SMBUS_CONFIG = 0x06, CP2112_DATA_READ_REQUEST = 0x10, CP2112_DATA_WRITE_READ_REQUEST = 0x11, CP2112_DATA_READ_FORCE_SEND = 0x12, CP2112_DATA_READ_RESPONSE = 0x13, CP2112_DATA_WRITE_REQUEST = 0x14, CP2112_TRANSFER_STATUS_REQUEST = 0x15, CP2112_TRANSFER_STATUS_RESPONSE = 0x16, CP2112_CANCEL_TRANSFER = 0x17, CP2112_LOCK_BYTE = 0x20, CP2112_USB_CONFIG = 0x21, CP2112_MANUFACTURER_STRING = 0x22, CP2112_PRODUCT_STRING = 0x23, CP2112_SERIAL_STRING = 0x24, }; enum { STATUS0_IDLE = 0x00, STATUS0_BUSY = 0x01, STATUS0_COMPLETE = 0x02, STATUS0_ERROR = 0x03, }; enum { STATUS1_TIMEOUT_NACK = 0x00, STATUS1_TIMEOUT_BUS = 0x01, STATUS1_ARBITRATION_LOST = 0x02, STATUS1_READ_INCOMPLETE = 0x03, STATUS1_WRITE_INCOMPLETE = 0x04, STATUS1_SUCCESS = 0x05, }; struct cp2112_smbus_config_report { u8 report; /* CP2112_SMBUS_CONFIG */ __be32 clock_speed; /* Hz */ u8 device_address; /* Stored in the upper 7 bits */ u8 auto_send_read; /* 1 = enabled, 0 = disabled */ __be16 write_timeout; /* ms, 0 = no timeout */ __be16 read_timeout; /* ms, 0 = no timeout */ u8 scl_low_timeout; /* 1 = enabled, 0 = disabled */ __be16 retry_time; /* # of retries, 0 = no limit */ } __packed; struct cp2112_usb_config_report { u8 report; /* CP2112_USB_CONFIG */ __le16 vid; /* Vendor ID */ __le16 pid; /* Product ID */ u8 max_power; /* Power requested in 2mA units */ u8 power_mode; /* 0x00 = bus powered 0x01 = self powered & regulator off 0x02 = self powered & regulator on */ u8 release_major; u8 release_minor; u8 mask; /* What fields to program */ } __packed; struct cp2112_read_req_report { u8 report; /* CP2112_DATA_READ_REQUEST */ u8 slave_address; __be16 length; } __packed; struct cp2112_write_read_req_report { u8 report; /* CP2112_DATA_WRITE_READ_REQUEST */ u8 slave_address; __be16 length; u8 target_address_length; u8 target_address[16]; } __packed; struct cp2112_write_req_report { u8 report; /* CP2112_DATA_WRITE_REQUEST */ u8 slave_address; u8 length; u8 data[61]; } __packed; struct cp2112_force_read_report { u8 report; /* CP2112_DATA_READ_FORCE_SEND */ __be16 length; } __packed; struct cp2112_xfer_status_report { u8 report; /* 
CP2112_TRANSFER_STATUS_RESPONSE */ u8 status0; /* STATUS0_* */ u8 status1; /* STATUS1_* */ __be16 retries; __be16 length; } __packed; struct cp2112_string_report { u8 dummy; /* force .string to be aligned */ u8 report; /* CP2112_*_STRING */ u8 length; /* length in bytes of everyting after .report */ u8 type; /* USB_DT_STRING */ wchar_t string[30]; /* UTF16_LITTLE_ENDIAN string */ } __packed; /* Number of times to request transfer status before giving up waiting for a transfer to complete. This may need to be changed if SMBUS clock, retries, or read/write/scl_low timeout settings are changed. */ static const int XFER_STATUS_RETRIES = 10; /* Time in ms to wait for a CP2112_DATA_READ_RESPONSE or CP2112_TRANSFER_STATUS_RESPONSE. */ static const int RESPONSE_TIMEOUT = 50; static const struct hid_device_id cp2112_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_CP2112) }, { } }; MODULE_DEVICE_TABLE(hid, cp2112_devices); struct cp2112_device { struct i2c_adapter adap; struct hid_device *hdev; wait_queue_head_t wait; u8 read_data[61]; u8 read_length; u8 hwversion; int xfer_status; atomic_t read_avail; atomic_t xfer_avail; struct gpio_chip gc; u8 *in_out_buffer; spinlock_t lock; struct gpio_desc *desc[8]; bool gpio_poll; struct delayed_work gpio_poll_worker; unsigned long irq_mask; u8 gpio_prev_state; }; static int gpio_push_pull = 0xFF; module_param(gpio_push_pull, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(gpio_push_pull, "GPIO push-pull configuration bitmask"); static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset) { struct cp2112_device *dev = gpiochip_get_data(chip); struct hid_device *hdev = dev->hdev; u8 *buf = dev->in_out_buffer; unsigned long flags; int ret; spin_lock_irqsave(&dev->lock, flags); ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf, CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT, HID_REQ_GET_REPORT); if (ret != CP2112_GPIO_CONFIG_LENGTH) { hid_err(hdev, "error requesting GPIO config: %d\n", ret); goto exit; } buf[1] &= ~(1 << offset); buf[2] = gpio_push_pull; ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf, CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT, HID_REQ_SET_REPORT); if (ret < 0) { hid_err(hdev, "error setting GPIO config: %d\n", ret); goto exit; } ret = 0; exit: spin_unlock_irqrestore(&dev->lock, flags); return ret <= 0 ? ret : -EIO; } static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value) { struct cp2112_device *dev = gpiochip_get_data(chip); struct hid_device *hdev = dev->hdev; u8 *buf = dev->in_out_buffer; unsigned long flags; int ret; spin_lock_irqsave(&dev->lock, flags); buf[0] = CP2112_GPIO_SET; buf[1] = value ? 0xff : 0; buf[2] = 1 << offset; ret = hid_hw_raw_request(hdev, CP2112_GPIO_SET, buf, CP2112_GPIO_SET_LENGTH, HID_FEATURE_REPORT, HID_REQ_SET_REPORT); if (ret < 0) hid_err(hdev, "error setting GPIO values: %d\n", ret); spin_unlock_irqrestore(&dev->lock, flags); } static int cp2112_gpio_get_all(struct gpio_chip *chip) { struct cp2112_device *dev = gpiochip_get_data(chip); struct hid_device *hdev = dev->hdev; u8 *buf = dev->in_out_buffer; unsigned long flags; int ret; spin_lock_irqsave(&dev->lock, flags); ret = hid_hw_raw_request(hdev, CP2112_GPIO_GET, buf, CP2112_GPIO_GET_LENGTH, HID_FEATURE_REPORT, HID_REQ_GET_REPORT); if (ret != CP2112_GPIO_GET_LENGTH) { hid_err(hdev, "error requesting GPIO values: %d\n", ret); ret = ret < 0 ? 
ret : -EIO; goto exit; } ret = buf[1]; exit: spin_unlock_irqrestore(&dev->lock, flags); return ret; } static int cp2112_gpio_get(struct gpio_chip *chip, unsigned int offset) { int ret; ret = cp2112_gpio_get_all(chip); if (ret < 0) return ret; return (ret >> offset) & 1; } static int cp2112_gpio_direction_output(struct gpio_chip *chip, unsigned offset, int value) { struct cp2112_device *dev = gpiochip_get_data(chip); struct hid_device *hdev = dev->hdev; u8 *buf = dev->in_out_buffer; unsigned long flags; int ret; spin_lock_irqsave(&dev->lock, flags); ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf, CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT, HID_REQ_GET_REPORT); if (ret != CP2112_GPIO_CONFIG_LENGTH) { hid_err(hdev, "error requesting GPIO config: %d\n", ret); goto fail; } buf[1] |= 1 << offset; buf[2] = gpio_push_pull; ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf, CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT, HID_REQ_SET_REPORT); if (ret < 0) { hid_err(hdev, "error setting GPIO config: %d\n", ret); goto fail; } spin_unlock_irqrestore(&dev->lock, flags); /* * Set gpio value when output direction is already set, * as specified in AN495, Rev. 0.2, cpt. 4.4 */ cp2112_gpio_set(chip, offset, value); return 0; fail: spin_unlock_irqrestore(&dev->lock, flags); return ret < 0 ? ret : -EIO; } static int cp2112_hid_get(struct hid_device *hdev, unsigned char report_number, u8 *data, size_t count, unsigned char report_type) { u8 *buf; int ret; buf = kmalloc(count, GFP_KERNEL); if (!buf) return -ENOMEM; ret = hid_hw_raw_request(hdev, report_number, buf, count, report_type, HID_REQ_GET_REPORT); memcpy(data, buf, count); kfree(buf); return ret; } static int cp2112_hid_output(struct hid_device *hdev, u8 *data, size_t count, unsigned char report_type) { u8 *buf; int ret; buf = kmemdup(data, count, GFP_KERNEL); if (!buf) return -ENOMEM; if (report_type == HID_OUTPUT_REPORT) ret = hid_hw_output_report(hdev, buf, count); else ret = hid_hw_raw_request(hdev, buf[0], buf, count, report_type, HID_REQ_SET_REPORT); kfree(buf); return ret; } static int cp2112_wait(struct cp2112_device *dev, atomic_t *avail) { int ret = 0; /* We have sent either a CP2112_TRANSFER_STATUS_REQUEST or a * CP2112_DATA_READ_FORCE_SEND and we are waiting for the response to * come in cp2112_raw_event or timeout. There will only be one of these * in flight at any one time. The timeout is extremely large and is a * last resort if the CP2112 has died. If we do timeout we don't expect * to receive the response which would cause data races, it's not like * we can do anything about it anyway. 
*/ ret = wait_event_interruptible_timeout(dev->wait, atomic_read(avail), msecs_to_jiffies(RESPONSE_TIMEOUT)); if (-ERESTARTSYS == ret) return ret; if (!ret) return -ETIMEDOUT; atomic_set(avail, 0); return 0; } static int cp2112_xfer_status(struct cp2112_device *dev) { struct hid_device *hdev = dev->hdev; u8 buf[2]; int ret; buf[0] = CP2112_TRANSFER_STATUS_REQUEST; buf[1] = 0x01; atomic_set(&dev->xfer_avail, 0); ret = cp2112_hid_output(hdev, buf, 2, HID_OUTPUT_REPORT); if (ret < 0) { hid_warn(hdev, "Error requesting status: %d\n", ret); return ret; } ret = cp2112_wait(dev, &dev->xfer_avail); if (ret) return ret; return dev->xfer_status; } static int cp2112_read(struct cp2112_device *dev, u8 *data, size_t size) { struct hid_device *hdev = dev->hdev; struct cp2112_force_read_report report; int ret; if (size > sizeof(dev->read_data)) size = sizeof(dev->read_data); report.report = CP2112_DATA_READ_FORCE_SEND; report.length = cpu_to_be16(size); atomic_set(&dev->read_avail, 0); ret = cp2112_hid_output(hdev, &report.report, sizeof(report), HID_OUTPUT_REPORT); if (ret < 0) { hid_warn(hdev, "Error requesting data: %d\n", ret); return ret; } ret = cp2112_wait(dev, &dev->read_avail); if (ret) return ret; hid_dbg(hdev, "read %d of %zd bytes requested\n", dev->read_length, size); if (size > dev->read_length) size = dev->read_length; memcpy(data, dev->read_data, size); return dev->read_length; } static int cp2112_read_req(void *buf, u8 slave_address, u16 length) { struct cp2112_read_req_report *report = buf; if (length < 1 || length > 512) return -EINVAL; report->report = CP2112_DATA_READ_REQUEST; report->slave_address = slave_address << 1; report->length = cpu_to_be16(length); return sizeof(*report); } static int cp2112_write_read_req(void *buf, u8 slave_address, u16 length, u8 command, u8 *data, u8 data_length) { struct cp2112_write_read_req_report *report = buf; if (length < 1 || length > 512 || data_length > sizeof(report->target_address) - 1) return -EINVAL; report->report = CP2112_DATA_WRITE_READ_REQUEST; report->slave_address = slave_address << 1; report->length = cpu_to_be16(length); report->target_address_length = data_length + 1; report->target_address[0] = command; memcpy(&report->target_address[1], data, data_length); return data_length + 6; } static int cp2112_write_req(void *buf, u8 slave_address, u8 command, u8 *data, u8 data_length) { struct cp2112_write_req_report *report = buf; if (data_length > sizeof(report->data) - 1) return -EINVAL; report->report = CP2112_DATA_WRITE_REQUEST; report->slave_address = slave_address << 1; report->length = data_length + 1; report->data[0] = command; memcpy(&report->data[1], data, data_length); return data_length + 4; } static int cp2112_i2c_write_req(void *buf, u8 slave_address, u8 *data, u8 data_length) { struct cp2112_write_req_report *report = buf; if (data_length > sizeof(report->data)) return -EINVAL; report->report = CP2112_DATA_WRITE_REQUEST; report->slave_address = slave_address << 1; report->length = data_length; memcpy(report->data, data, data_length); return data_length + 3; } static int cp2112_i2c_write_read_req(void *buf, u8 slave_address, u8 *addr, int addr_length, int read_length) { struct cp2112_write_read_req_report *report = buf; if (read_length < 1 || read_length > 512 || addr_length > sizeof(report->target_address)) return -EINVAL; report->report = CP2112_DATA_WRITE_READ_REQUEST; report->slave_address = slave_address << 1; report->length = cpu_to_be16(read_length); report->target_address_length = addr_length; 
memcpy(report->target_address, addr, addr_length); return addr_length + 5; } static int cp2112_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) { struct cp2112_device *dev = (struct cp2112_device *)adap->algo_data; struct hid_device *hdev = dev->hdev; u8 buf[64]; ssize_t count; ssize_t read_length = 0; u8 *read_buf = NULL; unsigned int retries; int ret; hid_dbg(hdev, "I2C %d messages\n", num); if (num == 1) { if (msgs->flags & I2C_M_RD) { hid_dbg(hdev, "I2C read %#04x len %d\n", msgs->addr, msgs->len); read_length = msgs->len; read_buf = msgs->buf; count = cp2112_read_req(buf, msgs->addr, msgs->len); } else { hid_dbg(hdev, "I2C write %#04x len %d\n", msgs->addr, msgs->len); count = cp2112_i2c_write_req(buf, msgs->addr, msgs->buf, msgs->len); } if (count < 0) return count; } else if (dev->hwversion > 1 && /* no repeated start in rev 1 */ num == 2 && msgs[0].addr == msgs[1].addr && !(msgs[0].flags & I2C_M_RD) && (msgs[1].flags & I2C_M_RD)) { hid_dbg(hdev, "I2C write-read %#04x wlen %d rlen %d\n", msgs[0].addr, msgs[0].len, msgs[1].len); read_length = msgs[1].len; read_buf = msgs[1].buf; count = cp2112_i2c_write_read_req(buf, msgs[0].addr, msgs[0].buf, msgs[0].len, msgs[1].len); if (count < 0) return count; } else { hid_err(hdev, "Multi-message I2C transactions not supported\n"); return -EOPNOTSUPP; } ret = hid_hw_power(hdev, PM_HINT_FULLON); if (ret < 0) { hid_err(hdev, "power management error: %d\n", ret); return ret; } ret = cp2112_hid_output(hdev, buf, count, HID_OUTPUT_REPORT); if (ret < 0) { hid_warn(hdev, "Error starting transaction: %d\n", ret); goto power_normal; } for (retries = 0; retries < XFER_STATUS_RETRIES; ++retries) { ret = cp2112_xfer_status(dev); if (-EBUSY == ret) continue; if (ret < 0) goto power_normal; break; } if (XFER_STATUS_RETRIES <= retries) { hid_warn(hdev, "Transfer timed out, cancelling.\n"); buf[0] = CP2112_CANCEL_TRANSFER; buf[1] = 0x01; ret = cp2112_hid_output(hdev, buf, 2, HID_OUTPUT_REPORT); if (ret < 0) hid_warn(hdev, "Error cancelling transaction: %d\n", ret); ret = -ETIMEDOUT; goto power_normal; } for (count = 0; count < read_length;) { ret = cp2112_read(dev, read_buf + count, read_length - count); if (ret < 0) goto power_normal; if (ret == 0) { hid_err(hdev, "read returned 0\n"); ret = -EIO; goto power_normal; } count += ret; if (count > read_length) { /* * The hardware returned too much data. * This is mostly harmless because cp2112_read() * has a limit check so didn't overrun our * buffer. Nevertheless, we return an error * because something is seriously wrong and * it shouldn't go unnoticed. */ hid_err(hdev, "long read: %d > %zd\n", ret, read_length - count + ret); ret = -EIO; goto power_normal; } } /* return the number of transferred messages */ ret = num; power_normal: hid_hw_power(hdev, PM_HINT_NORMAL); hid_dbg(hdev, "I2C transfer finished: %d\n", ret); return ret; } static int cp2112_xfer(struct i2c_adapter *adap, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data *data) { struct cp2112_device *dev = (struct cp2112_device *)adap->algo_data; struct hid_device *hdev = dev->hdev; u8 buf[64]; __le16 word; ssize_t count; size_t read_length = 0; unsigned int retries; int ret; hid_dbg(hdev, "%s addr 0x%x flags 0x%x cmd 0x%x size %d\n", read_write == I2C_SMBUS_WRITE ? 
"write" : "read", addr, flags, command, size); switch (size) { case I2C_SMBUS_BYTE: read_length = 1; if (I2C_SMBUS_READ == read_write) count = cp2112_read_req(buf, addr, read_length); else count = cp2112_write_req(buf, addr, command, NULL, 0); break; case I2C_SMBUS_BYTE_DATA: read_length = 1; if (I2C_SMBUS_READ == read_write) count = cp2112_write_read_req(buf, addr, read_length, command, NULL, 0); else count = cp2112_write_req(buf, addr, command, &data->byte, 1); break; case I2C_SMBUS_WORD_DATA: read_length = 2; word = cpu_to_le16(data->word); if (I2C_SMBUS_READ == read_write) count = cp2112_write_read_req(buf, addr, read_length, command, NULL, 0); else count = cp2112_write_req(buf, addr, command, (u8 *)&word, 2); break; case I2C_SMBUS_PROC_CALL: size = I2C_SMBUS_WORD_DATA; read_write = I2C_SMBUS_READ; read_length = 2; word = cpu_to_le16(data->word); count = cp2112_write_read_req(buf, addr, read_length, command, (u8 *)&word, 2); break; case I2C_SMBUS_I2C_BLOCK_DATA: size = I2C_SMBUS_BLOCK_DATA; /* fallthrough */ case I2C_SMBUS_BLOCK_DATA: if (I2C_SMBUS_READ == read_write) { count = cp2112_write_read_req(buf, addr, I2C_SMBUS_BLOCK_MAX, command, NULL, 0); } else { count = cp2112_write_req(buf, addr, command, data->block, data->block[0] + 1); } break; case I2C_SMBUS_BLOCK_PROC_CALL: size = I2C_SMBUS_BLOCK_DATA; read_write = I2C_SMBUS_READ; count = cp2112_write_read_req(buf, addr, I2C_SMBUS_BLOCK_MAX, command, data->block, data->block[0] + 1); break; default: hid_warn(hdev, "Unsupported transaction %d\n", size); return -EOPNOTSUPP; } if (count < 0) return count; ret = hid_hw_power(hdev, PM_HINT_FULLON); if (ret < 0) { hid_err(hdev, "power management error: %d\n", ret); return ret; } ret = cp2112_hid_output(hdev, buf, count, HID_OUTPUT_REPORT); if (ret < 0) { hid_warn(hdev, "Error starting transaction: %d\n", ret); goto power_normal; } for (retries = 0; retries < XFER_STATUS_RETRIES; ++retries) { ret = cp2112_xfer_status(dev); if (-EBUSY == ret) continue; if (ret < 0) goto power_normal; break; } if (XFER_STATUS_RETRIES <= retries) { hid_warn(hdev, "Transfer timed out, cancelling.\n"); buf[0] = CP2112_CANCEL_TRANSFER; buf[1] = 0x01; ret = cp2112_hid_output(hdev, buf, 2, HID_OUTPUT_REPORT); if (ret < 0) hid_warn(hdev, "Error cancelling transaction: %d\n", ret); ret = -ETIMEDOUT; goto power_normal; } if (I2C_SMBUS_WRITE == read_write) { ret = 0; goto power_normal; } if (I2C_SMBUS_BLOCK_DATA == size) read_length = ret; ret = cp2112_read(dev, buf, read_length); if (ret < 0) goto power_normal; if (ret != read_length) { hid_warn(hdev, "short read: %d < %zd\n", ret, read_length); ret = -EIO; goto power_normal; } switch (size) { case I2C_SMBUS_BYTE: case I2C_SMBUS_BYTE_DATA: data->byte = buf[0]; break; case I2C_SMBUS_WORD_DATA: data->word = le16_to_cpup((__le16 *)buf); break; case I2C_SMBUS_BLOCK_DATA: if (read_length > I2C_SMBUS_BLOCK_MAX) { ret = -EPROTO; goto power_normal; } memcpy(data->block, buf, read_length); break; } ret = 0; power_normal: hid_hw_power(hdev, PM_HINT_NORMAL); hid_dbg(hdev, "transfer finished: %d\n", ret); return ret; } static u32 cp2112_functionality(struct i2c_adapter *adap) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_BLOCK_DATA | I2C_FUNC_SMBUS_I2C_BLOCK | I2C_FUNC_SMBUS_PROC_CALL | I2C_FUNC_SMBUS_BLOCK_PROC_CALL; } static const struct i2c_algorithm smbus_algorithm = { .master_xfer = cp2112_i2c_xfer, .smbus_xfer = cp2112_xfer, .functionality = cp2112_functionality, }; static int cp2112_get_usb_config(struct 
hid_device *hdev, struct cp2112_usb_config_report *cfg) { int ret; ret = cp2112_hid_get(hdev, CP2112_USB_CONFIG, (u8 *)cfg, sizeof(*cfg), HID_FEATURE_REPORT); if (ret != sizeof(*cfg)) { hid_err(hdev, "error reading usb config: %d\n", ret); if (ret < 0) return ret; return -EIO; } return 0; } static int cp2112_set_usb_config(struct hid_device *hdev, struct cp2112_usb_config_report *cfg) { int ret; BUG_ON(cfg->report != CP2112_USB_CONFIG); ret = cp2112_hid_output(hdev, (u8 *)cfg, sizeof(*cfg), HID_FEATURE_REPORT); if (ret != sizeof(*cfg)) { hid_err(hdev, "error writing usb config: %d\n", ret); if (ret < 0) return ret; return -EIO; } return 0; } static void chmod_sysfs_attrs(struct hid_device *hdev); #define CP2112_CONFIG_ATTR(name, store, format, ...) \ static ssize_t name##_store(struct device *kdev, \ struct device_attribute *attr, const char *buf, \ size_t count) \ { \ struct hid_device *hdev = to_hid_device(kdev); \ struct cp2112_usb_config_report cfg; \ int ret = cp2112_get_usb_config(hdev, &cfg); \ if (ret) \ return ret; \ store; \ ret = cp2112_set_usb_config(hdev, &cfg); \ if (ret) \ return ret; \ chmod_sysfs_attrs(hdev); \ return count; \ } \ static ssize_t name##_show(struct device *kdev, \ struct device_attribute *attr, char *buf) \ { \ struct hid_device *hdev = to_hid_device(kdev); \ struct cp2112_usb_config_report cfg; \ int ret = cp2112_get_usb_config(hdev, &cfg); \ if (ret) \ return ret; \ return scnprintf(buf, PAGE_SIZE, format, ##__VA_ARGS__); \ } \ static DEVICE_ATTR_RW(name); CP2112_CONFIG_ATTR(vendor_id, ({ u16 vid; if (sscanf(buf, "%hi", &vid) != 1) return -EINVAL; cfg.vid = cpu_to_le16(vid); cfg.mask = 0x01; }), "0x%04x\n", le16_to_cpu(cfg.vid)); CP2112_CONFIG_ATTR(product_id, ({ u16 pid; if (sscanf(buf, "%hi", &pid) != 1) return -EINVAL; cfg.pid = cpu_to_le16(pid); cfg.mask = 0x02; }), "0x%04x\n", le16_to_cpu(cfg.pid)); CP2112_CONFIG_ATTR(max_power, ({ int mA; if (sscanf(buf, "%i", &mA) != 1) return -EINVAL; cfg.max_power = (mA + 1) / 2; cfg.mask = 0x04; }), "%u mA\n", cfg.max_power * 2); CP2112_CONFIG_ATTR(power_mode, ({ if (sscanf(buf, "%hhi", &cfg.power_mode) != 1) return -EINVAL; cfg.mask = 0x08; }), "%u\n", cfg.power_mode); CP2112_CONFIG_ATTR(release_version, ({ if (sscanf(buf, "%hhi.%hhi", &cfg.release_major, &cfg.release_minor) != 2) return -EINVAL; cfg.mask = 0x10; }), "%u.%u\n", cfg.release_major, cfg.release_minor); #undef CP2112_CONFIG_ATTR struct cp2112_pstring_attribute { struct device_attribute attr; unsigned char report; }; static ssize_t pstr_store(struct device *kdev, struct device_attribute *kattr, const char *buf, size_t count) { struct hid_device *hdev = to_hid_device(kdev); struct cp2112_pstring_attribute *attr = container_of(kattr, struct cp2112_pstring_attribute, attr); struct cp2112_string_report report; int ret; memset(&report, 0, sizeof(report)); ret = utf8s_to_utf16s(buf, count, UTF16_LITTLE_ENDIAN, report.string, ARRAY_SIZE(report.string)); report.report = attr->report; report.length = ret * sizeof(report.string[0]) + 2; report.type = USB_DT_STRING; ret = cp2112_hid_output(hdev, &report.report, report.length + 1, HID_FEATURE_REPORT); if (ret != report.length + 1) { hid_err(hdev, "error writing %s string: %d\n", kattr->attr.name, ret); if (ret < 0) return ret; return -EIO; } chmod_sysfs_attrs(hdev); return count; } static ssize_t pstr_show(struct device *kdev, struct device_attribute *kattr, char *buf) { struct hid_device *hdev = to_hid_device(kdev); struct cp2112_pstring_attribute *attr = container_of(kattr, struct cp2112_pstring_attribute, 
attr); struct cp2112_string_report report; u8 length; int ret; ret = cp2112_hid_get(hdev, attr->report, &report.report, sizeof(report) - 1, HID_FEATURE_REPORT); if (ret < 3) { hid_err(hdev, "error reading %s string: %d\n", kattr->attr.name, ret); if (ret < 0) return ret; return -EIO; } if (report.length < 2) { hid_err(hdev, "invalid %s string length: %d\n", kattr->attr.name, report.length); return -EIO; } length = report.length > ret - 1 ? ret - 1 : report.length; length = (length - 2) / sizeof(report.string[0]); ret = utf16s_to_utf8s(report.string, length, UTF16_LITTLE_ENDIAN, buf, PAGE_SIZE - 1); buf[ret++] = '\n'; return ret; } #define CP2112_PSTR_ATTR(name, _report) \ static struct cp2112_pstring_attribute dev_attr_##name = { \ .attr = __ATTR(name, (S_IWUSR | S_IRUGO), pstr_show, pstr_store), \ .report = _report, \ }; CP2112_PSTR_ATTR(manufacturer, CP2112_MANUFACTURER_STRING); CP2112_PSTR_ATTR(product, CP2112_PRODUCT_STRING); CP2112_PSTR_ATTR(serial, CP2112_SERIAL_STRING); #undef CP2112_PSTR_ATTR static const struct attribute_group cp2112_attr_group = { .attrs = (struct attribute *[]){ &dev_attr_vendor_id.attr, &dev_attr_product_id.attr, &dev_attr_max_power.attr, &dev_attr_power_mode.attr, &dev_attr_release_version.attr, &dev_attr_manufacturer.attr.attr, &dev_attr_product.attr.attr, &dev_attr_serial.attr.attr, NULL } }; /* Chmoding our sysfs attributes is simply a way to expose which fields in the * PROM have already been programmed. We do not depend on this preventing * writing to these attributes since the CP2112 will simply ignore writes to * already-programmed fields. This is why there is no sense in fixing this * racy behaviour. */ static void chmod_sysfs_attrs(struct hid_device *hdev) { struct attribute **attr; u8 buf[2]; int ret; ret = cp2112_hid_get(hdev, CP2112_LOCK_BYTE, buf, sizeof(buf), HID_FEATURE_REPORT); if (ret != sizeof(buf)) { hid_err(hdev, "error reading lock byte: %d\n", ret); return; } for (attr = cp2112_attr_group.attrs; *attr; ++attr) { umode_t mode = (buf[1] & 1) ? 
S_IWUSR | S_IRUGO : S_IRUGO; ret = sysfs_chmod_file(&hdev->dev.kobj, *attr, mode); if (ret < 0) hid_err(hdev, "error chmoding sysfs file %s\n", (*attr)->name); buf[1] >>= 1; } } static void cp2112_gpio_irq_ack(struct irq_data *d) { } static void cp2112_gpio_irq_mask(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct cp2112_device *dev = gpiochip_get_data(gc); __clear_bit(d->hwirq, &dev->irq_mask); } static void cp2112_gpio_irq_unmask(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct cp2112_device *dev = gpiochip_get_data(gc); __set_bit(d->hwirq, &dev->irq_mask); } static void cp2112_gpio_poll_callback(struct work_struct *work) { struct cp2112_device *dev = container_of(work, struct cp2112_device, gpio_poll_worker.work); struct irq_data *d; u8 gpio_mask; u8 virqs = (u8)dev->irq_mask; u32 irq_type; int irq, virq, ret; ret = cp2112_gpio_get_all(&dev->gc); if (ret == -ENODEV) /* the hardware has been disconnected */ return; if (ret < 0) goto exit; gpio_mask = ret; while (virqs) { virq = ffs(virqs) - 1; virqs &= ~BIT(virq); if (!dev->gc.to_irq) break; irq = dev->gc.to_irq(&dev->gc, virq); d = irq_get_irq_data(irq); if (!d) continue; irq_type = irqd_get_trigger_type(d); if (gpio_mask & BIT(virq)) { /* Level High */ if (irq_type & IRQ_TYPE_LEVEL_HIGH) handle_nested_irq(irq); if ((irq_type & IRQ_TYPE_EDGE_RISING) && !(dev->gpio_prev_state & BIT(virq))) handle_nested_irq(irq); } else { /* Level Low */ if (irq_type & IRQ_TYPE_LEVEL_LOW) handle_nested_irq(irq); if ((irq_type & IRQ_TYPE_EDGE_FALLING) && (dev->gpio_prev_state & BIT(virq))) handle_nested_irq(irq); } } dev->gpio_prev_state = gpio_mask; exit: if (dev->gpio_poll) schedule_delayed_work(&dev->gpio_poll_worker, 10); } static unsigned int cp2112_gpio_irq_startup(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct cp2112_device *dev = gpiochip_get_data(gc); INIT_DELAYED_WORK(&dev->gpio_poll_worker, cp2112_gpio_poll_callback); cp2112_gpio_direction_input(gc, d->hwirq); if (!dev->gpio_poll) { dev->gpio_poll = true; schedule_delayed_work(&dev->gpio_poll_worker, 0); } cp2112_gpio_irq_unmask(d); return 0; } static void cp2112_gpio_irq_shutdown(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct cp2112_device *dev = gpiochip_get_data(gc); cancel_delayed_work_sync(&dev->gpio_poll_worker); } static int cp2112_gpio_irq_type(struct irq_data *d, unsigned int type) { return 0; } static struct irq_chip cp2112_gpio_irqchip = { .name = "cp2112-gpio", .irq_startup = cp2112_gpio_irq_startup, .irq_shutdown = cp2112_gpio_irq_shutdown, .irq_ack = cp2112_gpio_irq_ack, .irq_mask = cp2112_gpio_irq_mask, .irq_unmask = cp2112_gpio_irq_unmask, .irq_set_type = cp2112_gpio_irq_type, }; static int __maybe_unused cp2112_allocate_irq(struct cp2112_device *dev, int pin) { int ret; if (dev->desc[pin]) return -EINVAL; dev->desc[pin] = gpiochip_request_own_desc(&dev->gc, pin, "HID/I2C:Event"); if (IS_ERR(dev->desc[pin])) { dev_err(dev->gc.parent, "Failed to request GPIO\n"); return PTR_ERR(dev->desc[pin]); } ret = gpiochip_lock_as_irq(&dev->gc, pin); if (ret) { dev_err(dev->gc.parent, "Failed to lock GPIO as interrupt\n"); goto err_desc; } ret = gpiod_to_irq(dev->desc[pin]); if (ret < 0) { dev_err(dev->gc.parent, "Failed to translate GPIO to IRQ\n"); goto err_lock; } return ret; err_lock: gpiochip_unlock_as_irq(&dev->gc, pin); err_desc: gpiochip_free_own_desc(dev->desc[pin]); dev->desc[pin] = NULL; return ret; } static int cp2112_probe(struct hid_device 
*hdev, const struct hid_device_id *id) { struct cp2112_device *dev; u8 buf[3]; struct cp2112_smbus_config_report config; int ret; dev = devm_kzalloc(&hdev->dev, sizeof(*dev), GFP_KERNEL); if (!dev) return -ENOMEM; dev->in_out_buffer = devm_kzalloc(&hdev->dev, CP2112_REPORT_MAX_LENGTH, GFP_KERNEL); if (!dev->in_out_buffer) return -ENOMEM; spin_lock_init(&dev->lock); ret = hid_parse(hdev); if (ret) { hid_err(hdev, "parse failed\n"); return ret; } ret = hid_hw_start(hdev, HID_CONNECT_HIDRAW); if (ret) { hid_err(hdev, "hw start failed\n"); return ret; } ret = hid_hw_open(hdev); if (ret) { hid_err(hdev, "hw open failed\n"); goto err_hid_stop; } ret = hid_hw_power(hdev, PM_HINT_FULLON); if (ret < 0) { hid_err(hdev, "power management error: %d\n", ret); goto err_hid_close; } ret = cp2112_hid_get(hdev, CP2112_GET_VERSION_INFO, buf, sizeof(buf), HID_FEATURE_REPORT); if (ret != sizeof(buf)) { hid_err(hdev, "error requesting version\n"); if (ret >= 0) ret = -EIO; goto err_power_normal; } hid_info(hdev, "Part Number: 0x%02X Device Version: 0x%02X\n", buf[1], buf[2]); ret = cp2112_hid_get(hdev, CP2112_SMBUS_CONFIG, (u8 *)&config, sizeof(config), HID_FEATURE_REPORT); if (ret != sizeof(config)) { hid_err(hdev, "error requesting SMBus config\n"); if (ret >= 0) ret = -EIO; goto err_power_normal; } config.retry_time = cpu_to_be16(1); ret = cp2112_hid_output(hdev, (u8 *)&config, sizeof(config), HID_FEATURE_REPORT); if (ret != sizeof(config)) { hid_err(hdev, "error setting SMBus config\n"); if (ret >= 0) ret = -EIO; goto err_power_normal; } hid_set_drvdata(hdev, (void *)dev); dev->hdev = hdev; dev->adap.owner = THIS_MODULE; dev->adap.class = I2C_CLASS_HWMON; dev->adap.algo = &smbus_algorithm; dev->adap.algo_data = dev; dev->adap.dev.parent = &hdev->dev; snprintf(dev->adap.name, sizeof(dev->adap.name), "CP2112 SMBus Bridge on hiddev%d", hdev->minor); dev->hwversion = buf[2]; init_waitqueue_head(&dev->wait); hid_device_io_start(hdev); ret = i2c_add_adapter(&dev->adap); hid_device_io_stop(hdev); if (ret) { hid_err(hdev, "error registering i2c adapter\n"); goto err_power_normal; } hid_dbg(hdev, "adapter registered\n"); dev->gc.label = "cp2112_gpio"; dev->gc.direction_input = cp2112_gpio_direction_input; dev->gc.direction_output = cp2112_gpio_direction_output; dev->gc.set = cp2112_gpio_set; dev->gc.get = cp2112_gpio_get; dev->gc.base = -1; dev->gc.ngpio = 8; dev->gc.can_sleep = 1; dev->gc.parent = &hdev->dev; ret = gpiochip_add_data(&dev->gc, dev); if (ret < 0) { hid_err(hdev, "error registering gpio chip\n"); goto err_free_i2c; } ret = sysfs_create_group(&hdev->dev.kobj, &cp2112_attr_group); if (ret < 0) { hid_err(hdev, "error creating sysfs attrs\n"); goto err_gpiochip_remove; } chmod_sysfs_attrs(hdev); hid_hw_power(hdev, PM_HINT_NORMAL); ret = gpiochip_irqchip_add(&dev->gc, &cp2112_gpio_irqchip, 0, handle_simple_irq, IRQ_TYPE_NONE); if (ret) { dev_err(dev->gc.parent, "failed to add IRQ chip\n"); goto err_sysfs_remove; } return ret; err_sysfs_remove: sysfs_remove_group(&hdev->dev.kobj, &cp2112_attr_group); err_gpiochip_remove: gpiochip_remove(&dev->gc); err_free_i2c: i2c_del_adapter(&dev->adap); err_power_normal: hid_hw_power(hdev, PM_HINT_NORMAL); err_hid_close: hid_hw_close(hdev); err_hid_stop: hid_hw_stop(hdev); return ret; } static void cp2112_remove(struct hid_device *hdev) { struct cp2112_device *dev = hid_get_drvdata(hdev); int i; sysfs_remove_group(&hdev->dev.kobj, &cp2112_attr_group); i2c_del_adapter(&dev->adap); if (dev->gpio_poll) { dev->gpio_poll = false; 
cancel_delayed_work_sync(&dev->gpio_poll_worker); } for (i = 0; i < ARRAY_SIZE(dev->desc); i++) { gpiochip_unlock_as_irq(&dev->gc, i); gpiochip_free_own_desc(dev->desc[i]); } gpiochip_remove(&dev->gc); /* i2c_del_adapter has finished removing all i2c devices from our * adapter. Well behaved devices should no longer call our cp2112_xfer * and should have waited for any pending calls to finish. It has also * waited for device_unregister(&adap->dev) to complete. Therefore we * can safely free our struct cp2112_device. */ hid_hw_close(hdev); hid_hw_stop(hdev); } static int cp2112_raw_event(struct hid_device *hdev, struct hid_report *report, u8 *data, int size) { struct cp2112_device *dev = hid_get_drvdata(hdev); struct cp2112_xfer_status_report *xfer = (void *)data; switch (data[0]) { case CP2112_TRANSFER_STATUS_RESPONSE: hid_dbg(hdev, "xfer status: %02x %02x %04x %04x\n", xfer->status0, xfer->status1, be16_to_cpu(xfer->retries), be16_to_cpu(xfer->length)); switch (xfer->status0) { case STATUS0_IDLE: dev->xfer_status = -EAGAIN; break; case STATUS0_BUSY: dev->xfer_status = -EBUSY; break; case STATUS0_COMPLETE: dev->xfer_status = be16_to_cpu(xfer->length); break; case STATUS0_ERROR: switch (xfer->status1) { case STATUS1_TIMEOUT_NACK: case STATUS1_TIMEOUT_BUS: dev->xfer_status = -ETIMEDOUT; break; default: dev->xfer_status = -EIO; break; } break; default: dev->xfer_status = -EINVAL; break; } atomic_set(&dev->xfer_avail, 1); break; case CP2112_DATA_READ_RESPONSE: hid_dbg(hdev, "read response: %02x %02x\n", data[1], data[2]); dev->read_length = data[2]; if (dev->read_length > sizeof(dev->read_data)) dev->read_length = sizeof(dev->read_data); memcpy(dev->read_data, &data[3], dev->read_length); atomic_set(&dev->read_avail, 1); break; default: hid_err(hdev, "unknown report\n"); return 0; } wake_up_interruptible(&dev->wait); return 1; } static struct hid_driver cp2112_driver = { .name = "cp2112", .id_table = cp2112_devices, .probe = cp2112_probe, .remove = cp2112_remove, .raw_event = cp2112_raw_event, }; module_hid_driver(cp2112_driver); MODULE_DESCRIPTION("Silicon Labs HID USB to SMBus master bridge"); MODULE_AUTHOR("David Barksdale <dbarksdale@uplogix.com>"); MODULE_LICENSE("GPL");
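/*
 * Usage sketch (illustrative only, not part of this driver; the client
 * address and register offsets are hypothetical): once cp2112_probe() has
 * registered dev->adap with i2c_add_adapter(), an i2c client bound to that
 * adapter reaches cp2112_xfer() through the ordinary SMBus helpers, e.g.
 *
 *	s32 val = i2c_smbus_read_byte_data(client, 0x01);
 *	if (val < 0)
 *		return val;
 *	return i2c_smbus_write_word_data(client, 0x02, 0x1234);
 *
 * The read lands in the I2C_SMBUS_BYTE_DATA case of cp2112_xfer() (a
 * cp2112_write_read_req() report), the write in the I2C_SMBUS_WORD_DATA case
 * (a cp2112_write_req() report), and the result, including errors such as
 * -ETIMEDOUT, is collected via cp2112_xfer_status().
 */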
./CrossVul/dataset_final_sorted/CWE-404/c/bad_3320_0
crossvul-cpp_data_bad_2525_1
/************************************************* * Exim - an Internet mail transport agent * *************************************************/ /* Copyright (c) University of Cambridge 1995 - 2016 */ /* See the file NOTICE for conditions of use and distribution. */ /* The main function: entry point, initialization, and high-level control. Also a few functions that don't naturally fit elsewhere. */ #include "exim.h" #if defined(__GLIBC__) && !defined(__UCLIBC__) # include <gnu/libc-version.h> #endif #ifdef USE_GNUTLS # include <gnutls/gnutls.h> # if GNUTLS_VERSION_NUMBER < 0x030103 && !defined(DISABLE_OCSP) # define DISABLE_OCSP # endif #endif extern void init_lookup_list(void); /************************************************* * Function interface to store functions * *************************************************/ /* We need some real functions to pass to the PCRE regular expression library for store allocation via Exim's store manager. The normal calls are actually macros that pass over location information to make tracing easier. These functions just interface to the standard macro calls. A good compiler will optimize out the tail recursion and so not make them too expensive. There are two sets of functions; one for use when we want to retain the compiled regular expression for a long time; the other for short-term use. */ static void * function_store_get(size_t size) { return store_get((int)size); } static void function_dummy_free(void *block) { block = block; } static void * function_store_malloc(size_t size) { return store_malloc((int)size); } static void function_store_free(void *block) { store_free(block); } /************************************************* * Enums for cmdline interface * *************************************************/ enum commandline_info { CMDINFO_NONE=0, CMDINFO_HELP, CMDINFO_SIEVE, CMDINFO_DSCP }; /************************************************* * Compile regular expression and panic on fail * *************************************************/ /* This function is called when failure to compile a regular expression leads to a panic exit. In other cases, pcre_compile() is called directly. In many cases where this function is used, the results of the compilation are to be placed in long-lived store, so we temporarily reset the store management functions that PCRE uses if the use_malloc flag is set. Argument: pattern the pattern to compile caseless TRUE if caseless matching is required use_malloc TRUE if compile into malloc store Returns: pointer to the compiled pattern */ const pcre * regex_must_compile(const uschar *pattern, BOOL caseless, BOOL use_malloc) { int offset; int options = PCRE_COPT; const pcre *yield; const uschar *error; if (use_malloc) { pcre_malloc = function_store_malloc; pcre_free = function_store_free; } if (caseless) options |= PCRE_CASELESS; yield = pcre_compile(CCS pattern, options, (const char **)&error, &offset, NULL); pcre_malloc = function_store_get; pcre_free = function_dummy_free; if (yield == NULL) log_write(0, LOG_MAIN|LOG_PANIC_DIE, "regular expression error: " "%s at offset %d while compiling %s", error, offset, pattern); return yield; } /************************************************* * Execute regular expression and set strings * *************************************************/ /* This function runs a regular expression match, and sets up the pointers to the matched substrings. 
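When setup < 0 the whole matched string is stored at expand_nstring[0] and the captured substrings follow from index 1; when setup >= 0 only the captured substrings are stored, starting at index setup+1. Either way expand_nmax is left as the highest index that was filled in.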
Arguments: re the compiled expression subject the subject string options additional PCRE options setup if < 0 do full setup if >= 0 setup from setup+1 onwards, excluding the full matched string Returns: TRUE or FALSE */ BOOL regex_match_and_setup(const pcre *re, const uschar *subject, int options, int setup) { int ovector[3*(EXPAND_MAXN+1)]; uschar * s = string_copy(subject); /* de-constifying */ int n = pcre_exec(re, NULL, CS s, Ustrlen(s), 0, PCRE_EOPT | options, ovector, sizeof(ovector)/sizeof(int)); BOOL yield = n >= 0; if (n == 0) n = EXPAND_MAXN + 1; if (yield) { int nn; expand_nmax = (setup < 0)? 0 : setup + 1; for (nn = (setup < 0)? 0 : 2; nn < n*2; nn += 2) { expand_nstring[expand_nmax] = s + ovector[nn]; expand_nlength[expand_nmax++] = ovector[nn+1] - ovector[nn]; } expand_nmax--; } return yield; } /************************************************* * Set up processing details * *************************************************/ /* Save a text string for dumping when SIGUSR1 is received. Do checks for overruns. Arguments: format and arguments, as for printf() Returns: nothing */ void set_process_info(const char *format, ...) { int len = sprintf(CS process_info, "%5d ", (int)getpid()); va_list ap; va_start(ap, format); if (!string_vformat(process_info + len, PROCESS_INFO_SIZE - len - 2, format, ap)) Ustrcpy(process_info + len, "**** string overflowed buffer ****"); len = Ustrlen(process_info); process_info[len+0] = '\n'; process_info[len+1] = '\0'; process_info_len = len + 1; DEBUG(D_process_info) debug_printf("set_process_info: %s", process_info); va_end(ap); } /************************************************* * Handler for SIGUSR1 * *************************************************/ /* SIGUSR1 causes any exim process to write to the process log details of what it is currently doing. It will only be used if the OS is capable of setting up a handler that causes automatic restarting of any system call that is in progress at the time. This function takes care to be signal-safe. Argument: the signal number (SIGUSR1) Returns: nothing */ static void usr1_handler(int sig) { int fd; os_restarting_signal(sig, usr1_handler); fd = Uopen(process_log_path, O_APPEND|O_WRONLY, LOG_MODE); if (fd < 0) { /* If we are already running as the Exim user, try to create it in the current process (assuming spool_directory exists). Otherwise, if we are root, do the creation in an exim:exim subprocess. */ int euid = geteuid(); if (euid == exim_uid) fd = Uopen(process_log_path, O_CREAT|O_APPEND|O_WRONLY, LOG_MODE); else if (euid == root_uid) fd = log_create_as_exim(process_log_path); } /* If we are neither exim nor root, or if we failed to create the log file, give up. There is not much useful we can do with errors, since we don't want to disrupt whatever is going on outside the signal handler. */ if (fd < 0) return; (void)write(fd, process_info, process_info_len); (void)close(fd); } /************************************************* * Timeout handler * *************************************************/ /* This handler is enabled most of the time that Exim is running. The handler doesn't actually get used unless alarm() has been called to set a timer, to place a time limit on a system call of some kind. When the handler is run, it re-enables itself. There are some other SIGALRM handlers that are used in special cases when more than just a flag setting is required; for example, when reading a message's input. 
These are normally set up in the code module that uses them, and the SIGALRM handler is reset to this one afterwards. Argument: the signal value (SIGALRM) Returns: nothing */ void sigalrm_handler(int sig) { sig = sig; /* Keep picky compilers happy */ sigalrm_seen = TRUE; os_non_restarting_signal(SIGALRM, sigalrm_handler); } /************************************************* * Sleep for a fractional time interval * *************************************************/ /* This function is called by millisleep() and exim_wait_tick() to wait for a period of time that may include a fraction of a second. The coding is somewhat tedious. We do not expect setitimer() ever to fail, but if it does, the process will wait for ever, so we panic in this instance. (There was a case of this when a bug in a function that calls milliwait() caused it to pass invalid data. That's when I added the check. :-) We assume it to be not worth sleeping for under 100us; this value will require revisiting as hardware advances. This avoids the issue of a zero-valued timer setting meaning "never fire". Argument: an itimerval structure containing the interval Returns: nothing */ static void milliwait(struct itimerval *itval) { sigset_t sigmask; sigset_t old_sigmask; if (itval->it_value.tv_usec < 100 && itval->it_value.tv_sec == 0) return; (void)sigemptyset(&sigmask); /* Empty mask */ (void)sigaddset(&sigmask, SIGALRM); /* Add SIGALRM */ (void)sigprocmask(SIG_BLOCK, &sigmask, &old_sigmask); /* Block SIGALRM */ if (setitimer(ITIMER_REAL, itval, NULL) < 0) /* Start timer */ log_write(0, LOG_MAIN|LOG_PANIC_DIE, "setitimer() failed: %s", strerror(errno)); (void)sigfillset(&sigmask); /* All signals */ (void)sigdelset(&sigmask, SIGALRM); /* Remove SIGALRM */ (void)sigsuspend(&sigmask); /* Until SIGALRM */ (void)sigprocmask(SIG_SETMASK, &old_sigmask, NULL); /* Restore mask */ } /************************************************* * Millisecond sleep function * *************************************************/ /* The basic sleep() function has a granularity of 1 second, which is too rough in some cases - for example, when using an increasing delay to slow down spammers. Argument: number of millseconds Returns: nothing */ void millisleep(int msec) { struct itimerval itval; itval.it_interval.tv_sec = 0; itval.it_interval.tv_usec = 0; itval.it_value.tv_sec = msec/1000; itval.it_value.tv_usec = (msec % 1000) * 1000; milliwait(&itval); } /************************************************* * Compare microsecond times * *************************************************/ /* Arguments: tv1 the first time tv2 the second time Returns: -1, 0, or +1 */ int exim_tvcmp(struct timeval *t1, struct timeval *t2) { if (t1->tv_sec > t2->tv_sec) return +1; if (t1->tv_sec < t2->tv_sec) return -1; if (t1->tv_usec > t2->tv_usec) return +1; if (t1->tv_usec < t2->tv_usec) return -1; return 0; } /************************************************* * Clock tick wait function * *************************************************/ /* Exim uses a time + a pid to generate a unique identifier in two places: its message IDs, and in file names for maildir deliveries. Because some OS now re-use pids within the same second, sub-second times are now being used. However, for absolute certainty, we must ensure the clock has ticked before allowing the relevant process to complete. At the time of implementation of this code (February 2003), the speed of processors is such that the clock will invariably have ticked already by the time a process has done its job. 
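(When that is so, the exim_tvcmp() comparison below finds that "now" is already past "then" and the function falls straight through without waiting.)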
This function prepares for the time when things are faster - and it also copes with clocks that go backwards. Arguments: then_tv A timeval which was used to create uniqueness; its usec field has been rounded down to the value of the resolution. We want to be sure the current time is greater than this. resolution The resolution that was used to divide the microseconds (1 for maildir, larger for message ids) Returns: nothing */ void exim_wait_tick(struct timeval *then_tv, int resolution) { struct timeval now_tv; long int now_true_usec; (void)gettimeofday(&now_tv, NULL); now_true_usec = now_tv.tv_usec; now_tv.tv_usec = (now_true_usec/resolution) * resolution; if (exim_tvcmp(&now_tv, then_tv) <= 0) { struct itimerval itval; itval.it_interval.tv_sec = 0; itval.it_interval.tv_usec = 0; itval.it_value.tv_sec = then_tv->tv_sec - now_tv.tv_sec; itval.it_value.tv_usec = then_tv->tv_usec + resolution - now_true_usec; /* We know that, overall, "now" is less than or equal to "then". Therefore, a negative value for the microseconds is possible only in the case when "now" is more than a second less than "then". That means that itval.it_value.tv_sec is greater than zero. The following correction is therefore safe. */ if (itval.it_value.tv_usec < 0) { itval.it_value.tv_usec += 1000000; itval.it_value.tv_sec -= 1; } DEBUG(D_transport|D_receive) { if (!running_in_test_harness) { debug_printf("tick check: " TIME_T_FMT ".%06lu " TIME_T_FMT ".%06lu\n", then_tv->tv_sec, (long) then_tv->tv_usec, now_tv.tv_sec, (long) now_tv.tv_usec); debug_printf("waiting " TIME_T_FMT ".%06lu\n", itval.it_value.tv_sec, (long) itval.it_value.tv_usec); } } milliwait(&itval); } } /************************************************* * Call fopen() with umask 777 and adjust mode * *************************************************/ /* Exim runs with umask(0) so that files created with open() have the mode that is specified in the open() call. However, there are some files, typically in the spool directory, that are created with fopen(). They end up world-writeable if no precautions are taken. Although the spool directory is not accessible to the world, this is an untidiness. So this is a wrapper function for fopen() that sorts out the mode of the created file. Arguments: filename the file name options the fopen() options mode the required mode Returns: the fopened FILE or NULL */ FILE * modefopen(const uschar *filename, const char *options, mode_t mode) { mode_t saved_umask = umask(0777); FILE *f = Ufopen(filename, options); (void)umask(saved_umask); if (f != NULL) (void)fchmod(fileno(f), mode); return f; } /************************************************* * Ensure stdin, stdout, and stderr exist * *************************************************/ /* Some operating systems grumble if an exec() happens without a standard input, output, and error (fds 0, 1, 2) being defined. The worry is that some file will be opened and will use these fd values, and then some other bit of code will assume, for example, that it can write error messages to stderr. This function ensures that fds 0, 1, and 2 are open if they do not already exist, by connecting them to /dev/null. This function is also used to ensure that std{in,out,err} exist at all times, so that if any library that Exim calls tries to use them, it doesn't crash. 
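Each of descriptors 0, 1 and 2 is checked with fstat(); any that reports EBADF is attached to /dev/null (directly or via dup2()), and a surplus /dev/null descriptor above 2 is closed again afterwards.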
Arguments: None Returns: Nothing */ void exim_nullstd(void) { int i; int devnull = -1; struct stat statbuf; for (i = 0; i <= 2; i++) { if (fstat(i, &statbuf) < 0 && errno == EBADF) { if (devnull < 0) devnull = open("/dev/null", O_RDWR); if (devnull < 0) log_write(0, LOG_MAIN|LOG_PANIC_DIE, "%s", string_open_failed(errno, "/dev/null")); if (devnull != i) (void)dup2(devnull, i); } } if (devnull > 2) (void)close(devnull); } /************************************************* * Close unwanted file descriptors for delivery * *************************************************/ /* This function is called from a new process that has been forked to deliver an incoming message, either directly, or using exec. We want any smtp input streams to be closed in this new process. However, it has been observed that using fclose() here causes trouble. When reading in -bS input, duplicate copies of messages have been seen. The files will be sharing a file pointer with the parent process, and it seems that fclose() (at least on some systems - I saw this on Solaris 2.5.1) messes with that file pointer, at least sometimes. Hence we go for closing the underlying file descriptors. If TLS is active, we want to shut down the TLS library, but without molesting the parent's SSL connection. For delivery of a non-SMTP message, we want to close stdin and stdout (and stderr unless debugging) because the calling process might have set them up as pipes and be waiting for them to close before it waits for the submission process to terminate. If they aren't closed, they hold up the calling process until the initial delivery process finishes, which is not what we want. Exception: We do want it for synchronous delivery! And notwithstanding all the above, if D_resolver is set, implying resolver debugging, leave stdout open, because that's where the resolver writes its debugging output. When we close stderr (which implies we've also closed stdout), we also get rid of any controlling terminal. Arguments: None Returns: Nothing */ static void close_unwanted(void) { if (smtp_input) { #ifdef SUPPORT_TLS tls_close(TRUE, FALSE); /* Shut down the TLS library */ #endif (void)close(fileno(smtp_in)); (void)close(fileno(smtp_out)); smtp_in = NULL; } else { (void)close(0); /* stdin */ if ((debug_selector & D_resolver) == 0) (void)close(1); /* stdout */ if (debug_selector == 0) /* stderr */ { if (!synchronous_delivery) { (void)close(2); log_stderr = NULL; } (void)setsid(); } } } /************************************************* * Set uid and gid * *************************************************/ /* This function sets a new uid and gid permanently, optionally calling initgroups() to set auxiliary groups. There are some special cases when running Exim in unprivileged modes. In these situations the effective uid will not be root; if we already have the right effective uid/gid, and don't need to initialize any groups, leave things as they are. Arguments: uid the uid gid the gid igflag TRUE if initgroups() wanted msg text to use in debugging output and failure log Returns: nothing; bombs out on failure */ void exim_setugid(uid_t uid, gid_t gid, BOOL igflag, uschar *msg) { uid_t euid = geteuid(); gid_t egid = getegid(); if (euid == root_uid || euid != uid || egid != gid || igflag) { /* At least one OS returns +1 for initgroups failure, so just check for non-zero. 
*/ if (igflag) { struct passwd *pw = getpwuid(uid); if (pw != NULL) { if (initgroups(pw->pw_name, gid) != 0) log_write(0,LOG_MAIN|LOG_PANIC_DIE,"initgroups failed for uid=%ld: %s", (long int)uid, strerror(errno)); } else log_write(0, LOG_MAIN|LOG_PANIC_DIE, "cannot run initgroups(): " "no passwd entry for uid=%ld", (long int)uid); } if (setgid(gid) < 0 || setuid(uid) < 0) { log_write(0, LOG_MAIN|LOG_PANIC_DIE, "unable to set gid=%ld or uid=%ld " "(euid=%ld): %s", (long int)gid, (long int)uid, (long int)euid, msg); } } /* Debugging output included uid/gid and all groups */ DEBUG(D_uid) { int group_count, save_errno; gid_t group_list[NGROUPS_MAX]; debug_printf("changed uid/gid: %s\n uid=%ld gid=%ld pid=%ld\n", msg, (long int)geteuid(), (long int)getegid(), (long int)getpid()); group_count = getgroups(NGROUPS_MAX, group_list); save_errno = errno; debug_printf(" auxiliary group list:"); if (group_count > 0) { int i; for (i = 0; i < group_count; i++) debug_printf(" %d", (int)group_list[i]); } else if (group_count < 0) debug_printf(" <error: %s>", strerror(save_errno)); else debug_printf(" <none>"); debug_printf("\n"); } } /************************************************* * Exit point * *************************************************/ /* Exim exits via this function so that it always clears up any open databases. Arguments: rc return code Returns: does not return */ void exim_exit(int rc) { search_tidyup(); DEBUG(D_any) debug_printf(">>>>>>>>>>>>>>>> Exim pid=%d terminating with rc=%d " ">>>>>>>>>>>>>>>>\n", (int)getpid(), rc); exit(rc); } /************************************************* * Extract port from host address * *************************************************/ /* Called to extract the port from the values given to -oMa and -oMi. It also checks the syntax of the address, and terminates it before the port data when a port is extracted. Argument: address the address, with possible port on the end Returns: the port, or zero if there isn't one bombs out on a syntax error */ static int check_port(uschar *address) { int port = host_address_extract_port(address); if (string_is_ip_address(address, NULL) == 0) { fprintf(stderr, "exim abandoned: \"%s\" is not an IP address\n", address); exit(EXIT_FAILURE); } return port; } /************************************************* * Test/verify an address * *************************************************/ /* This function is called by the -bv and -bt code. It extracts a working address from a full RFC 822 address. This isn't really necessary per se, but it has the effect of collapsing source routes. Arguments: s the address string flags flag bits for verify_address() exit_value to be set for failures Returns: nothing */ static void test_address(uschar *s, int flags, int *exit_value) { int start, end, domain; uschar *parse_error = NULL; uschar *address = parse_extract_address(s, &parse_error, &start, &end, &domain, FALSE); if (address == NULL) { fprintf(stdout, "syntax error: %s\n", parse_error); *exit_value = 2; } else { int rc = verify_address(deliver_make_addr(address,TRUE), stdout, flags, -1, -1, -1, NULL, NULL, NULL); if (rc == FAIL) *exit_value = 2; else if (rc == DEFER && *exit_value == 0) *exit_value = 1; } } /************************************************* * Show supported features * *************************************************/ /* This function is called for -bV/--version and for -d to output the optional features of the current Exim binary. 
Arguments: a FILE for printing Returns: nothing */ static void show_whats_supported(FILE *f) { auth_info *authi; #ifdef DB_VERSION_STRING fprintf(f, "Berkeley DB: %s\n", DB_VERSION_STRING); #elif defined(BTREEVERSION) && defined(HASHVERSION) #ifdef USE_DB fprintf(f, "Probably Berkeley DB version 1.8x (native mode)\n"); #else fprintf(f, "Probably Berkeley DB version 1.8x (compatibility mode)\n"); #endif #elif defined(_DBM_RDONLY) || defined(dbm_dirfno) fprintf(f, "Probably ndbm\n"); #elif defined(USE_TDB) fprintf(f, "Using tdb\n"); #else #ifdef USE_GDBM fprintf(f, "Probably GDBM (native mode)\n"); #else fprintf(f, "Probably GDBM (compatibility mode)\n"); #endif #endif fprintf(f, "Support for:"); #ifdef SUPPORT_CRYPTEQ fprintf(f, " crypteq"); #endif #if HAVE_ICONV fprintf(f, " iconv()"); #endif #if HAVE_IPV6 fprintf(f, " IPv6"); #endif #ifdef HAVE_SETCLASSRESOURCES fprintf(f, " use_setclassresources"); #endif #ifdef SUPPORT_PAM fprintf(f, " PAM"); #endif #ifdef EXIM_PERL fprintf(f, " Perl"); #endif #ifdef EXPAND_DLFUNC fprintf(f, " Expand_dlfunc"); #endif #ifdef USE_TCP_WRAPPERS fprintf(f, " TCPwrappers"); #endif #ifdef SUPPORT_TLS #ifdef USE_GNUTLS fprintf(f, " GnuTLS"); #else fprintf(f, " OpenSSL"); #endif #endif #ifdef SUPPORT_TRANSLATE_IP_ADDRESS fprintf(f, " translate_ip_address"); #endif #ifdef SUPPORT_MOVE_FROZEN_MESSAGES fprintf(f, " move_frozen_messages"); #endif #ifdef WITH_CONTENT_SCAN fprintf(f, " Content_Scanning"); #endif #ifndef DISABLE_DKIM fprintf(f, " DKIM"); #endif #ifndef DISABLE_DNSSEC fprintf(f, " DNSSEC"); #endif #ifndef DISABLE_EVENT fprintf(f, " Event"); #endif #ifdef SUPPORT_I18N fprintf(f, " I18N"); #endif #ifndef DISABLE_OCSP fprintf(f, " OCSP"); #endif #ifndef DISABLE_PRDR fprintf(f, " PRDR"); #endif #ifdef SUPPORT_PROXY fprintf(f, " PROXY"); #endif #ifdef SUPPORT_SOCKS fprintf(f, " SOCKS"); #endif #ifdef TCP_FASTOPEN fprintf(f, " TCP_Fast_Open"); #endif #ifdef EXPERIMENTAL_LMDB fprintf(f, " Experimental_LMDB"); #endif #ifdef EXPERIMENTAL_QUEUEFILE fprintf(f, " Experimental_QUEUEFILE"); #endif #ifdef EXPERIMENTAL_SPF fprintf(f, " Experimental_SPF"); #endif #ifdef EXPERIMENTAL_SRS fprintf(f, " Experimental_SRS"); #endif #ifdef EXPERIMENTAL_BRIGHTMAIL fprintf(f, " Experimental_Brightmail"); #endif #ifdef EXPERIMENTAL_DANE fprintf(f, " Experimental_DANE"); #endif #ifdef EXPERIMENTAL_DCC fprintf(f, " Experimental_DCC"); #endif #ifdef EXPERIMENTAL_DMARC fprintf(f, " Experimental_DMARC"); #endif #ifdef EXPERIMENTAL_DSN_INFO fprintf(f, " Experimental_DSN_info"); #endif fprintf(f, "\n"); fprintf(f, "Lookups (built-in):"); #if defined(LOOKUP_LSEARCH) && LOOKUP_LSEARCH!=2 fprintf(f, " lsearch wildlsearch nwildlsearch iplsearch"); #endif #if defined(LOOKUP_CDB) && LOOKUP_CDB!=2 fprintf(f, " cdb"); #endif #if defined(LOOKUP_DBM) && LOOKUP_DBM!=2 fprintf(f, " dbm dbmjz dbmnz"); #endif #if defined(LOOKUP_DNSDB) && LOOKUP_DNSDB!=2 fprintf(f, " dnsdb"); #endif #if defined(LOOKUP_DSEARCH) && LOOKUP_DSEARCH!=2 fprintf(f, " dsearch"); #endif #if defined(LOOKUP_IBASE) && LOOKUP_IBASE!=2 fprintf(f, " ibase"); #endif #if defined(LOOKUP_LDAP) && LOOKUP_LDAP!=2 fprintf(f, " ldap ldapdn ldapm"); #endif #ifdef EXPERIMENTAL_LMDB fprintf(f, " lmdb"); #endif #if defined(LOOKUP_MYSQL) && LOOKUP_MYSQL!=2 fprintf(f, " mysql"); #endif #if defined(LOOKUP_NIS) && LOOKUP_NIS!=2 fprintf(f, " nis nis0"); #endif #if defined(LOOKUP_NISPLUS) && LOOKUP_NISPLUS!=2 fprintf(f, " nisplus"); #endif #if defined(LOOKUP_ORACLE) && LOOKUP_ORACLE!=2 fprintf(f, " oracle"); #endif #if defined(LOOKUP_PASSWD) && 
LOOKUP_PASSWD!=2 fprintf(f, " passwd"); #endif #if defined(LOOKUP_PGSQL) && LOOKUP_PGSQL!=2 fprintf(f, " pgsql"); #endif #if defined(LOOKUP_REDIS) && LOOKUP_REDIS!=2 fprintf(f, " redis"); #endif #if defined(LOOKUP_SQLITE) && LOOKUP_SQLITE!=2 fprintf(f, " sqlite"); #endif #if defined(LOOKUP_TESTDB) && LOOKUP_TESTDB!=2 fprintf(f, " testdb"); #endif #if defined(LOOKUP_WHOSON) && LOOKUP_WHOSON!=2 fprintf(f, " whoson"); #endif fprintf(f, "\n"); fprintf(f, "Authenticators:"); #ifdef AUTH_CRAM_MD5 fprintf(f, " cram_md5"); #endif #ifdef AUTH_CYRUS_SASL fprintf(f, " cyrus_sasl"); #endif #ifdef AUTH_DOVECOT fprintf(f, " dovecot"); #endif #ifdef AUTH_GSASL fprintf(f, " gsasl"); #endif #ifdef AUTH_HEIMDAL_GSSAPI fprintf(f, " heimdal_gssapi"); #endif #ifdef AUTH_PLAINTEXT fprintf(f, " plaintext"); #endif #ifdef AUTH_SPA fprintf(f, " spa"); #endif #ifdef AUTH_TLS fprintf(f, " tls"); #endif fprintf(f, "\n"); fprintf(f, "Routers:"); #ifdef ROUTER_ACCEPT fprintf(f, " accept"); #endif #ifdef ROUTER_DNSLOOKUP fprintf(f, " dnslookup"); #endif #ifdef ROUTER_IPLITERAL fprintf(f, " ipliteral"); #endif #ifdef ROUTER_IPLOOKUP fprintf(f, " iplookup"); #endif #ifdef ROUTER_MANUALROUTE fprintf(f, " manualroute"); #endif #ifdef ROUTER_QUERYPROGRAM fprintf(f, " queryprogram"); #endif #ifdef ROUTER_REDIRECT fprintf(f, " redirect"); #endif fprintf(f, "\n"); fprintf(f, "Transports:"); #ifdef TRANSPORT_APPENDFILE fprintf(f, " appendfile"); #ifdef SUPPORT_MAILDIR fprintf(f, "/maildir"); #endif #ifdef SUPPORT_MAILSTORE fprintf(f, "/mailstore"); #endif #ifdef SUPPORT_MBX fprintf(f, "/mbx"); #endif #endif #ifdef TRANSPORT_AUTOREPLY fprintf(f, " autoreply"); #endif #ifdef TRANSPORT_LMTP fprintf(f, " lmtp"); #endif #ifdef TRANSPORT_PIPE fprintf(f, " pipe"); #endif #ifdef EXPERIMENTAL_QUEUEFILE fprintf(f, " queuefile"); #endif #ifdef TRANSPORT_SMTP fprintf(f, " smtp"); #endif fprintf(f, "\n"); if (fixed_never_users[0] > 0) { int i; fprintf(f, "Fixed never_users: "); for (i = 1; i <= (int)fixed_never_users[0] - 1; i++) fprintf(f, "%d:", (unsigned int)fixed_never_users[i]); fprintf(f, "%d\n", (unsigned int)fixed_never_users[i]); } fprintf(f, "Configure owner: %d:%d\n", config_uid, config_gid); fprintf(f, "Size of off_t: " SIZE_T_FMT "\n", sizeof(off_t)); /* Everything else is details which are only worth reporting when debugging. Perhaps the tls_version_report should move into this too. */ DEBUG(D_any) do { int i; /* clang defines __GNUC__ (at least, for me) so test for it first */ #if defined(__clang__) fprintf(f, "Compiler: CLang [%s]\n", __clang_version__); #elif defined(__GNUC__) fprintf(f, "Compiler: GCC [%s]\n", # ifdef __VERSION__ __VERSION__ # else "? unknown version ?" # endif ); #else fprintf(f, "Compiler: <unknown>\n"); #endif #if defined(__GLIBC__) && !defined(__UCLIBC__) fprintf(f, "Library version: Glibc: Compile: %d.%d\n", __GLIBC__, __GLIBC_MINOR__); if (__GLIBC_PREREQ(2, 1)) fprintf(f, " Runtime: %s\n", gnu_get_libc_version()); #endif #ifdef SUPPORT_TLS tls_version_report(f); #endif #ifdef SUPPORT_I18N utf8_version_report(f); #endif for (authi = auths_available; *authi->driver_name != '\0'; ++authi) if (authi->version_report) (*authi->version_report)(f); /* PCRE_PRERELEASE is either defined and empty or a bare sequence of characters; unless it's an ancient version of PCRE in which case it is not defined. 
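The QUOTE/EXPAND_AND_QUOTE pair defined just below is the usual two-level stringizing trick: the extra macro layer forces PCRE_PRERELEASE to be macro-expanded before the # operator turns the result into a string literal, rather than stringizing the name itself.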
*/ #ifndef PCRE_PRERELEASE # define PCRE_PRERELEASE #endif #define QUOTE(X) #X #define EXPAND_AND_QUOTE(X) QUOTE(X) fprintf(f, "Library version: PCRE: Compile: %d.%d%s\n" " Runtime: %s\n", PCRE_MAJOR, PCRE_MINOR, EXPAND_AND_QUOTE(PCRE_PRERELEASE) "", pcre_version()); #undef QUOTE #undef EXPAND_AND_QUOTE init_lookup_list(); for (i = 0; i < lookup_list_count; i++) if (lookup_list[i]->version_report) lookup_list[i]->version_report(f); #ifdef WHITELIST_D_MACROS fprintf(f, "WHITELIST_D_MACROS: \"%s\"\n", WHITELIST_D_MACROS); #else fprintf(f, "WHITELIST_D_MACROS unset\n"); #endif #ifdef TRUSTED_CONFIG_LIST fprintf(f, "TRUSTED_CONFIG_LIST: \"%s\"\n", TRUSTED_CONFIG_LIST); #else fprintf(f, "TRUSTED_CONFIG_LIST unset\n"); #endif } while (0); } /************************************************* * Show auxiliary information about Exim * *************************************************/ static void show_exim_information(enum commandline_info request, FILE *stream) { const uschar **pp; switch(request) { case CMDINFO_NONE: fprintf(stream, "Oops, something went wrong.\n"); return; case CMDINFO_HELP: fprintf(stream, "The -bI: flag takes a string indicating which information to provide.\n" "If the string is not recognised, you'll get this help (on stderr).\n" "\n" " exim -bI:help this information\n" " exim -bI:dscp dscp value keywords known\n" " exim -bI:sieve list of supported sieve extensions, one per line.\n" ); return; case CMDINFO_SIEVE: for (pp = exim_sieve_extension_list; *pp; ++pp) fprintf(stream, "%s\n", *pp); return; case CMDINFO_DSCP: dscp_list_to_stream(stream); return; } } /************************************************* * Quote a local part * *************************************************/ /* This function is used when a sender address or a From: or Sender: header line is being created from the caller's login, or from an authenticated_id. It applies appropriate quoting rules for a local part. Argument: the local part Returns: the local part, quoted if necessary */ uschar * local_part_quote(uschar *lpart) { BOOL needs_quote = FALSE; int size, ptr; uschar *yield; uschar *t; for (t = lpart; !needs_quote && *t != 0; t++) { needs_quote = !isalnum(*t) && strchr("!#$%&'*+-/=?^_`{|}~", *t) == NULL && (*t != '.' || t == lpart || t[1] == 0); } if (!needs_quote) return lpart; size = ptr = 0; yield = string_catn(NULL, &size, &ptr, US"\"", 1); for (;;) { uschar *nq = US Ustrpbrk(lpart, "\\\""); if (nq == NULL) { yield = string_cat(yield, &size, &ptr, lpart); break; } yield = string_catn(yield, &size, &ptr, lpart, nq - lpart); yield = string_catn(yield, &size, &ptr, US"\\", 1); yield = string_catn(yield, &size, &ptr, nq, 1); lpart = nq + 1; } yield = string_catn(yield, &size, &ptr, US"\"", 1); yield[ptr] = 0; return yield; } #ifdef USE_READLINE /************************************************* * Load readline() functions * *************************************************/ /* This function is called from testing executions that read data from stdin, but only when running as the calling user. Currently, only -be does this. The function loads the readline() function library and passes back the functions. On some systems, it needs the curses library, so load that too, but try without it if loading fails. All this functionality has to be requested at build time. 
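Because the libraries are loaded with dlopen()/dlsym() at run time, their absence is not fatal even when USE_READLINE is compiled in: get_stdinput() copes with a NULL readline function by prompting with "> " and reading via Ufgets() instead.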
Arguments: fn_readline_ptr pointer to where to put the readline pointer fn_addhist_ptr pointer to where to put the addhistory function Returns: the dlopen handle or NULL on failure */ static void * set_readline(char * (**fn_readline_ptr)(const char *), void (**fn_addhist_ptr)(const char *)) { void *dlhandle; void *dlhandle_curses = dlopen("libcurses." DYNLIB_FN_EXT, RTLD_GLOBAL|RTLD_LAZY); dlhandle = dlopen("libreadline." DYNLIB_FN_EXT, RTLD_GLOBAL|RTLD_NOW); if (dlhandle_curses != NULL) dlclose(dlhandle_curses); if (dlhandle != NULL) { /* Checked manual pages; at least in GNU Readline 6.1, the prototypes are: * char * readline (const char *prompt); * void add_history (const char *string); */ *fn_readline_ptr = (char *(*)(const char*))dlsym(dlhandle, "readline"); *fn_addhist_ptr = (void(*)(const char*))dlsym(dlhandle, "add_history"); } else { DEBUG(D_any) debug_printf("failed to load readline: %s\n", dlerror()); } return dlhandle; } #endif /************************************************* * Get a line from stdin for testing things * *************************************************/ /* This function is called when running tests that can take a number of lines of input (for example, -be and -bt). It handles continuations and trailing spaces. And prompting and a blank line output on eof. If readline() is in use, the arguments are non-NULL and provide the relevant functions. Arguments: fn_readline readline function or NULL fn_addhist addhist function or NULL Returns: pointer to dynamic memory, or NULL at end of file */ static uschar * get_stdinput(char *(*fn_readline)(const char *), void(*fn_addhist)(const char *)) { int i; int size = 0; int ptr = 0; uschar *yield = NULL; if (fn_readline == NULL) { printf("> "); fflush(stdout); } for (i = 0;; i++) { uschar buffer[1024]; uschar *p, *ss; #ifdef USE_READLINE char *readline_line = NULL; if (fn_readline != NULL) { if ((readline_line = fn_readline((i > 0)? "":"> ")) == NULL) break; if (*readline_line != 0 && fn_addhist != NULL) fn_addhist(readline_line); p = US readline_line; } else #endif /* readline() not in use */ { if (Ufgets(buffer, sizeof(buffer), stdin) == NULL) break; p = buffer; } /* Handle the line */ ss = p + (int)Ustrlen(p); while (ss > p && isspace(ss[-1])) ss--; if (i > 0) { while (p < ss && isspace(*p)) p++; /* leading space after cont */ } yield = string_catn(yield, &size, &ptr, p, ss - p); #ifdef USE_READLINE if (fn_readline != NULL) free(readline_line); #endif /* yield can only be NULL if ss==p */ if (ss == p || yield[ptr-1] != '\\') { if (yield) yield[ptr] = 0; break; } yield[--ptr] = 0; } if (yield == NULL) printf("\n"); return yield; } /************************************************* * Output usage information for the program * *************************************************/ /* This function is called when there are no recipients or a specific --help argument was added. Arguments: progname information on what name we were called by Returns: DOES NOT RETURN */ static void exim_usage(uschar *progname) { /* Handle specific program invocation variants */ if (Ustrcmp(progname, US"-mailq") == 0) { fprintf(stderr, "mailq - list the contents of the mail queue\n\n" "For a list of options, see the Exim documentation.\n"); exit(EXIT_FAILURE); } /* Generic usage - we output this whatever happens */ fprintf(stderr, "Exim is a Mail Transfer Agent. It is normally called by Mail User Agents,\n" "not directly from a shell command line. Options and/or arguments control\n" "what it does when called. 
For a list of options, see the Exim documentation.\n"); exit(EXIT_FAILURE); } /************************************************* * Validate that the macros given are okay * *************************************************/ /* Typically, Exim will drop privileges if macros are supplied. In some cases, we want to not do so. Arguments: opt_D_used - true if the commandline had a "-D" option Returns: true if trusted, false otherwise */ static BOOL macros_trusted(BOOL opt_D_used) { #ifdef WHITELIST_D_MACROS macro_item *m; uschar *whitelisted, *end, *p, **whites, **w; int white_count, i, n; size_t len; BOOL prev_char_item, found; #endif if (!opt_D_used) return TRUE; #ifndef WHITELIST_D_MACROS return FALSE; #else /* We only trust -D overrides for some invoking users: root, the exim run-time user, the optional config owner user. I don't know why config-owner would be needed, but since they can own the config files anyway, there's no security risk to letting them override -D. */ if ( ! ((real_uid == root_uid) || (real_uid == exim_uid) #ifdef CONFIGURE_OWNER || (real_uid == config_uid) #endif )) { debug_printf("macros_trusted rejecting macros for uid %d\n", (int) real_uid); return FALSE; } /* Get a list of macros which are whitelisted */ whitelisted = string_copy_malloc(US WHITELIST_D_MACROS); prev_char_item = FALSE; white_count = 0; for (p = whitelisted; *p != '\0'; ++p) { if (*p == ':' || isspace(*p)) { *p = '\0'; if (prev_char_item) ++white_count; prev_char_item = FALSE; continue; } if (!prev_char_item) prev_char_item = TRUE; } end = p; if (prev_char_item) ++white_count; if (!white_count) return FALSE; whites = store_malloc(sizeof(uschar *) * (white_count+1)); for (p = whitelisted, i = 0; (p != end) && (i < white_count); ++p) { if (*p != '\0') { whites[i++] = p; if (i == white_count) break; while (*p != '\0' && p < end) ++p; } } whites[i] = NULL; /* The list of commandline macros should be very short. Accept the N*M complexity. */ for (m = macros; m; m = m->next) if (m->command_line) { found = FALSE; for (w = whites; *w; ++w) if (Ustrcmp(*w, m->name) == 0) { found = TRUE; break; } if (!found) return FALSE; if (m->replacement == NULL) continue; len = Ustrlen(m->replacement); if (len == 0) continue; n = pcre_exec(regex_whitelisted_macro, NULL, CS m->replacement, len, 0, PCRE_EOPT, NULL, 0); if (n < 0) { if (n != PCRE_ERROR_NOMATCH) debug_printf("macros_trusted checking %s returned %d\n", m->name, n); return FALSE; } } DEBUG(D_any) debug_printf("macros_trusted overridden to true by whitelisting\n"); return TRUE; #endif } /************************************************* * Entry point and high-level code * *************************************************/ /* Entry point for the Exim mailer. Analyse the arguments and arrange to take the appropriate action. All the necessary functions are present in the one binary. I originally thought one should split it up, but it turns out that so much of the apparatus is needed in each chunk that one might as well just have it all available all the time, which then makes the coding easier as well. 
Arguments: argc count of entries in argv argv argument strings, with argv[0] being the program name Returns: EXIT_SUCCESS if terminated successfully EXIT_FAILURE otherwise, except when a message has been sent to the sender, and -oee was given */ int main(int argc, char **cargv) { uschar **argv = USS cargv; int arg_receive_timeout = -1; int arg_smtp_receive_timeout = -1; int arg_error_handling = error_handling; int filter_sfd = -1; int filter_ufd = -1; int group_count; int i, rv; int list_queue_option = 0; int msg_action = 0; int msg_action_arg = -1; int namelen = (argv[0] == NULL)? 0 : Ustrlen(argv[0]); int queue_only_reason = 0; #ifdef EXIM_PERL int perl_start_option = 0; #endif int recipients_arg = argc; int sender_address_domain = 0; int test_retry_arg = -1; int test_rewrite_arg = -1; BOOL arg_queue_only = FALSE; BOOL bi_option = FALSE; BOOL checking = FALSE; BOOL count_queue = FALSE; BOOL expansion_test = FALSE; BOOL extract_recipients = FALSE; BOOL flag_G = FALSE; BOOL flag_n = FALSE; BOOL forced_delivery = FALSE; BOOL f_end_dot = FALSE; BOOL deliver_give_up = FALSE; BOOL list_queue = FALSE; BOOL list_options = FALSE; BOOL list_config = FALSE; BOOL local_queue_only; BOOL more = TRUE; BOOL one_msg_action = FALSE; BOOL opt_D_used = FALSE; BOOL queue_only_set = FALSE; BOOL receiving_message = TRUE; BOOL sender_ident_set = FALSE; BOOL session_local_queue_only; BOOL unprivileged; BOOL removed_privilege = FALSE; BOOL usage_wanted = FALSE; BOOL verify_address_mode = FALSE; BOOL verify_as_sender = FALSE; BOOL version_printed = FALSE; uschar *alias_arg = NULL; uschar *called_as = US""; uschar *cmdline_syslog_name = NULL; uschar *start_queue_run_id = NULL; uschar *stop_queue_run_id = NULL; uschar *expansion_test_message = NULL; uschar *ftest_domain = NULL; uschar *ftest_localpart = NULL; uschar *ftest_prefix = NULL; uschar *ftest_suffix = NULL; uschar *log_oneline = NULL; uschar *malware_test_file = NULL; uschar *real_sender_address; uschar *originator_home = US"/"; size_t sz; void *reset_point; struct passwd *pw; struct stat statbuf; pid_t passed_qr_pid = (pid_t)0; int passed_qr_pipe = -1; gid_t group_list[NGROUPS_MAX]; /* For the -bI: flag */ enum commandline_info info_flag = CMDINFO_NONE; BOOL info_stdout = FALSE; /* Possible options for -R and -S */ static uschar *rsopts[] = { US"f", US"ff", US"r", US"rf", US"rff" }; /* Need to define this in case we need to change the environment in order to get rid of a bogus time zone. We have to make it char rather than uschar because some OS define it in /usr/include/unistd.h. */ extern char **environ; /* If the Exim user and/or group and/or the configuration file owner/group were defined by ref:name at build time, we must now find the actual uid/gid values. This is a feature to make the lives of binary distributors easier. */ #ifdef EXIM_USERNAME if (route_finduser(US EXIM_USERNAME, &pw, &exim_uid)) { if (exim_uid == 0) { fprintf(stderr, "exim: refusing to run with uid 0 for \"%s\"\n", EXIM_USERNAME); exit(EXIT_FAILURE); } /* If ref:name uses a number as the name, route_finduser() returns TRUE with exim_uid set and pw coerced to NULL. 
*/ if (pw) exim_gid = pw->pw_gid; #ifndef EXIM_GROUPNAME else { fprintf(stderr, "exim: ref:name should specify a usercode, not a group.\n" "exim: can't let you get away with it unless you also specify a group.\n"); exit(EXIT_FAILURE); } #endif } else { fprintf(stderr, "exim: failed to find uid for user name \"%s\"\n", EXIM_USERNAME); exit(EXIT_FAILURE); } #endif #ifdef EXIM_GROUPNAME if (!route_findgroup(US EXIM_GROUPNAME, &exim_gid)) { fprintf(stderr, "exim: failed to find gid for group name \"%s\"\n", EXIM_GROUPNAME); exit(EXIT_FAILURE); } #endif #ifdef CONFIGURE_OWNERNAME if (!route_finduser(US CONFIGURE_OWNERNAME, NULL, &config_uid)) { fprintf(stderr, "exim: failed to find uid for user name \"%s\"\n", CONFIGURE_OWNERNAME); exit(EXIT_FAILURE); } #endif /* We default the system_filter_user to be the Exim run-time user, as a sane non-root value. */ system_filter_uid = exim_uid; #ifdef CONFIGURE_GROUPNAME if (!route_findgroup(US CONFIGURE_GROUPNAME, &config_gid)) { fprintf(stderr, "exim: failed to find gid for group name \"%s\"\n", CONFIGURE_GROUPNAME); exit(EXIT_FAILURE); } #endif /* In the Cygwin environment, some initialization used to need doing. It was fudged in by means of this macro; now no longer but we'll leave it in case of others. */ #ifdef OS_INIT OS_INIT #endif /* Check a field which is patched when we are running Exim within its testing harness; do a fast initial check, and then the whole thing. */ running_in_test_harness = *running_status == '<' && Ustrcmp(running_status, "<<<testing>>>") == 0; /* The C standard says that the equivalent of setlocale(LC_ALL, "C") is obeyed at the start of a program; however, it seems that some environments do not follow this. A "strange" locale can affect the formatting of timestamps, so we make quite sure. */ setlocale(LC_ALL, "C"); /* Set up the default handler for timing using alarm(). */ os_non_restarting_signal(SIGALRM, sigalrm_handler); /* Ensure we have a buffer for constructing log entries. Use malloc directly, because store_malloc writes a log entry on failure. */ if (!(log_buffer = US malloc(LOG_BUFFER_SIZE))) { fprintf(stderr, "exim: failed to get store for log buffer\n"); exit(EXIT_FAILURE); } /* Initialize the default log options. */ bits_set(log_selector, log_selector_size, log_default); /* Set log_stderr to stderr, provided that stderr exists. This gets reset to NULL when the daemon is run and the file is closed. We have to use this indirection, because some systems don't allow writing to the variable "stderr". */ if (fstat(fileno(stderr), &statbuf) >= 0) log_stderr = stderr; /* Arrange for the PCRE regex library to use our store functions. Note that the normal calls are actually macros that add additional arguments for debugging purposes so we have to assign specially constructed functions here. The default is to use store in the stacking pool, but this is overridden in the regex_must_compile() function. */ pcre_malloc = function_store_get; pcre_free = function_dummy_free; /* Ensure there is a big buffer for temporary use in several places. It is put in malloc store so that it can be freed for enlargement if necessary. */ big_buffer = store_malloc(big_buffer_size); /* Set up the handler for the data request signal, and set the initial descriptive text. */ set_process_info("initializing"); os_restarting_signal(SIGUSR1, usr1_handler); /* SIGHUP is used to get the daemon to reconfigure. It gets set as appropriate in the daemon code. For the rest of Exim's uses, we ignore it. 
*/ signal(SIGHUP, SIG_IGN); /* We don't want to die on pipe errors as the code is written to handle the write error instead. */ signal(SIGPIPE, SIG_IGN); /* Under some circumstance on some OS, Exim can get called with SIGCHLD set to SIG_IGN. This causes subprocesses that complete before the parent process waits for them not to hang around, so when Exim calls wait(), nothing is there. The wait() code has been made robust against this, but let's ensure that SIGCHLD is set to SIG_DFL, because it's tidier to wait and get a process ending status. We use sigaction rather than plain signal() on those OS where SA_NOCLDWAIT exists, because we want to be sure it is turned off. (There was a problem on AIX with this.) */ #ifdef SA_NOCLDWAIT { struct sigaction act; act.sa_handler = SIG_DFL; sigemptyset(&(act.sa_mask)); act.sa_flags = 0; sigaction(SIGCHLD, &act, NULL); } #else signal(SIGCHLD, SIG_DFL); #endif /* Save the arguments for use if we re-exec exim as a daemon after receiving SIGHUP. */ sighup_argv = argv; /* Set up the version number. Set up the leading 'E' for the external form of message ids, set the pointer to the internal form, and initialize it to indicate no message being processed. */ version_init(); message_id_option[0] = '-'; message_id_external = message_id_option + 1; message_id_external[0] = 'E'; message_id = message_id_external + 1; message_id[0] = 0; /* Set the umask to zero so that any files Exim creates using open() are created with the modes that it specifies. NOTE: Files created with fopen() have a problem, which was not recognized till rather late (February 2006). With this umask, such files will be world writeable. (They are all content scanning files in the spool directory, which isn't world-accessible, so this is not a disaster, but it's untidy.) I don't want to change this overall setting, however, because it will interact badly with the open() calls. Instead, there's now a function called modefopen() that fiddles with the umask while calling fopen(). */ (void)umask(0); /* Precompile the regular expression for matching a message id. Keep this in step with the code that generates ids in the accept.c module. We need to do this here, because the -M options check their arguments for syntactic validity using mac_ismsgid, which uses this. */ regex_ismsgid = regex_must_compile(US"^(?:[^\\W_]{6}-){2}[^\\W_]{2}$", FALSE, TRUE); /* Precompile the regular expression that is used for matching an SMTP error code, possibly extended, at the start of an error message. Note that the terminating whitespace character is included. */ regex_smtp_code = regex_must_compile(US"^\\d\\d\\d\\s(?:\\d\\.\\d\\d?\\d?\\.\\d\\d?\\d?\\s)?", FALSE, TRUE); #ifdef WHITELIST_D_MACROS /* Precompile the regular expression used to filter the content of macros given to -D for permissibility. */ regex_whitelisted_macro = regex_must_compile(US"^[A-Za-z0-9_/.-]*$", FALSE, TRUE); #endif for (i = 0; i < REGEX_VARS; i++) regex_vars[i] = NULL; /* If the program is called as "mailq" treat it as equivalent to "exim -bp"; this seems to be a generally accepted convention, since one finds symbolic links called "mailq" in standard OS configurations. */ if ((namelen == 5 && Ustrcmp(argv[0], "mailq") == 0) || (namelen > 5 && Ustrncmp(argv[0] + namelen - 6, "/mailq", 6) == 0)) { list_queue = TRUE; receiving_message = FALSE; called_as = US"-mailq"; } /* If the program is called as "rmail" treat it as equivalent to "exim -i -oee", thus allowing UUCP messages to be input using non-SMTP mode, i.e. 
preventing a single dot on a line from terminating the message, and returning with zero return code, even in cases of error (provided an error message has been sent). */ if ((namelen == 5 && Ustrcmp(argv[0], "rmail") == 0) || (namelen > 5 && Ustrncmp(argv[0] + namelen - 6, "/rmail", 6) == 0)) { dot_ends = FALSE; called_as = US"-rmail"; errors_sender_rc = EXIT_SUCCESS; } /* If the program is called as "rsmtp" treat it as equivalent to "exim -bS"; this is a smail convention. */ if ((namelen == 5 && Ustrcmp(argv[0], "rsmtp") == 0) || (namelen > 5 && Ustrncmp(argv[0] + namelen - 6, "/rsmtp", 6) == 0)) { smtp_input = smtp_batched_input = TRUE; called_as = US"-rsmtp"; } /* If the program is called as "runq" treat it as equivalent to "exim -q"; this is a smail convention. */ if ((namelen == 4 && Ustrcmp(argv[0], "runq") == 0) || (namelen > 4 && Ustrncmp(argv[0] + namelen - 5, "/runq", 5) == 0)) { queue_interval = 0; receiving_message = FALSE; called_as = US"-runq"; } /* If the program is called as "newaliases" treat it as equivalent to "exim -bi"; this is a sendmail convention. */ if ((namelen == 10 && Ustrcmp(argv[0], "newaliases") == 0) || (namelen > 10 && Ustrncmp(argv[0] + namelen - 11, "/newaliases", 11) == 0)) { bi_option = TRUE; receiving_message = FALSE; called_as = US"-newaliases"; } /* Save the original effective uid for a couple of uses later. It should normally be root, but in some esoteric environments it may not be. */ original_euid = geteuid(); /* Get the real uid and gid. If the caller is root, force the effective uid/gid to be the same as the real ones. This makes a difference only if Exim is setuid (or setgid) to something other than root, which could be the case in some special configurations. */ real_uid = getuid(); real_gid = getgid(); if (real_uid == root_uid) { rv = setgid(real_gid); if (rv) { fprintf(stderr, "exim: setgid(%ld) failed: %s\n", (long int)real_gid, strerror(errno)); exit(EXIT_FAILURE); } rv = setuid(real_uid); if (rv) { fprintf(stderr, "exim: setuid(%ld) failed: %s\n", (long int)real_uid, strerror(errno)); exit(EXIT_FAILURE); } } /* If neither the original real uid nor the original euid was root, Exim is running in an unprivileged state. */ unprivileged = (real_uid != root_uid && original_euid != root_uid); /* Scan the program's arguments. Some can be dealt with right away; others are simply recorded for checking and handling afterwards. Do a high-level switch on the second character (the one after '-'), to save some effort. */ for (i = 1; i < argc; i++) { BOOL badarg = FALSE; uschar *arg = argv[i]; uschar *argrest; int switchchar; /* An argument not starting with '-' is the start of a recipients list; break out of the options-scanning loop. */ if (arg[0] != '-') { recipients_arg = i; break; } /* An option consisting of -- terminates the options */ if (Ustrcmp(arg, "--") == 0) { recipients_arg = i + 1; break; } /* Handle flagged options */ switchchar = arg[1]; argrest = arg+2; /* Make all -ex options synonymous with -oex arguments, since that is assumed by various callers. Also make -qR options synonymous with -R options, as that seems to be required as well. Allow for -qqR too, and the same for -S options. 
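For illustration (hypothetical invocations, not an exhaustive list): "-oem" ends up
being handled exactly like "-em", and "-qqRfoo" is handled like "-Rfoo" with the
two-stage queue run flag set.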
*/ if (Ustrncmp(arg+1, "oe", 2) == 0 || Ustrncmp(arg+1, "qR", 2) == 0 || Ustrncmp(arg+1, "qS", 2) == 0) { switchchar = arg[2]; argrest++; } else if (Ustrncmp(arg+1, "qqR", 3) == 0 || Ustrncmp(arg+1, "qqS", 3) == 0) { switchchar = arg[3]; argrest += 2; queue_2stage = TRUE; } /* Make -r synonymous with -f, since it is a documented alias */ else if (arg[1] == 'r') switchchar = 'f'; /* Make -ov synonymous with -v */ else if (Ustrcmp(arg, "-ov") == 0) { switchchar = 'v'; argrest++; } /* deal with --option_aliases */ else if (switchchar == '-') { if (Ustrcmp(argrest, "help") == 0) { usage_wanted = TRUE; break; } else if (Ustrcmp(argrest, "version") == 0) { switchchar = 'b'; argrest = US"V"; } } /* High-level switch on active initial letter */ switch(switchchar) { /* sendmail uses -Ac and -Am to control which .cf file is used; we ignore them. */ case 'A': if (*argrest == '\0') { badarg = TRUE; break; } else { BOOL ignore = FALSE; switch (*argrest) { case 'c': case 'm': if (*(argrest + 1) == '\0') ignore = TRUE; break; } if (!ignore) { badarg = TRUE; break; } } break; /* -Btype is a sendmail option for 7bit/8bit setting. Exim is 8-bit clean so has no need of it. */ case 'B': if (*argrest == 0) i++; /* Skip over the type */ break; case 'b': receiving_message = FALSE; /* Reset TRUE for -bm, -bS, -bs below */ /* -bd: Run in daemon mode, awaiting SMTP connections. -bdf: Ditto, but in the foreground. */ if (*argrest == 'd') { daemon_listen = TRUE; if (*(++argrest) == 'f') background_daemon = FALSE; else if (*argrest != 0) { badarg = TRUE; break; } } /* -be: Run in expansion test mode -bem: Ditto, but read a message from a file first */ else if (*argrest == 'e') { expansion_test = checking = TRUE; if (argrest[1] == 'm') { if (++i >= argc) { badarg = TRUE; break; } expansion_test_message = argv[i]; argrest++; } if (argrest[1] != 0) { badarg = TRUE; break; } } /* -bF: Run system filter test */ else if (*argrest == 'F') { filter_test |= checking = FTEST_SYSTEM; if (*(++argrest) != 0) { badarg = TRUE; break; } if (++i < argc) filter_test_sfile = argv[i]; else { fprintf(stderr, "exim: file name expected after %s\n", argv[i-1]); exit(EXIT_FAILURE); } } /* -bf: Run user filter test -bfd: Set domain for filter testing -bfl: Set local part for filter testing -bfp: Set prefix for filter testing -bfs: Set suffix for filter testing */ else if (*argrest == 'f') { if (*(++argrest) == 0) { filter_test |= checking = FTEST_USER; if (++i < argc) filter_test_ufile = argv[i]; else { fprintf(stderr, "exim: file name expected after %s\n", argv[i-1]); exit(EXIT_FAILURE); } } else { if (++i >= argc) { fprintf(stderr, "exim: string expected after %s\n", arg); exit(EXIT_FAILURE); } if (Ustrcmp(argrest, "d") == 0) ftest_domain = argv[i]; else if (Ustrcmp(argrest, "l") == 0) ftest_localpart = argv[i]; else if (Ustrcmp(argrest, "p") == 0) ftest_prefix = argv[i]; else if (Ustrcmp(argrest, "s") == 0) ftest_suffix = argv[i]; else { badarg = TRUE; break; } } } /* -bh: Host checking - an IP address must follow. */ else if (Ustrcmp(argrest, "h") == 0 || Ustrcmp(argrest, "hc") == 0) { if (++i >= argc) { badarg = TRUE; break; } sender_host_address = argv[i]; host_checking = checking = log_testing_mode = TRUE; host_checking_callout = argrest[1] == 'c'; message_logs = FALSE; } /* -bi: This option is used by sendmail to initialize *the* alias file, though it has the -oA option to specify a different file. Exim has no concept of *the* alias file, but since Sun's YP make script calls sendmail this way, some support must be provided. 
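For illustration: all that is done at this point is to set bi_option; whatever actually
runs, if anything, is the configured bi_command, which is dealt with after the
configuration file has been read. Being invoked under the name "newaliases" has the
same effect.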
*/ else if (Ustrcmp(argrest, "i") == 0) bi_option = TRUE; /* -bI: provide information, of the type to follow after a colon. This is an Exim flag. */ else if (argrest[0] == 'I' && Ustrlen(argrest) >= 2 && argrest[1] == ':') { uschar *p = &argrest[2]; info_flag = CMDINFO_HELP; if (Ustrlen(p)) { if (strcmpic(p, CUS"sieve") == 0) { info_flag = CMDINFO_SIEVE; info_stdout = TRUE; } else if (strcmpic(p, CUS"dscp") == 0) { info_flag = CMDINFO_DSCP; info_stdout = TRUE; } else if (strcmpic(p, CUS"help") == 0) { info_stdout = TRUE; } } } /* -bm: Accept and deliver message - the default option. Reinstate receiving_message, which got turned off for all -b options. */ else if (Ustrcmp(argrest, "m") == 0) receiving_message = TRUE; /* -bmalware: test the filename given for malware */ else if (Ustrcmp(argrest, "malware") == 0) { if (++i >= argc) { badarg = TRUE; break; } checking = TRUE; malware_test_file = argv[i]; } /* -bnq: For locally originating messages, do not qualify unqualified addresses. In the envelope, this causes errors; in header lines they just get left. */ else if (Ustrcmp(argrest, "nq") == 0) { allow_unqualified_sender = FALSE; allow_unqualified_recipient = FALSE; } /* -bpxx: List the contents of the mail queue, in various forms. If the option is -bpc, just a queue count is needed. Otherwise, if the first letter after p is r, then order is random. */ else if (*argrest == 'p') { if (*(++argrest) == 'c') { count_queue = TRUE; if (*(++argrest) != 0) badarg = TRUE; break; } if (*argrest == 'r') { list_queue_option = 8; argrest++; } else list_queue_option = 0; list_queue = TRUE; /* -bp: List the contents of the mail queue, top-level only */ if (*argrest == 0) {} /* -bpu: List the contents of the mail queue, top-level undelivered */ else if (Ustrcmp(argrest, "u") == 0) list_queue_option += 1; /* -bpa: List the contents of the mail queue, including all delivered */ else if (Ustrcmp(argrest, "a") == 0) list_queue_option += 2; /* Unknown after -bp[r] */ else { badarg = TRUE; break; } } /* -bP: List the configuration variables given as the address list. Force -v, so configuration errors get displayed. */ else if (Ustrcmp(argrest, "P") == 0) { /* -bP config: we need to setup here, because later, * when list_options is checked, the config is read already */ if (argv[i+1] && Ustrcmp(argv[i+1], "config") == 0) { list_config = TRUE; readconf_save_config(version_string); } else { list_options = TRUE; debug_selector |= D_v; debug_file = stderr; } } /* -brt: Test retry configuration lookup */ else if (Ustrcmp(argrest, "rt") == 0) { checking = TRUE; test_retry_arg = i + 1; goto END_ARG; } /* -brw: Test rewrite configuration */ else if (Ustrcmp(argrest, "rw") == 0) { checking = TRUE; test_rewrite_arg = i + 1; goto END_ARG; } /* -bS: Read SMTP commands on standard input, but produce no replies - all errors are reported by sending messages. */ else if (Ustrcmp(argrest, "S") == 0) smtp_input = smtp_batched_input = receiving_message = TRUE; /* -bs: Read SMTP commands on standard input and produce SMTP replies on standard output. 
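(Hedged summary of the contrast: with -bS the SMTP commands act purely as a batch
envelope and nothing is written back, which is what the "rsmtp" style of submission
expects; with -bs a real SMTP dialogue takes place on stdin/stdout.)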
*/ else if (Ustrcmp(argrest, "s") == 0) smtp_input = receiving_message = TRUE; /* -bt: address testing mode */ else if (Ustrcmp(argrest, "t") == 0) address_test_mode = checking = log_testing_mode = TRUE; /* -bv: verify addresses */ else if (Ustrcmp(argrest, "v") == 0) verify_address_mode = checking = log_testing_mode = TRUE; /* -bvs: verify sender addresses */ else if (Ustrcmp(argrest, "vs") == 0) { verify_address_mode = checking = log_testing_mode = TRUE; verify_as_sender = TRUE; } /* -bV: Print version string and support details */ else if (Ustrcmp(argrest, "V") == 0) { printf("Exim version %s #%s built %s\n", version_string, version_cnumber, version_date); printf("%s\n", CS version_copyright); version_printed = TRUE; show_whats_supported(stdout); log_testing_mode = TRUE; } /* -bw: inetd wait mode, accept a listening socket as stdin */ else if (*argrest == 'w') { inetd_wait_mode = TRUE; background_daemon = FALSE; daemon_listen = TRUE; if (*(++argrest) != '\0') { inetd_wait_timeout = readconf_readtime(argrest, 0, FALSE); if (inetd_wait_timeout <= 0) { fprintf(stderr, "exim: bad time value %s: abandoned\n", argv[i]); exit(EXIT_FAILURE); } } } else badarg = TRUE; break; /* -C: change configuration file list; ignore if it isn't really a change! Enforce a prefix check if required. */ case 'C': if (*argrest == 0) { if(++i < argc) argrest = argv[i]; else { badarg = TRUE; break; } } if (Ustrcmp(config_main_filelist, argrest) != 0) { #ifdef ALT_CONFIG_PREFIX int sep = 0; int len = Ustrlen(ALT_CONFIG_PREFIX); const uschar *list = argrest; uschar *filename; while((filename = string_nextinlist(&list, &sep, big_buffer, big_buffer_size)) != NULL) { if ((Ustrlen(filename) < len || Ustrncmp(filename, ALT_CONFIG_PREFIX, len) != 0 || Ustrstr(filename, "/../") != NULL) && (Ustrcmp(filename, "/dev/null") != 0 || real_uid != root_uid)) { fprintf(stderr, "-C Permission denied\n"); exit(EXIT_FAILURE); } } #endif if (real_uid != root_uid) { #ifdef TRUSTED_CONFIG_LIST if (real_uid != exim_uid #ifdef CONFIGURE_OWNER && real_uid != config_uid #endif ) trusted_config = FALSE; else { FILE *trust_list = Ufopen(TRUSTED_CONFIG_LIST, "rb"); if (trust_list) { struct stat statbuf; if (fstat(fileno(trust_list), &statbuf) != 0 || (statbuf.st_uid != root_uid /* owner not root */ #ifdef CONFIGURE_OWNER && statbuf.st_uid != config_uid /* owner not the special one */ #endif ) || /* or */ (statbuf.st_gid != root_gid /* group not root */ #ifdef CONFIGURE_GROUP && statbuf.st_gid != config_gid /* group not the special one */ #endif && (statbuf.st_mode & 020) != 0 /* group writeable */ ) || /* or */ (statbuf.st_mode & 2) != 0) /* world writeable */ { trusted_config = FALSE; fclose(trust_list); } else { /* Well, the trust list at least is up to scratch... 
*/ void *reset_point = store_get(0); uschar *trusted_configs[32]; int nr_configs = 0; int i = 0; while (Ufgets(big_buffer, big_buffer_size, trust_list)) { uschar *start = big_buffer, *nl; while (*start && isspace(*start)) start++; if (*start != '/') continue; nl = Ustrchr(start, '\n'); if (nl) *nl = 0; trusted_configs[nr_configs++] = string_copy(start); if (nr_configs == 32) break; } fclose(trust_list); if (nr_configs) { int sep = 0; const uschar *list = argrest; uschar *filename; while (trusted_config && (filename = string_nextinlist(&list, &sep, big_buffer, big_buffer_size)) != NULL) { for (i=0; i < nr_configs; i++) { if (Ustrcmp(filename, trusted_configs[i]) == 0) break; } if (i == nr_configs) { trusted_config = FALSE; break; } } store_reset(reset_point); } else { /* No valid prefixes found in trust_list file. */ trusted_config = FALSE; } } } else { /* Could not open trust_list file. */ trusted_config = FALSE; } } #else /* Not root; don't trust config */ trusted_config = FALSE; #endif } config_main_filelist = argrest; config_changed = TRUE; } break; /* -D: set up a macro definition */ case 'D': #ifdef DISABLE_D_OPTION fprintf(stderr, "exim: -D is not available in this Exim binary\n"); exit(EXIT_FAILURE); #else { int ptr = 0; macro_item *m; uschar name[24]; uschar *s = argrest; opt_D_used = TRUE; while (isspace(*s)) s++; if (*s < 'A' || *s > 'Z') { fprintf(stderr, "exim: macro name set by -D must start with " "an upper case letter\n"); exit(EXIT_FAILURE); } while (isalnum(*s) || *s == '_') { if (ptr < sizeof(name)-1) name[ptr++] = *s; s++; } name[ptr] = 0; if (ptr == 0) { badarg = TRUE; break; } while (isspace(*s)) s++; if (*s != 0) { if (*s++ != '=') { badarg = TRUE; break; } while (isspace(*s)) s++; } for (m = macros; m; m = m->next) if (Ustrcmp(m->name, name) == 0) { fprintf(stderr, "exim: duplicated -D in command line\n"); exit(EXIT_FAILURE); } m = macro_create(name, s, TRUE, FALSE); if (clmacro_count >= MAX_CLMACROS) { fprintf(stderr, "exim: too many -D options on command line\n"); exit(EXIT_FAILURE); } clmacros[clmacro_count++] = string_sprintf("-D%s=%s", m->name, m->replacement); } #endif break; /* -d: Set debug level (see also -v below) or set the drop_cr option. The latter is now a no-op, retained for compatibility only. If -dd is used, debugging subprocesses of the daemon is disabled. */ case 'd': if (Ustrcmp(argrest, "ropcr") == 0) { /* drop_cr = TRUE; */ } /* Use an intermediate variable so that we don't set debugging while decoding the debugging bits. */ else { unsigned int selector = D_default; debug_selector = 0; debug_file = NULL; if (*argrest == 'd') { debug_daemon = TRUE; argrest++; } if (*argrest != 0) decode_bits(&selector, 1, debug_notall, argrest, debug_options, debug_options_count, US"debug", 0); debug_selector = selector; } break; /* -E: This is a local error message. This option is not intended for external use at all, but is not restricted to trusted callers because it does no harm (just suppresses certain error messages) and if Exim is run not setuid root it won't always be trusted when it generates error messages using this option. If there is a message id following -E, point message_reference at it, for logging. */ case 'E': local_error_message = TRUE; if (mac_ismsgid(argrest)) message_reference = argrest; break; /* -ex: The vacation program calls sendmail with the undocumented "-eq" option, so it looks as if historically the -oex options are also callable without the leading -o. So we have to accept them. 
Before the switch, anything starting -oe has been converted to -e. Exim does not support all of the sendmail error options. */ case 'e': if (Ustrcmp(argrest, "e") == 0) { arg_error_handling = ERRORS_SENDER; errors_sender_rc = EXIT_SUCCESS; } else if (Ustrcmp(argrest, "m") == 0) arg_error_handling = ERRORS_SENDER; else if (Ustrcmp(argrest, "p") == 0) arg_error_handling = ERRORS_STDERR; else if (Ustrcmp(argrest, "q") == 0) arg_error_handling = ERRORS_STDERR; else if (Ustrcmp(argrest, "w") == 0) arg_error_handling = ERRORS_SENDER; else badarg = TRUE; break; /* -F: Set sender's full name, used instead of the gecos entry from the password file. Since users can usually alter their gecos entries, there's no security involved in using this instead. The data can follow the -F or be in the next argument. */ case 'F': if (*argrest == 0) { if(++i < argc) argrest = argv[i]; else { badarg = TRUE; break; } } originator_name = argrest; sender_name_forced = TRUE; break; /* -f: Set sender's address - this value is only actually used if Exim is run by a trusted user, or if untrusted_set_sender is set and matches the address, except that the null address can always be set by any user. The test for this happens later, when the value given here is ignored when not permitted. For an untrusted user, the actual sender is still put in Sender: if it doesn't match the From: header (unless no_local_from_check is set). The data can follow the -f or be in the next argument. The -r switch is an obsolete form of -f but since there appear to be programs out there that use anything that sendmail has ever supported, better accept it - the synonymizing is done before the switch above. At this stage, we must allow domain literal addresses, because we don't know what the setting of allow_domain_literals is yet. Ditto for trailing dots and strip_trailing_dot. */ case 'f': { int dummy_start, dummy_end; uschar *errmess; if (*argrest == 0) { if (i+1 < argc) argrest = argv[++i]; else { badarg = TRUE; break; } } if (*argrest == 0) sender_address = string_sprintf(""); /* Ensure writeable memory */ else { uschar *temp = argrest + Ustrlen(argrest) - 1; while (temp >= argrest && isspace(*temp)) temp--; if (temp >= argrest && *temp == '.') f_end_dot = TRUE; allow_domain_literals = TRUE; strip_trailing_dot = TRUE; #ifdef SUPPORT_I18N allow_utf8_domains = TRUE; #endif sender_address = parse_extract_address(argrest, &errmess, &dummy_start, &dummy_end, &sender_address_domain, TRUE); #ifdef SUPPORT_I18N message_smtputf8 = string_is_utf8(sender_address); allow_utf8_domains = FALSE; #endif allow_domain_literals = FALSE; strip_trailing_dot = FALSE; if (sender_address == NULL) { fprintf(stderr, "exim: bad -f address \"%s\": %s\n", argrest, errmess); return EXIT_FAILURE; } } sender_address_forced = TRUE; } break; /* -G: sendmail invocation to specify that it's a gateway submission and sendmail may complain about problems instead of fixing them. We make it equivalent to an ACL "control = suppress_local_fixups" and do not at this time complain about problems. */ case 'G': flag_G = TRUE; break; /* -h: Set the hop count for an incoming message. Exim does not currently support this; it always computes it by counting the Received: headers. To put it in will require a change to the spool header file format. 
*/ case 'h': if (*argrest == 0) { if(++i < argc) argrest = argv[i]; else { badarg = TRUE; break; } } if (!isdigit(*argrest)) badarg = TRUE; break; /* -i: Set flag so dot doesn't end non-SMTP input (same as -oi, seems not to be documented for sendmail but mailx (at least) uses it) */ case 'i': if (*argrest == 0) dot_ends = FALSE; else badarg = TRUE; break; /* -L: set the identifier used for syslog; equivalent to setting syslog_processname in the config file, but needs to be an admin option. */ case 'L': if (*argrest == '\0') { if(++i < argc) argrest = argv[i]; else { badarg = TRUE; break; } } sz = Ustrlen(argrest); if (sz > 32) { fprintf(stderr, "exim: the -L syslog name is too long: \"%s\"\n", argrest); return EXIT_FAILURE; } if (sz < 1) { fprintf(stderr, "exim: the -L syslog name is too short\n"); return EXIT_FAILURE; } cmdline_syslog_name = argrest; break; case 'M': receiving_message = FALSE; /* -MC: continue delivery of another message via an existing open file descriptor. This option is used for an internal call by the smtp transport when there is a pending message waiting to go to an address to which it has got a connection. Five subsequent arguments are required: transport name, host name, IP address, sequence number, and message_id. Transports may decline to create new processes if the sequence number gets too big. The channel is stdin. This (-MC) must be the last argument. There's a subsequent check that the real-uid is privileged. If we are running in the test harness. delay for a bit, to let the process that set this one up complete. This makes for repeatability of the logging, etc. output. */ if (Ustrcmp(argrest, "C") == 0) { union sockaddr_46 interface_sock; EXIM_SOCKLEN_T size = sizeof(interface_sock); if (argc != i + 6) { fprintf(stderr, "exim: too many or too few arguments after -MC\n"); return EXIT_FAILURE; } if (msg_action_arg >= 0) { fprintf(stderr, "exim: incompatible arguments\n"); return EXIT_FAILURE; } continue_transport = argv[++i]; continue_hostname = argv[++i]; continue_host_address = argv[++i]; continue_sequence = Uatoi(argv[++i]); msg_action = MSG_DELIVER; msg_action_arg = ++i; forced_delivery = TRUE; queue_run_pid = passed_qr_pid; queue_run_pipe = passed_qr_pipe; if (!mac_ismsgid(argv[i])) { fprintf(stderr, "exim: malformed message id %s after -MC option\n", argv[i]); return EXIT_FAILURE; } /* Set up $sending_ip_address and $sending_port, unless proxied */ if (!continue_proxy_cipher) if (getsockname(fileno(stdin), (struct sockaddr *)(&interface_sock), &size) == 0) sending_ip_address = host_ntoa(-1, &interface_sock, NULL, &sending_port); else { fprintf(stderr, "exim: getsockname() failed after -MC option: %s\n", strerror(errno)); return EXIT_FAILURE; } if (running_in_test_harness) millisleep(500); break; } else if (*argrest == 'C' && argrest[1] && !argrest[2]) { switch(argrest[1]) { /* -MCA: set the smtp_authenticated flag; this is useful only when it precedes -MC (see above). The flag indicates that the host to which Exim is connected has accepted an AUTH sequence. */ case 'A': smtp_authenticated = TRUE; break; /* -MCD: set the smtp_use_dsn flag; this indicates that the host that exim is connected to supports the esmtp extension DSN */ case 'D': smtp_peer_options |= PEER_OFFERED_DSN; break; /* -MCG: set the queue name, to a non-default value */ case 'G': if (++i < argc) queue_name = string_copy(argv[i]); else badarg = TRUE; break; /* -MCK: the peer offered CHUNKING. 
Must precede -MC */

      case 'K': smtp_peer_options |= PEER_OFFERED_CHUNKING; break;

      /* -MCP: set the smtp_use_pipelining flag; this is useful only when
      it precedes -MC (see above) */

      case 'P': smtp_peer_options |= PEER_OFFERED_PIPE; break;

      /* -MCQ: pass on the pid of the queue-running process that started this
      chain of deliveries and the fd of its synchronizing pipe; this is useful
      only when it precedes -MC (see above) */

      case 'Q':
      if (++i < argc) passed_qr_pid = (pid_t)(Uatol(argv[i]));
        else badarg = TRUE;
      if (++i < argc) passed_qr_pipe = (int)(Uatol(argv[i]));
        else badarg = TRUE;
      break;

      /* -MCS: set the smtp_use_size flag; this is useful only when it
      precedes -MC (see above) */

      case 'S': smtp_peer_options |= PEER_OFFERED_SIZE; break;

#ifdef SUPPORT_TLS
      /* -MCt: similar to -MCT below but the connection is still open via a
      proxy process which handles the TLS context and coding. Require three
      arguments for the proxied local address and port, and the TLS cipher. */

      case 't':
      if (++i < argc) sending_ip_address = argv[i];
        else badarg = TRUE;
      if (++i < argc) sending_port = (int)(Uatol(argv[i]));
        else badarg = TRUE;
      if (++i < argc) continue_proxy_cipher = argv[i];
        else badarg = TRUE;
      /*FALLTHROUGH*/

      /* -MCT: set the tls_offered flag; this is useful only when it precedes
      -MC (see above). The flag indicates that the host to which Exim is
      connected has offered TLS support. */

      case 'T': smtp_peer_options |= PEER_OFFERED_TLS; break;
#endif

      default: badarg = TRUE; break;
      }
    break;
    }

    /* -M[x]: various operations on the following list of message ids:
       -M    deliver the messages, ignoring next retry times and thawing
       -Mc   deliver the messages, checking next retry times, no thawing
       -Mf   freeze the messages
       -Mg   give up on the messages
       -Mt   thaw the messages
       -Mrm  remove the messages

    In the above cases, this must be the last option. There are also the
    following options which are followed by a single message id, and which
    act on that message. Some of them use the "recipient" addresses as well.

       -Mar  add recipient(s)
       -Mmad mark all recipients delivered
       -Mmd  mark recipient(s) delivered
       -Mes  edit sender
       -Mset load a message for use with -be
       -Mvb  show body
       -Mvc  show copy (of whole message, in RFC 2822 format)
       -Mvh  show header
       -Mvl  show log
    */

    else if (*argrest == 0)
      {
      msg_action = MSG_DELIVER;
      forced_delivery = deliver_force_thaw = TRUE;
      }
    else if (Ustrcmp(argrest, "ar") == 0)
      {
      msg_action = MSG_ADD_RECIPIENT;
      one_msg_action = TRUE;
      }
    else if (Ustrcmp(argrest, "c") == 0)  msg_action = MSG_DELIVER;
    else if (Ustrcmp(argrest, "es") == 0)
      {
      msg_action = MSG_EDIT_SENDER;
      one_msg_action = TRUE;
      }
    else if (Ustrcmp(argrest, "f") == 0)  msg_action = MSG_FREEZE;
    else if (Ustrcmp(argrest, "g") == 0)
      {
      msg_action = MSG_DELIVER;
      deliver_give_up = TRUE;
      }
    else if (Ustrcmp(argrest, "mad") == 0)
      {
      msg_action = MSG_MARK_ALL_DELIVERED;
      }
    else if (Ustrcmp(argrest, "md") == 0)
      {
      msg_action = MSG_MARK_DELIVERED;
      one_msg_action = TRUE;
      }
    else if (Ustrcmp(argrest, "rm") == 0) msg_action = MSG_REMOVE;
    else if (Ustrcmp(argrest, "set") == 0)
      {
      msg_action = MSG_LOAD;
      one_msg_action = TRUE;
      }
    else if (Ustrcmp(argrest, "t") == 0)  msg_action = MSG_THAW;
    else if (Ustrcmp(argrest, "vb") == 0)
      {
      msg_action = MSG_SHOW_BODY;
      one_msg_action = TRUE;
      }
    else if (Ustrcmp(argrest, "vc") == 0)
      {
      msg_action = MSG_SHOW_COPY;
      one_msg_action = TRUE;
      }
    else if (Ustrcmp(argrest, "vh") == 0)
      {
      msg_action = MSG_SHOW_HEADER;
      one_msg_action = TRUE;
      }
    else if (Ustrcmp(argrest, "vl") == 0)
      {
      msg_action = MSG_SHOW_LOG;
      one_msg_action = TRUE;
      }
    else { badarg = TRUE; break; }

    /* All the -Mxx options require at least one message id. */

    msg_action_arg = i + 1;
    if (msg_action_arg >= argc)
      {
      fprintf(stderr, "exim: no message ids given after %s option\n", arg);
      return EXIT_FAILURE;
      }

    /* Some require only message ids to follow */

    if (!one_msg_action)
      {
      int j;
      for (j = msg_action_arg; j < argc; j++) if (!mac_ismsgid(argv[j]))
        {
        fprintf(stderr, "exim: malformed message id %s after %s option\n",
          argv[j], arg);
        return EXIT_FAILURE;
        }
      goto END_ARG;   /* Remaining args are ids */
      }

    /* Others require only one message id, possibly followed by addresses,
    which will be handled as normal arguments. */

    else
      {
      if (!mac_ismsgid(argv[msg_action_arg]))
        {
        fprintf(stderr, "exim: malformed message id %s after %s option\n",
          argv[msg_action_arg], arg);
        return EXIT_FAILURE;
        }
      i++;
      }
    break;

    /* Some programs seem to call the -om option without the leading o;
    for sendmail it asks for "me too". Exim always does this. */

    case 'm':
    if (*argrest != 0) badarg = TRUE;
    break;

    /* -N: don't do delivery - a debugging option that stops transports doing
    their thing. It implies debugging at the D_v level. */

    case 'N':
    if (*argrest == 0)
      {
      dont_deliver = TRUE;
      debug_selector |= D_v;
      debug_file = stderr;
      }
    else badarg = TRUE;
    break;

    /* -n: This means "don't alias" in sendmail, apparently. For normal
    invocations, it has no effect. It may affect some other options. */

    case 'n':
    flag_n = TRUE;
    break;

    /* -O: Just ignore it. In sendmail, apparently -O option=value means set
    option to the specified value. This form uses long names. We need to handle
    -O option=value and -Ooption=value. */

    case 'O':
    if (*argrest == 0)
      {
      if (++i >= argc)
        {
        fprintf(stderr, "exim: string expected after -O\n");
        exit(EXIT_FAILURE);
        }
      }
    break;

    case 'o':

    /* -oA: Set an argument for the bi command (sendmail's "alternate alias
    file" option).
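    For illustration only, a hypothetical call such as "exim -bi -oA/etc/aliases"
    makes "/etc/aliases" available as alias_arg, which is later passed as the single
    argument to the configured bi_command.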
*/ if (*argrest == 'A') { alias_arg = argrest + 1; if (alias_arg[0] == 0) { if (i+1 < argc) alias_arg = argv[++i]; else { fprintf(stderr, "exim: string expected after -oA\n"); exit(EXIT_FAILURE); } } } /* -oB: Set a connection message max value for remote deliveries */ else if (*argrest == 'B') { uschar *p = argrest + 1; if (p[0] == 0) { if (i+1 < argc && isdigit((argv[i+1][0]))) p = argv[++i]; else { connection_max_messages = 1; p = NULL; } } if (p != NULL) { if (!isdigit(*p)) { fprintf(stderr, "exim: number expected after -oB\n"); exit(EXIT_FAILURE); } connection_max_messages = Uatoi(p); } } /* -odb: background delivery */ else if (Ustrcmp(argrest, "db") == 0) { synchronous_delivery = FALSE; arg_queue_only = FALSE; queue_only_set = TRUE; } /* -odf: foreground delivery (smail-compatible option); same effect as -odi: interactive (synchronous) delivery (sendmail-compatible option) */ else if (Ustrcmp(argrest, "df") == 0 || Ustrcmp(argrest, "di") == 0) { synchronous_delivery = TRUE; arg_queue_only = FALSE; queue_only_set = TRUE; } /* -odq: queue only */ else if (Ustrcmp(argrest, "dq") == 0) { synchronous_delivery = FALSE; arg_queue_only = TRUE; queue_only_set = TRUE; } /* -odqs: queue SMTP only - do local deliveries and remote routing, but no remote delivery */ else if (Ustrcmp(argrest, "dqs") == 0) { queue_smtp = TRUE; arg_queue_only = FALSE; queue_only_set = TRUE; } /* -oex: Sendmail error flags. As these are also accepted without the leading -o prefix, for compatibility with vacation and other callers, they are handled with -e above. */ /* -oi: Set flag so dot doesn't end non-SMTP input (same as -i) -oitrue: Another sendmail syntax for the same */ else if (Ustrcmp(argrest, "i") == 0 || Ustrcmp(argrest, "itrue") == 0) dot_ends = FALSE; /* -oM*: Set various characteristics for an incoming message; actually acted on for trusted callers only. */ else if (*argrest == 'M') { if (i+1 >= argc) { fprintf(stderr, "exim: data expected after -o%s\n", argrest); exit(EXIT_FAILURE); } /* -oMa: Set sender host address */ if (Ustrcmp(argrest, "Ma") == 0) sender_host_address = argv[++i]; /* -oMaa: Set authenticator name */ else if (Ustrcmp(argrest, "Maa") == 0) sender_host_authenticated = argv[++i]; /* -oMas: setting authenticated sender */ else if (Ustrcmp(argrest, "Mas") == 0) authenticated_sender = argv[++i]; /* -oMai: setting authenticated id */ else if (Ustrcmp(argrest, "Mai") == 0) authenticated_id = argv[++i]; /* -oMi: Set incoming interface address */ else if (Ustrcmp(argrest, "Mi") == 0) interface_address = argv[++i]; /* -oMm: Message reference */ else if (Ustrcmp(argrest, "Mm") == 0) { if (!mac_ismsgid(argv[i+1])) { fprintf(stderr,"-oMm must be a valid message ID\n"); exit(EXIT_FAILURE); } if (!trusted_config) { fprintf(stderr,"-oMm must be called by a trusted user/config\n"); exit(EXIT_FAILURE); } message_reference = argv[++i]; } /* -oMr: Received protocol */ else if (Ustrcmp(argrest, "Mr") == 0) received_protocol = argv[++i]; /* -oMs: Set sender host name */ else if (Ustrcmp(argrest, "Ms") == 0) sender_host_name = argv[++i]; /* -oMt: Set sender ident */ else if (Ustrcmp(argrest, "Mt") == 0) { sender_ident_set = TRUE; sender_ident = argv[++i]; } /* Else a bad argument */ else { badarg = TRUE; break; } } /* -om: Me-too flag for aliases. Exim always does this. Some programs seem to call this as -m (undocumented), so that is also accepted (see above). */ else if (Ustrcmp(argrest, "m") == 0) {} /* -oo: An ancient flag for old-style addresses which still seems to crop up in some calls (see in SCO). 
*/ else if (Ustrcmp(argrest, "o") == 0) {} /* -oP <name>: set pid file path for daemon */ else if (Ustrcmp(argrest, "P") == 0) override_pid_file_path = argv[++i]; /* -or <n>: set timeout for non-SMTP acceptance -os <n>: set timeout for SMTP acceptance */ else if (*argrest == 'r' || *argrest == 's') { int *tp = (*argrest == 'r')? &arg_receive_timeout : &arg_smtp_receive_timeout; if (argrest[1] == 0) { if (i+1 < argc) *tp= readconf_readtime(argv[++i], 0, FALSE); } else *tp = readconf_readtime(argrest + 1, 0, FALSE); if (*tp < 0) { fprintf(stderr, "exim: bad time value %s: abandoned\n", argv[i]); exit(EXIT_FAILURE); } } /* -oX <list>: Override local_interfaces and/or default daemon ports */ else if (Ustrcmp(argrest, "X") == 0) override_local_interfaces = argv[++i]; /* Unknown -o argument */ else badarg = TRUE; break; /* -ps: force Perl startup; -pd force delayed Perl startup */ case 'p': #ifdef EXIM_PERL if (*argrest == 's' && argrest[1] == 0) { perl_start_option = 1; break; } if (*argrest == 'd' && argrest[1] == 0) { perl_start_option = -1; break; } #endif /* -panythingelse is taken as the Sendmail-compatible argument -prval:sval, which sets the host protocol and host name */ if (*argrest == 0) { if (i+1 < argc) argrest = argv[++i]; else { badarg = TRUE; break; } } if (*argrest != 0) { uschar *hn = Ustrchr(argrest, ':'); if (hn == NULL) { received_protocol = argrest; } else { int old_pool = store_pool; store_pool = POOL_PERM; received_protocol = string_copyn(argrest, hn - argrest); store_pool = old_pool; sender_host_name = hn + 1; } } break; case 'q': receiving_message = FALSE; if (queue_interval >= 0) { fprintf(stderr, "exim: -q specified more than once\n"); exit(EXIT_FAILURE); } /* -qq...: Do queue runs in a 2-stage manner */ if (*argrest == 'q') { queue_2stage = TRUE; argrest++; } /* -qi...: Do only first (initial) deliveries */ if (*argrest == 'i') { queue_run_first_delivery = TRUE; argrest++; } /* -qf...: Run the queue, forcing deliveries -qff..: Ditto, forcing thawing as well */ if (*argrest == 'f') { queue_run_force = TRUE; if (*++argrest == 'f') { deliver_force_thaw = TRUE; argrest++; } } /* -q[f][f]l...: Run the queue only on local deliveries */ if (*argrest == 'l') { queue_run_local = TRUE; argrest++; } /* -q[f][f][l][G<name>]... Work on the named queue */ if (*argrest == 'G') { int i; for (argrest++, i = 0; argrest[i] && argrest[i] != '/'; ) i++; queue_name = string_copyn(argrest, i); argrest += i; if (*argrest == '/') argrest++; } /* -q[f][f][l][G<name>]: Run the queue, optionally forced, optionally local only, optionally named, optionally starting from a given message id. */ if (*argrest == 0 && (i + 1 >= argc || argv[i+1][0] == '-' || mac_ismsgid(argv[i+1]))) { queue_interval = 0; if (i+1 < argc && mac_ismsgid(argv[i+1])) start_queue_run_id = argv[++i]; if (i+1 < argc && mac_ismsgid(argv[i+1])) stop_queue_run_id = argv[++i]; } /* -q[f][f][l][G<name>/]<n>: Run the queue at regular intervals, optionally forced, optionally local only, optionally named. */ else if ((queue_interval = readconf_readtime(*argrest ? argrest : argv[++i], 0, FALSE)) <= 0) { fprintf(stderr, "exim: bad time value %s: abandoned\n", argv[i]); exit(EXIT_FAILURE); } break; case 'R': /* Synonymous with -qR... */ receiving_message = FALSE; /* -Rf: As -R (below) but force all deliveries, -Rff: Ditto, but also thaw all frozen messages, -Rr: String is regex -Rrf: Regex and force -Rrff: Regex and force and thaw in all cases provided there are no further characters in this argument. 
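Hypothetical examples: "-Rff example.com" forces delivery, thawing frozen messages,
of every queued message with a recipient that contains "example.com", while "-Rr"
makes the selection string a regular expression without forcing anything.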
*/ if (*argrest != 0) { int i; for (i = 0; i < nelem(rsopts); i++) if (Ustrcmp(argrest, rsopts[i]) == 0) { if (i != 2) queue_run_force = TRUE; if (i >= 2) deliver_selectstring_regex = TRUE; if (i == 1 || i == 4) deliver_force_thaw = TRUE; argrest += Ustrlen(rsopts[i]); } } /* -R: Set string to match in addresses for forced queue run to pick out particular messages. */ if (*argrest) deliver_selectstring = argrest; else if (i+1 < argc) deliver_selectstring = argv[++i]; else { fprintf(stderr, "exim: string expected after -R\n"); exit(EXIT_FAILURE); } break; /* -r: an obsolete synonym for -f (see above) */ /* -S: Like -R but works on sender. */ case 'S': /* Synonymous with -qS... */ receiving_message = FALSE; /* -Sf: As -S (below) but force all deliveries, -Sff: Ditto, but also thaw all frozen messages, -Sr: String is regex -Srf: Regex and force -Srff: Regex and force and thaw in all cases provided there are no further characters in this argument. */ if (*argrest) { int i; for (i = 0; i < nelem(rsopts); i++) if (Ustrcmp(argrest, rsopts[i]) == 0) { if (i != 2) queue_run_force = TRUE; if (i >= 2) deliver_selectstring_sender_regex = TRUE; if (i == 1 || i == 4) deliver_force_thaw = TRUE; argrest += Ustrlen(rsopts[i]); } } /* -S: Set string to match in addresses for forced queue run to pick out particular messages. */ if (*argrest) deliver_selectstring_sender = argrest; else if (i+1 < argc) deliver_selectstring_sender = argv[++i]; else { fprintf(stderr, "exim: string expected after -S\n"); exit(EXIT_FAILURE); } break; /* -Tqt is an option that is exclusively for use by the testing suite. It is not recognized in other circumstances. It allows for the setting up of explicit "queue times" so that various warning/retry things can be tested. Otherwise variability of clock ticks etc. cause problems. */ case 'T': if (running_in_test_harness && Ustrcmp(argrest, "qt") == 0) fudged_queue_times = argv[++i]; else badarg = TRUE; break; /* -t: Set flag to extract recipients from body of message. */ case 't': if (*argrest == 0) extract_recipients = TRUE; /* -ti: Set flag to extract recipients from body of message, and also specify that dot does not end the message. */ else if (Ustrcmp(argrest, "i") == 0) { extract_recipients = TRUE; dot_ends = FALSE; } /* -tls-on-connect: don't wait for STARTTLS (for old clients) */ #ifdef SUPPORT_TLS else if (Ustrcmp(argrest, "ls-on-connect") == 0) tls_in.on_connect = TRUE; #endif else badarg = TRUE; break; /* -U: This means "initial user submission" in sendmail, apparently. The doc claims that in future sendmail may refuse syntactically invalid messages instead of fixing them. For the moment, we just ignore it. */ case 'U': break; /* -v: verify things - this is a very low-level debugging */ case 'v': if (*argrest == 0) { debug_selector |= D_v; debug_file = stderr; } else badarg = TRUE; break; /* -x: AIX uses this to indicate some fancy 8-bit character stuff: The -x flag tells the sendmail command that mail from a local mail program has National Language Support (NLS) extended characters in the body of the mail item. The sendmail command can send mail with extended NLS characters across networks that normally corrupts these 8-bit characters. As Exim is 8-bit clean, it just ignores this flag. */ case 'x': if (*argrest != 0) badarg = TRUE; break; /* -X: in sendmail: takes one parameter, logfile, and sends debugging logs to that file. We swallow the parameter and otherwise ignore it. 
*/ case 'X': if (*argrest == '\0') if (++i >= argc) { fprintf(stderr, "exim: string expected after -X\n"); exit(EXIT_FAILURE); } break; case 'z': if (*argrest == '\0') if (++i < argc) log_oneline = argv[i]; else { fprintf(stderr, "exim: file name expected after %s\n", argv[i-1]); exit(EXIT_FAILURE); } break; /* All other initial characters are errors */ default: badarg = TRUE; break; } /* End of high-level switch statement */ /* Failed to recognize the option, or syntax error */ if (badarg) { fprintf(stderr, "exim abandoned: unknown, malformed, or incomplete " "option %s\n", arg); exit(EXIT_FAILURE); } } /* If -R or -S have been specified without -q, assume a single queue run. */ if ( (deliver_selectstring || deliver_selectstring_sender) && queue_interval < 0) queue_interval = 0; END_ARG: /* If usage_wanted is set we call the usage function - which never returns */ if (usage_wanted) exim_usage(called_as); /* Arguments have been processed. Check for incompatibilities. */ if (( (smtp_input || extract_recipients || recipients_arg < argc) && (daemon_listen || queue_interval >= 0 || bi_option || test_retry_arg >= 0 || test_rewrite_arg >= 0 || filter_test != FTEST_NONE || (msg_action_arg > 0 && !one_msg_action)) ) || ( msg_action_arg > 0 && (daemon_listen || queue_interval > 0 || list_options || (checking && msg_action != MSG_LOAD) || bi_option || test_retry_arg >= 0 || test_rewrite_arg >= 0) ) || ( (daemon_listen || queue_interval > 0) && (sender_address != NULL || list_options || list_queue || checking || bi_option) ) || ( daemon_listen && queue_interval == 0 ) || ( inetd_wait_mode && queue_interval >= 0 ) || ( list_options && (checking || smtp_input || extract_recipients || filter_test != FTEST_NONE || bi_option) ) || ( verify_address_mode && (address_test_mode || smtp_input || extract_recipients || filter_test != FTEST_NONE || bi_option) ) || ( address_test_mode && (smtp_input || extract_recipients || filter_test != FTEST_NONE || bi_option) ) || ( smtp_input && (sender_address != NULL || filter_test != FTEST_NONE || extract_recipients) ) || ( deliver_selectstring != NULL && queue_interval < 0 ) || ( msg_action == MSG_LOAD && (!expansion_test || expansion_test_message != NULL) ) ) { fprintf(stderr, "exim: incompatible command-line options or arguments\n"); exit(EXIT_FAILURE); } /* If debugging is set up, set the file and the file descriptor to pass on to child processes. It should, of course, be 2 for stderr. Also, force the daemon to run in the foreground. */ if (debug_selector != 0) { debug_file = stderr; debug_fd = fileno(debug_file); background_daemon = FALSE; if (running_in_test_harness) millisleep(100); /* lets caller finish */ if (debug_selector != D_v) /* -v only doesn't show this */ { debug_printf("Exim version %s uid=%ld gid=%ld pid=%d D=%x\n", version_string, (long int)real_uid, (long int)real_gid, (int)getpid(), debug_selector); if (!version_printed) show_whats_supported(stderr); } } /* When started with root privilege, ensure that the limits on the number of open files and the number of processes (where that is accessible) are sufficiently large, or are unset, in case Exim has been called from an environment where the limits are screwed down. Not all OS have the ability to change some of these limits. 
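A sketch of the intent, assuming typical POSIX rlimit semantics: the current soft
limit is read first and only raised if it is below the preferred figure; if raising
it fails, a smaller fallback value is tried rather than leaving whatever the caller
imposed.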
*/ if (unprivileged) { DEBUG(D_any) debug_print_ids(US"Exim has no root privilege:"); } else { struct rlimit rlp; #ifdef RLIMIT_NOFILE if (getrlimit(RLIMIT_NOFILE, &rlp) < 0) { log_write(0, LOG_MAIN|LOG_PANIC, "getrlimit(RLIMIT_NOFILE) failed: %s", strerror(errno)); rlp.rlim_cur = rlp.rlim_max = 0; } /* I originally chose 1000 as a nice big number that was unlikely to be exceeded. It turns out that some older OS have a fixed upper limit of 256. */ if (rlp.rlim_cur < 1000) { rlp.rlim_cur = rlp.rlim_max = 1000; if (setrlimit(RLIMIT_NOFILE, &rlp) < 0) { rlp.rlim_cur = rlp.rlim_max = 256; if (setrlimit(RLIMIT_NOFILE, &rlp) < 0) log_write(0, LOG_MAIN|LOG_PANIC, "setrlimit(RLIMIT_NOFILE) failed: %s", strerror(errno)); } } #endif #ifdef RLIMIT_NPROC if (getrlimit(RLIMIT_NPROC, &rlp) < 0) { log_write(0, LOG_MAIN|LOG_PANIC, "getrlimit(RLIMIT_NPROC) failed: %s", strerror(errno)); rlp.rlim_cur = rlp.rlim_max = 0; } #ifdef RLIM_INFINITY if (rlp.rlim_cur != RLIM_INFINITY && rlp.rlim_cur < 1000) { rlp.rlim_cur = rlp.rlim_max = RLIM_INFINITY; #else if (rlp.rlim_cur < 1000) { rlp.rlim_cur = rlp.rlim_max = 1000; #endif if (setrlimit(RLIMIT_NPROC, &rlp) < 0) log_write(0, LOG_MAIN|LOG_PANIC, "setrlimit(RLIMIT_NPROC) failed: %s", strerror(errno)); } #endif } /* Exim is normally entered as root (but some special configurations are possible that don't do this). However, it always spins off sub-processes that set their uid and gid as required for local delivery. We don't want to pass on any extra groups that root may belong to, so we want to get rid of them all at this point. We need to obey setgroups() at this stage, before possibly giving up root privilege for a changed configuration file, but later on we might need to check on the additional groups for the admin user privilege - can't do that till after reading the config, which might specify the exim gid. Therefore, save the group list here first. */ group_count = getgroups(NGROUPS_MAX, group_list); if (group_count < 0) { fprintf(stderr, "exim: getgroups() failed: %s\n", strerror(errno)); exit(EXIT_FAILURE); } /* There is a fundamental difference in some BSD systems in the matter of groups. FreeBSD and BSDI are known to be different; NetBSD and OpenBSD are known not to be different. On the "different" systems there is a single group list, and the first entry in it is the current group. On all other versions of Unix there is a supplementary group list, which is in *addition* to the current group. Consequently, to get rid of all extraneous groups on a "standard" system you pass over 0 groups to setgroups(), while on a "different" system you pass over a single group - the current group, which is always the first group in the list. Calling setgroups() with zero groups on a "different" system results in an error return. The following code should cope with both types of system. However, if this process isn't running as root, setgroups() can't be used since you have to be root to run it, even if throwing away groups. Not being root here happens only in some unusual configurations. We just ignore the error. */ if (setgroups(0, NULL) != 0) { if (setgroups(1, group_list) != 0 && !unprivileged) { fprintf(stderr, "exim: setgroups() failed: %s\n", strerror(errno)); exit(EXIT_FAILURE); } } /* If the configuration file name has been altered by an argument on the command line (either a new file name or a macro definition) and the caller is not root, or if this is a filter testing run, remove any setuid privilege the program has and run as the underlying user. 
The exim user is locked out of this, which severely restricts the use of -C for some purposes. Otherwise, set the real ids to the effective values (should be root unless run from inetd, which it can either be root or the exim uid, if one is configured). There is a private mechanism for bypassing some of this, in order to make it possible to test lots of configurations automatically, without having either to recompile each time, or to patch in an actual configuration file name and other values (such as the path name). If running in the test harness, pretend that configuration file changes and macro definitions haven't happened. */ if (( /* EITHER */ (!trusted_config || /* Config changed, or */ !macros_trusted(opt_D_used)) && /* impermissible macros and */ real_uid != root_uid && /* Not root, and */ !running_in_test_harness /* Not fudged */ ) || /* OR */ expansion_test /* expansion testing */ || /* OR */ filter_test != FTEST_NONE) /* Filter testing */ { setgroups(group_count, group_list); exim_setugid(real_uid, real_gid, FALSE, US"-C, -D, -be or -bf forces real uid"); removed_privilege = TRUE; /* In the normal case when Exim is called like this, stderr is available and should be used for any logging information because attempts to write to the log will usually fail. To arrange this, we unset really_exim. However, if no stderr is available there is no point - we might as well have a go at the log (if it fails, syslog will be written). Note that if the invoker is Exim, the logs remain available. Messing with this causes unlogged successful deliveries. */ if ((log_stderr != NULL) && (real_uid != exim_uid)) really_exim = FALSE; } /* Privilege is to be retained for the moment. It may be dropped later, depending on the job that this Exim process has been asked to do. For now, set the real uid to the effective so that subsequent re-execs of Exim are done by a privileged user. */ else exim_setugid(geteuid(), getegid(), FALSE, US"forcing real = effective"); /* If testing a filter, open the file(s) now, before wasting time doing other setups and reading the message. */ if ((filter_test & FTEST_SYSTEM) != 0) { filter_sfd = Uopen(filter_test_sfile, O_RDONLY, 0); if (filter_sfd < 0) { fprintf(stderr, "exim: failed to open %s: %s\n", filter_test_sfile, strerror(errno)); return EXIT_FAILURE; } } if ((filter_test & FTEST_USER) != 0) { filter_ufd = Uopen(filter_test_ufile, O_RDONLY, 0); if (filter_ufd < 0) { fprintf(stderr, "exim: failed to open %s: %s\n", filter_test_ufile, strerror(errno)); return EXIT_FAILURE; } } /* Initialise lookup_list If debugging, already called above via version reporting. In either case, we initialise the list of available lookups while running as root. All dynamically modules are loaded from a directory which is hard-coded into the binary and is code which, if not a module, would be part of Exim already. Ability to modify the content of the directory is equivalent to the ability to modify a setuid binary! This needs to happen before we read the main configuration. */ init_lookup_list(); #ifdef SUPPORT_I18N if (running_in_test_harness) smtputf8_advertise_hosts = NULL; #endif /* Read the main runtime configuration data; this gives up if there is a failure. It leaves the configuration file open so that the subsequent configuration data for delivery can be read if needed. NOTE: immediatly after opening the configuration file we change the working directory to "/"! Later we change to $spool_directory. We do it there, because during readconf_main() some expansion takes place already. 
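A further note: initial_cwd, saved just below before any chdir(), is what later
appears after the "cwd=" tag when the command-line arguments are logged.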
*/

/* Store the initial cwd before we change directories */

if ((initial_cwd = os_getcwd(NULL, 0)) == NULL)
  {
  perror("exim: can't get the current working directory");
  exit(EXIT_FAILURE);
  }

/* checking:
    -be[m]     expansion test     -
    -b[fF]     filter test        new
    -bh[c]     host test          -
    -bmalware  malware_test_file  new
    -brt       retry test         new
    -brw       rewrite test       new
    -bt        address test       -
    -bv[s]     address verify     -
   list_options:
    -bP <option> (except -bP config, which sets list_config)

If any of these options is set, we suppress warnings about configuration
issues (currently about tls_advertise_hosts and keep_environment not being
defined) */

readconf_main(checking || list_options);

if (builtin_macros_create_trigger) DEBUG(D_any)
  debug_printf("Builtin macros created (expensive) due to config line '%.*s'\n",
    Ustrlen(builtin_macros_create_trigger)-1, builtin_macros_create_trigger);

/* Now in directory "/" */

if (cleanup_environment() == FALSE)
  log_write(0, LOG_PANIC_DIE, "Can't cleanup environment");

/* If an action on specific messages is requested, or if a daemon or queue
runner is being started, we need to know if Exim was called by an admin user.
This is the case if the real user is root or exim, or if the real group is
exim, or if one of the supplementary groups is exim or a group listed in
admin_groups. We don't fail all message actions immediately if not admin_user,
since some actions can be performed by non-admin users. Instead, set
admin_user for later interrogation. */

if (real_uid == root_uid || real_uid == exim_uid || real_gid == exim_gid)
  admin_user = TRUE;
else
  {
  int i, j;
  for (i = 0; i < group_count && !admin_user; i++)
    if (group_list[i] == exim_gid)
      admin_user = TRUE;
    else if (admin_groups)
      for (j = 1; j <= (int)admin_groups[0] && !admin_user; j++)
        if (admin_groups[j] == group_list[i]) admin_user = TRUE;
  }

/* Another group of privileged users are the trusted users. These are root,
exim, and any caller matching trusted_users or trusted_groups. Trusted callers
are permitted to specify sender_addresses with -f on the command line, and
other message parameters as well. */

if (real_uid == root_uid || real_uid == exim_uid)
  trusted_caller = TRUE;
else
  {
  int i, j;

  if (trusted_users)
    for (i = 1; i <= (int)trusted_users[0] && !trusted_caller; i++)
      if (trusted_users[i] == real_uid) trusted_caller = TRUE;

  if (trusted_groups)
    for (i = 1; i <= (int)trusted_groups[0] && !trusted_caller; i++)
      if (trusted_groups[i] == real_gid)
        trusted_caller = TRUE;
      else
        for (j = 0; j < group_count && !trusted_caller; j++)
          if (trusted_groups[i] == group_list[j]) trusted_caller = TRUE;
  }

/* At this point, we know if the user is privileged and some command-line
options may become impermissible, depending upon the configuration file. */

if (checking && commandline_checks_require_admin && !admin_user)
  {
  fprintf(stderr, "exim: those command-line flags are set to require admin\n");
  exit(EXIT_FAILURE);
  }

/* Handle the decoding of logging options. */

decode_bits(log_selector, log_selector_size, log_notall,
  log_selector_string, log_options, log_options_count, US"log", 0);

DEBUG(D_any)
  {
  int i;
  debug_printf("configuration file is %s\n", config_main_filename);
  debug_printf("log selectors =");
  for (i = 0; i < log_selector_size; i++)
    debug_printf(" %08x", log_selector[i]);
  debug_printf("\n");
  }

/* If domain literals are not allowed, check the sender address that was
supplied with -f. Ditto for a stripped trailing dot.
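Hypothetical examples: "-f user@[192.0.2.1]" is rejected here unless
allow_domain_literals is set, and "-f user@example.com." is rejected unless
strip_trailing_dot is set.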
*/ if (sender_address != NULL) { if (sender_address[sender_address_domain] == '[' && !allow_domain_literals) { fprintf(stderr, "exim: bad -f address \"%s\": domain literals not " "allowed\n", sender_address); return EXIT_FAILURE; } if (f_end_dot && !strip_trailing_dot) { fprintf(stderr, "exim: bad -f address \"%s.\": domain is malformed " "(trailing dot not allowed)\n", sender_address); return EXIT_FAILURE; } } /* See if an admin user overrode our logging. */ if (cmdline_syslog_name != NULL) { if (admin_user) { syslog_processname = cmdline_syslog_name; log_file_path = string_copy(CUS"syslog"); } else { /* not a panic, non-privileged users should not be able to spam paniclog */ fprintf(stderr, "exim: you lack sufficient privilege to specify syslog process name\n"); return EXIT_FAILURE; } } /* Paranoia check of maximum lengths of certain strings. There is a check on the length of the log file path in log.c, which will come into effect if there are any calls to write the log earlier than this. However, if we get this far but the string is very long, it is better to stop now than to carry on and (e.g.) receive a message and then have to collapse. The call to log_write() from here will cause the ultimate panic collapse if the complete file name exceeds the buffer length. */ if (Ustrlen(log_file_path) > 200) log_write(0, LOG_MAIN|LOG_PANIC_DIE, "log_file_path is longer than 200 chars: aborting"); if (Ustrlen(pid_file_path) > 200) log_write(0, LOG_MAIN|LOG_PANIC_DIE, "pid_file_path is longer than 200 chars: aborting"); if (Ustrlen(spool_directory) > 200) log_write(0, LOG_MAIN|LOG_PANIC_DIE, "spool_directory is longer than 200 chars: aborting"); /* Length check on the process name given to syslog for its TAG field, which is only permitted to be 32 characters or less. See RFC 3164. */ if (Ustrlen(syslog_processname) > 32) log_write(0, LOG_MAIN|LOG_PANIC_DIE, "syslog_processname is longer than 32 chars: aborting"); if (log_oneline) if (admin_user) { log_write(0, LOG_MAIN, "%s", log_oneline); return EXIT_SUCCESS; } else return EXIT_FAILURE; /* In some operating systems, the environment variable TMPDIR controls where temporary files are created; Exim doesn't use these (apart from when delivering to MBX mailboxes), but called libraries such as DBM libraries may require them. If TMPDIR is found in the environment, reset it to the value defined in the EXIM_TMPDIR macro, if this macro is defined. For backward compatibility this macro may be called TMPDIR in old "Local/Makefile"s. It's converted to EXIM_TMPDIR by the build scripts. */ #ifdef EXIM_TMPDIR { uschar **p; if (environ) for (p = USS environ; *p; p++) if (Ustrncmp(*p, "TMPDIR=", 7) == 0 && Ustrcmp(*p+7, EXIM_TMPDIR) != 0) { uschar * newp = store_malloc(Ustrlen(EXIM_TMPDIR) + 8); sprintf(CS newp, "TMPDIR=%s", EXIM_TMPDIR); *p = newp; DEBUG(D_any) debug_printf("reset TMPDIR=%s in environment\n", EXIM_TMPDIR); } } #endif /* Timezone handling. If timezone_string is "utc", set a flag to cause all timestamps to be in UTC (gmtime() is used instead of localtime()). Otherwise, we may need to get rid of a bogus timezone setting. This can arise when Exim is called by a user who has set the TZ variable. This then affects the timestamps in log files and in Received: headers, and any created Date: header lines. The required timezone is settable in the configuration file, so nothing can be done about this earlier - but hopefully nothing will normally be logged earlier than this. 
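(Illustration: a caller that exports TZ=America/New_York would otherwise get log and
Received: timestamps in that zone; resetting TZ to the configured timezone_string
keeps them consistent.)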
We have to make a new environment if TZ is wrong, but don't bother if timestamps_utc is set, because then all times are in UTC anyway. */ if (timezone_string && strcmpic(timezone_string, US"UTC") == 0) timestamps_utc = TRUE; else { uschar *envtz = US getenv("TZ"); if (envtz ? !timezone_string || Ustrcmp(timezone_string, envtz) != 0 : timezone_string != NULL ) { uschar **p = USS environ; uschar **new; uschar **newp; int count = 0; if (environ) while (*p++) count++; if (!envtz) count++; newp = new = store_malloc(sizeof(uschar *) * (count + 1)); if (environ) for (p = USS environ; *p; p++) if (Ustrncmp(*p, "TZ=", 3) != 0) *newp++ = *p; if (timezone_string) { *newp = store_malloc(Ustrlen(timezone_string) + 4); sprintf(CS *newp++, "TZ=%s", timezone_string); } *newp = NULL; environ = CSS new; tzset(); DEBUG(D_any) debug_printf("Reset TZ to %s: time is %s\n", timezone_string, tod_stamp(tod_log)); } } /* Handle the case when we have removed the setuid privilege because of -C or -D. This means that the caller of Exim was not root. There is a problem if we were running as the Exim user. The sysadmin may expect this case to retain privilege because "the binary was called by the Exim user", but it hasn't, because either the -D option set macros, or the -C option set a non-trusted configuration file. There are two possibilities: (1) If deliver_drop_privilege is set, Exim is not going to re-exec in order to do message deliveries. Thus, the fact that it is running as a non-privileged user is plausible, and might be wanted in some special configurations. However, really_exim will have been set false when privilege was dropped, to stop Exim trying to write to its normal log files. Therefore, re-enable normal log processing, assuming the sysadmin has set up the log directory correctly. (2) If deliver_drop_privilege is not set, the configuration won't work as apparently intended, and so we log a panic message. In order to retain root for -C or -D, the caller must either be root or be invoking a trusted configuration file (when deliver_drop_privilege is false). */ if ( removed_privilege && (!trusted_config || opt_D_used) && real_uid == exim_uid) if (deliver_drop_privilege) really_exim = TRUE; /* let logging work normally */ else log_write(0, LOG_MAIN|LOG_PANIC, "exim user lost privilege for using %s option", trusted_config? "-D" : "-C"); /* Start up Perl interpreter if Perl support is configured and there is a perl_startup option, and the configuration or the command line specifies initializing starting. Note that the global variables are actually called opt_perl_xxx to avoid clashing with perl's namespace (perl_*). */ #ifdef EXIM_PERL if (perl_start_option != 0) opt_perl_at_start = (perl_start_option > 0); if (opt_perl_at_start && opt_perl_startup != NULL) { uschar *errstr; DEBUG(D_any) debug_printf("Starting Perl interpreter\n"); errstr = init_perl(opt_perl_startup); if (errstr != NULL) { fprintf(stderr, "exim: error in perl_startup code: %s\n", errstr); return EXIT_FAILURE; } opt_perl_started = TRUE; } #endif /* EXIM_PERL */ /* Log the arguments of the call if the configuration file said so. This is a debugging feature for finding out what arguments certain MUAs actually use. Don't attempt it if logging is disabled, or if listing variables or if verifying/testing addresses or expansions. 
*/ if (((debug_selector & D_any) != 0 || LOGGING(arguments)) && really_exim && !list_options && !checking) { int i; uschar *p = big_buffer; Ustrcpy(p, "cwd= (failed)"); Ustrncpy(p + 4, initial_cwd, big_buffer_size-5); while (*p) p++; (void)string_format(p, big_buffer_size - (p - big_buffer), " %d args:", argc); while (*p) p++; for (i = 0; i < argc; i++) { int len = Ustrlen(argv[i]); const uschar *printing; uschar *quote; if (p + len + 8 >= big_buffer + big_buffer_size) { Ustrcpy(p, " ..."); log_write(0, LOG_MAIN, "%s", big_buffer); Ustrcpy(big_buffer, "..."); p = big_buffer + 3; } printing = string_printing(argv[i]); if (printing[0] == 0) quote = US"\""; else { const uschar *pp = printing; quote = US""; while (*pp != 0) if (isspace(*pp++)) { quote = US"\""; break; } } sprintf(CS p, " %s%.*s%s", quote, (int)(big_buffer_size - (p - big_buffer) - 4), printing, quote); while (*p) p++; } if (LOGGING(arguments)) log_write(0, LOG_MAIN, "%s", big_buffer); else debug_printf("%s\n", big_buffer); } /* Set the working directory to be the top-level spool directory. We don't rely on this in the code, which always uses fully qualified names, but it's useful for core dumps etc. Don't complain if it fails - the spool directory might not be generally accessible and calls with the -C option (and others) have lost privilege by now. Before the chdir, we try to ensure that the directory exists. */ if (Uchdir(spool_directory) != 0) { int dummy; (void)directory_make(spool_directory, US"", SPOOL_DIRECTORY_MODE, FALSE); dummy = /* quieten compiler */ Uchdir(spool_directory); } /* Handle calls with the -bi option. This is a sendmail option to rebuild *the* alias file. Exim doesn't have such a concept, but this call is screwed into Sun's YP makefiles. Handle this by calling a configured script, as the real user who called Exim. The -oA option can be used to pass an argument to the script. */ if (bi_option) { (void)fclose(config_file); if (bi_command != NULL) { int i = 0; uschar *argv[3]; argv[i++] = bi_command; if (alias_arg != NULL) argv[i++] = alias_arg; argv[i++] = NULL; setgroups(group_count, group_list); exim_setugid(real_uid, real_gid, FALSE, US"running bi_command"); DEBUG(D_exec) debug_printf("exec %.256s %.256s\n", argv[0], (argv[1] == NULL)? US"" : argv[1]); execv(CS argv[0], (char *const *)argv); fprintf(stderr, "exim: exec failed: %s\n", strerror(errno)); exit(EXIT_FAILURE); } else { DEBUG(D_any) debug_printf("-bi used but bi_command not set; exiting\n"); exit(EXIT_SUCCESS); } } /* We moved the admin/trusted check to be immediately after reading the configuration file. We leave these prints here to ensure that syslog setup, logfile setup, and so on has already happened. */ if (trusted_caller) DEBUG(D_any) debug_printf("trusted user\n"); if (admin_user) DEBUG(D_any) debug_printf("admin user\n"); /* Only an admin user may start the daemon or force a queue run in the default configuration, but the queue run restriction can be relaxed. Only an admin user may request that a message be returned to its sender forthwith. Only an admin user may specify a debug level greater than D_v (because it might show passwords, etc. in lookup queries). Only an admin user may request a queue count. Only an admin user can use the test interface to scan for email (because Exim will be in the spool dir and able to look at mails). 
*/ if (!admin_user) { BOOL debugset = (debug_selector & ~D_v) != 0; if (deliver_give_up || daemon_listen || malware_test_file || (count_queue && queue_list_requires_admin) || (list_queue && queue_list_requires_admin) || (queue_interval >= 0 && prod_requires_admin) || (debugset && !running_in_test_harness)) { fprintf(stderr, "exim:%s permission denied\n", debugset? " debugging" : ""); exit(EXIT_FAILURE); } } /* If the real user is not root or the exim uid, the argument for passing in an open TCP/IP connection for another message is not permitted, nor is running with the -N option for any delivery action, unless this call to exim is one that supplied an input message, or we are using a patched exim for regression testing. */ if (real_uid != root_uid && real_uid != exim_uid && (continue_hostname != NULL || (dont_deliver && (queue_interval >= 0 || daemon_listen || msg_action_arg > 0) )) && !running_in_test_harness) { fprintf(stderr, "exim: Permission denied\n"); return EXIT_FAILURE; } /* If the caller is not trusted, certain arguments are ignored when running for real, but are permitted when checking things (-be, -bv, -bt, -bh, -bf, -bF). Note that authority for performing certain actions on messages is tested in the queue_action() function. */ if (!trusted_caller && !checking) { sender_host_name = sender_host_address = interface_address = sender_ident = received_protocol = NULL; sender_host_port = interface_port = 0; sender_host_authenticated = authenticated_sender = authenticated_id = NULL; } /* If a sender host address is set, extract the optional port number off the end of it and check its syntax. Do the same thing for the interface address. Exim exits if the syntax is bad. */ else { if (sender_host_address != NULL) sender_host_port = check_port(sender_host_address); if (interface_address != NULL) interface_port = check_port(interface_address); } /* If the caller is trusted, then they can use -G to suppress_local_fixups. */ if (flag_G) { if (trusted_caller) { suppress_local_fixups = suppress_local_fixups_default = TRUE; DEBUG(D_acl) debug_printf("suppress_local_fixups forced on by -G\n"); } else { fprintf(stderr, "exim: permission denied (-G requires a trusted user)\n"); return EXIT_FAILURE; } } /* If an SMTP message is being received check to see if the standard input is a TCP/IP socket. If it is, we assume that Exim was called from inetd if the caller is root or the Exim user, or if the port is a privileged one. Otherwise, barf. 
*/ if (smtp_input) { union sockaddr_46 inetd_sock; EXIM_SOCKLEN_T size = sizeof(inetd_sock); if (getpeername(0, (struct sockaddr *)(&inetd_sock), &size) == 0) { int family = ((struct sockaddr *)(&inetd_sock))->sa_family; if (family == AF_INET || family == AF_INET6) { union sockaddr_46 interface_sock; size = sizeof(interface_sock); if (getsockname(0, (struct sockaddr *)(&interface_sock), &size) == 0) interface_address = host_ntoa(-1, &interface_sock, NULL, &interface_port); if (host_is_tls_on_connect_port(interface_port)) tls_in.on_connect = TRUE; if (real_uid == root_uid || real_uid == exim_uid || interface_port < 1024) { is_inetd = TRUE; sender_host_address = host_ntoa(-1, (struct sockaddr *)(&inetd_sock), NULL, &sender_host_port); if (mua_wrapper) log_write(0, LOG_MAIN|LOG_PANIC_DIE, "Input from " "inetd is not supported when mua_wrapper is set"); } else { fprintf(stderr, "exim: Permission denied (unprivileged user, unprivileged port)\n"); return EXIT_FAILURE; } } } } /* If the load average is going to be needed while receiving a message, get it now for those OS that require the first call to os_getloadavg() to be done as root. There will be further calls later for each message received. */ #ifdef LOAD_AVG_NEEDS_ROOT if (receiving_message && (queue_only_load >= 0 || (is_inetd && smtp_load_reserve >= 0) )) { load_average = OS_GETLOADAVG(); } #endif /* The queue_only configuration option can be overridden by -odx on the command line, except that if queue_only_override is false, queue_only cannot be unset from the command line. */ if (queue_only_set && (queue_only_override || arg_queue_only)) queue_only = arg_queue_only; /* The receive_timeout and smtp_receive_timeout options can be overridden by -or and -os. */ if (arg_receive_timeout >= 0) receive_timeout = arg_receive_timeout; if (arg_smtp_receive_timeout >= 0) smtp_receive_timeout = arg_smtp_receive_timeout; /* If Exim was started with root privilege, unless we have already removed the root privilege above as a result of -C, -D, -be, -bf or -bF, remove it now except when starting the daemon or doing some kind of delivery or address testing (-bt). These are the only cases when root need to be retained. We run as exim for -bv and -bh. However, if deliver_drop_privilege is set, root is retained only for starting the daemon. We always do the initgroups() in this situation (controlled by the TRUE below), in order to be as close as possible to the state Exim usually runs in. */ if (!unprivileged && /* originally had root AND */ !removed_privilege && /* still got root AND */ !daemon_listen && /* not starting the daemon */ queue_interval <= 0 && /* (either kind of daemon) */ ( /* AND EITHER */ deliver_drop_privilege || /* requested unprivileged */ ( /* OR */ queue_interval < 0 && /* not running the queue */ (msg_action_arg < 0 || /* and */ msg_action != MSG_DELIVER) && /* not delivering and */ (!checking || !address_test_mode) /* not address checking */ ) ) ) exim_setugid(exim_uid, exim_gid, TRUE, US"privilege not needed"); /* When we are retaining a privileged uid, we still change to the exim gid. */ else { int rv; rv = setgid(exim_gid); /* Impact of failure is that some stuff might end up with an incorrect group. We track this for failures from root, since any attempt to change privilege by root should succeed and failures should be examined. For non-root, there's no security risk. For me, it's { exim -bV } on a just-built binary, no need to complain then. 
*/ if (rv == -1) if (!(unprivileged || removed_privilege)) { fprintf(stderr, "exim: changing group failed: %s\n", strerror(errno)); exit(EXIT_FAILURE); } else DEBUG(D_any) debug_printf("changing group to %ld failed: %s\n", (long int)exim_gid, strerror(errno)); } /* Handle a request to scan a file for malware */ if (malware_test_file) { #ifdef WITH_CONTENT_SCAN int result; set_process_info("scanning file for malware"); result = malware_in_file(malware_test_file); if (result == FAIL) { printf("No malware found.\n"); exit(EXIT_SUCCESS); } if (result != OK) { printf("Malware lookup returned non-okay/fail: %d\n", result); exit(EXIT_FAILURE); } if (malware_name) printf("Malware found: %s\n", malware_name); else printf("Malware scan detected malware of unknown name.\n"); #else printf("Malware scanning not enabled at compile time.\n"); #endif exit(EXIT_FAILURE); } /* Handle a request to list the delivery queue */ if (list_queue) { set_process_info("listing the queue"); queue_list(list_queue_option, argv + recipients_arg, argc - recipients_arg); exit(EXIT_SUCCESS); } /* Handle a request to count the delivery queue */ if (count_queue) { set_process_info("counting the queue"); queue_count(); exit(EXIT_SUCCESS); } /* Handle actions on specific messages, except for the force delivery and message load actions, which are done below. Some actions take a whole list of message ids, which are known to continue up to the end of the arguments. Others take a single message id and then operate on the recipients list. */ if (msg_action_arg > 0 && msg_action != MSG_DELIVER && msg_action != MSG_LOAD) { int yield = EXIT_SUCCESS; set_process_info("acting on specified messages"); if (!one_msg_action) { for (i = msg_action_arg; i < argc; i++) if (!queue_action(argv[i], msg_action, NULL, 0, 0)) yield = EXIT_FAILURE; } else if (!queue_action(argv[msg_action_arg], msg_action, argv, argc, recipients_arg)) yield = EXIT_FAILURE; exit(yield); } /* We used to set up here to skip reading the ACL section, on (msg_action_arg > 0 || (queue_interval == 0 && !daemon_listen) Now, since the intro of the ${acl } expansion, ACL definitions may be needed in transports so we lost the optimisation. */ readconf_rest(); /* The configuration data will have been read into POOL_PERM because we won't ever want to reset back past it. Change the current pool to POOL_MAIN. In fact, this is just a bit of pedantic tidiness. It wouldn't really matter if the configuration were read into POOL_MAIN, because we don't do any resets till later on. However, it seems right, and it does ensure that both pools get used. */ store_pool = POOL_MAIN; /* Handle the -brt option. This is for checking out retry configurations. The next three arguments are a domain name or a complete address, and optionally two error numbers. All it does is to call the function that scans the retry configuration data. */ if (test_retry_arg >= 0) { retry_config *yield; int basic_errno = 0; int more_errno = 0; uschar *s1, *s2; if (test_retry_arg >= argc) { printf("-brt needs a domain or address argument\n"); exim_exit(EXIT_FAILURE); } s1 = argv[test_retry_arg++]; s2 = NULL; /* If the first argument contains no @ and no . it might be a local user or it might be a single-component name. Treat as a domain. */ if (Ustrchr(s1, '@') == NULL && Ustrchr(s1, '.') == NULL) { printf("Warning: \"%s\" contains no '@' and no '.' characters. It is " "being \ntreated as a one-component domain, not as a local part.\n\n", s1); } /* There may be an optional second domain arg. 
*/ if (test_retry_arg < argc && Ustrchr(argv[test_retry_arg], '.') != NULL) s2 = argv[test_retry_arg++]; /* The final arg is an error name */ if (test_retry_arg < argc) { uschar *ss = argv[test_retry_arg]; uschar *error = readconf_retry_error(ss, ss + Ustrlen(ss), &basic_errno, &more_errno); if (error != NULL) { printf("%s\n", CS error); return EXIT_FAILURE; } /* For the {MAIL,RCPT,DATA}_4xx errors, a value of 255 means "any", and a code > 100 as an error is for matching codes to the decade. Turn them into a real error code, off the decade. */ if (basic_errno == ERRNO_MAIL4XX || basic_errno == ERRNO_RCPT4XX || basic_errno == ERRNO_DATA4XX) { int code = (more_errno >> 8) & 255; if (code == 255) more_errno = (more_errno & 0xffff00ff) | (21 << 8); else if (code > 100) more_errno = (more_errno & 0xffff00ff) | ((code - 96) << 8); } } yield = retry_find_config(s1, s2, basic_errno, more_errno); if (yield == NULL) printf("No retry information found\n"); else { retry_rule *r; more_errno = yield->more_errno; printf("Retry rule: %s ", yield->pattern); if (yield->basic_errno == ERRNO_EXIMQUOTA) { printf("quota%s%s ", (more_errno > 0)? "_" : "", (more_errno > 0)? readconf_printtime(more_errno) : US""); } else if (yield->basic_errno == ECONNREFUSED) { printf("refused%s%s ", (more_errno > 0)? "_" : "", (more_errno == 'M')? "MX" : (more_errno == 'A')? "A" : ""); } else if (yield->basic_errno == ETIMEDOUT) { printf("timeout"); if ((more_errno & RTEF_CTOUT) != 0) printf("_connect"); more_errno &= 255; if (more_errno != 0) printf("_%s", (more_errno == 'M')? "MX" : "A"); printf(" "); } else if (yield->basic_errno == ERRNO_AUTHFAIL) printf("auth_failed "); else printf("* "); for (r = yield->rules; r != NULL; r = r->next) { printf("%c,%s", r->rule, readconf_printtime(r->timeout)); /* Do not */ printf(",%s", readconf_printtime(r->p1)); /* amalgamate */ if (r->rule == 'G') { int x = r->p2; int f = x % 1000; int d = 100; printf(",%d.", x/1000); do { printf("%d", f/d); f %= d; d /= 10; } while (f != 0); } printf("; "); } printf("\n"); } exim_exit(EXIT_SUCCESS); } /* Handle a request to list one or more configuration options */ /* If -n was set, we suppress some information */ if (list_options) { set_process_info("listing variables"); if (recipients_arg >= argc) readconf_print(US"all", NULL, flag_n); else for (i = recipients_arg; i < argc; i++) { if (i < argc - 1 && (Ustrcmp(argv[i], "router") == 0 || Ustrcmp(argv[i], "transport") == 0 || Ustrcmp(argv[i], "authenticator") == 0 || Ustrcmp(argv[i], "macro") == 0 || Ustrcmp(argv[i], "environment") == 0)) { readconf_print(argv[i+1], argv[i], flag_n); i++; } else readconf_print(argv[i], NULL, flag_n); } exim_exit(EXIT_SUCCESS); } if (list_config) { set_process_info("listing config"); readconf_print(US"config", NULL, flag_n); exim_exit(EXIT_SUCCESS); } /* Initialise subsystems as required */ #ifndef DISABLE_DKIM dkim_exim_init(); #endif deliver_init(); /* Handle a request to deliver one or more messages that are already on the queue. Values of msg_action other than MSG_DELIVER and MSG_LOAD are dealt with above. MSG_LOAD is handled with -be (which is the only time it applies) below. Delivery of specific messages is typically used for a small number when prodding by hand (when the option forced_delivery will be set) or when re-execing to regain root privilege. 
Each message delivery must happen in a separate process, so we fork a process for each one, and run them sequentially so that debugging output doesn't get intertwined, and to avoid spawning too many processes if a long list is given. However, don't fork for the last one; this saves a process in the common case when Exim is called to deliver just one message. */ if (msg_action_arg > 0 && msg_action != MSG_LOAD) { if (prod_requires_admin && !admin_user) { fprintf(stderr, "exim: Permission denied\n"); exim_exit(EXIT_FAILURE); } set_process_info("delivering specified messages"); if (deliver_give_up) forced_delivery = deliver_force_thaw = TRUE; for (i = msg_action_arg; i < argc; i++) { int status; pid_t pid; if (i == argc - 1) (void)deliver_message(argv[i], forced_delivery, deliver_give_up); else if ((pid = fork()) == 0) { (void)deliver_message(argv[i], forced_delivery, deliver_give_up); _exit(EXIT_SUCCESS); } else if (pid < 0) { fprintf(stderr, "failed to fork delivery process for %s: %s\n", argv[i], strerror(errno)); exim_exit(EXIT_FAILURE); } else wait(&status); } exim_exit(EXIT_SUCCESS); } /* If only a single queue run is requested, without SMTP listening, we can just turn into a queue runner, with an optional starting message id. */ if (queue_interval == 0 && !daemon_listen) { DEBUG(D_queue_run) debug_printf("Single queue run%s%s%s%s\n", (start_queue_run_id == NULL)? US"" : US" starting at ", (start_queue_run_id == NULL)? US"" : start_queue_run_id, (stop_queue_run_id == NULL)? US"" : US" stopping at ", (stop_queue_run_id == NULL)? US"" : stop_queue_run_id); if (*queue_name) set_process_info("running the '%s' queue (single queue run)", queue_name); else set_process_info("running the queue (single queue run)"); queue_run(start_queue_run_id, stop_queue_run_id, FALSE); exim_exit(EXIT_SUCCESS); } /* Find the login name of the real user running this process. This is always needed when receiving a message, because it is written into the spool file. It may also be used to construct a from: or a sender: header, and in this case we need the user's full name as well, so save a copy of it, checked for RFC822 syntax and munged if necessary, if it hasn't previously been set by the -F argument. We may try to get the passwd entry more than once, in case NIS or other delays are in evidence. Save the home directory for use in filter testing (only). */ for (i = 0;;) { if ((pw = getpwuid(real_uid)) != NULL) { originator_login = string_copy(US pw->pw_name); originator_home = string_copy(US pw->pw_dir); /* If user name has not been set by -F, set it from the passwd entry unless -f has been used to set the sender address by a trusted user. */ if (originator_name == NULL) { if (sender_address == NULL || (!trusted_caller && filter_test == FTEST_NONE)) { uschar *name = US pw->pw_gecos; uschar *amp = Ustrchr(name, '&'); uschar buffer[256]; /* Most Unix specify that a '&' character in the gecos field is replaced by a copy of the login name, and some even specify that the first character should be upper cased, so that's what we do. */ if (amp != NULL) { int loffset; string_format(buffer, sizeof(buffer), "%.*s%n%s%s", amp - name, name, &loffset, originator_login, amp + 1); buffer[loffset] = toupper(buffer[loffset]); name = buffer; } /* If a pattern for matching the gecos field was supplied, apply it and then expand the name string. 
*/ if (gecos_pattern != NULL && gecos_name != NULL) { const pcre *re; re = regex_must_compile(gecos_pattern, FALSE, TRUE); /* Use malloc */ if (regex_match_and_setup(re, name, 0, -1)) { uschar *new_name = expand_string(gecos_name); expand_nmax = -1; if (new_name != NULL) { DEBUG(D_receive) debug_printf("user name \"%s\" extracted from " "gecos field \"%s\"\n", new_name, name); name = new_name; } else DEBUG(D_receive) debug_printf("failed to expand gecos_name string " "\"%s\": %s\n", gecos_name, expand_string_message); } else DEBUG(D_receive) debug_printf("gecos_pattern \"%s\" did not match " "gecos field \"%s\"\n", gecos_pattern, name); store_free((void *)re); } originator_name = string_copy(name); } /* A trusted caller has used -f but not -F */ else originator_name = US""; } /* Break the retry loop */ break; } if (++i > finduser_retries) break; sleep(1); } /* If we cannot get a user login, log the incident and give up, unless the configuration specifies something to use. When running in the test harness, any setting of unknown_login overrides the actual name. */ if (originator_login == NULL || running_in_test_harness) { if (unknown_login != NULL) { originator_login = expand_string(unknown_login); if (originator_name == NULL && unknown_username != NULL) originator_name = expand_string(unknown_username); if (originator_name == NULL) originator_name = US""; } if (originator_login == NULL) log_write(0, LOG_MAIN|LOG_PANIC_DIE, "Failed to get user name for uid %d", (int)real_uid); } /* Ensure that the user name is in a suitable form for use as a "phrase" in an RFC822 address.*/ originator_name = string_copy(parse_fix_phrase(originator_name, Ustrlen(originator_name), big_buffer, big_buffer_size)); /* If a message is created by this call of Exim, the uid/gid of its originator are those of the caller. These values are overridden if an existing message is read in from the spool. */ originator_uid = real_uid; originator_gid = real_gid; DEBUG(D_receive) debug_printf("originator: uid=%d gid=%d login=%s name=%s\n", (int)originator_uid, (int)originator_gid, originator_login, originator_name); /* Run in daemon and/or queue-running mode. The function daemon_go() never returns. We leave this till here so that the originator_ fields are available for incoming messages via the daemon. The daemon cannot be run in mua_wrapper mode. */ if (daemon_listen || inetd_wait_mode || queue_interval > 0) { if (mua_wrapper) { fprintf(stderr, "Daemon cannot be run when mua_wrapper is set\n"); log_write(0, LOG_MAIN|LOG_PANIC_DIE, "Daemon cannot be run when " "mua_wrapper is set"); } daemon_go(); } /* If the sender ident has not been set (by a trusted caller) set it to the caller. This will get overwritten below for an inetd call. If a trusted caller has set it empty, unset it. */ if (sender_ident == NULL) sender_ident = originator_login; else if (sender_ident[0] == 0) sender_ident = NULL; /* Handle the -brw option, which is for checking out rewriting rules. Cause log writes (on errors) to go to stderr instead. Can't do this earlier, as want the originator_* variables set. */ if (test_rewrite_arg >= 0) { really_exim = FALSE; if (test_rewrite_arg >= argc) { printf("-brw needs an address argument\n"); exim_exit(EXIT_FAILURE); } rewrite_test(argv[test_rewrite_arg]); exim_exit(EXIT_SUCCESS); } /* A locally-supplied message is considered to be coming from a local user unless a trusted caller supplies a sender address with -f, or is passing in the message via SMTP (inetd invocation or otherwise). 
*/ if ((sender_address == NULL && !smtp_input) || (!trusted_caller && filter_test == FTEST_NONE)) { sender_local = TRUE; /* A trusted caller can supply authenticated_sender and authenticated_id via -oMas and -oMai and if so, they will already be set. Otherwise, force defaults except when host checking. */ if (authenticated_sender == NULL && !host_checking) authenticated_sender = string_sprintf("%s@%s", originator_login, qualify_domain_sender); if (authenticated_id == NULL && !host_checking) authenticated_id = originator_login; } /* Trusted callers are always permitted to specify the sender address. Untrusted callers may specify it if it matches untrusted_set_sender, or if what is specified is the empty address. However, if a trusted caller does not specify a sender address for SMTP input, we leave sender_address unset. This causes the MAIL commands to be honoured. */ if ((!smtp_input && sender_address == NULL) || !receive_check_set_sender(sender_address)) { /* Either the caller is not permitted to set a general sender, or this is non-SMTP input and the trusted caller has not set a sender. If there is no sender, or if a sender other than <> is set, override with the originator's login (which will get qualified below), except when checking things. */ if (sender_address == NULL /* No sender_address set */ || /* OR */ (sender_address[0] != 0 && /* Non-empty sender address, AND */ !checking)) /* Not running tests, including filter tests */ { sender_address = originator_login; sender_address_forced = FALSE; sender_address_domain = 0; } } /* Remember whether an untrusted caller set the sender address */ sender_set_untrusted = sender_address != originator_login && !trusted_caller; /* Ensure that the sender address is fully qualified unless it is the empty address, which indicates an error message, or doesn't exist (root caller, smtp interface, no -f argument). */ if (sender_address != NULL && sender_address[0] != 0 && sender_address_domain == 0) sender_address = string_sprintf("%s@%s", local_part_quote(sender_address), qualify_domain_sender); DEBUG(D_receive) debug_printf("sender address = %s\n", sender_address); /* Handle a request to verify a list of addresses, or test them for delivery. This must follow the setting of the sender address, since routers can be predicated upon the sender. If no arguments are given, read addresses from stdin. Set debug_level to at least D_v to get full output for address testing. */ if (verify_address_mode || address_test_mode) { int exit_value = 0; int flags = vopt_qualify; if (verify_address_mode) { if (!verify_as_sender) flags |= vopt_is_recipient; DEBUG(D_verify) debug_print_ids(US"Verifying:"); } else { flags |= vopt_is_recipient; debug_selector |= D_v; debug_file = stderr; debug_fd = fileno(debug_file); DEBUG(D_verify) debug_print_ids(US"Address testing:"); } if (recipients_arg < argc) { while (recipients_arg < argc) { uschar *s = argv[recipients_arg++]; while (*s != 0) { BOOL finished = FALSE; uschar *ss = parse_find_address_end(s, FALSE); if (*ss == ',') *ss = 0; else finished = TRUE; test_address(s, flags, &exit_value); s = ss; if (!finished) while (*(++s) != 0 && (*s == ',' || isspace(*s))); } } } else for (;;) { uschar *s = get_stdinput(NULL, NULL); if (s == NULL) break; test_address(s, flags, &exit_value); } route_tidyup(); exim_exit(exit_value); } /* Handle expansion checking. Either expand items on the command line, or read from stdin if there aren't any. 
If -Mset was specified, load the message so that its variables can be used, but restrict this facility to admin users. Otherwise, if -bem was used, read a message from stdin. */ if (expansion_test) { dns_init(FALSE, FALSE, FALSE); if (msg_action_arg > 0 && msg_action == MSG_LOAD) { uschar spoolname[256]; /* Not big_buffer; used in spool_read_header() */ if (!admin_user) { fprintf(stderr, "exim: permission denied\n"); exit(EXIT_FAILURE); } message_id = argv[msg_action_arg]; (void)string_format(spoolname, sizeof(spoolname), "%s-H", message_id); if ((deliver_datafile = spool_open_datafile(message_id)) < 0) printf ("Failed to load message datafile %s\n", message_id); if (spool_read_header(spoolname, TRUE, FALSE) != spool_read_OK) printf ("Failed to load message %s\n", message_id); } /* Read a test message from a file. We fudge it up to be on stdin, saving stdin itself for later reading of expansion strings. */ else if (expansion_test_message != NULL) { int save_stdin = dup(0); int fd = Uopen(expansion_test_message, O_RDONLY, 0); if (fd < 0) { fprintf(stderr, "exim: failed to open %s: %s\n", expansion_test_message, strerror(errno)); return EXIT_FAILURE; } (void) dup2(fd, 0); filter_test = FTEST_USER; /* Fudge to make it look like filter test */ message_ended = END_NOTENDED; read_message_body(receive_msg(extract_recipients)); message_linecount += body_linecount; (void)dup2(save_stdin, 0); (void)close(save_stdin); clearerr(stdin); /* Required by Darwin */ } /* Allow $recipients for this testing */ enable_dollar_recipients = TRUE; /* Expand command line items */ if (recipients_arg < argc) { while (recipients_arg < argc) { uschar *s = argv[recipients_arg++]; uschar *ss = expand_string(s); if (ss == NULL) printf ("Failed: %s\n", expand_string_message); else printf("%s\n", CS ss); } } /* Read stdin */ else { char *(*fn_readline)(const char *) = NULL; void (*fn_addhist)(const char *) = NULL; #ifdef USE_READLINE void *dlhandle = set_readline(&fn_readline, &fn_addhist); #endif for (;;) { uschar *ss; uschar *source = get_stdinput(fn_readline, fn_addhist); if (source == NULL) break; ss = expand_string(source); if (ss == NULL) printf ("Failed: %s\n", expand_string_message); else printf("%s\n", CS ss); } #ifdef USE_READLINE if (dlhandle != NULL) dlclose(dlhandle); #endif } /* The data file will be open after -Mset */ if (deliver_datafile >= 0) { (void)close(deliver_datafile); deliver_datafile = -1; } exim_exit(EXIT_SUCCESS); } /* The active host name is normally the primary host name, but it can be varied for hosts that want to play several parts at once. We need to ensure that it is set for host checking, and for receiving messages. */ smtp_active_hostname = primary_hostname; if (raw_active_hostname != NULL) { uschar *nah = expand_string(raw_active_hostname); if (nah == NULL) { if (!expand_string_forcedfail) log_write(0, LOG_MAIN|LOG_PANIC_DIE, "failed to expand \"%s\" " "(smtp_active_hostname): %s", raw_active_hostname, expand_string_message); } else if (nah[0] != 0) smtp_active_hostname = nah; } /* Handle host checking: this facility mocks up an incoming SMTP call from a given IP address so that the blocking and relay configuration can be tested. Unless a sender_ident was set by -oMt, we discard it (the default is the caller's login name). An RFC 1413 call is made only if we are running in the test harness and an incoming interface and both ports are specified, because there is no TCP/IP call to find the ident for. 
*/ if (host_checking) { int x[4]; int size; if (!sender_ident_set) { sender_ident = NULL; if (running_in_test_harness && sender_host_port != 0 && interface_address != NULL && interface_port != 0) verify_get_ident(1413); } /* In case the given address is a non-canonical IPv6 address, canonicalize it. The code works for both IPv4 and IPv6, as it happens. */ size = host_aton(sender_host_address, x); sender_host_address = store_get(48); /* large enough for full IPv6 */ (void)host_nmtoa(size, x, -1, sender_host_address, ':'); /* Now set up for testing */ host_build_sender_fullhost(); smtp_input = TRUE; smtp_in = stdin; smtp_out = stdout; sender_local = FALSE; sender_host_notsocket = TRUE; debug_file = stderr; debug_fd = fileno(debug_file); fprintf(stdout, "\n**** SMTP testing session as if from host %s\n" "**** but without any ident (RFC 1413) callback.\n" "**** This is not for real!\n\n", sender_host_address); memset(sender_host_cache, 0, sizeof(sender_host_cache)); if (verify_check_host(&hosts_connection_nolog) == OK) BIT_CLEAR(log_selector, log_selector_size, Li_smtp_connection); log_write(L_smtp_connection, LOG_MAIN, "%s", smtp_get_connection_info()); /* NOTE: We do *not* call smtp_log_no_mail() if smtp_start_session() fails, because a log line has already been written for all its failure exists (usually "connection refused: <reason>") and writing another one is unnecessary clutter. */ if (smtp_start_session()) { for (reset_point = store_get(0); ; store_reset(reset_point)) { if (smtp_setup_msg() <= 0) break; if (!receive_msg(FALSE)) break; return_path = sender_address = NULL; dnslist_domain = dnslist_matched = NULL; #ifndef DISABLE_DKIM dkim_cur_signer = NULL; #endif acl_var_m = NULL; deliver_localpart_orig = NULL; deliver_domain_orig = NULL; callout_address = sending_ip_address = NULL; sender_rate = sender_rate_limit = sender_rate_period = NULL; } smtp_log_no_mail(); } exim_exit(EXIT_SUCCESS); } /* Arrange for message reception if recipients or SMTP were specified; otherwise complain unless a version print (-bV) happened or this is a filter verification test or info dump. In the former case, show the configuration file name. */ if (recipients_arg >= argc && !extract_recipients && !smtp_input) { if (version_printed) { printf("Configuration file is %s\n", config_main_filename); return EXIT_SUCCESS; } if (info_flag != CMDINFO_NONE) { show_exim_information(info_flag, info_stdout ? stdout : stderr); return info_stdout ? EXIT_SUCCESS : EXIT_FAILURE; } if (filter_test == FTEST_NONE) exim_usage(called_as); } /* If mua_wrapper is set, Exim is being used to turn an MUA that submits on the standard input into an MUA that submits to a smarthost over TCP/IP. We know that we are not called from inetd, because that is rejected above. The following configuration settings are forced here: (1) Synchronous delivery (-odi) (2) Errors to stderr (-oep == -oeq) (3) No parallel remote delivery (4) Unprivileged delivery We don't force overall queueing options because there are several of them; instead, queueing is avoided below when mua_wrapper is set. However, we do need to override any SMTP queueing. */ if (mua_wrapper) { synchronous_delivery = TRUE; arg_error_handling = ERRORS_STDERR; remote_max_parallel = 1; deliver_drop_privilege = TRUE; queue_smtp = FALSE; queue_smtp_domains = NULL; #ifdef SUPPORT_I18N message_utf8_downconvert = -1; /* convert-if-needed */ #endif } /* Prepare to accept one or more new messages on the standard input. When a message has been read, its id is returned in message_id[]. 
If doing immediate delivery, we fork a delivery process for each received message, except for the last one, where we can save a process switch. It is only in non-smtp mode that error_handling is allowed to be changed from its default of ERRORS_SENDER by argument. (Idle thought: are any of the sendmail error modes other than -oem ever actually used? Later: yes.) */ if (!smtp_input) error_handling = arg_error_handling; /* If this is an inetd call, ensure that stderr is closed to prevent panic logging being sent down the socket and make an identd call to get the sender_ident. */ else if (is_inetd) { (void)fclose(stderr); exim_nullstd(); /* Re-open to /dev/null */ verify_get_ident(IDENT_PORT); host_build_sender_fullhost(); set_process_info("handling incoming connection from %s via inetd", sender_fullhost); } /* If the sender host address has been set, build sender_fullhost if it hasn't already been done (which it will have been for inetd). This caters for the case when it is forced by -oMa. However, we must flag that it isn't a socket, so that the test for IP options is skipped for -bs input. */ if (sender_host_address != NULL && sender_fullhost == NULL) { host_build_sender_fullhost(); set_process_info("handling incoming connection from %s via -oMa", sender_fullhost); sender_host_notsocket = TRUE; } /* Otherwise, set the sender host as unknown except for inetd calls. This prevents host checking in the case of -bs not from inetd and also for -bS. */ else if (!is_inetd) sender_host_unknown = TRUE; /* If stdout does not exist, then dup stdin to stdout. This can happen if exim is started from inetd. In this case fd 0 will be set to the socket, but fd 1 will not be set. This also happens for passed SMTP channels. */ if (fstat(1, &statbuf) < 0) (void)dup2(0, 1); /* Set up the incoming protocol name and the state of the program. Root is allowed to force received protocol via the -oMr option above. If we have come via inetd, the process info has already been set up. We don't set received_protocol here for smtp input, as it varies according to batch/HELO/EHLO/AUTH/TLS. */ if (smtp_input) { if (!is_inetd) set_process_info("accepting a local %sSMTP message from <%s>", smtp_batched_input? "batched " : "", (sender_address!= NULL)? sender_address : originator_login); } else { int old_pool = store_pool; store_pool = POOL_PERM; if (!received_protocol) received_protocol = string_sprintf("local%s", called_as); store_pool = old_pool; set_process_info("accepting a local non-SMTP message from <%s>", sender_address); } /* Initialize the session_local_queue-only flag (this will be ignored if mua_wrapper is set) */ queue_check_only(); session_local_queue_only = queue_only; /* For non-SMTP and for batched SMTP input, check that there is enough space on the spool if so configured. On failure, we must not attempt to send an error message! (For interactive SMTP, the check happens at MAIL FROM and an SMTP error code is given.) */ if ((!smtp_input || smtp_batched_input) && !receive_check_fs(0)) { fprintf(stderr, "exim: insufficient disk space\n"); return EXIT_FAILURE; } /* If this is smtp input of any kind, real or batched, handle the start of the SMTP session. NOTE: We do *not* call smtp_log_no_mail() if smtp_start_session() fails, because a log line has already been written for all its failure exists (usually "connection refused: <reason>") and writing another one is unnecessary clutter. 
*/ if (smtp_input) { smtp_in = stdin; smtp_out = stdout; memset(sender_host_cache, 0, sizeof(sender_host_cache)); if (verify_check_host(&hosts_connection_nolog) == OK) BIT_CLEAR(log_selector, log_selector_size, Li_smtp_connection); log_write(L_smtp_connection, LOG_MAIN, "%s", smtp_get_connection_info()); if (!smtp_start_session()) { mac_smtp_fflush(); exim_exit(EXIT_SUCCESS); } } /* Otherwise, set up the input size limit here. */ else { thismessage_size_limit = expand_string_integer(message_size_limit, TRUE); if (expand_string_message) if (thismessage_size_limit == -1) log_write(0, LOG_MAIN|LOG_PANIC_DIE, "failed to expand " "message_size_limit: %s", expand_string_message); else log_write(0, LOG_MAIN|LOG_PANIC_DIE, "invalid value for " "message_size_limit: %s", expand_string_message); } /* Loop for several messages when reading SMTP input. If we fork any child processes, we don't want to wait for them unless synchronous delivery is requested, so set SIGCHLD to SIG_IGN in that case. This is not necessarily the same as SIG_DFL, despite the fact that documentation often lists the default as "ignore". This is a confusing area. This is what I know: At least on some systems (e.g. Solaris), just setting SIG_IGN causes child processes that complete simply to go away without ever becoming defunct. You can't then wait for them - but we don't want to wait for them in the non-synchronous delivery case. However, this behaviour of SIG_IGN doesn't happen for all OS (e.g. *BSD is different). But that's not the end of the story. Some (many? all?) systems have the SA_NOCLDWAIT option for sigaction(). This requests the behaviour that Solaris has by default, so it seems that the difference is merely one of default (compare restarting vs non-restarting signals). To cover all cases, Exim sets SIG_IGN with SA_NOCLDWAIT here if it can. If not, it just sets SIG_IGN. To be on the safe side it also calls waitpid() at the end of the loop below. Paranoia rules. February 2003: That's *still* not the end of the story. There are now versions of Linux (where SIG_IGN does work) that are picky. If, having set SIG_IGN, a process then calls waitpid(), a grumble is written to the system log, because this is logically inconsistent. In other words, it doesn't like the paranoia. As a consequence of this, the waitpid() below is now excluded if we are sure that SIG_IGN works. */ if (!synchronous_delivery) { #ifdef SA_NOCLDWAIT struct sigaction act; act.sa_handler = SIG_IGN; sigemptyset(&(act.sa_mask)); act.sa_flags = SA_NOCLDWAIT; sigaction(SIGCHLD, &act, NULL); #else signal(SIGCHLD, SIG_IGN); #endif } /* Save the current store pool point, for resetting at the start of each message, and save the real sender address, if any. */ reset_point = store_get(0); real_sender_address = sender_address; /* Loop to receive messages; receive_msg() returns TRUE if there are more messages to be read (SMTP input), or FALSE otherwise (not SMTP, or SMTP channel collapsed). */ while (more) { message_id[0] = 0; /* Handle the SMTP case; call smtp_setup_mst() to deal with the initial SMTP input and build the recipients list, before calling receive_msg() to read the message proper. Whatever sender address is given in the SMTP transaction is often ignored for local senders - we use the actual sender, which is normally either the underlying user running this process or a -f argument provided by a trusted caller. It is saved in real_sender_address. The test for whether to accept the SMTP sender is encapsulated in receive_check_set_sender(). 
*/ if (smtp_input) { int rc; if ((rc = smtp_setup_msg()) > 0) { if (real_sender_address != NULL && !receive_check_set_sender(sender_address)) { sender_address = raw_sender = real_sender_address; sender_address_unrewritten = NULL; } /* For batched SMTP, we have to run the acl_not_smtp_start ACL, since it isn't really SMTP, so no other ACL will run until the acl_not_smtp one at the very end. The result of the ACL is ignored (as for other non-SMTP messages). It is run for its potential side effects. */ if (smtp_batched_input && acl_not_smtp_start != NULL) { uschar *user_msg, *log_msg; enable_dollar_recipients = TRUE; (void)acl_check(ACL_WHERE_NOTSMTP_START, NULL, acl_not_smtp_start, &user_msg, &log_msg); enable_dollar_recipients = FALSE; } /* Now get the data for the message */ more = receive_msg(extract_recipients); if (message_id[0] == 0) { cancel_cutthrough_connection(TRUE, US"receive dropped"); if (more) goto moreloop; smtp_log_no_mail(); /* Log no mail if configured */ exim_exit(EXIT_FAILURE); } } else { cancel_cutthrough_connection(TRUE, US"message setup dropped"); smtp_log_no_mail(); /* Log no mail if configured */ exim_exit((rc == 0)? EXIT_SUCCESS : EXIT_FAILURE); } } /* In the non-SMTP case, we have all the information from the command line, but must process it in case it is in the more general RFC822 format, and in any case, to detect syntax errors. Also, it appears that the use of comma-separated lists as single arguments is common, so we had better support them. */ else { int i; int rcount = 0; int count = argc - recipients_arg; uschar **list = argv + recipients_arg; /* These options cannot be changed dynamically for non-SMTP messages */ active_local_sender_retain = local_sender_retain; active_local_from_check = local_from_check; /* Save before any rewriting */ raw_sender = string_copy(sender_address); /* Loop for each argument */ for (i = 0; i < count; i++) { int start, end, domain; uschar *errmess; uschar *s = list[i]; /* Loop for each comma-separated address */ while (*s != 0) { BOOL finished = FALSE; uschar *recipient; uschar *ss = parse_find_address_end(s, FALSE); if (*ss == ',') *ss = 0; else finished = TRUE; /* Check max recipients - if -t was used, these aren't recipients */ if (recipients_max > 0 && ++rcount > recipients_max && !extract_recipients) if (error_handling == ERRORS_STDERR) { fprintf(stderr, "exim: too many recipients\n"); exim_exit(EXIT_FAILURE); } else { return moan_to_sender(ERRMESS_TOOMANYRECIP, NULL, NULL, stdin, TRUE)? errors_sender_rc : EXIT_FAILURE; } #ifdef SUPPORT_I18N { BOOL b = allow_utf8_domains; allow_utf8_domains = TRUE; #endif recipient = parse_extract_address(s, &errmess, &start, &end, &domain, FALSE); #ifdef SUPPORT_I18N if (string_is_utf8(recipient)) message_smtputf8 = TRUE; else allow_utf8_domains = b; } #endif if (domain == 0 && !allow_unqualified_recipient) { recipient = NULL; errmess = US"unqualified recipient address not allowed"; } if (recipient == NULL) { if (error_handling == ERRORS_STDERR) { fprintf(stderr, "exim: bad recipient address \"%s\": %s\n", string_printing(list[i]), errmess); exim_exit(EXIT_FAILURE); } else { error_block eblock; eblock.next = NULL; eblock.text1 = string_printing(list[i]); eblock.text2 = errmess; return moan_to_sender(ERRMESS_BADARGADDRESS, &eblock, NULL, stdin, TRUE)? 
errors_sender_rc : EXIT_FAILURE;
          }
        }

      receive_add_recipient(recipient, -1);
      s = ss;
      if (!finished)
        while (*(++s) != 0 && (*s == ',' || isspace(*s)));
      }
    }

  /* Show the recipients when debugging */

  DEBUG(D_receive)
    {
    int i;
    if (sender_address != NULL) debug_printf("Sender: %s\n", sender_address);
    if (recipients_list != NULL)
      {
      debug_printf("Recipients:\n");
      for (i = 0; i < recipients_count; i++)
        debug_printf(" %s\n", recipients_list[i].address);
      }
    }

  /* Run the acl_not_smtp_start ACL if required. The result of the ACL is
  ignored; rejecting here would just add complication, and it can just as
  well be done later. Allow $recipients to be visible in the ACL. */

  if (acl_not_smtp_start)
    {
    uschar *user_msg, *log_msg;
    enable_dollar_recipients = TRUE;
    (void)acl_check(ACL_WHERE_NOTSMTP_START, NULL, acl_not_smtp_start,
      &user_msg, &log_msg);
    enable_dollar_recipients = FALSE;
    }

  /* Pause for a while waiting for input. If none received in that time,
  close the logfile, if we had one open; then if we wait for a long-running
  datasource (months, in one use-case) log rotation will not leave us holding
  the file copy. */

  if (!receive_timeout)
    {
    struct timeval t = { 30*60, 0 }; /* 30 minutes */
    fd_set r;
    FD_ZERO(&r); FD_SET(0, &r);
    if (select(1, &r, NULL, NULL, &t) == 0) mainlog_close();
    }

  /* Read the data for the message. If filter_test is not FTEST_NONE, this
  will just read the headers for the message, and not write anything onto the
  spool. */

  message_ended = END_NOTENDED;
  more = receive_msg(extract_recipients);

  /* more is always FALSE here (not SMTP message) when reading a message
  for real; when reading the headers of a message for filter testing, it is
  TRUE if the headers were terminated by '.' and FALSE otherwise. */

  if (message_id[0] == 0) exim_exit(EXIT_FAILURE);
  }  /* Non-SMTP message reception */

/* If this is a filter testing run, there are headers in store, but no message
on the spool. Run the filtering code in testing mode, setting the domain to
the qualify domain and the local part to the current user, unless they have
been set by options. The prefix and suffix are left unset unless specified.
The return path is set to the sender unless it has already been set from a
return-path header in the message. */

if (filter_test != FTEST_NONE)
  {
  deliver_domain = (ftest_domain != NULL)?
    ftest_domain : qualify_domain_recipient;
  deliver_domain_orig = deliver_domain;
  deliver_localpart = (ftest_localpart != NULL)?
    ftest_localpart : originator_login;
  deliver_localpart_orig = deliver_localpart;
  deliver_localpart_prefix = ftest_prefix;
  deliver_localpart_suffix = ftest_suffix;
  deliver_home = originator_home;

  if (return_path == NULL)
    {
    printf("Return-path copied from sender\n");
    return_path = string_copy(sender_address);
    }
  else
    printf("Return-path = %s\n", (return_path[0] == 0)? US"<>" : return_path);

  printf("Sender = %s\n", (sender_address[0] == 0)? US"<>" : sender_address);

  receive_add_recipient(
    string_sprintf("%s%s%s@%s",
      (ftest_prefix == NULL)? US"" : ftest_prefix,
      deliver_localpart,
      (ftest_suffix == NULL)? US"" : ftest_suffix,
      deliver_domain), -1);

  printf("Recipient = %s\n", recipients_list[0].address);
  if (ftest_prefix != NULL) printf("Prefix = %s\n", ftest_prefix);
  if (ftest_suffix != NULL) printf("Suffix = %s\n", ftest_suffix);

  if (chdir("/"))   /* Get away from wherever the user is running this from */
    {
    DEBUG(D_receive) debug_printf("chdir(\"/\") failed\n");
    exim_exit(EXIT_FAILURE);
    }

  /* Now we run either a system filter test, or a user filter test, or both.
In the latter case, headers added by the system filter will persist and be available to the user filter. We need to copy the filter variables explicitly. */ if ((filter_test & FTEST_SYSTEM) != 0) { if (!filter_runtest(filter_sfd, filter_test_sfile, TRUE, more)) exim_exit(EXIT_FAILURE); } memcpy(filter_sn, filter_n, sizeof(filter_sn)); if ((filter_test & FTEST_USER) != 0) { if (!filter_runtest(filter_ufd, filter_test_ufile, FALSE, more)) exim_exit(EXIT_FAILURE); } exim_exit(EXIT_SUCCESS); } /* Else act on the result of message reception. We should not get here unless message_id[0] is non-zero. If queue_only is set, session_local_queue_only will be TRUE. If it is not, check on the number of messages received in this connection. */ if (!session_local_queue_only && smtp_accept_queue_per_connection > 0 && receive_messagecount > smtp_accept_queue_per_connection) { session_local_queue_only = TRUE; queue_only_reason = 2; } /* Initialize local_queue_only from session_local_queue_only. If it is false, and queue_only_load is set, check that the load average is below it. If it is not, set local_queue_only TRUE. If queue_only_load_latch is true (the default), we put the whole session into queue_only mode. It then remains this way for any subsequent messages on the same SMTP connection. This is a deliberate choice; even though the load average may fall, it doesn't seem right to deliver later messages on the same call when not delivering earlier ones. However, there are odd cases where this is not wanted, so this can be changed by setting queue_only_load_latch false. */ local_queue_only = session_local_queue_only; if (!local_queue_only && queue_only_load >= 0) { local_queue_only = (load_average = OS_GETLOADAVG()) > queue_only_load; if (local_queue_only) { queue_only_reason = 3; if (queue_only_load_latch) session_local_queue_only = TRUE; } } /* If running as an MUA wrapper, all queueing options and freezing options are ignored. */ if (mua_wrapper) local_queue_only = queue_only_policy = deliver_freeze = FALSE; /* Log the queueing here, when it will get a message id attached, but not if queue_only is set (case 0). Case 1 doesn't happen here (too many connections). */ if (local_queue_only) { cancel_cutthrough_connection(TRUE, US"no delivery; queueing"); switch(queue_only_reason) { case 2: log_write(L_delay_delivery, LOG_MAIN, "no immediate delivery: more than %d messages " "received in one connection", smtp_accept_queue_per_connection); break; case 3: log_write(L_delay_delivery, LOG_MAIN, "no immediate delivery: load average %.2f", (double)load_average/1000.0); break; } } else if (queue_only_policy || deliver_freeze) cancel_cutthrough_connection(TRUE, US"no delivery; queueing"); /* Else do the delivery unless the ACL or local_scan() called for queue only or froze the message. Always deliver in a separate process. A fork failure is not a disaster, as the delivery will eventually happen on a subsequent queue run. The search cache must be tidied before the fork, as the parent will do it before exiting. The child will trigger a lookup failure and thereby defer the delivery if it tries to use (for example) a cached ldap connection that the parent has called unbind on. */ else { pid_t pid; search_tidyup(); if ((pid = fork()) == 0) { int rc; close_unwanted(); /* Close unwanted file descriptors and TLS */ exim_nullstd(); /* Ensure std{in,out,err} exist */ /* Re-exec Exim if we need to regain privilege (note: in mua_wrapper mode, deliver_drop_privilege is forced TRUE). 
*/

    if (geteuid() != root_uid && !deliver_drop_privilege && !unprivileged)
      {
      delivery_re_exec(CEE_EXEC_EXIT);
      /* Control does not return here. */
      }

    /* No need to re-exec */

    rc = deliver_message(message_id, FALSE, FALSE);
    search_tidyup();
    _exit((!mua_wrapper || rc == DELIVER_MUA_SUCCEEDED)?
      EXIT_SUCCESS : EXIT_FAILURE);
    }

  if (pid < 0)
    {
    cancel_cutthrough_connection(TRUE, US"delivery fork failed");
    log_write(0, LOG_MAIN|LOG_PANIC, "failed to fork automatic delivery "
      "process: %s", strerror(errno));
    }
  else
    {
    release_cutthrough_connection(US"msg passed for delivery");

    /* In the parent, wait if synchronous delivery is required. This will
    always be the case in MUA wrapper mode. */

    if (synchronous_delivery)
      {
      int status;
      while (wait(&status) != pid);
      if ((status & 0x00ff) != 0)
        log_write(0, LOG_MAIN|LOG_PANIC,
          "process %d crashed with signal %d while delivering %s",
          (int)pid, status & 0x00ff, message_id);
      if (mua_wrapper && (status & 0xffff) != 0) exim_exit(EXIT_FAILURE);
      }
    }
  }

/* The loop will repeat if more is TRUE. If we do not know that the OS
automatically reaps children (see comments above the loop), clear away any
finished subprocesses here, in case there are lots of messages coming in from
the same source. */

#ifndef SIG_IGN_WORKS
while (waitpid(-1, NULL, WNOHANG) > 0);
#endif

moreloop:
return_path = sender_address = NULL;
authenticated_sender = NULL;
deliver_localpart_orig = NULL;
deliver_domain_orig = NULL;
deliver_host = deliver_host_address = NULL;
dnslist_domain = dnslist_matched = NULL;
#ifdef WITH_CONTENT_SCAN
malware_name = NULL;
#endif
callout_address = NULL;
sending_ip_address = NULL;
acl_var_m = NULL;
{ int i; for(i=0; i<REGEX_VARS; i++) regex_vars[i] = NULL; }

store_reset(reset_point);
}

exim_exit(EXIT_SUCCESS);   /* Never returns */
return 0;                  /* To stop compiler warning */
}

/* End of exim.c */
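/*
 * Editor's illustration (not part of the Exim sources): the SIGCHLD handling
 * described in the long comment before the message-reception loop above can
 * be reduced to the following standalone sketch. The helper names are
 * invented for the example; only <signal.h> and <sys/wait.h> are assumed.
 */

#include <signal.h>
#include <sys/wait.h>

/* Ask the OS to reap asynchronous children automatically where possible;
otherwise fall back to plain SIG_IGN, whose auto-reaping behaviour is
OS-dependent. */

static void example_ignore_child_exits(void)
{
#ifdef SA_NOCLDWAIT
struct sigaction act;
act.sa_handler = SIG_IGN;          /* do not deliver SIGCHLD */
sigemptyset(&(act.sa_mask));
act.sa_flags = SA_NOCLDWAIT;       /* ask the kernel not to keep zombies */
sigaction(SIGCHLD, &act, NULL);
#else
signal(SIGCHLD, SIG_IGN);
#endif
}

/* Belt-and-braces sweep for systems where SIG_IGN alone does not reap;
harmless to call even when it does, because it then finds nothing. */

static void example_reap_finished_children(void)
{
while (waitpid(-1, NULL, WNOHANG) > 0);
}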
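/*
 * Editor's illustration (not part of the Exim sources): the gecos "&"
 * convention described in the comments above -- an ampersand in the gecos
 * field stands for the login name with its first letter upper-cased --
 * shown in isolation. The function name and the use of snprintf() are
 * assumptions made for this example; the login name is assumed non-empty.
 */

#include <ctype.h>
#include <stdio.h>
#include <string.h>

static void example_expand_gecos_ampersand(const char *gecos,
  const char *login, char *out, size_t outlen)
{
const char *amp = strchr(gecos, '&');
if (amp == NULL)
  {
  snprintf(out, outlen, "%s", gecos);
  return;
  }

/* Text before the '&', then the login with an upper-cased initial, then the
rest of the gecos field. For example "Fred & Sons" with login "jim" becomes
"Fred Jim Sons". */

snprintf(out, outlen, "%.*s%c%s%s",
  (int)(amp - gecos), gecos,
  toupper((unsigned char)login[0]), login + 1,
  amp + 1);
}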
/* * linux/fs/nfs/callback.c * * Copyright (C) 2004 Trond Myklebust * * NFSv4 callback handling */ #include <linux/completion.h> #include <linux/ip.h> #include <linux/module.h> #include <linux/sched/signal.h> #include <linux/sunrpc/svc.h> #include <linux/sunrpc/svcsock.h> #include <linux/nfs_fs.h> #include <linux/errno.h> #include <linux/mutex.h> #include <linux/freezer.h> #include <linux/kthread.h> #include <linux/sunrpc/svcauth_gss.h> #include <linux/sunrpc/bc_xprt.h> #include <net/inet_sock.h> #include "nfs4_fs.h" #include "callback.h" #include "internal.h" #include "netns.h" #define NFSDBG_FACILITY NFSDBG_CALLBACK struct nfs_callback_data { unsigned int users; struct svc_serv *serv; }; static struct nfs_callback_data nfs_callback_info[NFS4_MAX_MINOR_VERSION + 1]; static DEFINE_MUTEX(nfs_callback_mutex); static struct svc_program nfs4_callback_program; static int nfs4_callback_up_net(struct svc_serv *serv, struct net *net) { int ret; struct nfs_net *nn = net_generic(net, nfs_net_id); ret = svc_create_xprt(serv, "tcp", net, PF_INET, nfs_callback_set_tcpport, SVC_SOCK_ANONYMOUS); if (ret <= 0) goto out_err; nn->nfs_callback_tcpport = ret; dprintk("NFS: Callback listener port = %u (af %u, net %p)\n", nn->nfs_callback_tcpport, PF_INET, net); ret = svc_create_xprt(serv, "tcp", net, PF_INET6, nfs_callback_set_tcpport, SVC_SOCK_ANONYMOUS); if (ret > 0) { nn->nfs_callback_tcpport6 = ret; dprintk("NFS: Callback listener port = %u (af %u, net %p)\n", nn->nfs_callback_tcpport6, PF_INET6, net); } else if (ret != -EAFNOSUPPORT) goto out_err; return 0; out_err: return (ret) ? ret : -ENOMEM; } /* * This is the NFSv4 callback kernel thread. */ static int nfs4_callback_svc(void *vrqstp) { int err; struct svc_rqst *rqstp = vrqstp; set_freezable(); while (!kthread_should_stop()) { /* * Listen for a request on the socket */ err = svc_recv(rqstp, MAX_SCHEDULE_TIMEOUT); if (err == -EAGAIN || err == -EINTR) continue; svc_process(rqstp); } return 0; } #if defined(CONFIG_NFS_V4_1) /* * The callback service for NFSv4.1 callbacks */ static int nfs41_callback_svc(void *vrqstp) { struct svc_rqst *rqstp = vrqstp; struct svc_serv *serv = rqstp->rq_server; struct rpc_rqst *req; int error; DEFINE_WAIT(wq); set_freezable(); while (!kthread_should_stop()) { if (try_to_freeze()) continue; prepare_to_wait(&serv->sv_cb_waitq, &wq, TASK_INTERRUPTIBLE); spin_lock_bh(&serv->sv_cb_lock); if (!list_empty(&serv->sv_cb_list)) { req = list_first_entry(&serv->sv_cb_list, struct rpc_rqst, rq_bc_list); list_del(&req->rq_bc_list); spin_unlock_bh(&serv->sv_cb_lock); finish_wait(&serv->sv_cb_waitq, &wq); dprintk("Invoking bc_svc_process()\n"); error = bc_svc_process(serv, req, rqstp); dprintk("bc_svc_process() returned w/ error code= %d\n", error); } else { spin_unlock_bh(&serv->sv_cb_lock); schedule(); finish_wait(&serv->sv_cb_waitq, &wq); } flush_signals(current); } return 0; } static inline void nfs_callback_bc_serv(u32 minorversion, struct rpc_xprt *xprt, struct svc_serv *serv) { if (minorversion) /* * Save the svc_serv in the transport so that it can * be referenced when the session backchannel is initialized */ xprt->bc_serv = serv; } #else static inline void nfs_callback_bc_serv(u32 minorversion, struct rpc_xprt *xprt, struct svc_serv *serv) { } #endif /* CONFIG_NFS_V4_1 */ static int nfs_callback_start_svc(int minorversion, struct rpc_xprt *xprt, struct svc_serv *serv) { int nrservs = nfs_callback_nr_threads; int ret; nfs_callback_bc_serv(minorversion, xprt, serv); if (nrservs < NFS4_MIN_NR_CALLBACK_THREADS) nrservs = 
NFS4_MIN_NR_CALLBACK_THREADS; if (serv->sv_nrthreads-1 == nrservs) return 0; ret = serv->sv_ops->svo_setup(serv, NULL, nrservs); if (ret) { serv->sv_ops->svo_setup(serv, NULL, 0); return ret; } dprintk("nfs_callback_up: service started\n"); return 0; } static void nfs_callback_down_net(u32 minorversion, struct svc_serv *serv, struct net *net) { struct nfs_net *nn = net_generic(net, nfs_net_id); if (--nn->cb_users[minorversion]) return; dprintk("NFS: destroy per-net callback data; net=%p\n", net); svc_shutdown_net(serv, net); } static int nfs_callback_up_net(int minorversion, struct svc_serv *serv, struct net *net, struct rpc_xprt *xprt) { struct nfs_net *nn = net_generic(net, nfs_net_id); int ret; if (nn->cb_users[minorversion]++) return 0; dprintk("NFS: create per-net callback data; net=%p\n", net); ret = svc_bind(serv, net); if (ret < 0) { printk(KERN_WARNING "NFS: bind callback service failed\n"); goto err_bind; } ret = -EPROTONOSUPPORT; if (!IS_ENABLED(CONFIG_NFS_V4_1) || minorversion == 0) ret = nfs4_callback_up_net(serv, net); else if (xprt->ops->bc_up) ret = xprt->ops->bc_up(serv, net); if (ret < 0) { printk(KERN_ERR "NFS: callback service start failed\n"); goto err_socks; } return 0; err_socks: svc_rpcb_cleanup(serv, net); err_bind: nn->cb_users[minorversion]--; dprintk("NFS: Couldn't create callback socket: err = %d; " "net = %p\n", ret, net); return ret; } static struct svc_serv_ops nfs40_cb_sv_ops = { .svo_function = nfs4_callback_svc, .svo_enqueue_xprt = svc_xprt_do_enqueue, .svo_setup = svc_set_num_threads, .svo_module = THIS_MODULE, }; #if defined(CONFIG_NFS_V4_1) static struct svc_serv_ops nfs41_cb_sv_ops = { .svo_function = nfs41_callback_svc, .svo_enqueue_xprt = svc_xprt_do_enqueue, .svo_setup = svc_set_num_threads, .svo_module = THIS_MODULE, }; static struct svc_serv_ops *nfs4_cb_sv_ops[] = { [0] = &nfs40_cb_sv_ops, [1] = &nfs41_cb_sv_ops, }; #else static struct svc_serv_ops *nfs4_cb_sv_ops[] = { [0] = &nfs40_cb_sv_ops, [1] = NULL, }; #endif static struct svc_serv *nfs_callback_create_svc(int minorversion) { struct nfs_callback_data *cb_info = &nfs_callback_info[minorversion]; struct svc_serv *serv; struct svc_serv_ops *sv_ops; /* * Check whether we're already up and running. */ if (cb_info->serv) { /* * Note: increase service usage, because later in case of error * svc_destroy() will be called. */ svc_get(cb_info->serv); return cb_info->serv; } switch (minorversion) { case 0: sv_ops = nfs4_cb_sv_ops[0]; break; default: sv_ops = nfs4_cb_sv_ops[1]; } if (sv_ops == NULL) return ERR_PTR(-ENOTSUPP); /* * Sanity check: if there's no task, * we should be the first user ... */ if (cb_info->users) printk(KERN_WARNING "nfs_callback_create_svc: no kthread, %d users??\n", cb_info->users); serv = svc_create(&nfs4_callback_program, NFS4_CALLBACK_BUFSIZE, sv_ops); if (!serv) { printk(KERN_ERR "nfs_callback_create_svc: create service failed\n"); return ERR_PTR(-ENOMEM); } cb_info->serv = serv; /* As there is only one thread we need to over-ride the * default maximum of 80 connections */ serv->sv_maxconn = 1024; dprintk("nfs_callback_create_svc: service created\n"); return serv; } /* * Bring up the callback thread if it is not already up. 
*/ int nfs_callback_up(u32 minorversion, struct rpc_xprt *xprt) { struct svc_serv *serv; struct nfs_callback_data *cb_info = &nfs_callback_info[minorversion]; int ret; struct net *net = xprt->xprt_net; mutex_lock(&nfs_callback_mutex); serv = nfs_callback_create_svc(minorversion); if (IS_ERR(serv)) { ret = PTR_ERR(serv); goto err_create; } ret = nfs_callback_up_net(minorversion, serv, net, xprt); if (ret < 0) goto err_net; ret = nfs_callback_start_svc(minorversion, xprt, serv); if (ret < 0) goto err_start; cb_info->users++; /* * svc_create creates the svc_serv with sv_nrthreads == 1, and then * svc_prepare_thread increments that. So we need to call svc_destroy * on both success and failure so that the refcount is 1 when the * thread exits. */ err_net: if (!cb_info->users) cb_info->serv = NULL; svc_destroy(serv); err_create: mutex_unlock(&nfs_callback_mutex); return ret; err_start: nfs_callback_down_net(minorversion, serv, net); dprintk("NFS: Couldn't create server thread; err = %d\n", ret); goto err_net; } /* * Kill the callback thread if it's no longer being used. */ void nfs_callback_down(int minorversion, struct net *net) { struct nfs_callback_data *cb_info = &nfs_callback_info[minorversion]; struct svc_serv *serv; mutex_lock(&nfs_callback_mutex); serv = cb_info->serv; nfs_callback_down_net(minorversion, serv, net); cb_info->users--; if (cb_info->users == 0) { svc_get(serv); serv->sv_ops->svo_setup(serv, NULL, 0); svc_destroy(serv); dprintk("nfs_callback_down: service destroyed\n"); cb_info->serv = NULL; } mutex_unlock(&nfs_callback_mutex); } /* Boolean check of RPC_AUTH_GSS principal */ int check_gss_callback_principal(struct nfs_client *clp, struct svc_rqst *rqstp) { char *p = rqstp->rq_cred.cr_principal; if (rqstp->rq_authop->flavour != RPC_AUTH_GSS) return 1; /* No RPC_AUTH_GSS on NFSv4.1 back channel yet */ if (clp->cl_minorversion != 0) return 0; /* * It might just be a normal user principal, in which case * userspace won't bother to tell us the name at all. */ if (p == NULL) return 0; /* * Did we get the acceptor from userland during the SETCLIENID * negotiation? */ if (clp->cl_acceptor) return !strcmp(p, clp->cl_acceptor); /* * Otherwise try to verify it using the cl_hostname. Note that this * doesn't work if a non-canonical hostname was used in the devname. */ /* Expect a GSS_C_NT_HOSTBASED_NAME like "nfs@serverhostname" */ if (memcmp(p, "nfs@", 4) != 0) return 0; p += 4; if (strcmp(p, clp->cl_hostname) != 0) return 0; return 1; } /* * pg_authenticate method for nfsv4 callback threads. * * The authflavor has been negotiated, so an incorrect flavor is a server * bug. Deny packets with incorrect authflavor. 
* * All other checking done after NFS decoding where the nfs_client can be * found in nfs4_callback_compound */ static int nfs_callback_authenticate(struct svc_rqst *rqstp) { switch (rqstp->rq_authop->flavour) { case RPC_AUTH_NULL: if (rqstp->rq_proc != CB_NULL) return SVC_DENIED; break; case RPC_AUTH_GSS: /* No RPC_AUTH_GSS support yet in NFSv4.1 */ if (svc_is_backchannel(rqstp)) return SVC_DENIED; } return SVC_OK; } /* * Define NFS4 callback program */ static struct svc_version *nfs4_callback_version[] = { [1] = &nfs4_callback_version1, [4] = &nfs4_callback_version4, }; static struct svc_stat nfs4_callback_stats; static struct svc_program nfs4_callback_program = { .pg_prog = NFS4_CALLBACK, /* RPC service number */ .pg_nvers = ARRAY_SIZE(nfs4_callback_version), /* Number of entries */ .pg_vers = nfs4_callback_version, /* version table */ .pg_name = "NFSv4 callback", /* service name */ .pg_class = "nfs", /* authentication class */ .pg_stats = &nfs4_callback_stats, .pg_authenticate = nfs_callback_authenticate, };
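The principal check in check_gss_callback_principal() above reduces to a short sequence of string comparisons: prefer the acceptor name learned during SETCLIENTID negotiation, otherwise accept a GSS_C_NT_HOSTBASED_NAME of the form "nfs@<hostname>" that matches the mount-time hostname. The stand-alone user-space sketch below mirrors only that decision order; the helper name, the main() driver, and the example hostnames are assumptions for illustration, and the AUTH_GSS flavour and minorversion checks done by the kernel code are omitted.

/* Hypothetical user-space sketch of the principal comparison; not kernel code. */
#include <stdio.h>
#include <string.h>

static int principal_matches(const char *principal,
			     const char *acceptor,
			     const char *hostname)
{
	if (principal == NULL)
		return 0;

	/* Prefer the acceptor negotiated at SETCLIENTID time, when known. */
	if (acceptor != NULL)
		return strcmp(principal, acceptor) == 0;

	/* Otherwise expect a host-based name such as "nfs@server". */
	if (strncmp(principal, "nfs@", 4) != 0)
		return 0;
	return strcmp(principal + 4, hostname) == 0;
}

int main(void)
{
	/* Matching host-based name: prints 1. */
	printf("%d\n", principal_matches("nfs@server.example.com",
					 NULL, "server.example.com"));
	/* Wrong service prefix: prints 0. */
	printf("%d\n", principal_matches("host@server.example.com",
					 NULL, "server.example.com"));
	return 0;
}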
./CrossVul/dataset_final_sorted/CWE-404/c/bad_3351_2
crossvul-cpp_data_bad_3351_0
/* * linux/fs/lockd/svc.c * * This is the central lockd service. * * FIXME: Separate the lockd NFS server functionality from the lockd NFS * client functionality. Oh why didn't Sun create two separate * services in the first place? * * Authors: Olaf Kirch (okir@monad.swb.de) * * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de> */ #include <linux/module.h> #include <linux/init.h> #include <linux/sysctl.h> #include <linux/moduleparam.h> #include <linux/sched/signal.h> #include <linux/errno.h> #include <linux/in.h> #include <linux/uio.h> #include <linux/smp.h> #include <linux/mutex.h> #include <linux/kthread.h> #include <linux/freezer.h> #include <linux/inetdevice.h> #include <linux/sunrpc/types.h> #include <linux/sunrpc/stats.h> #include <linux/sunrpc/clnt.h> #include <linux/sunrpc/svc.h> #include <linux/sunrpc/svcsock.h> #include <linux/sunrpc/svc_xprt.h> #include <net/ip.h> #include <net/addrconf.h> #include <net/ipv6.h> #include <linux/lockd/lockd.h> #include <linux/nfs.h> #include "netns.h" #include "procfs.h" #define NLMDBG_FACILITY NLMDBG_SVC #define LOCKD_BUFSIZE (1024 + NLMSVC_XDRSIZE) #define ALLOWED_SIGS (sigmask(SIGKILL)) static struct svc_program nlmsvc_program; const struct nlmsvc_binding *nlmsvc_ops; EXPORT_SYMBOL_GPL(nlmsvc_ops); static DEFINE_MUTEX(nlmsvc_mutex); static unsigned int nlmsvc_users; static struct task_struct *nlmsvc_task; static struct svc_rqst *nlmsvc_rqst; unsigned long nlmsvc_timeout; unsigned int lockd_net_id; /* * These can be set at insmod time (useful for NFS as root filesystem), * and also changed through the sysctl interface. -- Jamie Lokier, Aug 2003 */ static unsigned long nlm_grace_period; static unsigned long nlm_timeout = LOCKD_DFLT_TIMEO; static int nlm_udpport, nlm_tcpport; /* RLIM_NOFILE defaults to 1024. That seems like a reasonable default here. */ static unsigned int nlm_max_connections = 1024; /* * Constants needed for the sysctl interface. 
*/ static const unsigned long nlm_grace_period_min = 0; static const unsigned long nlm_grace_period_max = 240; static const unsigned long nlm_timeout_min = 3; static const unsigned long nlm_timeout_max = 20; static const int nlm_port_min = 0, nlm_port_max = 65535; #ifdef CONFIG_SYSCTL static struct ctl_table_header * nlm_sysctl_table; #endif static unsigned long get_lockd_grace_period(void) { /* Note: nlm_timeout should always be nonzero */ if (nlm_grace_period) return roundup(nlm_grace_period, nlm_timeout) * HZ; else return nlm_timeout * 5 * HZ; } static void grace_ender(struct work_struct *grace) { struct delayed_work *dwork = to_delayed_work(grace); struct lockd_net *ln = container_of(dwork, struct lockd_net, grace_period_end); locks_end_grace(&ln->lockd_manager); } static void set_grace_period(struct net *net) { unsigned long grace_period = get_lockd_grace_period(); struct lockd_net *ln = net_generic(net, lockd_net_id); locks_start_grace(net, &ln->lockd_manager); cancel_delayed_work_sync(&ln->grace_period_end); schedule_delayed_work(&ln->grace_period_end, grace_period); } static void restart_grace(void) { if (nlmsvc_ops) { struct net *net = &init_net; struct lockd_net *ln = net_generic(net, lockd_net_id); cancel_delayed_work_sync(&ln->grace_period_end); locks_end_grace(&ln->lockd_manager); nlmsvc_invalidate_all(); set_grace_period(net); } } /* * This is the lockd kernel thread */ static int lockd(void *vrqstp) { int err = 0; struct svc_rqst *rqstp = vrqstp; /* try_to_freeze() is called from svc_recv() */ set_freezable(); /* Allow SIGKILL to tell lockd to drop all of its locks */ allow_signal(SIGKILL); dprintk("NFS locking service started (ver " LOCKD_VERSION ").\n"); /* * The main request loop. We don't terminate until the last * NFS mount or NFS daemon has gone away. */ while (!kthread_should_stop()) { long timeout = MAX_SCHEDULE_TIMEOUT; RPC_IFDEBUG(char buf[RPC_MAX_ADDRBUFLEN]); /* update sv_maxconn if it has changed */ rqstp->rq_server->sv_maxconn = nlm_max_connections; if (signalled()) { flush_signals(current); restart_grace(); continue; } timeout = nlmsvc_retry_blocked(); /* * Find a socket with data available and call its * recvfrom routine. */ err = svc_recv(rqstp, timeout); if (err == -EAGAIN || err == -EINTR) continue; dprintk("lockd: request from %s\n", svc_print_addr(rqstp, buf, sizeof(buf))); svc_process(rqstp); } flush_signals(current); if (nlmsvc_ops) nlmsvc_invalidate_all(); nlm_shutdown_hosts(); return 0; } static int create_lockd_listener(struct svc_serv *serv, const char *name, struct net *net, const int family, const unsigned short port) { struct svc_xprt *xprt; xprt = svc_find_xprt(serv, name, net, family, 0); if (xprt == NULL) return svc_create_xprt(serv, name, net, family, port, SVC_SOCK_DEFAULTS); svc_xprt_put(xprt); return 0; } static int create_lockd_family(struct svc_serv *serv, struct net *net, const int family) { int err; err = create_lockd_listener(serv, "udp", net, family, nlm_udpport); if (err < 0) return err; return create_lockd_listener(serv, "tcp", net, family, nlm_tcpport); } /* * Ensure there are active UDP and TCP listeners for lockd. * * Even if we have only TCP NFS mounts and/or TCP NFSDs, some * local services (such as rpc.statd) still require UDP, and * some NFS servers do not yet support NLM over TCP. * * Returns zero if all listeners are available; otherwise a * negative errno value is returned. 
*/ static int make_socks(struct svc_serv *serv, struct net *net) { static int warned; int err; err = create_lockd_family(serv, net, PF_INET); if (err < 0) goto out_err; err = create_lockd_family(serv, net, PF_INET6); if (err < 0 && err != -EAFNOSUPPORT) goto out_err; warned = 0; return 0; out_err: if (warned++ == 0) printk(KERN_WARNING "lockd_up: makesock failed, error=%d\n", err); svc_shutdown_net(serv, net); return err; } static int lockd_up_net(struct svc_serv *serv, struct net *net) { struct lockd_net *ln = net_generic(net, lockd_net_id); int error; if (ln->nlmsvc_users++) return 0; error = svc_bind(serv, net); if (error) goto err_bind; error = make_socks(serv, net); if (error < 0) goto err_bind; set_grace_period(net); dprintk("lockd_up_net: per-net data created; net=%p\n", net); return 0; err_bind: ln->nlmsvc_users--; return error; } static void lockd_down_net(struct svc_serv *serv, struct net *net) { struct lockd_net *ln = net_generic(net, lockd_net_id); if (ln->nlmsvc_users) { if (--ln->nlmsvc_users == 0) { nlm_shutdown_hosts_net(net); cancel_delayed_work_sync(&ln->grace_period_end); locks_end_grace(&ln->lockd_manager); svc_shutdown_net(serv, net); dprintk("lockd_down_net: per-net data destroyed; net=%p\n", net); } } else { printk(KERN_ERR "lockd_down_net: no users! task=%p, net=%p\n", nlmsvc_task, net); BUG(); } } static int lockd_inetaddr_event(struct notifier_block *this, unsigned long event, void *ptr) { struct in_ifaddr *ifa = (struct in_ifaddr *)ptr; struct sockaddr_in sin; if (event != NETDEV_DOWN) goto out; if (nlmsvc_rqst) { dprintk("lockd_inetaddr_event: removed %pI4\n", &ifa->ifa_local); sin.sin_family = AF_INET; sin.sin_addr.s_addr = ifa->ifa_local; svc_age_temp_xprts_now(nlmsvc_rqst->rq_server, (struct sockaddr *)&sin); } out: return NOTIFY_DONE; } static struct notifier_block lockd_inetaddr_notifier = { .notifier_call = lockd_inetaddr_event, }; #if IS_ENABLED(CONFIG_IPV6) static int lockd_inet6addr_event(struct notifier_block *this, unsigned long event, void *ptr) { struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr; struct sockaddr_in6 sin6; if (event != NETDEV_DOWN) goto out; if (nlmsvc_rqst) { dprintk("lockd_inet6addr_event: removed %pI6\n", &ifa->addr); sin6.sin6_family = AF_INET6; sin6.sin6_addr = ifa->addr; if (ipv6_addr_type(&sin6.sin6_addr) & IPV6_ADDR_LINKLOCAL) sin6.sin6_scope_id = ifa->idev->dev->ifindex; svc_age_temp_xprts_now(nlmsvc_rqst->rq_server, (struct sockaddr *)&sin6); } out: return NOTIFY_DONE; } static struct notifier_block lockd_inet6addr_notifier = { .notifier_call = lockd_inet6addr_event, }; #endif static void lockd_unregister_notifiers(void) { unregister_inetaddr_notifier(&lockd_inetaddr_notifier); #if IS_ENABLED(CONFIG_IPV6) unregister_inet6addr_notifier(&lockd_inet6addr_notifier); #endif } static void lockd_svc_exit_thread(void) { lockd_unregister_notifiers(); svc_exit_thread(nlmsvc_rqst); } static int lockd_start_svc(struct svc_serv *serv) { int error; if (nlmsvc_rqst) return 0; /* * Create the kernel thread and wait for it to start. 
*/ nlmsvc_rqst = svc_prepare_thread(serv, &serv->sv_pools[0], NUMA_NO_NODE); if (IS_ERR(nlmsvc_rqst)) { error = PTR_ERR(nlmsvc_rqst); printk(KERN_WARNING "lockd_up: svc_rqst allocation failed, error=%d\n", error); goto out_rqst; } svc_sock_update_bufs(serv); serv->sv_maxconn = nlm_max_connections; nlmsvc_task = kthread_create(lockd, nlmsvc_rqst, "%s", serv->sv_name); if (IS_ERR(nlmsvc_task)) { error = PTR_ERR(nlmsvc_task); printk(KERN_WARNING "lockd_up: kthread_run failed, error=%d\n", error); goto out_task; } nlmsvc_rqst->rq_task = nlmsvc_task; wake_up_process(nlmsvc_task); dprintk("lockd_up: service started\n"); return 0; out_task: lockd_svc_exit_thread(); nlmsvc_task = NULL; out_rqst: nlmsvc_rqst = NULL; return error; } static struct svc_serv_ops lockd_sv_ops = { .svo_shutdown = svc_rpcb_cleanup, .svo_enqueue_xprt = svc_xprt_do_enqueue, }; static struct svc_serv *lockd_create_svc(void) { struct svc_serv *serv; /* * Check whether we're already up and running. */ if (nlmsvc_rqst) { /* * Note: increase service usage, because later in case of error * svc_destroy() will be called. */ svc_get(nlmsvc_rqst->rq_server); return nlmsvc_rqst->rq_server; } /* * Sanity check: if there's no pid, * we should be the first user ... */ if (nlmsvc_users) printk(KERN_WARNING "lockd_up: no pid, %d users??\n", nlmsvc_users); if (!nlm_timeout) nlm_timeout = LOCKD_DFLT_TIMEO; nlmsvc_timeout = nlm_timeout * HZ; serv = svc_create(&nlmsvc_program, LOCKD_BUFSIZE, &lockd_sv_ops); if (!serv) { printk(KERN_WARNING "lockd_up: create service failed\n"); return ERR_PTR(-ENOMEM); } register_inetaddr_notifier(&lockd_inetaddr_notifier); #if IS_ENABLED(CONFIG_IPV6) register_inet6addr_notifier(&lockd_inet6addr_notifier); #endif dprintk("lockd_up: service created\n"); return serv; } /* * Bring up the lockd process if it's not already up. */ int lockd_up(struct net *net) { struct svc_serv *serv; int error; mutex_lock(&nlmsvc_mutex); serv = lockd_create_svc(); if (IS_ERR(serv)) { error = PTR_ERR(serv); goto err_create; } error = lockd_up_net(serv, net); if (error < 0) goto err_net; error = lockd_start_svc(serv); if (error < 0) goto err_start; nlmsvc_users++; /* * Note: svc_serv structures have an initial use count of 1, * so we exit through here on both success and failure. */ err_put: svc_destroy(serv); err_create: mutex_unlock(&nlmsvc_mutex); return error; err_start: lockd_down_net(serv, net); err_net: lockd_unregister_notifiers(); goto err_put; } EXPORT_SYMBOL_GPL(lockd_up); /* * Decrement the user count and bring down lockd if we're the last. */ void lockd_down(struct net *net) { mutex_lock(&nlmsvc_mutex); lockd_down_net(nlmsvc_rqst->rq_server, net); if (nlmsvc_users) { if (--nlmsvc_users) goto out; } else { printk(KERN_ERR "lockd_down: no users! task=%p\n", nlmsvc_task); BUG(); } if (!nlmsvc_task) { printk(KERN_ERR "lockd_down: no lockd running.\n"); BUG(); } kthread_stop(nlmsvc_task); dprintk("lockd_down: service stopped\n"); lockd_svc_exit_thread(); dprintk("lockd_down: service destroyed\n"); nlmsvc_task = NULL; nlmsvc_rqst = NULL; out: mutex_unlock(&nlmsvc_mutex); } EXPORT_SYMBOL_GPL(lockd_down); #ifdef CONFIG_SYSCTL /* * Sysctl parameters (same as module parameters, different interface). 
*/ static struct ctl_table nlm_sysctls[] = { { .procname = "nlm_grace_period", .data = &nlm_grace_period, .maxlen = sizeof(unsigned long), .mode = 0644, .proc_handler = proc_doulongvec_minmax, .extra1 = (unsigned long *) &nlm_grace_period_min, .extra2 = (unsigned long *) &nlm_grace_period_max, }, { .procname = "nlm_timeout", .data = &nlm_timeout, .maxlen = sizeof(unsigned long), .mode = 0644, .proc_handler = proc_doulongvec_minmax, .extra1 = (unsigned long *) &nlm_timeout_min, .extra2 = (unsigned long *) &nlm_timeout_max, }, { .procname = "nlm_udpport", .data = &nlm_udpport, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = (int *) &nlm_port_min, .extra2 = (int *) &nlm_port_max, }, { .procname = "nlm_tcpport", .data = &nlm_tcpport, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = (int *) &nlm_port_min, .extra2 = (int *) &nlm_port_max, }, { .procname = "nsm_use_hostnames", .data = &nsm_use_hostnames, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "nsm_local_state", .data = &nsm_local_state, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, { } }; static struct ctl_table nlm_sysctl_dir[] = { { .procname = "nfs", .mode = 0555, .child = nlm_sysctls, }, { } }; static struct ctl_table nlm_sysctl_root[] = { { .procname = "fs", .mode = 0555, .child = nlm_sysctl_dir, }, { } }; #endif /* CONFIG_SYSCTL */ /* * Module (and sysfs) parameters. */ #define param_set_min_max(name, type, which_strtol, min, max) \ static int param_set_##name(const char *val, struct kernel_param *kp) \ { \ char *endp; \ __typeof__(type) num = which_strtol(val, &endp, 0); \ if (endp == val || *endp || num < (min) || num > (max)) \ return -EINVAL; \ *((type *) kp->arg) = num; \ return 0; \ } static inline int is_callback(u32 proc) { return proc == NLMPROC_GRANTED || proc == NLMPROC_GRANTED_MSG || proc == NLMPROC_TEST_RES || proc == NLMPROC_LOCK_RES || proc == NLMPROC_CANCEL_RES || proc == NLMPROC_UNLOCK_RES || proc == NLMPROC_NSM_NOTIFY; } static int lockd_authenticate(struct svc_rqst *rqstp) { rqstp->rq_client = NULL; switch (rqstp->rq_authop->flavour) { case RPC_AUTH_NULL: case RPC_AUTH_UNIX: if (rqstp->rq_proc == 0) return SVC_OK; if (is_callback(rqstp->rq_proc)) { /* Leave it to individual procedures to * call nlmsvc_lookup_host(rqstp) */ return SVC_OK; } return svc_set_client(rqstp); } return SVC_DENIED; } param_set_min_max(port, int, simple_strtol, 0, 65535) param_set_min_max(grace_period, unsigned long, simple_strtoul, nlm_grace_period_min, nlm_grace_period_max) param_set_min_max(timeout, unsigned long, simple_strtoul, nlm_timeout_min, nlm_timeout_max) MODULE_AUTHOR("Olaf Kirch <okir@monad.swb.de>"); MODULE_DESCRIPTION("NFS file locking service version " LOCKD_VERSION "."); MODULE_LICENSE("GPL"); module_param_call(nlm_grace_period, param_set_grace_period, param_get_ulong, &nlm_grace_period, 0644); module_param_call(nlm_timeout, param_set_timeout, param_get_ulong, &nlm_timeout, 0644); module_param_call(nlm_udpport, param_set_port, param_get_int, &nlm_udpport, 0644); module_param_call(nlm_tcpport, param_set_port, param_get_int, &nlm_tcpport, 0644); module_param(nsm_use_hostnames, bool, 0644); module_param(nlm_max_connections, uint, 0644); static int lockd_init_net(struct net *net) { struct lockd_net *ln = net_generic(net, lockd_net_id); INIT_DELAYED_WORK(&ln->grace_period_end, grace_ender); INIT_LIST_HEAD(&ln->lockd_manager.list); ln->lockd_manager.block_opens = false; 
INIT_LIST_HEAD(&ln->nsm_handles); return 0; } static void lockd_exit_net(struct net *net) { } static struct pernet_operations lockd_net_ops = { .init = lockd_init_net, .exit = lockd_exit_net, .id = &lockd_net_id, .size = sizeof(struct lockd_net), }; /* * Initialising and terminating the module. */ static int __init init_nlm(void) { int err; #ifdef CONFIG_SYSCTL err = -ENOMEM; nlm_sysctl_table = register_sysctl_table(nlm_sysctl_root); if (nlm_sysctl_table == NULL) goto err_sysctl; #endif err = register_pernet_subsys(&lockd_net_ops); if (err) goto err_pernet; err = lockd_create_procfs(); if (err) goto err_procfs; return 0; err_procfs: unregister_pernet_subsys(&lockd_net_ops); err_pernet: #ifdef CONFIG_SYSCTL unregister_sysctl_table(nlm_sysctl_table); err_sysctl: #endif return err; } static void __exit exit_nlm(void) { /* FIXME: delete all NLM clients */ nlm_shutdown_hosts(); lockd_remove_procfs(); unregister_pernet_subsys(&lockd_net_ops); #ifdef CONFIG_SYSCTL unregister_sysctl_table(nlm_sysctl_table); #endif } module_init(init_nlm); module_exit(exit_nlm); /* * Define NLM program and procedures */ static struct svc_version nlmsvc_version1 = { .vs_vers = 1, .vs_nproc = 17, .vs_proc = nlmsvc_procedures, .vs_xdrsize = NLMSVC_XDRSIZE, }; static struct svc_version nlmsvc_version3 = { .vs_vers = 3, .vs_nproc = 24, .vs_proc = nlmsvc_procedures, .vs_xdrsize = NLMSVC_XDRSIZE, }; #ifdef CONFIG_LOCKD_V4 static struct svc_version nlmsvc_version4 = { .vs_vers = 4, .vs_nproc = 24, .vs_proc = nlmsvc_procedures4, .vs_xdrsize = NLMSVC_XDRSIZE, }; #endif static struct svc_version * nlmsvc_version[] = { [1] = &nlmsvc_version1, [3] = &nlmsvc_version3, #ifdef CONFIG_LOCKD_V4 [4] = &nlmsvc_version4, #endif }; static struct svc_stat nlmsvc_stats; #define NLM_NRVERS ARRAY_SIZE(nlmsvc_version) static struct svc_program nlmsvc_program = { .pg_prog = NLM_PROGRAM, /* program number */ .pg_nvers = NLM_NRVERS, /* number of entries in nlmsvc_version */ .pg_vers = nlmsvc_version, /* version table */ .pg_name = "lockd", /* service name */ .pg_class = "nfsd", /* share authentication with nfsd */ .pg_stats = &nlmsvc_stats, /* stats table */ .pg_authenticate = &lockd_authenticate /* export authentication */ };
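get_lockd_grace_period() above derives the lock-recovery grace window from two tunables: an explicitly configured grace period is rounded up to a whole number of nlm_timeout intervals, and when no grace period is set the default is five timeout intervals. A minimal user-space sketch of that arithmetic follows; the tick-rate constant, helper names, and example numbers are assumptions for illustration (the kernel's roundup() and HZ come from its own headers), and the timeout is assumed nonzero, as the kernel comment notes.

/* Hypothetical user-space sketch of the grace-period arithmetic; not kernel code. */
#include <stdio.h>

#define EXAMPLE_HZ 100UL	/* assumed tick rate, for illustration only */

/* Round x up to the next multiple of step (step must be nonzero). */
static unsigned long round_up_to(unsigned long x, unsigned long step)
{
	return ((x + step - 1) / step) * step;
}

static unsigned long grace_period_jiffies(unsigned long grace_secs,
					  unsigned long timeout_secs)
{
	if (grace_secs)
		return round_up_to(grace_secs, timeout_secs) * EXAMPLE_HZ;
	return timeout_secs * 5 * EXAMPLE_HZ;
}

int main(void)
{
	/* 45s grace with a 10s timeout rounds up to 50s worth of ticks. */
	printf("%lu\n", grace_period_jiffies(45, 10));
	/* No grace period configured: default to 5 * timeout. */
	printf("%lu\n", grace_period_jiffies(0, 10));
	return 0;
}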
./CrossVul/dataset_final_sorted/CWE-404/c/bad_3351_0
crossvul-cpp_data_bad_1803_0
/* SCTP kernel implementation * (C) Copyright IBM Corp. 2001, 2004 * Copyright (c) 1999 Cisco, Inc. * Copyright (c) 1999-2001 Motorola, Inc. * * This file is part of the SCTP kernel implementation * * These functions work with the state functions in sctp_sm_statefuns.c * to implement that state operations. These functions implement the * steps which require modifying existing data structures. * * This SCTP implementation is free software; * you can redistribute it and/or modify it under the terms of * the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This SCTP implementation is distributed in the hope that it * will be useful, but WITHOUT ANY WARRANTY; without even the implied * ************************ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * See the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with GNU CC; see the file COPYING. If not, see * <http://www.gnu.org/licenses/>. * * Please send any bug reports or fixes you make to the * email address(es): * lksctp developers <linux-sctp@vger.kernel.org> * * Written or modified by: * La Monte H.P. Yarroll <piggy@acm.org> * Karl Knutson <karl@athena.chicago.il.us> * Jon Grimm <jgrimm@austin.ibm.com> * Hui Huang <hui.huang@nokia.com> * Dajiang Zhang <dajiang.zhang@nokia.com> * Daisy Chang <daisyc@us.ibm.com> * Sridhar Samudrala <sri@us.ibm.com> * Ardelle Fan <ardelle.fan@intel.com> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/skbuff.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/ip.h> #include <linux/gfp.h> #include <net/sock.h> #include <net/sctp/sctp.h> #include <net/sctp/sm.h> static int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype, sctp_state_t state, struct sctp_endpoint *ep, struct sctp_association *asoc, void *event_arg, sctp_disposition_t status, sctp_cmd_seq_t *commands, gfp_t gfp); static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype, sctp_state_t state, struct sctp_endpoint *ep, struct sctp_association *asoc, void *event_arg, sctp_disposition_t status, sctp_cmd_seq_t *commands, gfp_t gfp); static void sctp_cmd_hb_timer_update(sctp_cmd_seq_t *cmds, struct sctp_transport *t); /******************************************************************** * Helper functions ********************************************************************/ /* A helper function for delayed processing of INET ECN CE bit. */ static void sctp_do_ecn_ce_work(struct sctp_association *asoc, __u32 lowest_tsn) { /* Save the TSN away for comparison when we receive CWR */ asoc->last_ecne_tsn = lowest_tsn; asoc->need_ecne = 1; } /* Helper function for delayed processing of SCTP ECNE chunk. */ /* RFC 2960 Appendix A * * RFC 2481 details a specific bit for a sender to send in * the header of its next outbound TCP segment to indicate to * its peer that it has reduced its congestion window. This * is termed the CWR bit. For SCTP the same indication is made * by including the CWR chunk. This chunk contains one data * element, i.e. the TSN number that was sent in the ECNE chunk. * This element represents the lowest TSN number in the datagram * that was originally marked with the CE bit. 
*/ static struct sctp_chunk *sctp_do_ecn_ecne_work(struct sctp_association *asoc, __u32 lowest_tsn, struct sctp_chunk *chunk) { struct sctp_chunk *repl; /* Our previously transmitted packet ran into some congestion * so we should take action by reducing cwnd and ssthresh * and then ACK our peer that we we've done so by * sending a CWR. */ /* First, try to determine if we want to actually lower * our cwnd variables. Only lower them if the ECNE looks more * recent than the last response. */ if (TSN_lt(asoc->last_cwr_tsn, lowest_tsn)) { struct sctp_transport *transport; /* Find which transport's congestion variables * need to be adjusted. */ transport = sctp_assoc_lookup_tsn(asoc, lowest_tsn); /* Update the congestion variables. */ if (transport) sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_ECNE); asoc->last_cwr_tsn = lowest_tsn; } /* Always try to quiet the other end. In case of lost CWR, * resend last_cwr_tsn. */ repl = sctp_make_cwr(asoc, asoc->last_cwr_tsn, chunk); /* If we run out of memory, it will look like a lost CWR. We'll * get back in sync eventually. */ return repl; } /* Helper function to do delayed processing of ECN CWR chunk. */ static void sctp_do_ecn_cwr_work(struct sctp_association *asoc, __u32 lowest_tsn) { /* Turn off ECNE getting auto-prepended to every outgoing * packet */ asoc->need_ecne = 0; } /* Generate SACK if necessary. We call this at the end of a packet. */ static int sctp_gen_sack(struct sctp_association *asoc, int force, sctp_cmd_seq_t *commands) { __u32 ctsn, max_tsn_seen; struct sctp_chunk *sack; struct sctp_transport *trans = asoc->peer.last_data_from; int error = 0; if (force || (!trans && (asoc->param_flags & SPP_SACKDELAY_DISABLE)) || (trans && (trans->param_flags & SPP_SACKDELAY_DISABLE))) asoc->peer.sack_needed = 1; ctsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map); max_tsn_seen = sctp_tsnmap_get_max_tsn_seen(&asoc->peer.tsn_map); /* From 12.2 Parameters necessary per association (i.e. the TCB): * * Ack State : This flag indicates if the next received packet * : is to be responded to with a SACK. ... * : When DATA chunks are out of order, SACK's * : are not delayed (see Section 6). * * [This is actually not mentioned in Section 6, but we * implement it here anyway. --piggy] */ if (max_tsn_seen != ctsn) asoc->peer.sack_needed = 1; /* From 6.2 Acknowledgement on Reception of DATA Chunks: * * Section 4.2 of [RFC2581] SHOULD be followed. Specifically, * an acknowledgement SHOULD be generated for at least every * second packet (not every second DATA chunk) received, and * SHOULD be generated within 200 ms of the arrival of any * unacknowledged DATA chunk. ... */ if (!asoc->peer.sack_needed) { asoc->peer.sack_cnt++; /* Set the SACK delay timeout based on the * SACK delay for the last transport * data was received from, or the default * for the association. */ if (trans) { /* We will need a SACK for the next packet. */ if (asoc->peer.sack_cnt >= trans->sackfreq - 1) asoc->peer.sack_needed = 1; asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = trans->sackdelay; } else { /* We will need a SACK for the next packet. */ if (asoc->peer.sack_cnt >= asoc->sackfreq - 1) asoc->peer.sack_needed = 1; asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay; } /* Restart the SACK timer. 
*/ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART, SCTP_TO(SCTP_EVENT_TIMEOUT_SACK)); } else { asoc->a_rwnd = asoc->rwnd; sack = sctp_make_sack(asoc); if (!sack) goto nomem; asoc->peer.sack_needed = 0; asoc->peer.sack_cnt = 0; sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(sack)); /* Stop the SACK timer. */ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, SCTP_TO(SCTP_EVENT_TIMEOUT_SACK)); } return error; nomem: error = -ENOMEM; return error; } /* When the T3-RTX timer expires, it calls this function to create the * relevant state machine event. */ void sctp_generate_t3_rtx_event(unsigned long peer) { int error; struct sctp_transport *transport = (struct sctp_transport *) peer; struct sctp_association *asoc = transport->asoc; struct net *net = sock_net(asoc->base.sk); /* Check whether a task is in the sock. */ bh_lock_sock(asoc->base.sk); if (sock_owned_by_user(asoc->base.sk)) { pr_debug("%s: sock is busy\n", __func__); /* Try again later. */ if (!mod_timer(&transport->T3_rtx_timer, jiffies + (HZ/20))) sctp_transport_hold(transport); goto out_unlock; } /* Is this transport really dead and just waiting around for * the timer to let go of the reference? */ if (transport->dead) goto out_unlock; /* Run through the state machine. */ error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT, SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_T3_RTX), asoc->state, asoc->ep, asoc, transport, GFP_ATOMIC); if (error) asoc->base.sk->sk_err = -error; out_unlock: bh_unlock_sock(asoc->base.sk); sctp_transport_put(transport); } /* This is a sa interface for producing timeout events. It works * for timeouts which use the association as their parameter. */ static void sctp_generate_timeout_event(struct sctp_association *asoc, sctp_event_timeout_t timeout_type) { struct net *net = sock_net(asoc->base.sk); int error = 0; bh_lock_sock(asoc->base.sk); if (sock_owned_by_user(asoc->base.sk)) { pr_debug("%s: sock is busy: timer %d\n", __func__, timeout_type); /* Try again later. */ if (!mod_timer(&asoc->timers[timeout_type], jiffies + (HZ/20))) sctp_association_hold(asoc); goto out_unlock; } /* Is this association really dead and just waiting around for * the timer to let go of the reference? */ if (asoc->base.dead) goto out_unlock; /* Run through the state machine. 
*/ error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT, SCTP_ST_TIMEOUT(timeout_type), asoc->state, asoc->ep, asoc, (void *)timeout_type, GFP_ATOMIC); if (error) asoc->base.sk->sk_err = -error; out_unlock: bh_unlock_sock(asoc->base.sk); sctp_association_put(asoc); } static void sctp_generate_t1_cookie_event(unsigned long data) { struct sctp_association *asoc = (struct sctp_association *) data; sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T1_COOKIE); } static void sctp_generate_t1_init_event(unsigned long data) { struct sctp_association *asoc = (struct sctp_association *) data; sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T1_INIT); } static void sctp_generate_t2_shutdown_event(unsigned long data) { struct sctp_association *asoc = (struct sctp_association *) data; sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T2_SHUTDOWN); } static void sctp_generate_t4_rto_event(unsigned long data) { struct sctp_association *asoc = (struct sctp_association *) data; sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T4_RTO); } static void sctp_generate_t5_shutdown_guard_event(unsigned long data) { struct sctp_association *asoc = (struct sctp_association *)data; sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD); } /* sctp_generate_t5_shutdown_guard_event() */ static void sctp_generate_autoclose_event(unsigned long data) { struct sctp_association *asoc = (struct sctp_association *) data; sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_AUTOCLOSE); } /* Generate a heart beat event. If the sock is busy, reschedule. Make * sure that the transport is still valid. */ void sctp_generate_heartbeat_event(unsigned long data) { int error = 0; struct sctp_transport *transport = (struct sctp_transport *) data; struct sctp_association *asoc = transport->asoc; struct net *net = sock_net(asoc->base.sk); bh_lock_sock(asoc->base.sk); if (sock_owned_by_user(asoc->base.sk)) { pr_debug("%s: sock is busy\n", __func__); /* Try again later. */ if (!mod_timer(&transport->hb_timer, jiffies + (HZ/20))) sctp_transport_hold(transport); goto out_unlock; } /* Is this structure just waiting around for us to actually * get destroyed? */ if (transport->dead) goto out_unlock; error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT, SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_HEARTBEAT), asoc->state, asoc->ep, asoc, transport, GFP_ATOMIC); if (error) asoc->base.sk->sk_err = -error; out_unlock: bh_unlock_sock(asoc->base.sk); sctp_transport_put(transport); } /* Handle the timeout of the ICMP protocol unreachable timer. Trigger * the correct state machine transition that will close the association. */ void sctp_generate_proto_unreach_event(unsigned long data) { struct sctp_transport *transport = (struct sctp_transport *) data; struct sctp_association *asoc = transport->asoc; struct net *net = sock_net(asoc->base.sk); bh_lock_sock(asoc->base.sk); if (sock_owned_by_user(asoc->base.sk)) { pr_debug("%s: sock is busy\n", __func__); /* Try again later. */ if (!mod_timer(&transport->proto_unreach_timer, jiffies + (HZ/20))) sctp_association_hold(asoc); goto out_unlock; } /* Is this structure just waiting around for us to actually * get destroyed? */ if (asoc->base.dead) goto out_unlock; sctp_do_sm(net, SCTP_EVENT_T_OTHER, SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH), asoc->state, asoc->ep, asoc, transport, GFP_ATOMIC); out_unlock: bh_unlock_sock(asoc->base.sk); sctp_association_put(asoc); } /* Inject a SACK Timeout event into the state machine. 
*/ static void sctp_generate_sack_event(unsigned long data) { struct sctp_association *asoc = (struct sctp_association *) data; sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_SACK); } sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES] = { NULL, sctp_generate_t1_cookie_event, sctp_generate_t1_init_event, sctp_generate_t2_shutdown_event, NULL, sctp_generate_t4_rto_event, sctp_generate_t5_shutdown_guard_event, NULL, sctp_generate_sack_event, sctp_generate_autoclose_event, }; /* RFC 2960 8.2 Path Failure Detection * * When its peer endpoint is multi-homed, an endpoint should keep a * error counter for each of the destination transport addresses of the * peer endpoint. * * Each time the T3-rtx timer expires on any address, or when a * HEARTBEAT sent to an idle address is not acknowledged within a RTO, * the error counter of that destination address will be incremented. * When the value in the error counter exceeds the protocol parameter * 'Path.Max.Retrans' of that destination address, the endpoint should * mark the destination transport address as inactive, and a * notification SHOULD be sent to the upper layer. * */ static void sctp_do_8_2_transport_strike(sctp_cmd_seq_t *commands, struct sctp_association *asoc, struct sctp_transport *transport, int is_hb) { /* The check for association's overall error counter exceeding the * threshold is done in the state function. */ /* We are here due to a timer expiration. If the timer was * not a HEARTBEAT, then normal error tracking is done. * If the timer was a heartbeat, we only increment error counts * when we already have an outstanding HEARTBEAT that has not * been acknowledged. * Additionally, some tranport states inhibit error increments. */ if (!is_hb) { asoc->overall_error_count++; if (transport->state != SCTP_INACTIVE) transport->error_count++; } else if (transport->hb_sent) { if (transport->state != SCTP_UNCONFIRMED) asoc->overall_error_count++; if (transport->state != SCTP_INACTIVE) transport->error_count++; } /* If the transport error count is greater than the pf_retrans * threshold, and less than pathmaxrtx, and if the current state * is SCTP_ACTIVE, then mark this transport as Partially Failed, * see SCTP Quick Failover Draft, section 5.1 */ if ((transport->state == SCTP_ACTIVE) && (asoc->pf_retrans < transport->pathmaxrxt) && (transport->error_count > asoc->pf_retrans)) { sctp_assoc_control_transport(asoc, transport, SCTP_TRANSPORT_PF, 0); /* Update the hb timer to resend a heartbeat every rto */ sctp_cmd_hb_timer_update(commands, transport); } if (transport->state != SCTP_INACTIVE && (transport->error_count > transport->pathmaxrxt)) { pr_debug("%s: association:%p transport addr:%pISpc failed\n", __func__, asoc, &transport->ipaddr.sa); sctp_assoc_control_transport(asoc, transport, SCTP_TRANSPORT_DOWN, SCTP_FAILED_THRESHOLD); } /* E2) For the destination address for which the timer * expires, set RTO <- RTO * 2 ("back off the timer"). The * maximum value discussed in rule C7 above (RTO.max) may be * used to provide an upper bound to this doubling operation. * * Special Case: the first HB doesn't trigger exponential backoff. * The first unacknowledged HB triggers it. We do this with a flag * that indicates that we have an outstanding HB. */ if (!is_hb || transport->hb_sent) { transport->rto = min((transport->rto * 2), transport->asoc->rto_max); sctp_max_rto(asoc, transport); } } /* Worker routine to handle INIT command failure. 
*/ static void sctp_cmd_init_failed(sctp_cmd_seq_t *commands, struct sctp_association *asoc, unsigned int error) { struct sctp_ulpevent *event; event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_CANT_STR_ASSOC, (__u16)error, 0, 0, NULL, GFP_ATOMIC); if (event) sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(event)); sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, SCTP_STATE(SCTP_STATE_CLOSED)); /* SEND_FAILED sent later when cleaning up the association. */ asoc->outqueue.error = error; sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL()); } /* Worker routine to handle SCTP_CMD_ASSOC_FAILED. */ static void sctp_cmd_assoc_failed(sctp_cmd_seq_t *commands, struct sctp_association *asoc, sctp_event_t event_type, sctp_subtype_t subtype, struct sctp_chunk *chunk, unsigned int error) { struct sctp_ulpevent *event; struct sctp_chunk *abort; /* Cancel any partial delivery in progress. */ sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC); if (event_type == SCTP_EVENT_T_CHUNK && subtype.chunk == SCTP_CID_ABORT) event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_LOST, (__u16)error, 0, 0, chunk, GFP_ATOMIC); else event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_LOST, (__u16)error, 0, 0, NULL, GFP_ATOMIC); if (event) sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(event)); if (asoc->overall_error_count >= asoc->max_retrans) { abort = sctp_make_violation_max_retrans(asoc, chunk); if (abort) sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort)); } sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, SCTP_STATE(SCTP_STATE_CLOSED)); /* SEND_FAILED sent later when cleaning up the association. */ asoc->outqueue.error = error; sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL()); } /* Process an init chunk (may be real INIT/INIT-ACK or an embedded INIT * inside the cookie. In reality, this is only used for INIT-ACK processing * since all other cases use "temporary" associations and can do all * their work in statefuns directly. */ static int sctp_cmd_process_init(sctp_cmd_seq_t *commands, struct sctp_association *asoc, struct sctp_chunk *chunk, sctp_init_chunk_t *peer_init, gfp_t gfp) { int error; /* We only process the init as a sideeffect in a single * case. This is when we process the INIT-ACK. If we * fail during INIT processing (due to malloc problems), * just return the error and stop processing the stack. */ if (!sctp_process_init(asoc, chunk, sctp_source(chunk), peer_init, gfp)) error = -ENOMEM; else error = 0; return error; } /* Helper function to break out starting up of heartbeat timers. */ static void sctp_cmd_hb_timers_start(sctp_cmd_seq_t *cmds, struct sctp_association *asoc) { struct sctp_transport *t; /* Start a heartbeat timer for each transport on the association. * hold a reference on the transport to make sure none of * the needed data structures go away. */ list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) { if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t))) sctp_transport_hold(t); } } static void sctp_cmd_hb_timers_stop(sctp_cmd_seq_t *cmds, struct sctp_association *asoc) { struct sctp_transport *t; /* Stop all heartbeat timers. 
*/ list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) { if (del_timer(&t->hb_timer)) sctp_transport_put(t); } } /* Helper function to stop any pending T3-RTX timers */ static void sctp_cmd_t3_rtx_timers_stop(sctp_cmd_seq_t *cmds, struct sctp_association *asoc) { struct sctp_transport *t; list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) { if (del_timer(&t->T3_rtx_timer)) sctp_transport_put(t); } } /* Helper function to update the heartbeat timer. */ static void sctp_cmd_hb_timer_update(sctp_cmd_seq_t *cmds, struct sctp_transport *t) { /* Update the heartbeat timer. */ if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t))) sctp_transport_hold(t); } /* Helper function to handle the reception of an HEARTBEAT ACK. */ static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds, struct sctp_association *asoc, struct sctp_transport *t, struct sctp_chunk *chunk) { sctp_sender_hb_info_t *hbinfo; int was_unconfirmed = 0; /* 8.3 Upon the receipt of the HEARTBEAT ACK, the sender of the * HEARTBEAT should clear the error counter of the destination * transport address to which the HEARTBEAT was sent. */ t->error_count = 0; /* * Although RFC4960 specifies that the overall error count must * be cleared when a HEARTBEAT ACK is received, we make an * exception while in SHUTDOWN PENDING. If the peer keeps its * window shut forever, we may never be able to transmit our * outstanding data and rely on the retransmission limit be reached * to shutdown the association. */ if (t->asoc->state < SCTP_STATE_SHUTDOWN_PENDING) t->asoc->overall_error_count = 0; /* Clear the hb_sent flag to signal that we had a good * acknowledgement. */ t->hb_sent = 0; /* Mark the destination transport address as active if it is not so * marked. */ if ((t->state == SCTP_INACTIVE) || (t->state == SCTP_UNCONFIRMED)) { was_unconfirmed = 1; sctp_assoc_control_transport(asoc, t, SCTP_TRANSPORT_UP, SCTP_HEARTBEAT_SUCCESS); } if (t->state == SCTP_PF) sctp_assoc_control_transport(asoc, t, SCTP_TRANSPORT_UP, SCTP_HEARTBEAT_SUCCESS); /* HB-ACK was received for a the proper HB. Consider this * forward progress. */ if (t->dst) dst_confirm(t->dst); /* The receiver of the HEARTBEAT ACK should also perform an * RTT measurement for that destination transport address * using the time value carried in the HEARTBEAT ACK chunk. * If the transport's rto_pending variable has been cleared, * it was most likely due to a retransmit. However, we want * to re-enable it to properly update the rto. */ if (t->rto_pending == 0) t->rto_pending = 1; hbinfo = (sctp_sender_hb_info_t *) chunk->skb->data; sctp_transport_update_rto(t, (jiffies - hbinfo->sent_at)); /* Update the heartbeat timer. */ if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t))) sctp_transport_hold(t); if (was_unconfirmed && asoc->peer.transport_count == 1) sctp_transport_immediate_rtx(t); } /* Helper function to process the process SACK command. */ static int sctp_cmd_process_sack(sctp_cmd_seq_t *cmds, struct sctp_association *asoc, struct sctp_chunk *chunk) { int err = 0; if (sctp_outq_sack(&asoc->outqueue, chunk)) { struct net *net = sock_net(asoc->base.sk); /* There are no more TSNs awaiting SACK. */ err = sctp_do_sm(net, SCTP_EVENT_T_OTHER, SCTP_ST_OTHER(SCTP_EVENT_NO_PENDING_TSN), asoc->state, asoc->ep, asoc, NULL, GFP_ATOMIC); } return err; } /* Helper function to set the timeout value for T2-SHUTDOWN timer and to set * the transport for a shutdown chunk. 
*/ static void sctp_cmd_setup_t2(sctp_cmd_seq_t *cmds, struct sctp_association *asoc, struct sctp_chunk *chunk) { struct sctp_transport *t; if (chunk->transport) t = chunk->transport; else { t = sctp_assoc_choose_alter_transport(asoc, asoc->shutdown_last_sent_to); chunk->transport = t; } asoc->shutdown_last_sent_to = t; asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = t->rto; } /* Helper function to change the state of an association. */ static void sctp_cmd_new_state(sctp_cmd_seq_t *cmds, struct sctp_association *asoc, sctp_state_t state) { struct sock *sk = asoc->base.sk; asoc->state = state; pr_debug("%s: asoc:%p[%s]\n", __func__, asoc, sctp_state_tbl[state]); if (sctp_style(sk, TCP)) { /* Change the sk->sk_state of a TCP-style socket that has * successfully completed a connect() call. */ if (sctp_state(asoc, ESTABLISHED) && sctp_sstate(sk, CLOSED)) sk->sk_state = SCTP_SS_ESTABLISHED; /* Set the RCV_SHUTDOWN flag when a SHUTDOWN is received. */ if (sctp_state(asoc, SHUTDOWN_RECEIVED) && sctp_sstate(sk, ESTABLISHED)) sk->sk_shutdown |= RCV_SHUTDOWN; } if (sctp_state(asoc, COOKIE_WAIT)) { /* Reset init timeouts since they may have been * increased due to timer expirations. */ asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] = asoc->rto_initial; asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] = asoc->rto_initial; } if (sctp_state(asoc, ESTABLISHED) || sctp_state(asoc, CLOSED) || sctp_state(asoc, SHUTDOWN_RECEIVED)) { /* Wake up any processes waiting in the asoc's wait queue in * sctp_wait_for_connect() or sctp_wait_for_sndbuf(). */ if (waitqueue_active(&asoc->wait)) wake_up_interruptible(&asoc->wait); /* Wake up any processes waiting in the sk's sleep queue of * a TCP-style or UDP-style peeled-off socket in * sctp_wait_for_accept() or sctp_wait_for_packet(). * For a UDP-style socket, the waiters are woken up by the * notifications. */ if (!sctp_style(sk, UDP)) sk->sk_state_change(sk); } } /* Helper function to delete an association. */ static void sctp_cmd_delete_tcb(sctp_cmd_seq_t *cmds, struct sctp_association *asoc) { struct sock *sk = asoc->base.sk; /* If it is a non-temporary association belonging to a TCP-style * listening socket that is not closed, do not free it so that accept() * can pick it up later. */ if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING) && (!asoc->temp) && (sk->sk_shutdown != SHUTDOWN_MASK)) return; sctp_unhash_established(asoc); sctp_association_free(asoc); } /* * ADDIP Section 4.1 ASCONF Chunk Procedures * A4) Start a T-4 RTO timer, using the RTO value of the selected * destination address (we use active path instead of primary path just * because primary path may be inactive. */ static void sctp_cmd_setup_t4(sctp_cmd_seq_t *cmds, struct sctp_association *asoc, struct sctp_chunk *chunk) { struct sctp_transport *t; t = sctp_assoc_choose_alter_transport(asoc, chunk->transport); asoc->timeouts[SCTP_EVENT_TIMEOUT_T4_RTO] = t->rto; chunk->transport = t; } /* Process an incoming Operation Error Chunk. 
*/ static void sctp_cmd_process_operr(sctp_cmd_seq_t *cmds, struct sctp_association *asoc, struct sctp_chunk *chunk) { struct sctp_errhdr *err_hdr; struct sctp_ulpevent *ev; while (chunk->chunk_end > chunk->skb->data) { err_hdr = (struct sctp_errhdr *)(chunk->skb->data); ev = sctp_ulpevent_make_remote_error(asoc, chunk, 0, GFP_ATOMIC); if (!ev) return; sctp_ulpq_tail_event(&asoc->ulpq, ev); switch (err_hdr->cause) { case SCTP_ERROR_UNKNOWN_CHUNK: { sctp_chunkhdr_t *unk_chunk_hdr; unk_chunk_hdr = (sctp_chunkhdr_t *)err_hdr->variable; switch (unk_chunk_hdr->type) { /* ADDIP 4.1 A9) If the peer responds to an ASCONF with * an ERROR chunk reporting that it did not recognized * the ASCONF chunk type, the sender of the ASCONF MUST * NOT send any further ASCONF chunks and MUST stop its * T-4 timer. */ case SCTP_CID_ASCONF: if (asoc->peer.asconf_capable == 0) break; asoc->peer.asconf_capable = 0; sctp_add_cmd_sf(cmds, SCTP_CMD_TIMER_STOP, SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO)); break; default: break; } break; } default: break; } } } /* Process variable FWDTSN chunk information. */ static void sctp_cmd_process_fwdtsn(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk) { struct sctp_fwdtsn_skip *skip; /* Walk through all the skipped SSNs */ sctp_walk_fwdtsn(skip, chunk) { sctp_ulpq_skip(ulpq, ntohs(skip->stream), ntohs(skip->ssn)); } } /* Helper function to remove the association non-primary peer * transports. */ static void sctp_cmd_del_non_primary(struct sctp_association *asoc) { struct sctp_transport *t; struct list_head *pos; struct list_head *temp; list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) { t = list_entry(pos, struct sctp_transport, transports); if (!sctp_cmp_addr_exact(&t->ipaddr, &asoc->peer.primary_addr)) { sctp_assoc_rm_peer(asoc, t); } } } /* Helper function to set sk_err on a 1-1 style socket. */ static void sctp_cmd_set_sk_err(struct sctp_association *asoc, int error) { struct sock *sk = asoc->base.sk; if (!sctp_style(sk, UDP)) sk->sk_err = error; } /* Helper function to generate an association change event */ static void sctp_cmd_assoc_change(sctp_cmd_seq_t *commands, struct sctp_association *asoc, u8 state) { struct sctp_ulpevent *ev; ev = sctp_ulpevent_make_assoc_change(asoc, 0, state, 0, asoc->c.sinit_num_ostreams, asoc->c.sinit_max_instreams, NULL, GFP_ATOMIC); if (ev) sctp_ulpq_tail_event(&asoc->ulpq, ev); } /* Helper function to generate an adaptation indication event */ static void sctp_cmd_adaptation_ind(sctp_cmd_seq_t *commands, struct sctp_association *asoc) { struct sctp_ulpevent *ev; ev = sctp_ulpevent_make_adaptation_indication(asoc, GFP_ATOMIC); if (ev) sctp_ulpq_tail_event(&asoc->ulpq, ev); } static void sctp_cmd_t1_timer_update(struct sctp_association *asoc, sctp_event_timeout_t timer, char *name) { struct sctp_transport *t; t = asoc->init_last_sent_to; asoc->init_err_counter++; if (t->init_sent_count > (asoc->init_cycle + 1)) { asoc->timeouts[timer] *= 2; if (asoc->timeouts[timer] > asoc->max_init_timeo) { asoc->timeouts[timer] = asoc->max_init_timeo; } asoc->init_cycle++; pr_debug("%s: T1[%s] timeout adjustment init_err_counter:%d" " cycle:%d timeout:%ld\n", __func__, name, asoc->init_err_counter, asoc->init_cycle, asoc->timeouts[timer]); } } /* Send the whole message, chunk by chunk, to the outqueue. * This way the whole message is queued up and bundling if * encouraged for small fragments. 
*/ static int sctp_cmd_send_msg(struct sctp_association *asoc, struct sctp_datamsg *msg) { struct sctp_chunk *chunk; int error = 0; list_for_each_entry(chunk, &msg->chunks, frag_list) { error = sctp_outq_tail(&asoc->outqueue, chunk); if (error) break; } return error; } /* Sent the next ASCONF packet currently stored in the association. * This happens after the ASCONF_ACK was succeffully processed. */ static void sctp_cmd_send_asconf(struct sctp_association *asoc) { struct net *net = sock_net(asoc->base.sk); /* Send the next asconf chunk from the addip chunk * queue. */ if (!list_empty(&asoc->addip_chunk_list)) { struct list_head *entry = asoc->addip_chunk_list.next; struct sctp_chunk *asconf = list_entry(entry, struct sctp_chunk, list); list_del_init(entry); /* Hold the chunk until an ASCONF_ACK is received. */ sctp_chunk_hold(asconf); if (sctp_primitive_ASCONF(net, asoc, asconf)) sctp_chunk_free(asconf); else asoc->addip_last_asconf = asconf; } } /* These three macros allow us to pull the debugging code out of the * main flow of sctp_do_sm() to keep attention focused on the real * functionality there. */ #define debug_pre_sfn() \ pr_debug("%s[pre-fn]: ep:%p, %s, %s, asoc:%p[%s], %s\n", __func__, \ ep, sctp_evttype_tbl[event_type], (*debug_fn)(subtype), \ asoc, sctp_state_tbl[state], state_fn->name) #define debug_post_sfn() \ pr_debug("%s[post-fn]: asoc:%p, status:%s\n", __func__, asoc, \ sctp_status_tbl[status]) #define debug_post_sfx() \ pr_debug("%s[post-sfx]: error:%d, asoc:%p[%s]\n", __func__, error, \ asoc, sctp_state_tbl[(asoc && sctp_id2assoc(ep->base.sk, \ sctp_assoc2id(asoc))) ? asoc->state : SCTP_STATE_CLOSED]) /* * This is the master state machine processing function. * * If you want to understand all of lksctp, this is a * good place to start. */ int sctp_do_sm(struct net *net, sctp_event_t event_type, sctp_subtype_t subtype, sctp_state_t state, struct sctp_endpoint *ep, struct sctp_association *asoc, void *event_arg, gfp_t gfp) { sctp_cmd_seq_t commands; const sctp_sm_table_entry_t *state_fn; sctp_disposition_t status; int error = 0; typedef const char *(printfn_t)(sctp_subtype_t); static printfn_t *table[] = { NULL, sctp_cname, sctp_tname, sctp_oname, sctp_pname, }; printfn_t *debug_fn __attribute__ ((unused)) = table[event_type]; /* Look up the state function, run it, and then process the * side effects. These three steps are the heart of lksctp. */ state_fn = sctp_sm_lookup_event(net, event_type, state, subtype); sctp_init_cmd_seq(&commands); debug_pre_sfn(); status = state_fn->fn(net, ep, asoc, subtype, event_arg, &commands); debug_post_sfn(); error = sctp_side_effects(event_type, subtype, state, ep, asoc, event_arg, status, &commands, gfp); debug_post_sfx(); return error; } /***************************************************************** * This the master state function side effect processing function. *****************************************************************/ static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype, sctp_state_t state, struct sctp_endpoint *ep, struct sctp_association *asoc, void *event_arg, sctp_disposition_t status, sctp_cmd_seq_t *commands, gfp_t gfp) { int error; /* FIXME - Most of the dispositions left today would be categorized * as "exceptional" dispositions. For those dispositions, it * may not be proper to run through any of the commands at all. * For example, the command interpreter might be run only with * disposition SCTP_DISPOSITION_CONSUME. 
*/ if (0 != (error = sctp_cmd_interpreter(event_type, subtype, state, ep, asoc, event_arg, status, commands, gfp))) goto bail; switch (status) { case SCTP_DISPOSITION_DISCARD: pr_debug("%s: ignored sctp protocol event - state:%d, " "event_type:%d, event_id:%d\n", __func__, state, event_type, subtype.chunk); break; case SCTP_DISPOSITION_NOMEM: /* We ran out of memory, so we need to discard this * packet. */ /* BUG--we should now recover some memory, probably by * reneging... */ error = -ENOMEM; break; case SCTP_DISPOSITION_DELETE_TCB: /* This should now be a command. */ break; case SCTP_DISPOSITION_CONSUME: case SCTP_DISPOSITION_ABORT: /* * We should no longer have much work to do here as the * real work has been done as explicit commands above. */ break; case SCTP_DISPOSITION_VIOLATION: net_err_ratelimited("protocol violation state %d chunkid %d\n", state, subtype.chunk); break; case SCTP_DISPOSITION_NOT_IMPL: pr_warn("unimplemented feature in state %d, event_type %d, event_id %d\n", state, event_type, subtype.chunk); break; case SCTP_DISPOSITION_BUG: pr_err("bug in state %d, event_type %d, event_id %d\n", state, event_type, subtype.chunk); BUG(); break; default: pr_err("impossible disposition %d in state %d, event_type %d, event_id %d\n", status, state, event_type, subtype.chunk); BUG(); break; } bail: return error; } /******************************************************************** * 2nd Level Abstractions ********************************************************************/ /* This is the side-effect interpreter. */ static int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype, sctp_state_t state, struct sctp_endpoint *ep, struct sctp_association *asoc, void *event_arg, sctp_disposition_t status, sctp_cmd_seq_t *commands, gfp_t gfp) { int error = 0; int force; sctp_cmd_t *cmd; struct sctp_chunk *new_obj; struct sctp_chunk *chunk = NULL; struct sctp_packet *packet; struct timer_list *timer; unsigned long timeout; struct sctp_transport *t; struct sctp_sackhdr sackh; int local_cork = 0; if (SCTP_EVENT_T_TIMEOUT != event_type) chunk = event_arg; /* Note: This whole file is a huge candidate for rework. * For example, each command could either have its own handler, so * the loop would look like: * while (cmds) * cmd->handle(x, y, z) * --jgrimm */ while (NULL != (cmd = sctp_next_cmd(commands))) { switch (cmd->verb) { case SCTP_CMD_NOP: /* Do nothing. */ break; case SCTP_CMD_NEW_ASOC: /* Register a new association. */ if (local_cork) { sctp_outq_uncork(&asoc->outqueue); local_cork = 0; } /* Register with the endpoint. */ asoc = cmd->obj.asoc; BUG_ON(asoc->peer.primary_path == NULL); sctp_endpoint_add_asoc(ep, asoc); sctp_hash_established(asoc); break; case SCTP_CMD_UPDATE_ASSOC: sctp_assoc_update(asoc, cmd->obj.asoc); break; case SCTP_CMD_PURGE_OUTQUEUE: sctp_outq_teardown(&asoc->outqueue); break; case SCTP_CMD_DELETE_TCB: if (local_cork) { sctp_outq_uncork(&asoc->outqueue); local_cork = 0; } /* Delete the current association. */ sctp_cmd_delete_tcb(commands, asoc); asoc = NULL; break; case SCTP_CMD_NEW_STATE: /* Enter a new state. */ sctp_cmd_new_state(commands, asoc, cmd->obj.state); break; case SCTP_CMD_REPORT_TSN: /* Record the arrival of a TSN. */ error = sctp_tsnmap_mark(&asoc->peer.tsn_map, cmd->obj.u32, NULL); break; case SCTP_CMD_REPORT_FWDTSN: /* Move the Cumulattive TSN Ack ahead. 
*/ sctp_tsnmap_skip(&asoc->peer.tsn_map, cmd->obj.u32); /* purge the fragmentation queue */ sctp_ulpq_reasm_flushtsn(&asoc->ulpq, cmd->obj.u32); /* Abort any in progress partial delivery. */ sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC); break; case SCTP_CMD_PROCESS_FWDTSN: sctp_cmd_process_fwdtsn(&asoc->ulpq, cmd->obj.chunk); break; case SCTP_CMD_GEN_SACK: /* Generate a Selective ACK. * The argument tells us whether to just count * the packet and MAYBE generate a SACK, or * force a SACK out. */ force = cmd->obj.i32; error = sctp_gen_sack(asoc, force, commands); break; case SCTP_CMD_PROCESS_SACK: /* Process an inbound SACK. */ error = sctp_cmd_process_sack(commands, asoc, cmd->obj.chunk); break; case SCTP_CMD_GEN_INIT_ACK: /* Generate an INIT ACK chunk. */ new_obj = sctp_make_init_ack(asoc, chunk, GFP_ATOMIC, 0); if (!new_obj) goto nomem; sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(new_obj)); break; case SCTP_CMD_PEER_INIT: /* Process a unified INIT from the peer. * Note: Only used during INIT-ACK processing. If * there is an error just return to the outter * layer which will bail. */ error = sctp_cmd_process_init(commands, asoc, chunk, cmd->obj.init, gfp); break; case SCTP_CMD_GEN_COOKIE_ECHO: /* Generate a COOKIE ECHO chunk. */ new_obj = sctp_make_cookie_echo(asoc, chunk); if (!new_obj) { if (cmd->obj.chunk) sctp_chunk_free(cmd->obj.chunk); goto nomem; } sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(new_obj)); /* If there is an ERROR chunk to be sent along with * the COOKIE_ECHO, send it, too. */ if (cmd->obj.chunk) sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(cmd->obj.chunk)); if (new_obj->transport) { new_obj->transport->init_sent_count++; asoc->init_last_sent_to = new_obj->transport; } /* FIXME - Eventually come up with a cleaner way to * enabling COOKIE-ECHO + DATA bundling during * multihoming stale cookie scenarios, the following * command plays with asoc->peer.retran_path to * avoid the problem of sending the COOKIE-ECHO and * DATA in different paths, which could result * in the association being ABORTed if the DATA chunk * is processed first by the server. Checking the * init error counter simply causes this command * to be executed only during failed attempts of * association establishment. */ if ((asoc->peer.retran_path != asoc->peer.primary_path) && (asoc->init_err_counter > 0)) { sctp_add_cmd_sf(commands, SCTP_CMD_FORCE_PRIM_RETRAN, SCTP_NULL()); } break; case SCTP_CMD_GEN_SHUTDOWN: /* Generate SHUTDOWN when in SHUTDOWN_SENT state. * Reset error counts. */ asoc->overall_error_count = 0; /* Generate a SHUTDOWN chunk. */ new_obj = sctp_make_shutdown(asoc, chunk); if (!new_obj) goto nomem; sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(new_obj)); break; case SCTP_CMD_CHUNK_ULP: /* Send a chunk to the sockets layer. */ pr_debug("%s: sm_sideff: chunk_up:%p, ulpq:%p\n", __func__, cmd->obj.chunk, &asoc->ulpq); sctp_ulpq_tail_data(&asoc->ulpq, cmd->obj.chunk, GFP_ATOMIC); break; case SCTP_CMD_EVENT_ULP: /* Send a notification to the sockets layer. */ pr_debug("%s: sm_sideff: event_up:%p, ulpq:%p\n", __func__, cmd->obj.ulpevent, &asoc->ulpq); sctp_ulpq_tail_event(&asoc->ulpq, cmd->obj.ulpevent); break; case SCTP_CMD_REPLY: /* If an caller has not already corked, do cork. */ if (!asoc->outqueue.cork) { sctp_outq_cork(&asoc->outqueue); local_cork = 1; } /* Send a chunk to our peer. */ error = sctp_outq_tail(&asoc->outqueue, cmd->obj.chunk); break; case SCTP_CMD_SEND_PKT: /* Send a full packet to our peer. 
*/ packet = cmd->obj.packet; sctp_packet_transmit(packet); sctp_ootb_pkt_free(packet); break; case SCTP_CMD_T1_RETRAN: /* Mark a transport for retransmission. */ sctp_retransmit(&asoc->outqueue, cmd->obj.transport, SCTP_RTXR_T1_RTX); break; case SCTP_CMD_RETRAN: /* Mark a transport for retransmission. */ sctp_retransmit(&asoc->outqueue, cmd->obj.transport, SCTP_RTXR_T3_RTX); break; case SCTP_CMD_ECN_CE: /* Do delayed CE processing. */ sctp_do_ecn_ce_work(asoc, cmd->obj.u32); break; case SCTP_CMD_ECN_ECNE: /* Do delayed ECNE processing. */ new_obj = sctp_do_ecn_ecne_work(asoc, cmd->obj.u32, chunk); if (new_obj) sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(new_obj)); break; case SCTP_CMD_ECN_CWR: /* Do delayed CWR processing. */ sctp_do_ecn_cwr_work(asoc, cmd->obj.u32); break; case SCTP_CMD_SETUP_T2: sctp_cmd_setup_t2(commands, asoc, cmd->obj.chunk); break; case SCTP_CMD_TIMER_START_ONCE: timer = &asoc->timers[cmd->obj.to]; if (timer_pending(timer)) break; /* fall through */ case SCTP_CMD_TIMER_START: timer = &asoc->timers[cmd->obj.to]; timeout = asoc->timeouts[cmd->obj.to]; BUG_ON(!timeout); timer->expires = jiffies + timeout; sctp_association_hold(asoc); add_timer(timer); break; case SCTP_CMD_TIMER_RESTART: timer = &asoc->timers[cmd->obj.to]; timeout = asoc->timeouts[cmd->obj.to]; if (!mod_timer(timer, jiffies + timeout)) sctp_association_hold(asoc); break; case SCTP_CMD_TIMER_STOP: timer = &asoc->timers[cmd->obj.to]; if (del_timer(timer)) sctp_association_put(asoc); break; case SCTP_CMD_INIT_CHOOSE_TRANSPORT: chunk = cmd->obj.chunk; t = sctp_assoc_choose_alter_transport(asoc, asoc->init_last_sent_to); asoc->init_last_sent_to = t; chunk->transport = t; t->init_sent_count++; /* Set the new transport as primary */ sctp_assoc_set_primary(asoc, t); break; case SCTP_CMD_INIT_RESTART: /* Do the needed accounting and updates * associated with restarting an initialization * timer. Only multiply the timeout by two if * all transports have been tried at the current * timeout. */ sctp_cmd_t1_timer_update(asoc, SCTP_EVENT_TIMEOUT_T1_INIT, "INIT"); sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART, SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT)); break; case SCTP_CMD_COOKIEECHO_RESTART: /* Do the needed accounting and updates * associated with restarting an initialization * timer. Only multiply the timeout by two if * all transports have been tried at the current * timeout. */ sctp_cmd_t1_timer_update(asoc, SCTP_EVENT_TIMEOUT_T1_COOKIE, "COOKIE"); /* If we've sent any data bundled with * COOKIE-ECHO we need to resend. */ list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) { sctp_retransmit_mark(&asoc->outqueue, t, SCTP_RTXR_T1_RTX); } sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART, SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE)); break; case SCTP_CMD_INIT_FAILED: sctp_cmd_init_failed(commands, asoc, cmd->obj.err); break; case SCTP_CMD_ASSOC_FAILED: sctp_cmd_assoc_failed(commands, asoc, event_type, subtype, chunk, cmd->obj.err); break; case SCTP_CMD_INIT_COUNTER_INC: asoc->init_err_counter++; break; case SCTP_CMD_INIT_COUNTER_RESET: asoc->init_err_counter = 0; asoc->init_cycle = 0; list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) { t->init_sent_count = 0; } break; case SCTP_CMD_REPORT_DUP: sctp_tsnmap_mark_dup(&asoc->peer.tsn_map, cmd->obj.u32); break; case SCTP_CMD_REPORT_BAD_TAG: pr_debug("%s: vtag mismatch!\n", __func__); break; case SCTP_CMD_STRIKE: /* Mark one strike against a transport. 
*/ sctp_do_8_2_transport_strike(commands, asoc, cmd->obj.transport, 0); break; case SCTP_CMD_TRANSPORT_IDLE: t = cmd->obj.transport; sctp_transport_lower_cwnd(t, SCTP_LOWER_CWND_INACTIVE); break; case SCTP_CMD_TRANSPORT_HB_SENT: t = cmd->obj.transport; sctp_do_8_2_transport_strike(commands, asoc, t, 1); t->hb_sent = 1; break; case SCTP_CMD_TRANSPORT_ON: t = cmd->obj.transport; sctp_cmd_transport_on(commands, asoc, t, chunk); break; case SCTP_CMD_HB_TIMERS_START: sctp_cmd_hb_timers_start(commands, asoc); break; case SCTP_CMD_HB_TIMER_UPDATE: t = cmd->obj.transport; sctp_cmd_hb_timer_update(commands, t); break; case SCTP_CMD_HB_TIMERS_STOP: sctp_cmd_hb_timers_stop(commands, asoc); break; case SCTP_CMD_REPORT_ERROR: error = cmd->obj.error; break; case SCTP_CMD_PROCESS_CTSN: /* Dummy up a SACK for processing. */ sackh.cum_tsn_ack = cmd->obj.be32; sackh.a_rwnd = asoc->peer.rwnd + asoc->outqueue.outstanding_bytes; sackh.num_gap_ack_blocks = 0; sackh.num_dup_tsns = 0; chunk->subh.sack_hdr = &sackh; sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_SACK, SCTP_CHUNK(chunk)); break; case SCTP_CMD_DISCARD_PACKET: /* We need to discard the whole packet. * Uncork the queue since there might be * responses pending */ chunk->pdiscard = 1; if (asoc) { sctp_outq_uncork(&asoc->outqueue); local_cork = 0; } break; case SCTP_CMD_RTO_PENDING: t = cmd->obj.transport; t->rto_pending = 1; break; case SCTP_CMD_PART_DELIVER: sctp_ulpq_partial_delivery(&asoc->ulpq, GFP_ATOMIC); break; case SCTP_CMD_RENEGE: sctp_ulpq_renege(&asoc->ulpq, cmd->obj.chunk, GFP_ATOMIC); break; case SCTP_CMD_SETUP_T4: sctp_cmd_setup_t4(commands, asoc, cmd->obj.chunk); break; case SCTP_CMD_PROCESS_OPERR: sctp_cmd_process_operr(commands, asoc, chunk); break; case SCTP_CMD_CLEAR_INIT_TAG: asoc->peer.i.init_tag = 0; break; case SCTP_CMD_DEL_NON_PRIMARY: sctp_cmd_del_non_primary(asoc); break; case SCTP_CMD_T3_RTX_TIMERS_STOP: sctp_cmd_t3_rtx_timers_stop(commands, asoc); break; case SCTP_CMD_FORCE_PRIM_RETRAN: t = asoc->peer.retran_path; asoc->peer.retran_path = asoc->peer.primary_path; error = sctp_outq_uncork(&asoc->outqueue); local_cork = 0; asoc->peer.retran_path = t; break; case SCTP_CMD_SET_SK_ERR: sctp_cmd_set_sk_err(asoc, cmd->obj.error); break; case SCTP_CMD_ASSOC_CHANGE: sctp_cmd_assoc_change(commands, asoc, cmd->obj.u8); break; case SCTP_CMD_ADAPTATION_IND: sctp_cmd_adaptation_ind(commands, asoc); break; case SCTP_CMD_ASSOC_SHKEY: error = sctp_auth_asoc_init_active_key(asoc, GFP_ATOMIC); break; case SCTP_CMD_UPDATE_INITTAG: asoc->peer.i.init_tag = cmd->obj.u32; break; case SCTP_CMD_SEND_MSG: if (!asoc->outqueue.cork) { sctp_outq_cork(&asoc->outqueue); local_cork = 1; } error = sctp_cmd_send_msg(asoc, cmd->obj.msg); break; case SCTP_CMD_SEND_NEXT_ASCONF: sctp_cmd_send_asconf(asoc); break; case SCTP_CMD_PURGE_ASCONF_QUEUE: sctp_asconf_queue_teardown(asoc); break; case SCTP_CMD_SET_ASOC: asoc = cmd->obj.asoc; break; default: pr_warn("Impossible command: %u\n", cmd->verb); break; } if (error) break; } out: /* If this is in response to a received chunk, wait until * we are done with the packet to open the queue so that we don't * send multiple packets in response to a single request. */ if (asoc && SCTP_EVENT_T_CHUNK == event_type && chunk) { if (chunk->end_of_packet || chunk->singleton) error = sctp_outq_uncork(&asoc->outqueue); } else if (local_cork) error = sctp_outq_uncork(&asoc->outqueue); return error; nomem: error = -ENOMEM; goto out; }
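/*
 * Minimal usage sketch (hypothetical, for illustration only): the
 * interpreter above is driven by state functions that queue side
 * effects on a sctp_cmd_seq_t and return a disposition.  The function
 * name below and its exact prototype are assumptions modeled on the
 * sctp_sf_*() state functions; the helpers it uses (sctp_make_shutdown,
 * sctp_add_cmd_sf, SCTP_CHUNK, SCTP_TO) are the ones referenced in the
 * interpreter itself.
 */
static sctp_disposition_t sctp_sf_example_shutdown(struct net *net,
					const struct sctp_endpoint *ep,
					const struct sctp_association *asoc,
					const sctp_subtype_t type,
					void *arg,
					sctp_cmd_seq_t *commands)
{
	struct sctp_chunk *chunk = arg;		/* triggering chunk */
	struct sctp_chunk *reply;

	/* Build a SHUTDOWN chunk, as the SCTP_CMD_GEN_SHUTDOWN case does. */
	reply = sctp_make_shutdown(asoc, chunk);
	if (!reply)
		return SCTP_DISPOSITION_NOMEM;

	/* Queue it; SCTP_CMD_REPLY above hands it to sctp_outq_tail(). */
	sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));

	/* Re-arm the T2-SHUTDOWN timer via SCTP_CMD_TIMER_RESTART. */
	sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
			SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));

	return SCTP_DISPOSITION_CONSUME;
}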
./CrossVul/dataset_final_sorted/CWE-362/c/bad_1803_0
crossvul-cpp_data_good_3275_0
/* * fs/dcache.c * * Complete reimplementation * (C) 1997 Thomas Schoebel-Theuer, * with heavy changes by Linus Torvalds */ /* * Notes on the allocation strategy: * * The dcache is a master of the icache - whenever a dcache entry * exists, the inode will always exist. "iput()" is done either when * the dcache entry is deleted or garbage collected. */ #include <linux/syscalls.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/fs.h> #include <linux/fsnotify.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/hash.h> #include <linux/cache.h> #include <linux/export.h> #include <linux/mount.h> #include <linux/file.h> #include <linux/uaccess.h> #include <linux/security.h> #include <linux/seqlock.h> #include <linux/swap.h> #include <linux/bootmem.h> #include <linux/fs_struct.h> #include <linux/hardirq.h> #include <linux/bit_spinlock.h> #include <linux/rculist_bl.h> #include <linux/prefetch.h> #include <linux/ratelimit.h> #include <linux/list_lru.h> #include <linux/kasan.h> #include "internal.h" #include "mount.h" /* * Usage: * dcache->d_inode->i_lock protects: * - i_dentry, d_u.d_alias, d_inode of aliases * dcache_hash_bucket lock protects: * - the dcache hash table * s_anon bl list spinlock protects: * - the s_anon list (see __d_drop) * dentry->d_sb->s_dentry_lru_lock protects: * - the dcache lru lists and counters * d_lock protects: * - d_flags * - d_name * - d_lru * - d_count * - d_unhashed() * - d_parent and d_subdirs * - childrens' d_child and d_parent * - d_u.d_alias, d_inode * * Ordering: * dentry->d_inode->i_lock * dentry->d_lock * dentry->d_sb->s_dentry_lru_lock * dcache_hash_bucket lock * s_anon lock * * If there is an ancestor relationship: * dentry->d_parent->...->d_parent->d_lock * ... * dentry->d_parent->d_lock * dentry->d_lock * * If no ancestor relationship: * if (dentry1 < dentry2) * dentry1->d_lock * dentry2->d_lock */ int sysctl_vfs_cache_pressure __read_mostly = 100; EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure); __cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock); EXPORT_SYMBOL(rename_lock); static struct kmem_cache *dentry_cache __read_mostly; /* * This is the single most critical data structure when it comes * to the dcache: the hashtable for lookups. Somebody should try * to make this good - I've just made it work. * * This hash-function tries to avoid losing too many bits of hash * information, yet avoid using a prime hash-size or similar. */ static unsigned int d_hash_mask __read_mostly; static unsigned int d_hash_shift __read_mostly; static struct hlist_bl_head *dentry_hashtable __read_mostly; static inline struct hlist_bl_head *d_hash(unsigned int hash) { return dentry_hashtable + (hash >> (32 - d_hash_shift)); } #define IN_LOOKUP_SHIFT 10 static struct hlist_bl_head in_lookup_hashtable[1 << IN_LOOKUP_SHIFT]; static inline struct hlist_bl_head *in_lookup_hash(const struct dentry *parent, unsigned int hash) { hash += (unsigned long) parent / L1_CACHE_BYTES; return in_lookup_hashtable + hash_32(hash, IN_LOOKUP_SHIFT); } /* Statistics gathering. */ struct dentry_stat_t dentry_stat = { .age_limit = 45, }; static DEFINE_PER_CPU(long, nr_dentry); static DEFINE_PER_CPU(long, nr_dentry_unused); #if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS) /* * Here we resort to our own counters instead of using generic per-cpu counters * for consistency with what the vfs inode code does. We are expected to harvest * better code and performance by having our own specialized counters. 
* * Please note that the loop is done over all possible CPUs, not over all online * CPUs. The reason for this is that we don't want to play games with CPUs going * on and off. If one of them goes off, we will just keep their counters. * * glommer: See cffbc8a for details, and if you ever intend to change this, * please update all vfs counters to match. */ static long get_nr_dentry(void) { int i; long sum = 0; for_each_possible_cpu(i) sum += per_cpu(nr_dentry, i); return sum < 0 ? 0 : sum; } static long get_nr_dentry_unused(void) { int i; long sum = 0; for_each_possible_cpu(i) sum += per_cpu(nr_dentry_unused, i); return sum < 0 ? 0 : sum; } int proc_nr_dentry(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { dentry_stat.nr_dentry = get_nr_dentry(); dentry_stat.nr_unused = get_nr_dentry_unused(); return proc_doulongvec_minmax(table, write, buffer, lenp, ppos); } #endif /* * Compare 2 name strings, return 0 if they match, otherwise non-zero. * The strings are both count bytes long, and count is non-zero. */ #ifdef CONFIG_DCACHE_WORD_ACCESS #include <asm/word-at-a-time.h> /* * NOTE! 'cs' and 'scount' come from a dentry, so it has a * aligned allocation for this particular component. We don't * strictly need the load_unaligned_zeropad() safety, but it * doesn't hurt either. * * In contrast, 'ct' and 'tcount' can be from a pathname, and do * need the careful unaligned handling. */ static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount) { unsigned long a,b,mask; for (;;) { a = *(unsigned long *)cs; b = load_unaligned_zeropad(ct); if (tcount < sizeof(unsigned long)) break; if (unlikely(a != b)) return 1; cs += sizeof(unsigned long); ct += sizeof(unsigned long); tcount -= sizeof(unsigned long); if (!tcount) return 0; } mask = bytemask_from_count(tcount); return unlikely(!!((a ^ b) & mask)); } #else static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount) { do { if (*cs != *ct) return 1; cs++; ct++; tcount--; } while (tcount); return 0; } #endif static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *ct, unsigned tcount) { /* * Be careful about RCU walk racing with rename: * use 'lockless_dereference' to fetch the name pointer. * * NOTE! Even if a rename will mean that the length * was not loaded atomically, we don't care. The * RCU walk will check the sequence count eventually, * and catch it. And we won't overrun the buffer, * because we're reading the name pointer atomically, * and a dentry name is guaranteed to be properly * terminated with a NUL byte. 
* * End result: even if 'len' is wrong, we'll exit * early because the data cannot match (there can * be no NUL in the ct/tcount data) */ const unsigned char *cs = lockless_dereference(dentry->d_name.name); return dentry_string_cmp(cs, ct, tcount); } struct external_name { union { atomic_t count; struct rcu_head head; } u; unsigned char name[]; }; static inline struct external_name *external_name(struct dentry *dentry) { return container_of(dentry->d_name.name, struct external_name, name[0]); } static void __d_free(struct rcu_head *head) { struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu); kmem_cache_free(dentry_cache, dentry); } static void __d_free_external(struct rcu_head *head) { struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu); kfree(external_name(dentry)); kmem_cache_free(dentry_cache, dentry); } static inline int dname_external(const struct dentry *dentry) { return dentry->d_name.name != dentry->d_iname; } void take_dentry_name_snapshot(struct name_snapshot *name, struct dentry *dentry) { spin_lock(&dentry->d_lock); if (unlikely(dname_external(dentry))) { struct external_name *p = external_name(dentry); atomic_inc(&p->u.count); spin_unlock(&dentry->d_lock); name->name = p->name; } else { memcpy(name->inline_name, dentry->d_iname, DNAME_INLINE_LEN); spin_unlock(&dentry->d_lock); name->name = name->inline_name; } } EXPORT_SYMBOL(take_dentry_name_snapshot); void release_dentry_name_snapshot(struct name_snapshot *name) { if (unlikely(name->name != name->inline_name)) { struct external_name *p; p = container_of(name->name, struct external_name, name[0]); if (unlikely(atomic_dec_and_test(&p->u.count))) kfree_rcu(p, u.head); } } EXPORT_SYMBOL(release_dentry_name_snapshot); static inline void __d_set_inode_and_type(struct dentry *dentry, struct inode *inode, unsigned type_flags) { unsigned flags; dentry->d_inode = inode; flags = READ_ONCE(dentry->d_flags); flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU); flags |= type_flags; WRITE_ONCE(dentry->d_flags, flags); } static inline void __d_clear_type_and_inode(struct dentry *dentry) { unsigned flags = READ_ONCE(dentry->d_flags); flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU); WRITE_ONCE(dentry->d_flags, flags); dentry->d_inode = NULL; } static void dentry_free(struct dentry *dentry) { WARN_ON(!hlist_unhashed(&dentry->d_u.d_alias)); if (unlikely(dname_external(dentry))) { struct external_name *p = external_name(dentry); if (likely(atomic_dec_and_test(&p->u.count))) { call_rcu(&dentry->d_u.d_rcu, __d_free_external); return; } } /* if dentry was never visible to RCU, immediate free is OK */ if (!(dentry->d_flags & DCACHE_RCUACCESS)) __d_free(&dentry->d_u.d_rcu); else call_rcu(&dentry->d_u.d_rcu, __d_free); } /* * Release the dentry's inode, using the filesystem * d_iput() operation if defined. 
*/ static void dentry_unlink_inode(struct dentry * dentry) __releases(dentry->d_lock) __releases(dentry->d_inode->i_lock) { struct inode *inode = dentry->d_inode; bool hashed = !d_unhashed(dentry); if (hashed) raw_write_seqcount_begin(&dentry->d_seq); __d_clear_type_and_inode(dentry); hlist_del_init(&dentry->d_u.d_alias); if (hashed) raw_write_seqcount_end(&dentry->d_seq); spin_unlock(&dentry->d_lock); spin_unlock(&inode->i_lock); if (!inode->i_nlink) fsnotify_inoderemove(inode); if (dentry->d_op && dentry->d_op->d_iput) dentry->d_op->d_iput(dentry, inode); else iput(inode); } /* * The DCACHE_LRU_LIST bit is set whenever the 'd_lru' entry * is in use - which includes both the "real" per-superblock * LRU list _and_ the DCACHE_SHRINK_LIST use. * * The DCACHE_SHRINK_LIST bit is set whenever the dentry is * on the shrink list (ie not on the superblock LRU list). * * The per-cpu "nr_dentry_unused" counters are updated with * the DCACHE_LRU_LIST bit. * * These helper functions make sure we always follow the * rules. d_lock must be held by the caller. */ #define D_FLAG_VERIFY(dentry,x) WARN_ON_ONCE(((dentry)->d_flags & (DCACHE_LRU_LIST | DCACHE_SHRINK_LIST)) != (x)) static void d_lru_add(struct dentry *dentry) { D_FLAG_VERIFY(dentry, 0); dentry->d_flags |= DCACHE_LRU_LIST; this_cpu_inc(nr_dentry_unused); WARN_ON_ONCE(!list_lru_add(&dentry->d_sb->s_dentry_lru, &dentry->d_lru)); } static void d_lru_del(struct dentry *dentry) { D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST); dentry->d_flags &= ~DCACHE_LRU_LIST; this_cpu_dec(nr_dentry_unused); WARN_ON_ONCE(!list_lru_del(&dentry->d_sb->s_dentry_lru, &dentry->d_lru)); } static void d_shrink_del(struct dentry *dentry) { D_FLAG_VERIFY(dentry, DCACHE_SHRINK_LIST | DCACHE_LRU_LIST); list_del_init(&dentry->d_lru); dentry->d_flags &= ~(DCACHE_SHRINK_LIST | DCACHE_LRU_LIST); this_cpu_dec(nr_dentry_unused); } static void d_shrink_add(struct dentry *dentry, struct list_head *list) { D_FLAG_VERIFY(dentry, 0); list_add(&dentry->d_lru, list); dentry->d_flags |= DCACHE_SHRINK_LIST | DCACHE_LRU_LIST; this_cpu_inc(nr_dentry_unused); } /* * These can only be called under the global LRU lock, ie during the * callback for freeing the LRU list. "isolate" removes it from the * LRU lists entirely, while shrink_move moves it to the indicated * private list. */ static void d_lru_isolate(struct list_lru_one *lru, struct dentry *dentry) { D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST); dentry->d_flags &= ~DCACHE_LRU_LIST; this_cpu_dec(nr_dentry_unused); list_lru_isolate(lru, &dentry->d_lru); } static void d_lru_shrink_move(struct list_lru_one *lru, struct dentry *dentry, struct list_head *list) { D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST); dentry->d_flags |= DCACHE_SHRINK_LIST; list_lru_isolate_move(lru, &dentry->d_lru, list); } /* * dentry_lru_(add|del)_list) must be called with d_lock held. */ static void dentry_lru_add(struct dentry *dentry) { if (unlikely(!(dentry->d_flags & DCACHE_LRU_LIST))) d_lru_add(dentry); else if (unlikely(!(dentry->d_flags & DCACHE_REFERENCED))) dentry->d_flags |= DCACHE_REFERENCED; } /** * d_drop - drop a dentry * @dentry: dentry to drop * * d_drop() unhashes the entry from the parent dentry hashes, so that it won't * be found through a VFS lookup any more. Note that this is different from * deleting the dentry - d_delete will try to mark the dentry negative if * possible, giving a successful _negative_ lookup, while d_drop will * just make the cache lookup fail. 
* * d_drop() is used mainly for stuff that wants to invalidate a dentry for some * reason (NFS timeouts or autofs deletes). * * __d_drop requires dentry->d_lock. */ void __d_drop(struct dentry *dentry) { if (!d_unhashed(dentry)) { struct hlist_bl_head *b; /* * Hashed dentries are normally on the dentry hashtable, * with the exception of those newly allocated by * d_obtain_alias, which are always IS_ROOT: */ if (unlikely(IS_ROOT(dentry))) b = &dentry->d_sb->s_anon; else b = d_hash(dentry->d_name.hash); hlist_bl_lock(b); __hlist_bl_del(&dentry->d_hash); dentry->d_hash.pprev = NULL; hlist_bl_unlock(b); /* After this call, in-progress rcu-walk path lookup will fail. */ write_seqcount_invalidate(&dentry->d_seq); } } EXPORT_SYMBOL(__d_drop); void d_drop(struct dentry *dentry) { spin_lock(&dentry->d_lock); __d_drop(dentry); spin_unlock(&dentry->d_lock); } EXPORT_SYMBOL(d_drop); static inline void dentry_unlist(struct dentry *dentry, struct dentry *parent) { struct dentry *next; /* * Inform d_walk() and shrink_dentry_list() that we are no longer * attached to the dentry tree */ dentry->d_flags |= DCACHE_DENTRY_KILLED; if (unlikely(list_empty(&dentry->d_child))) return; __list_del_entry(&dentry->d_child); /* * Cursors can move around the list of children. While we'd been * a normal list member, it didn't matter - ->d_child.next would've * been updated. However, from now on it won't be and for the * things like d_walk() it might end up with a nasty surprise. * Normally d_walk() doesn't care about cursors moving around - * ->d_lock on parent prevents that and since a cursor has no children * of its own, we get through it without ever unlocking the parent. * There is one exception, though - if we ascend from a child that * gets killed as soon as we unlock it, the next sibling is found * using the value left in its ->d_child.next. And if _that_ * pointed to a cursor, and cursor got moved (e.g. by lseek()) * before d_walk() regains parent->d_lock, we'll end up skipping * everything the cursor had been moved past. * * Solution: make sure that the pointer left behind in ->d_child.next * points to something that won't be moving around. I.e. skip the * cursors. */ while (dentry->d_child.next != &parent->d_subdirs) { next = list_entry(dentry->d_child.next, struct dentry, d_child); if (likely(!(next->d_flags & DCACHE_DENTRY_CURSOR))) break; dentry->d_child.next = next->d_child.next; } } static void __dentry_kill(struct dentry *dentry) { struct dentry *parent = NULL; bool can_free = true; if (!IS_ROOT(dentry)) parent = dentry->d_parent; /* * The dentry is now unrecoverably dead to the world. */ lockref_mark_dead(&dentry->d_lockref); /* * inform the fs via d_prune that this dentry is about to be * unhashed and destroyed. */ if (dentry->d_flags & DCACHE_OP_PRUNE) dentry->d_op->d_prune(dentry); if (dentry->d_flags & DCACHE_LRU_LIST) { if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) d_lru_del(dentry); } /* if it was on the hash then remove it */ __d_drop(dentry); dentry_unlist(dentry, parent); if (parent) spin_unlock(&parent->d_lock); if (dentry->d_inode) dentry_unlink_inode(dentry); else spin_unlock(&dentry->d_lock); this_cpu_dec(nr_dentry); if (dentry->d_op && dentry->d_op->d_release) dentry->d_op->d_release(dentry); spin_lock(&dentry->d_lock); if (dentry->d_flags & DCACHE_SHRINK_LIST) { dentry->d_flags |= DCACHE_MAY_FREE; can_free = false; } spin_unlock(&dentry->d_lock); if (likely(can_free)) dentry_free(dentry); } /* * Finish off a dentry we've decided to kill. 
 * dentry->d_lock must be held, returns with it unlocked.
 * If ref is non-zero, then decrement the refcount too.
 * Returns dentry requiring refcount drop, or NULL if we're done.
 */
static struct dentry *dentry_kill(struct dentry *dentry)
	__releases(dentry->d_lock)
{
	struct inode *inode = dentry->d_inode;
	struct dentry *parent = NULL;

	if (inode && unlikely(!spin_trylock(&inode->i_lock)))
		goto failed;

	if (!IS_ROOT(dentry)) {
		parent = dentry->d_parent;
		if (unlikely(!spin_trylock(&parent->d_lock))) {
			if (inode)
				spin_unlock(&inode->i_lock);
			goto failed;
		}
	}

	__dentry_kill(dentry);
	return parent;

failed:
	spin_unlock(&dentry->d_lock);
	return dentry; /* try again with same dentry */
}

static inline struct dentry *lock_parent(struct dentry *dentry)
{
	struct dentry *parent = dentry->d_parent;
	if (IS_ROOT(dentry))
		return NULL;
	if (unlikely(dentry->d_lockref.count < 0))
		return NULL;
	if (likely(spin_trylock(&parent->d_lock)))
		return parent;
	rcu_read_lock();
	spin_unlock(&dentry->d_lock);
again:
	parent = ACCESS_ONCE(dentry->d_parent);
	spin_lock(&parent->d_lock);
	/*
	 * We can't blindly lock dentry until we are sure
	 * that we won't violate the locking order.
	 * Any changes of dentry->d_parent must have
	 * been done with parent->d_lock held, so
	 * spin_lock() above is enough of a barrier
	 * for checking if it's still our child.
	 */
	if (unlikely(parent != dentry->d_parent)) {
		spin_unlock(&parent->d_lock);
		goto again;
	}
	rcu_read_unlock();
	if (parent != dentry)
		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
	else
		parent = NULL;
	return parent;
}

/*
 * Try to do a lockless dput(), and return whether that was successful.
 *
 * If unsuccessful, we return false, having already taken the dentry lock.
 *
 * The caller needs to hold the RCU read lock, so that the dentry is
 * guaranteed to stay around even if the refcount goes down to zero!
 */
static inline bool fast_dput(struct dentry *dentry)
{
	int ret;
	unsigned int d_flags;

	/*
	 * If we have a d_op->d_delete() operation, we should not
	 * let the dentry count go to zero, so use "put_or_lock".
	 */
	if (unlikely(dentry->d_flags & DCACHE_OP_DELETE))
		return lockref_put_or_lock(&dentry->d_lockref);

	/*
	 * .. otherwise, we can try to just decrement the
	 * lockref optimistically.
	 */
	ret = lockref_put_return(&dentry->d_lockref);

	/*
	 * If the lockref_put_return() failed due to the lock being held
	 * by somebody else, the fast path has failed. We will need to
	 * get the lock, and then check the count again.
	 */
	if (unlikely(ret < 0)) {
		spin_lock(&dentry->d_lock);
		if (dentry->d_lockref.count > 1) {
			dentry->d_lockref.count--;
			spin_unlock(&dentry->d_lock);
			return 1;
		}
		return 0;
	}

	/*
	 * If we weren't the last ref, we're done.
	 */
	if (ret)
		return 1;

	/*
	 * Careful, careful. The reference count went down
	 * to zero, but we don't hold the dentry lock, so
	 * somebody else could get it again, and do another
	 * dput(), and we need to not race with that.
	 *
	 * However, there is a very special and common case
	 * where we don't care, because there is nothing to
	 * do: the dentry is still hashed, it does not have
	 * a 'delete' op, and it's referenced and already on
	 * the LRU list.
	 *
	 * NOTE! Since we aren't locked, these values are
	 * not "stable". However, it is sufficient that at
	 * some point after we dropped the reference the
	 * dentry was hashed and the flags had the proper
	 * value. Other dentry users may have re-gotten
	 * a reference to the dentry and change that, but
	 * our work is done - we can leave the dentry
	 * around with a zero refcount.
*/ smp_rmb(); d_flags = ACCESS_ONCE(dentry->d_flags); d_flags &= DCACHE_REFERENCED | DCACHE_LRU_LIST | DCACHE_DISCONNECTED; /* Nothing to do? Dropping the reference was all we needed? */ if (d_flags == (DCACHE_REFERENCED | DCACHE_LRU_LIST) && !d_unhashed(dentry)) return 1; /* * Not the fast normal case? Get the lock. We've already decremented * the refcount, but we'll need to re-check the situation after * getting the lock. */ spin_lock(&dentry->d_lock); /* * Did somebody else grab a reference to it in the meantime, and * we're no longer the last user after all? Alternatively, somebody * else could have killed it and marked it dead. Either way, we * don't need to do anything else. */ if (dentry->d_lockref.count) { spin_unlock(&dentry->d_lock); return 1; } /* * Re-get the reference we optimistically dropped. We hold the * lock, and we just tested that it was zero, so we can just * set it to 1. */ dentry->d_lockref.count = 1; return 0; } /* * This is dput * * This is complicated by the fact that we do not want to put * dentries that are no longer on any hash chain on the unused * list: we'd much rather just get rid of them immediately. * * However, that implies that we have to traverse the dentry * tree upwards to the parents which might _also_ now be * scheduled for deletion (it may have been only waiting for * its last child to go away). * * This tail recursion is done by hand as we don't want to depend * on the compiler to always get this right (gcc generally doesn't). * Real recursion would eat up our stack space. */ /* * dput - release a dentry * @dentry: dentry to release * * Release a dentry. This will drop the usage count and if appropriate * call the dentry unlink method as well as removing it from the queues and * releasing its resources. If the parent dentries were scheduled for release * they too may now get deleted. */ void dput(struct dentry *dentry) { if (unlikely(!dentry)) return; repeat: might_sleep(); rcu_read_lock(); if (likely(fast_dput(dentry))) { rcu_read_unlock(); return; } /* Slow case: now with the dentry lock held */ rcu_read_unlock(); WARN_ON(d_in_lookup(dentry)); /* Unreachable? Get rid of it */ if (unlikely(d_unhashed(dentry))) goto kill_it; if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED)) goto kill_it; if (unlikely(dentry->d_flags & DCACHE_OP_DELETE)) { if (dentry->d_op->d_delete(dentry)) goto kill_it; } dentry_lru_add(dentry); dentry->d_lockref.count--; spin_unlock(&dentry->d_lock); return; kill_it: dentry = dentry_kill(dentry); if (dentry) { cond_resched(); goto repeat; } } EXPORT_SYMBOL(dput); /* This must be called with d_lock held */ static inline void __dget_dlock(struct dentry *dentry) { dentry->d_lockref.count++; } static inline void __dget(struct dentry *dentry) { lockref_get(&dentry->d_lockref); } struct dentry *dget_parent(struct dentry *dentry) { int gotref; struct dentry *ret; /* * Do optimistic parent lookup without any * locking. */ rcu_read_lock(); ret = ACCESS_ONCE(dentry->d_parent); gotref = lockref_get_not_zero(&ret->d_lockref); rcu_read_unlock(); if (likely(gotref)) { if (likely(ret == ACCESS_ONCE(dentry->d_parent))) return ret; dput(ret); } repeat: /* * Don't need rcu_dereference because we re-check it was correct under * the lock. 
*/ rcu_read_lock(); ret = dentry->d_parent; spin_lock(&ret->d_lock); if (unlikely(ret != dentry->d_parent)) { spin_unlock(&ret->d_lock); rcu_read_unlock(); goto repeat; } rcu_read_unlock(); BUG_ON(!ret->d_lockref.count); ret->d_lockref.count++; spin_unlock(&ret->d_lock); return ret; } EXPORT_SYMBOL(dget_parent); /** * d_find_alias - grab a hashed alias of inode * @inode: inode in question * * If inode has a hashed alias, or is a directory and has any alias, * acquire the reference to alias and return it. Otherwise return NULL. * Notice that if inode is a directory there can be only one alias and * it can be unhashed only if it has no children, or if it is the root * of a filesystem, or if the directory was renamed and d_revalidate * was the first vfs operation to notice. * * If the inode has an IS_ROOT, DCACHE_DISCONNECTED alias, then prefer * any other hashed alias over that one. */ static struct dentry *__d_find_alias(struct inode *inode) { struct dentry *alias, *discon_alias; again: discon_alias = NULL; hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) { spin_lock(&alias->d_lock); if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) { if (IS_ROOT(alias) && (alias->d_flags & DCACHE_DISCONNECTED)) { discon_alias = alias; } else { __dget_dlock(alias); spin_unlock(&alias->d_lock); return alias; } } spin_unlock(&alias->d_lock); } if (discon_alias) { alias = discon_alias; spin_lock(&alias->d_lock); if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) { __dget_dlock(alias); spin_unlock(&alias->d_lock); return alias; } spin_unlock(&alias->d_lock); goto again; } return NULL; } struct dentry *d_find_alias(struct inode *inode) { struct dentry *de = NULL; if (!hlist_empty(&inode->i_dentry)) { spin_lock(&inode->i_lock); de = __d_find_alias(inode); spin_unlock(&inode->i_lock); } return de; } EXPORT_SYMBOL(d_find_alias); /* * Try to kill dentries associated with this inode. * WARNING: you must own a reference to inode. */ void d_prune_aliases(struct inode *inode) { struct dentry *dentry; restart: spin_lock(&inode->i_lock); hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) { spin_lock(&dentry->d_lock); if (!dentry->d_lockref.count) { struct dentry *parent = lock_parent(dentry); if (likely(!dentry->d_lockref.count)) { __dentry_kill(dentry); dput(parent); goto restart; } if (parent) spin_unlock(&parent->d_lock); } spin_unlock(&dentry->d_lock); } spin_unlock(&inode->i_lock); } EXPORT_SYMBOL(d_prune_aliases); static void shrink_dentry_list(struct list_head *list) { struct dentry *dentry, *parent; while (!list_empty(list)) { struct inode *inode; dentry = list_entry(list->prev, struct dentry, d_lru); spin_lock(&dentry->d_lock); parent = lock_parent(dentry); /* * The dispose list is isolated and dentries are not accounted * to the LRU here, so we can simply remove it from the list * here regardless of whether it is referenced or not. */ d_shrink_del(dentry); /* * We found an inuse dentry which was not removed from * the LRU because of laziness during lookup. Do not free it. 
*/ if (dentry->d_lockref.count > 0) { spin_unlock(&dentry->d_lock); if (parent) spin_unlock(&parent->d_lock); continue; } if (unlikely(dentry->d_flags & DCACHE_DENTRY_KILLED)) { bool can_free = dentry->d_flags & DCACHE_MAY_FREE; spin_unlock(&dentry->d_lock); if (parent) spin_unlock(&parent->d_lock); if (can_free) dentry_free(dentry); continue; } inode = dentry->d_inode; if (inode && unlikely(!spin_trylock(&inode->i_lock))) { d_shrink_add(dentry, list); spin_unlock(&dentry->d_lock); if (parent) spin_unlock(&parent->d_lock); continue; } __dentry_kill(dentry); /* * We need to prune ancestors too. This is necessary to prevent * quadratic behavior of shrink_dcache_parent(), but is also * expected to be beneficial in reducing dentry cache * fragmentation. */ dentry = parent; while (dentry && !lockref_put_or_lock(&dentry->d_lockref)) { parent = lock_parent(dentry); if (dentry->d_lockref.count != 1) { dentry->d_lockref.count--; spin_unlock(&dentry->d_lock); if (parent) spin_unlock(&parent->d_lock); break; } inode = dentry->d_inode; /* can't be NULL */ if (unlikely(!spin_trylock(&inode->i_lock))) { spin_unlock(&dentry->d_lock); if (parent) spin_unlock(&parent->d_lock); cpu_relax(); continue; } __dentry_kill(dentry); dentry = parent; } } } static enum lru_status dentry_lru_isolate(struct list_head *item, struct list_lru_one *lru, spinlock_t *lru_lock, void *arg) { struct list_head *freeable = arg; struct dentry *dentry = container_of(item, struct dentry, d_lru); /* * we are inverting the lru lock/dentry->d_lock here, * so use a trylock. If we fail to get the lock, just skip * it */ if (!spin_trylock(&dentry->d_lock)) return LRU_SKIP; /* * Referenced dentries are still in use. If they have active * counts, just remove them from the LRU. Otherwise give them * another pass through the LRU. */ if (dentry->d_lockref.count) { d_lru_isolate(lru, dentry); spin_unlock(&dentry->d_lock); return LRU_REMOVED; } if (dentry->d_flags & DCACHE_REFERENCED) { dentry->d_flags &= ~DCACHE_REFERENCED; spin_unlock(&dentry->d_lock); /* * The list move itself will be made by the common LRU code. At * this point, we've dropped the dentry->d_lock but keep the * lru lock. This is safe to do, since every list movement is * protected by the lru lock even if both locks are held. * * This is guaranteed by the fact that all LRU management * functions are intermediated by the LRU API calls like * list_lru_add and list_lru_del. List movement in this file * only ever occur through this functions or through callbacks * like this one, that are called from the LRU API. * * The only exceptions to this are functions like * shrink_dentry_list, and code that first checks for the * DCACHE_SHRINK_LIST flag. Those are guaranteed to be * operating only with stack provided lists after they are * properly isolated from the main list. It is thus, always a * local access. */ return LRU_ROTATE; } d_lru_shrink_move(lru, dentry, freeable); spin_unlock(&dentry->d_lock); return LRU_REMOVED; } /** * prune_dcache_sb - shrink the dcache * @sb: superblock * @sc: shrink control, passed to list_lru_shrink_walk() * * Attempt to shrink the superblock dcache LRU by @sc->nr_to_scan entries. This * is done when we need more memory and called from the superblock shrinker * function. * * This function may fail to free any resources if all the dentries are in * use. 
 */
long prune_dcache_sb(struct super_block *sb, struct shrink_control *sc)
{
	LIST_HEAD(dispose);
	long freed;

	freed = list_lru_shrink_walk(&sb->s_dentry_lru, sc,
				     dentry_lru_isolate, &dispose);
	shrink_dentry_list(&dispose);
	return freed;
}

static enum lru_status dentry_lru_isolate_shrink(struct list_head *item,
		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
	struct list_head *freeable = arg;
	struct dentry	*dentry = container_of(item, struct dentry, d_lru);

	/*
	 * we are inverting the lru lock/dentry->d_lock here,
	 * so use a trylock. If we fail to get the lock, just skip
	 * it
	 */
	if (!spin_trylock(&dentry->d_lock))
		return LRU_SKIP;

	d_lru_shrink_move(lru, dentry, freeable);
	spin_unlock(&dentry->d_lock);

	return LRU_REMOVED;
}

/**
 * shrink_dcache_sb - shrink dcache for a superblock
 * @sb: superblock
 *
 * Shrink the dcache for the specified super block. This is used to free
 * the dcache before unmounting a file system.
 */
void shrink_dcache_sb(struct super_block *sb)
{
	long freed;

	do {
		LIST_HEAD(dispose);

		freed = list_lru_walk(&sb->s_dentry_lru,
			dentry_lru_isolate_shrink, &dispose, UINT_MAX);

		this_cpu_sub(nr_dentry_unused, freed);
		shrink_dentry_list(&dispose);
	} while (freed > 0);
}
EXPORT_SYMBOL(shrink_dcache_sb);

/**
 * enum d_walk_ret - action to take during tree walk
 * @D_WALK_CONTINUE:	continue walk
 * @D_WALK_QUIT:	quit walk
 * @D_WALK_NORETRY:	quit when retry is needed
 * @D_WALK_SKIP:	skip this dentry and its children
 */
enum d_walk_ret {
	D_WALK_CONTINUE,
	D_WALK_QUIT,
	D_WALK_NORETRY,
	D_WALK_SKIP,
};

/**
 * d_walk - walk the dentry tree
 * @parent:	start of walk
 * @data:	data passed to @enter() and @finish()
 * @enter:	callback when first entering the dentry
 * @finish:	callback when successfully finished the walk
 *
 * The @enter() and @finish() callbacks are called with d_lock held.
 */
static void d_walk(struct dentry *parent, void *data,
		   enum d_walk_ret (*enter)(void *, struct dentry *),
		   void (*finish)(void *))
{
	struct dentry *this_parent;
	struct list_head *next;
	unsigned seq = 0;
	enum d_walk_ret ret;
	bool retry = true;

again:
	read_seqbegin_or_lock(&rename_lock, &seq);
	this_parent = parent;
	spin_lock(&this_parent->d_lock);

	ret = enter(data, this_parent);
	switch (ret) {
	case D_WALK_CONTINUE:
		break;
	case D_WALK_QUIT:
	case D_WALK_SKIP:
		goto out_unlock;
	case D_WALK_NORETRY:
		retry = false;
		break;
	}
repeat:
	next = this_parent->d_subdirs.next;
resume:
	while (next != &this_parent->d_subdirs) {
		struct list_head *tmp = next;
		struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
		next = tmp->next;

		if (unlikely(dentry->d_flags & DCACHE_DENTRY_CURSOR))
			continue;

		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);

		ret = enter(data, dentry);
		switch (ret) {
		case D_WALK_CONTINUE:
			break;
		case D_WALK_QUIT:
			spin_unlock(&dentry->d_lock);
			goto out_unlock;
		case D_WALK_NORETRY:
			retry = false;
			break;
		case D_WALK_SKIP:
			spin_unlock(&dentry->d_lock);
			continue;
		}

		if (!list_empty(&dentry->d_subdirs)) {
			spin_unlock(&this_parent->d_lock);
			spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
			this_parent = dentry;
			spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
			goto repeat;
		}
		spin_unlock(&dentry->d_lock);
	}
	/*
	 * All done at this level ... ascend and resume the search.
	 */
	rcu_read_lock();
ascend:
	if (this_parent != parent) {
		struct dentry *child = this_parent;
		this_parent = child->d_parent;

		spin_unlock(&child->d_lock);
		spin_lock(&this_parent->d_lock);

		/* might go back up the wrong parent if we have had a rename.
*/ if (need_seqretry(&rename_lock, seq)) goto rename_retry; /* go into the first sibling still alive */ do { next = child->d_child.next; if (next == &this_parent->d_subdirs) goto ascend; child = list_entry(next, struct dentry, d_child); } while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED)); rcu_read_unlock(); goto resume; } if (need_seqretry(&rename_lock, seq)) goto rename_retry; rcu_read_unlock(); if (finish) finish(data); out_unlock: spin_unlock(&this_parent->d_lock); done_seqretry(&rename_lock, seq); return; rename_retry: spin_unlock(&this_parent->d_lock); rcu_read_unlock(); BUG_ON(seq & 1); if (!retry) return; seq = 1; goto again; } struct check_mount { struct vfsmount *mnt; unsigned int mounted; }; static enum d_walk_ret path_check_mount(void *data, struct dentry *dentry) { struct check_mount *info = data; struct path path = { .mnt = info->mnt, .dentry = dentry }; if (likely(!d_mountpoint(dentry))) return D_WALK_CONTINUE; if (__path_is_mountpoint(&path)) { info->mounted = 1; return D_WALK_QUIT; } return D_WALK_CONTINUE; } /** * path_has_submounts - check for mounts over a dentry in the * current namespace. * @parent: path to check. * * Return true if the parent or its subdirectories contain * a mount point in the current namespace. */ int path_has_submounts(const struct path *parent) { struct check_mount data = { .mnt = parent->mnt, .mounted = 0 }; read_seqlock_excl(&mount_lock); d_walk(parent->dentry, &data, path_check_mount, NULL); read_sequnlock_excl(&mount_lock); return data.mounted; } EXPORT_SYMBOL(path_has_submounts); /* * Called by mount code to set a mountpoint and check if the mountpoint is * reachable (e.g. NFS can unhash a directory dentry and then the complete * subtree can become unreachable). * * Only one of d_invalidate() and d_set_mounted() must succeed. For * this reason take rename_lock and d_lock on dentry and ancestors. */ int d_set_mounted(struct dentry *dentry) { struct dentry *p; int ret = -ENOENT; write_seqlock(&rename_lock); for (p = dentry->d_parent; !IS_ROOT(p); p = p->d_parent) { /* Need exclusion wrt. d_invalidate() */ spin_lock(&p->d_lock); if (unlikely(d_unhashed(p))) { spin_unlock(&p->d_lock); goto out; } spin_unlock(&p->d_lock); } spin_lock(&dentry->d_lock); if (!d_unlinked(dentry)) { ret = -EBUSY; if (!d_mountpoint(dentry)) { dentry->d_flags |= DCACHE_MOUNTED; ret = 0; } } spin_unlock(&dentry->d_lock); out: write_sequnlock(&rename_lock); return ret; } /* * Search the dentry child list of the specified parent, * and move any unused dentries to the end of the unused * list for prune_dcache(). We descend to the next level * whenever the d_subdirs list is non-empty and continue * searching. * * It returns zero iff there are no unused children, * otherwise it returns the number of children moved to * the end of the unused list. This may not be the total * number of unused children, because select_parent can * drop the lock and return early due to latency * constraints. */ struct select_data { struct dentry *start; struct list_head dispose; int found; }; static enum d_walk_ret select_collect(void *_data, struct dentry *dentry) { struct select_data *data = _data; enum d_walk_ret ret = D_WALK_CONTINUE; if (data->start == dentry) goto out; if (dentry->d_flags & DCACHE_SHRINK_LIST) { data->found++; } else { if (dentry->d_flags & DCACHE_LRU_LIST) d_lru_del(dentry); if (!dentry->d_lockref.count) { d_shrink_add(dentry, &data->dispose); data->found++; } } /* * We can return to the caller if we have found some (this * ensures forward progress). 
We'll be coming back to find * the rest. */ if (!list_empty(&data->dispose)) ret = need_resched() ? D_WALK_QUIT : D_WALK_NORETRY; out: return ret; } /** * shrink_dcache_parent - prune dcache * @parent: parent of entries to prune * * Prune the dcache to remove unused children of the parent dentry. */ void shrink_dcache_parent(struct dentry *parent) { for (;;) { struct select_data data; INIT_LIST_HEAD(&data.dispose); data.start = parent; data.found = 0; d_walk(parent, &data, select_collect, NULL); if (!data.found) break; shrink_dentry_list(&data.dispose); cond_resched(); } } EXPORT_SYMBOL(shrink_dcache_parent); static enum d_walk_ret umount_check(void *_data, struct dentry *dentry) { /* it has busy descendents; complain about those instead */ if (!list_empty(&dentry->d_subdirs)) return D_WALK_CONTINUE; /* root with refcount 1 is fine */ if (dentry == _data && dentry->d_lockref.count == 1) return D_WALK_CONTINUE; printk(KERN_ERR "BUG: Dentry %p{i=%lx,n=%pd} " " still in use (%d) [unmount of %s %s]\n", dentry, dentry->d_inode ? dentry->d_inode->i_ino : 0UL, dentry, dentry->d_lockref.count, dentry->d_sb->s_type->name, dentry->d_sb->s_id); WARN_ON(1); return D_WALK_CONTINUE; } static void do_one_tree(struct dentry *dentry) { shrink_dcache_parent(dentry); d_walk(dentry, dentry, umount_check, NULL); d_drop(dentry); dput(dentry); } /* * destroy the dentries attached to a superblock on unmounting */ void shrink_dcache_for_umount(struct super_block *sb) { struct dentry *dentry; WARN(down_read_trylock(&sb->s_umount), "s_umount should've been locked"); dentry = sb->s_root; sb->s_root = NULL; do_one_tree(dentry); while (!hlist_bl_empty(&sb->s_anon)) { dentry = dget(hlist_bl_entry(hlist_bl_first(&sb->s_anon), struct dentry, d_hash)); do_one_tree(dentry); } } struct detach_data { struct select_data select; struct dentry *mountpoint; }; static enum d_walk_ret detach_and_collect(void *_data, struct dentry *dentry) { struct detach_data *data = _data; if (d_mountpoint(dentry)) { __dget_dlock(dentry); data->mountpoint = dentry; return D_WALK_QUIT; } return select_collect(&data->select, dentry); } static void check_and_drop(void *_data) { struct detach_data *data = _data; if (!data->mountpoint && list_empty(&data->select.dispose)) __d_drop(data->select.start); } /** * d_invalidate - detach submounts, prune dcache, and drop * @dentry: dentry to invalidate (aka detach, prune and drop) * * no dcache lock. * * The final d_drop is done as an atomic operation relative to * rename_lock ensuring there are no races with d_set_mounted. This * ensures there are no unhashed dentries on the path to a mountpoint. */ void d_invalidate(struct dentry *dentry) { /* * If it's already been dropped, return OK. */ spin_lock(&dentry->d_lock); if (d_unhashed(dentry)) { spin_unlock(&dentry->d_lock); return; } spin_unlock(&dentry->d_lock); /* Negative dentries can be dropped without further checks */ if (!dentry->d_inode) { d_drop(dentry); return; } for (;;) { struct detach_data data; data.mountpoint = NULL; INIT_LIST_HEAD(&data.select.dispose); data.select.start = dentry; data.select.found = 0; d_walk(dentry, &data, detach_and_collect, check_and_drop); if (!list_empty(&data.select.dispose)) shrink_dentry_list(&data.select.dispose); else if (!data.mountpoint) return; if (data.mountpoint) { detach_mounts(data.mountpoint); dput(data.mountpoint); } cond_resched(); } } EXPORT_SYMBOL(d_invalidate); /** * __d_alloc - allocate a dcache entry * @sb: filesystem it will belong to * @name: qstr of the name * * Allocates a dentry. 
It returns %NULL if there is insufficient memory * available. On a success the dentry is returned. The name passed in is * copied and the copy passed in may be reused after this call. */ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name) { struct dentry *dentry; char *dname; int err; dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL); if (!dentry) return NULL; /* * We guarantee that the inline name is always NUL-terminated. * This way the memcpy() done by the name switching in rename * will still always have a NUL at the end, even if we might * be overwriting an internal NUL character */ dentry->d_iname[DNAME_INLINE_LEN-1] = 0; if (unlikely(!name)) { static const struct qstr anon = QSTR_INIT("/", 1); name = &anon; dname = dentry->d_iname; } else if (name->len > DNAME_INLINE_LEN-1) { size_t size = offsetof(struct external_name, name[1]); struct external_name *p = kmalloc(size + name->len, GFP_KERNEL_ACCOUNT); if (!p) { kmem_cache_free(dentry_cache, dentry); return NULL; } atomic_set(&p->u.count, 1); dname = p->name; if (IS_ENABLED(CONFIG_DCACHE_WORD_ACCESS)) kasan_unpoison_shadow(dname, round_up(name->len + 1, sizeof(unsigned long))); } else { dname = dentry->d_iname; } dentry->d_name.len = name->len; dentry->d_name.hash = name->hash; memcpy(dname, name->name, name->len); dname[name->len] = 0; /* Make sure we always see the terminating NUL character */ smp_wmb(); dentry->d_name.name = dname; dentry->d_lockref.count = 1; dentry->d_flags = 0; spin_lock_init(&dentry->d_lock); seqcount_init(&dentry->d_seq); dentry->d_inode = NULL; dentry->d_parent = dentry; dentry->d_sb = sb; dentry->d_op = NULL; dentry->d_fsdata = NULL; INIT_HLIST_BL_NODE(&dentry->d_hash); INIT_LIST_HEAD(&dentry->d_lru); INIT_LIST_HEAD(&dentry->d_subdirs); INIT_HLIST_NODE(&dentry->d_u.d_alias); INIT_LIST_HEAD(&dentry->d_child); d_set_d_op(dentry, dentry->d_sb->s_d_op); if (dentry->d_op && dentry->d_op->d_init) { err = dentry->d_op->d_init(dentry); if (err) { if (dname_external(dentry)) kfree(external_name(dentry)); kmem_cache_free(dentry_cache, dentry); return NULL; } } this_cpu_inc(nr_dentry); return dentry; } /** * d_alloc - allocate a dcache entry * @parent: parent of entry to allocate * @name: qstr of the name * * Allocates a dentry. It returns %NULL if there is insufficient memory * available. On a success the dentry is returned. The name passed in is * copied and the copy passed in may be reused after this call. */ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name) { struct dentry *dentry = __d_alloc(parent->d_sb, name); if (!dentry) return NULL; dentry->d_flags |= DCACHE_RCUACCESS; spin_lock(&parent->d_lock); /* * don't need child lock because it is not subject * to concurrency here */ __dget_dlock(parent); dentry->d_parent = parent; list_add(&dentry->d_child, &parent->d_subdirs); spin_unlock(&parent->d_lock); return dentry; } EXPORT_SYMBOL(d_alloc); struct dentry *d_alloc_cursor(struct dentry * parent) { struct dentry *dentry = __d_alloc(parent->d_sb, NULL); if (dentry) { dentry->d_flags |= DCACHE_RCUACCESS | DCACHE_DENTRY_CURSOR; dentry->d_parent = dget(parent); } return dentry; } /** * d_alloc_pseudo - allocate a dentry (for lookup-less filesystems) * @sb: the superblock * @name: qstr of the name * * For a filesystem that just pins its dentries in memory and never * performs lookups at all, return an unhashed IS_ROOT dentry. 
*/ struct dentry *d_alloc_pseudo(struct super_block *sb, const struct qstr *name) { return __d_alloc(sb, name); } EXPORT_SYMBOL(d_alloc_pseudo); struct dentry *d_alloc_name(struct dentry *parent, const char *name) { struct qstr q; q.name = name; q.hash_len = hashlen_string(parent, name); return d_alloc(parent, &q); } EXPORT_SYMBOL(d_alloc_name); void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op) { WARN_ON_ONCE(dentry->d_op); WARN_ON_ONCE(dentry->d_flags & (DCACHE_OP_HASH | DCACHE_OP_COMPARE | DCACHE_OP_REVALIDATE | DCACHE_OP_WEAK_REVALIDATE | DCACHE_OP_DELETE | DCACHE_OP_REAL)); dentry->d_op = op; if (!op) return; if (op->d_hash) dentry->d_flags |= DCACHE_OP_HASH; if (op->d_compare) dentry->d_flags |= DCACHE_OP_COMPARE; if (op->d_revalidate) dentry->d_flags |= DCACHE_OP_REVALIDATE; if (op->d_weak_revalidate) dentry->d_flags |= DCACHE_OP_WEAK_REVALIDATE; if (op->d_delete) dentry->d_flags |= DCACHE_OP_DELETE; if (op->d_prune) dentry->d_flags |= DCACHE_OP_PRUNE; if (op->d_real) dentry->d_flags |= DCACHE_OP_REAL; } EXPORT_SYMBOL(d_set_d_op); /* * d_set_fallthru - Mark a dentry as falling through to a lower layer * @dentry - The dentry to mark * * Mark a dentry as falling through to the lower layer (as set with * d_pin_lower()). This flag may be recorded on the medium. */ void d_set_fallthru(struct dentry *dentry) { spin_lock(&dentry->d_lock); dentry->d_flags |= DCACHE_FALLTHRU; spin_unlock(&dentry->d_lock); } EXPORT_SYMBOL(d_set_fallthru); static unsigned d_flags_for_inode(struct inode *inode) { unsigned add_flags = DCACHE_REGULAR_TYPE; if (!inode) return DCACHE_MISS_TYPE; if (S_ISDIR(inode->i_mode)) { add_flags = DCACHE_DIRECTORY_TYPE; if (unlikely(!(inode->i_opflags & IOP_LOOKUP))) { if (unlikely(!inode->i_op->lookup)) add_flags = DCACHE_AUTODIR_TYPE; else inode->i_opflags |= IOP_LOOKUP; } goto type_determined; } if (unlikely(!(inode->i_opflags & IOP_NOFOLLOW))) { if (unlikely(inode->i_op->get_link)) { add_flags = DCACHE_SYMLINK_TYPE; goto type_determined; } inode->i_opflags |= IOP_NOFOLLOW; } if (unlikely(!S_ISREG(inode->i_mode))) add_flags = DCACHE_SPECIAL_TYPE; type_determined: if (unlikely(IS_AUTOMOUNT(inode))) add_flags |= DCACHE_NEED_AUTOMOUNT; return add_flags; } static void __d_instantiate(struct dentry *dentry, struct inode *inode) { unsigned add_flags = d_flags_for_inode(inode); WARN_ON(d_in_lookup(dentry)); spin_lock(&dentry->d_lock); hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry); raw_write_seqcount_begin(&dentry->d_seq); __d_set_inode_and_type(dentry, inode, add_flags); raw_write_seqcount_end(&dentry->d_seq); fsnotify_update_flags(dentry); spin_unlock(&dentry->d_lock); } /** * d_instantiate - fill in inode information for a dentry * @entry: dentry to complete * @inode: inode to attach to this dentry * * Fill in inode information in the entry. * * This turns negative dentries into productive full members * of society. * * NOTE! This assumes that the inode count has been incremented * (or otherwise set) by the caller to indicate that it is now * in use by the dcache. */ void d_instantiate(struct dentry *entry, struct inode * inode) { BUG_ON(!hlist_unhashed(&entry->d_u.d_alias)); if (inode) { security_d_instantiate(entry, inode); spin_lock(&inode->i_lock); __d_instantiate(entry, inode); spin_unlock(&inode->i_lock); } } EXPORT_SYMBOL(d_instantiate); /** * d_instantiate_no_diralias - instantiate a non-aliased dentry * @entry: dentry to complete * @inode: inode to attach to this dentry * * Fill in inode information in the entry. 
If a directory alias is found, then * return an error (and drop inode). Together with d_materialise_unique() this * guarantees that a directory inode may never have more than one alias. */ int d_instantiate_no_diralias(struct dentry *entry, struct inode *inode) { BUG_ON(!hlist_unhashed(&entry->d_u.d_alias)); security_d_instantiate(entry, inode); spin_lock(&inode->i_lock); if (S_ISDIR(inode->i_mode) && !hlist_empty(&inode->i_dentry)) { spin_unlock(&inode->i_lock); iput(inode); return -EBUSY; } __d_instantiate(entry, inode); spin_unlock(&inode->i_lock); return 0; } EXPORT_SYMBOL(d_instantiate_no_diralias); struct dentry *d_make_root(struct inode *root_inode) { struct dentry *res = NULL; if (root_inode) { res = __d_alloc(root_inode->i_sb, NULL); if (res) d_instantiate(res, root_inode); else iput(root_inode); } return res; } EXPORT_SYMBOL(d_make_root); static struct dentry * __d_find_any_alias(struct inode *inode) { struct dentry *alias; if (hlist_empty(&inode->i_dentry)) return NULL; alias = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias); __dget(alias); return alias; } /** * d_find_any_alias - find any alias for a given inode * @inode: inode to find an alias for * * If any aliases exist for the given inode, take and return a * reference for one of them. If no aliases exist, return %NULL. */ struct dentry *d_find_any_alias(struct inode *inode) { struct dentry *de; spin_lock(&inode->i_lock); de = __d_find_any_alias(inode); spin_unlock(&inode->i_lock); return de; } EXPORT_SYMBOL(d_find_any_alias); static struct dentry *__d_obtain_alias(struct inode *inode, int disconnected) { struct dentry *tmp; struct dentry *res; unsigned add_flags; if (!inode) return ERR_PTR(-ESTALE); if (IS_ERR(inode)) return ERR_CAST(inode); res = d_find_any_alias(inode); if (res) goto out_iput; tmp = __d_alloc(inode->i_sb, NULL); if (!tmp) { res = ERR_PTR(-ENOMEM); goto out_iput; } security_d_instantiate(tmp, inode); spin_lock(&inode->i_lock); res = __d_find_any_alias(inode); if (res) { spin_unlock(&inode->i_lock); dput(tmp); goto out_iput; } /* attach a disconnected dentry */ add_flags = d_flags_for_inode(inode); if (disconnected) add_flags |= DCACHE_DISCONNECTED; spin_lock(&tmp->d_lock); __d_set_inode_and_type(tmp, inode, add_flags); hlist_add_head(&tmp->d_u.d_alias, &inode->i_dentry); hlist_bl_lock(&tmp->d_sb->s_anon); hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_anon); hlist_bl_unlock(&tmp->d_sb->s_anon); spin_unlock(&tmp->d_lock); spin_unlock(&inode->i_lock); return tmp; out_iput: iput(inode); return res; } /** * d_obtain_alias - find or allocate a DISCONNECTED dentry for a given inode * @inode: inode to allocate the dentry for * * Obtain a dentry for an inode resulting from NFS filehandle conversion or * similar open by handle operations. The returned dentry may be anonymous, * or may have a full name (if the inode was already in the cache). * * When called on a directory inode, we must ensure that the inode only ever * has one dentry. If a dentry is found, that is returned instead of * allocating a new one. * * On successful return, the reference to the inode has been transferred * to the dentry. In case of an error the reference on the inode is released. * To make it easier to use in export operations a %NULL or IS_ERR inode may * be passed in and the error will be propagated to the return value, * with a %NULL @inode replaced by ERR_PTR(-ESTALE). 
 */
struct dentry *d_obtain_alias(struct inode *inode)
{
	return __d_obtain_alias(inode, 1);
}
EXPORT_SYMBOL(d_obtain_alias);

/**
 * d_obtain_root - find or allocate a dentry for a given inode
 * @inode: inode to allocate the dentry for
 *
 * Obtain an IS_ROOT dentry for the root of a filesystem.
 *
 * We must ensure that directory inodes only ever have one dentry.  If a
 * dentry is found, that is returned instead of allocating a new one.
 *
 * On successful return, the reference to the inode has been transferred
 * to the dentry.  In case of an error the reference on the inode is
 * released.  A %NULL or IS_ERR inode may be passed in and the error will
 * be propagated to the return value, with a %NULL @inode replaced by
 * ERR_PTR(-ESTALE).
 */
struct dentry *d_obtain_root(struct inode *inode)
{
	return __d_obtain_alias(inode, 0);
}
EXPORT_SYMBOL(d_obtain_root);

/**
 * d_add_ci - lookup or allocate new dentry with case-exact name
 * @inode: the inode case-insensitive lookup has found
 * @dentry: the negative dentry that was passed to the parent's lookup func
 * @name: the case-exact name to be associated with the returned dentry
 *
 * This is to avoid filling the dcache with case-insensitive names to the
 * same inode, only the actual correct case is stored in the dcache for
 * case-insensitive filesystems.
 *
 * For a case-insensitive lookup match and if the case-exact dentry
 * already exists in the dcache, use it and return it.
 *
 * If no entry exists with the exact case name, allocate new dentry with
 * the exact case, and return the spliced entry.
 */
struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
			struct qstr *name)
{
	struct dentry *found, *res;

	/*
	 * First check if a dentry matching the name already exists,
	 * if not go ahead and create it now.
	 */
	found = d_hash_and_lookup(dentry->d_parent, name);
	if (found) {
		iput(inode);
		return found;
	}
	if (d_in_lookup(dentry)) {
		found = d_alloc_parallel(dentry->d_parent, name,
					dentry->d_wait);
		if (IS_ERR(found) || !d_in_lookup(found)) {
			iput(inode);
			return found;
		}
	} else {
		found = d_alloc(dentry->d_parent, name);
		if (!found) {
			iput(inode);
			return ERR_PTR(-ENOMEM);
		}
	}
	res = d_splice_alias(inode, found);
	if (res) {
		dput(found);
		return res;
	}
	return found;
}
EXPORT_SYMBOL(d_add_ci);

static inline bool d_same_name(const struct dentry *dentry,
			       const struct dentry *parent,
			       const struct qstr *name)
{
	if (likely(!(parent->d_flags & DCACHE_OP_COMPARE))) {
		if (dentry->d_name.len != name->len)
			return false;
		return dentry_cmp(dentry, name->name, name->len) == 0;
	}
	return parent->d_op->d_compare(dentry,
				       dentry->d_name.len, dentry->d_name.name,
				       name) == 0;
}

/**
 * __d_lookup_rcu - search for a dentry (racy, store-free)
 * @parent: parent dentry
 * @name: qstr of name we wish to find
 * @seqp: returns d_seq value at the point where the dentry was found
 * Returns: dentry, or NULL
 *
 * __d_lookup_rcu is the dcache lookup function for rcu-walk name
 * resolution (store-free path walking) design described in
 * Documentation/filesystems/path-lookup.txt.
 *
 * This is not to be used outside core vfs.
 *
 * __d_lookup_rcu must only be used in rcu-walk mode, ie. with vfsmount lock
 * held, and rcu_read_lock held. The returned dentry must not be stored into
 * without taking d_lock and checking d_seq sequence count against @seq
 * returned here.
 *
 * A refcount may be taken on the found dentry with the d_rcu_to_refcount
 * function.
* * Alternatively, __d_lookup_rcu may be called again to look up the child of * the returned dentry, so long as its parent's seqlock is checked after the * child is looked up. Thus, an interlocking stepping of sequence lock checks * is formed, giving integrity down the path walk. * * NOTE! The caller *has* to check the resulting dentry against the sequence * number we've returned before using any of the resulting dentry state! */ struct dentry *__d_lookup_rcu(const struct dentry *parent, const struct qstr *name, unsigned *seqp) { u64 hashlen = name->hash_len; const unsigned char *str = name->name; struct hlist_bl_head *b = d_hash(hashlen_hash(hashlen)); struct hlist_bl_node *node; struct dentry *dentry; /* * Note: There is significant duplication with __d_lookup_rcu which is * required to prevent single threaded performance regressions * especially on architectures where smp_rmb (in seqcounts) are costly. * Keep the two functions in sync. */ /* * The hash list is protected using RCU. * * Carefully use d_seq when comparing a candidate dentry, to avoid * races with d_move(). * * It is possible that concurrent renames can mess up our list * walk here and result in missing our dentry, resulting in the * false-negative result. d_lookup() protects against concurrent * renames using rename_lock seqlock. * * See Documentation/filesystems/path-lookup.txt for more details. */ hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) { unsigned seq; seqretry: /* * The dentry sequence count protects us from concurrent * renames, and thus protects parent and name fields. * * The caller must perform a seqcount check in order * to do anything useful with the returned dentry. * * NOTE! We do a "raw" seqcount_begin here. That means that * we don't wait for the sequence count to stabilize if it * is in the middle of a sequence change. If we do the slow * dentry compare, we will do seqretries until it is stable, * and if we end up with a successful lookup, we actually * want to exit RCU lookup anyway. * * Note that raw_seqcount_begin still *does* smp_rmb(), so * we are still guaranteed NUL-termination of ->d_name.name. */ seq = raw_seqcount_begin(&dentry->d_seq); if (dentry->d_parent != parent) continue; if (d_unhashed(dentry)) continue; if (unlikely(parent->d_flags & DCACHE_OP_COMPARE)) { int tlen; const char *tname; if (dentry->d_name.hash != hashlen_hash(hashlen)) continue; tlen = dentry->d_name.len; tname = dentry->d_name.name; /* we want a consistent (name,len) pair */ if (read_seqcount_retry(&dentry->d_seq, seq)) { cpu_relax(); goto seqretry; } if (parent->d_op->d_compare(dentry, tlen, tname, name) != 0) continue; } else { if (dentry->d_name.hash_len != hashlen) continue; if (dentry_cmp(dentry, str, hashlen_len(hashlen)) != 0) continue; } *seqp = seq; return dentry; } return NULL; } /** * d_lookup - search for a dentry * @parent: parent dentry * @name: qstr of name we wish to find * Returns: dentry, or NULL * * d_lookup searches the children of the parent dentry for the name in * question. If the dentry is found its reference count is incremented and the * dentry is returned. The caller must use dput to free the entry when it has * finished using it. %NULL is returned if the dentry does not exist. 
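 *
 * A minimal usage sketch, illustrative only (assumes @name has already been
 * hashed; see d_hash_and_lookup() below for a variant that hashes first):
 *
 *	struct dentry *child = d_lookup(parent, &name);
 *	if (child) {
 *		... use child ...
 *		dput(child);
 *	}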
*/ struct dentry *d_lookup(const struct dentry *parent, const struct qstr *name) { struct dentry *dentry; unsigned seq; do { seq = read_seqbegin(&rename_lock); dentry = __d_lookup(parent, name); if (dentry) break; } while (read_seqretry(&rename_lock, seq)); return dentry; } EXPORT_SYMBOL(d_lookup); /** * __d_lookup - search for a dentry (racy) * @parent: parent dentry * @name: qstr of name we wish to find * Returns: dentry, or NULL * * __d_lookup is like d_lookup, however it may (rarely) return a * false-negative result due to unrelated rename activity. * * __d_lookup is slightly faster by avoiding rename_lock read seqlock, * however it must be used carefully, eg. with a following d_lookup in * the case of failure. * * __d_lookup callers must be commented. */ struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name) { unsigned int hash = name->hash; struct hlist_bl_head *b = d_hash(hash); struct hlist_bl_node *node; struct dentry *found = NULL; struct dentry *dentry; /* * Note: There is significant duplication with __d_lookup_rcu which is * required to prevent single threaded performance regressions * especially on architectures where smp_rmb (in seqcounts) are costly. * Keep the two functions in sync. */ /* * The hash list is protected using RCU. * * Take d_lock when comparing a candidate dentry, to avoid races * with d_move(). * * It is possible that concurrent renames can mess up our list * walk here and result in missing our dentry, resulting in the * false-negative result. d_lookup() protects against concurrent * renames using rename_lock seqlock. * * See Documentation/filesystems/path-lookup.txt for more details. */ rcu_read_lock(); hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) { if (dentry->d_name.hash != hash) continue; spin_lock(&dentry->d_lock); if (dentry->d_parent != parent) goto next; if (d_unhashed(dentry)) goto next; if (!d_same_name(dentry, parent, name)) goto next; dentry->d_lockref.count++; found = dentry; spin_unlock(&dentry->d_lock); break; next: spin_unlock(&dentry->d_lock); } rcu_read_unlock(); return found; } /** * d_hash_and_lookup - hash the qstr then search for a dentry * @dir: Directory to search in * @name: qstr of name we wish to find * * On lookup failure NULL is returned; on bad name - ERR_PTR(-error) */ struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name) { /* * Check for a fs-specific hash function. Note that we must * calculate the standard hash first, as the d_op->d_hash() * routine may choose to leave the hash value unchanged. */ name->hash = full_name_hash(dir, name->name, name->len); if (dir->d_flags & DCACHE_OP_HASH) { int err = dir->d_op->d_hash(dir, name); if (unlikely(err < 0)) return ERR_PTR(err); } return d_lookup(dir, name); } EXPORT_SYMBOL(d_hash_and_lookup); /* * When a file is deleted, we have two options: * - turn this dentry into a negative dentry * - unhash this dentry and free it. * * Usually, we want to just turn this into * a negative dentry, but if anybody else is * currently using the dentry or the inode * we can't do that and we fall back on removing * it from the hash queues and waiting for * it to be deleted later when it has no users */ /** * d_delete - delete a dentry * @dentry: The dentry to delete * * Turn the dentry into a negative dentry if possible, otherwise * remove it from the hash queues so it can be deleted later */ void d_delete(struct dentry * dentry) { struct inode *inode; int isdir = 0; /* * Are we the only user? 
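 * If so, we can turn it into a negative dentry in place.  That also needs
 * the inode's i_lock, and since d_lock is already held here we can only
 * trylock it, dropping d_lock and retrying on contention so the usual
 * i_lock -> d_lock ordering is never violated.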
*/ again: spin_lock(&dentry->d_lock); inode = dentry->d_inode; isdir = S_ISDIR(inode->i_mode); if (dentry->d_lockref.count == 1) { if (!spin_trylock(&inode->i_lock)) { spin_unlock(&dentry->d_lock); cpu_relax(); goto again; } dentry->d_flags &= ~DCACHE_CANT_MOUNT; dentry_unlink_inode(dentry); fsnotify_nameremove(dentry, isdir); return; } if (!d_unhashed(dentry)) __d_drop(dentry); spin_unlock(&dentry->d_lock); fsnotify_nameremove(dentry, isdir); } EXPORT_SYMBOL(d_delete); static void __d_rehash(struct dentry *entry) { struct hlist_bl_head *b = d_hash(entry->d_name.hash); BUG_ON(!d_unhashed(entry)); hlist_bl_lock(b); hlist_bl_add_head_rcu(&entry->d_hash, b); hlist_bl_unlock(b); } /** * d_rehash - add an entry back to the hash * @entry: dentry to add to the hash * * Adds a dentry to the hash according to its name. */ void d_rehash(struct dentry * entry) { spin_lock(&entry->d_lock); __d_rehash(entry); spin_unlock(&entry->d_lock); } EXPORT_SYMBOL(d_rehash); static inline unsigned start_dir_add(struct inode *dir) { for (;;) { unsigned n = dir->i_dir_seq; if (!(n & 1) && cmpxchg(&dir->i_dir_seq, n, n + 1) == n) return n; cpu_relax(); } } static inline void end_dir_add(struct inode *dir, unsigned n) { smp_store_release(&dir->i_dir_seq, n + 2); } static void d_wait_lookup(struct dentry *dentry) { if (d_in_lookup(dentry)) { DECLARE_WAITQUEUE(wait, current); add_wait_queue(dentry->d_wait, &wait); do { set_current_state(TASK_UNINTERRUPTIBLE); spin_unlock(&dentry->d_lock); schedule(); spin_lock(&dentry->d_lock); } while (d_in_lookup(dentry)); } } struct dentry *d_alloc_parallel(struct dentry *parent, const struct qstr *name, wait_queue_head_t *wq) { unsigned int hash = name->hash; struct hlist_bl_head *b = in_lookup_hash(parent, hash); struct hlist_bl_node *node; struct dentry *new = d_alloc(parent, name); struct dentry *dentry; unsigned seq, r_seq, d_seq; if (unlikely(!new)) return ERR_PTR(-ENOMEM); retry: rcu_read_lock(); seq = smp_load_acquire(&parent->d_inode->i_dir_seq) & ~1; r_seq = read_seqbegin(&rename_lock); dentry = __d_lookup_rcu(parent, name, &d_seq); if (unlikely(dentry)) { if (!lockref_get_not_dead(&dentry->d_lockref)) { rcu_read_unlock(); goto retry; } if (read_seqcount_retry(&dentry->d_seq, d_seq)) { rcu_read_unlock(); dput(dentry); goto retry; } rcu_read_unlock(); dput(new); return dentry; } if (unlikely(read_seqretry(&rename_lock, r_seq))) { rcu_read_unlock(); goto retry; } hlist_bl_lock(b); if (unlikely(parent->d_inode->i_dir_seq != seq)) { hlist_bl_unlock(b); rcu_read_unlock(); goto retry; } /* * No changes for the parent since the beginning of d_lookup(). * Since all removals from the chain happen with hlist_bl_lock(), * any potential in-lookup matches are going to stay here until * we unlock the chain. All fields are stable in everything * we encounter. */ hlist_bl_for_each_entry(dentry, node, b, d_u.d_in_lookup_hash) { if (dentry->d_name.hash != hash) continue; if (dentry->d_parent != parent) continue; if (!d_same_name(dentry, parent, name)) continue; hlist_bl_unlock(b); /* now we can try to grab a reference */ if (!lockref_get_not_dead(&dentry->d_lockref)) { rcu_read_unlock(); goto retry; } rcu_read_unlock(); /* * somebody is likely to be still doing lookup for it; * wait for them to finish */ spin_lock(&dentry->d_lock); d_wait_lookup(dentry); /* * it's not in-lookup anymore; in principle we should repeat * everything from dcache lookup, but it's likely to be what * d_lookup() would've found anyway. If it is, just return it; * otherwise we really have to repeat the whole thing. 
*/ if (unlikely(dentry->d_name.hash != hash)) goto mismatch; if (unlikely(dentry->d_parent != parent)) goto mismatch; if (unlikely(d_unhashed(dentry))) goto mismatch; if (unlikely(!d_same_name(dentry, parent, name))) goto mismatch; /* OK, it *is* a hashed match; return it */ spin_unlock(&dentry->d_lock); dput(new); return dentry; } rcu_read_unlock(); /* we can't take ->d_lock here; it's OK, though. */ new->d_flags |= DCACHE_PAR_LOOKUP; new->d_wait = wq; hlist_bl_add_head_rcu(&new->d_u.d_in_lookup_hash, b); hlist_bl_unlock(b); return new; mismatch: spin_unlock(&dentry->d_lock); dput(dentry); goto retry; } EXPORT_SYMBOL(d_alloc_parallel); void __d_lookup_done(struct dentry *dentry) { struct hlist_bl_head *b = in_lookup_hash(dentry->d_parent, dentry->d_name.hash); hlist_bl_lock(b); dentry->d_flags &= ~DCACHE_PAR_LOOKUP; __hlist_bl_del(&dentry->d_u.d_in_lookup_hash); wake_up_all(dentry->d_wait); dentry->d_wait = NULL; hlist_bl_unlock(b); INIT_HLIST_NODE(&dentry->d_u.d_alias); INIT_LIST_HEAD(&dentry->d_lru); } EXPORT_SYMBOL(__d_lookup_done); /* inode->i_lock held if inode is non-NULL */ static inline void __d_add(struct dentry *dentry, struct inode *inode) { struct inode *dir = NULL; unsigned n; spin_lock(&dentry->d_lock); if (unlikely(d_in_lookup(dentry))) { dir = dentry->d_parent->d_inode; n = start_dir_add(dir); __d_lookup_done(dentry); } if (inode) { unsigned add_flags = d_flags_for_inode(inode); hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry); raw_write_seqcount_begin(&dentry->d_seq); __d_set_inode_and_type(dentry, inode, add_flags); raw_write_seqcount_end(&dentry->d_seq); fsnotify_update_flags(dentry); } __d_rehash(dentry); if (dir) end_dir_add(dir, n); spin_unlock(&dentry->d_lock); if (inode) spin_unlock(&inode->i_lock); } /** * d_add - add dentry to hash queues * @entry: dentry to add * @inode: The inode to attach to this dentry * * This adds the entry to the hash queues and initializes @inode. * The entry was actually filled in earlier during d_alloc(). */ void d_add(struct dentry *entry, struct inode *inode) { if (inode) { security_d_instantiate(entry, inode); spin_lock(&inode->i_lock); } __d_add(entry, inode); } EXPORT_SYMBOL(d_add); /** * d_exact_alias - find and hash an exact unhashed alias * @entry: dentry to add * @inode: The inode to go with this dentry * * If an unhashed dentry with the same name/parent and desired * inode already exists, hash and return it. Otherwise, return * NULL. * * Parent directory should be locked. */ struct dentry *d_exact_alias(struct dentry *entry, struct inode *inode) { struct dentry *alias; unsigned int hash = entry->d_name.hash; spin_lock(&inode->i_lock); hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) { /* * Don't need alias->d_lock here, because aliases with * d_parent == entry->d_parent are not subject to name or * parent changes, because the parent inode i_mutex is held. */ if (alias->d_name.hash != hash) continue; if (alias->d_parent != entry->d_parent) continue; if (!d_same_name(alias, entry->d_parent, &entry->d_name)) continue; spin_lock(&alias->d_lock); if (!d_unhashed(alias)) { spin_unlock(&alias->d_lock); alias = NULL; } else { __dget_dlock(alias); __d_rehash(alias); spin_unlock(&alias->d_lock); } spin_unlock(&inode->i_lock); return alias; } spin_unlock(&inode->i_lock); return NULL; } EXPORT_SYMBOL(d_exact_alias); /** * dentry_update_name_case - update case insensitive dentry with a new name * @dentry: dentry to be updated * @name: new name * * Update a case insensitive dentry with new case of name. 
* * dentry must have been returned by d_lookup with name @name. Old and new * name lengths must match (ie. no d_compare which allows mismatched name * lengths). * * Parent inode i_mutex must be held over d_lookup and into this call (to * keep renames and concurrent inserts, and readdir(2) away). */ void dentry_update_name_case(struct dentry *dentry, const struct qstr *name) { BUG_ON(!inode_is_locked(dentry->d_parent->d_inode)); BUG_ON(dentry->d_name.len != name->len); /* d_lookup gives this */ spin_lock(&dentry->d_lock); write_seqcount_begin(&dentry->d_seq); memcpy((unsigned char *)dentry->d_name.name, name->name, name->len); write_seqcount_end(&dentry->d_seq); spin_unlock(&dentry->d_lock); } EXPORT_SYMBOL(dentry_update_name_case); static void swap_names(struct dentry *dentry, struct dentry *target) { if (unlikely(dname_external(target))) { if (unlikely(dname_external(dentry))) { /* * Both external: swap the pointers */ swap(target->d_name.name, dentry->d_name.name); } else { /* * dentry:internal, target:external. Steal target's * storage and make target internal. */ memcpy(target->d_iname, dentry->d_name.name, dentry->d_name.len + 1); dentry->d_name.name = target->d_name.name; target->d_name.name = target->d_iname; } } else { if (unlikely(dname_external(dentry))) { /* * dentry:external, target:internal. Give dentry's * storage to target and make dentry internal */ memcpy(dentry->d_iname, target->d_name.name, target->d_name.len + 1); target->d_name.name = dentry->d_name.name; dentry->d_name.name = dentry->d_iname; } else { /* * Both are internal. */ unsigned int i; BUILD_BUG_ON(!IS_ALIGNED(DNAME_INLINE_LEN, sizeof(long))); kmemcheck_mark_initialized(dentry->d_iname, DNAME_INLINE_LEN); kmemcheck_mark_initialized(target->d_iname, DNAME_INLINE_LEN); for (i = 0; i < DNAME_INLINE_LEN / sizeof(long); i++) { swap(((long *) &dentry->d_iname)[i], ((long *) &target->d_iname)[i]); } } } swap(dentry->d_name.hash_len, target->d_name.hash_len); } static void copy_name(struct dentry *dentry, struct dentry *target) { struct external_name *old_name = NULL; if (unlikely(dname_external(dentry))) old_name = external_name(dentry); if (unlikely(dname_external(target))) { atomic_inc(&external_name(target)->u.count); dentry->d_name = target->d_name; } else { memcpy(dentry->d_iname, target->d_name.name, target->d_name.len + 1); dentry->d_name.name = dentry->d_iname; dentry->d_name.hash_len = target->d_name.hash_len; } if (old_name && likely(atomic_dec_and_test(&old_name->u.count))) kfree_rcu(old_name, u.head); } static void dentry_lock_for_move(struct dentry *dentry, struct dentry *target) { /* * XXXX: do we really need to take target->d_lock? 
*/ if (IS_ROOT(dentry) || dentry->d_parent == target->d_parent) spin_lock(&target->d_parent->d_lock); else { if (d_ancestor(dentry->d_parent, target->d_parent)) { spin_lock(&dentry->d_parent->d_lock); spin_lock_nested(&target->d_parent->d_lock, DENTRY_D_LOCK_NESTED); } else { spin_lock(&target->d_parent->d_lock); spin_lock_nested(&dentry->d_parent->d_lock, DENTRY_D_LOCK_NESTED); } } if (target < dentry) { spin_lock_nested(&target->d_lock, 2); spin_lock_nested(&dentry->d_lock, 3); } else { spin_lock_nested(&dentry->d_lock, 2); spin_lock_nested(&target->d_lock, 3); } } static void dentry_unlock_for_move(struct dentry *dentry, struct dentry *target) { if (target->d_parent != dentry->d_parent) spin_unlock(&dentry->d_parent->d_lock); if (target->d_parent != target) spin_unlock(&target->d_parent->d_lock); spin_unlock(&target->d_lock); spin_unlock(&dentry->d_lock); } /* * When switching names, the actual string doesn't strictly have to * be preserved in the target - because we're dropping the target * anyway. As such, we can just do a simple memcpy() to copy over * the new name before we switch, unless we are going to rehash * it. Note that if we *do* unhash the target, we are not allowed * to rehash it without giving it a new name/hash key - whether * we swap or overwrite the names here, resulting name won't match * the reality in filesystem; it's only there for d_path() purposes. * Note that all of this is happening under rename_lock, so the * any hash lookup seeing it in the middle of manipulations will * be discarded anyway. So we do not care what happens to the hash * key in that case. */ /* * __d_move - move a dentry * @dentry: entry to move * @target: new dentry * @exchange: exchange the two dentries * * Update the dcache to reflect the move of a file name. Negative * dcache entries should not be moved in this way. Caller must hold * rename_lock, the i_mutex of the source and target directories, * and the sb->s_vfs_rename_mutex if they differ. See lock_rename(). */ static void __d_move(struct dentry *dentry, struct dentry *target, bool exchange) { struct inode *dir = NULL; unsigned n; if (!dentry->d_inode) printk(KERN_WARNING "VFS: moving negative dcache entry\n"); BUG_ON(d_ancestor(dentry, target)); BUG_ON(d_ancestor(target, dentry)); dentry_lock_for_move(dentry, target); if (unlikely(d_in_lookup(target))) { dir = target->d_parent->d_inode; n = start_dir_add(dir); __d_lookup_done(target); } write_seqcount_begin(&dentry->d_seq); write_seqcount_begin_nested(&target->d_seq, DENTRY_D_LOCK_NESTED); /* unhash both */ /* __d_drop does write_seqcount_barrier, but they're OK to nest. */ __d_drop(dentry); __d_drop(target); /* Switch the names.. */ if (exchange) swap_names(dentry, target); else copy_name(dentry, target); /* rehash in new place(s) */ __d_rehash(dentry); if (exchange) __d_rehash(target); /* ... 
and switch them in the tree */ if (IS_ROOT(dentry)) { /* splicing a tree */ dentry->d_flags |= DCACHE_RCUACCESS; dentry->d_parent = target->d_parent; target->d_parent = target; list_del_init(&target->d_child); list_move(&dentry->d_child, &dentry->d_parent->d_subdirs); } else { /* swapping two dentries */ swap(dentry->d_parent, target->d_parent); list_move(&target->d_child, &target->d_parent->d_subdirs); list_move(&dentry->d_child, &dentry->d_parent->d_subdirs); if (exchange) fsnotify_update_flags(target); fsnotify_update_flags(dentry); } write_seqcount_end(&target->d_seq); write_seqcount_end(&dentry->d_seq); if (dir) end_dir_add(dir, n); dentry_unlock_for_move(dentry, target); } /* * d_move - move a dentry * @dentry: entry to move * @target: new dentry * * Update the dcache to reflect the move of a file name. Negative * dcache entries should not be moved in this way. See the locking * requirements for __d_move. */ void d_move(struct dentry *dentry, struct dentry *target) { write_seqlock(&rename_lock); __d_move(dentry, target, false); write_sequnlock(&rename_lock); } EXPORT_SYMBOL(d_move); /* * d_exchange - exchange two dentries * @dentry1: first dentry * @dentry2: second dentry */ void d_exchange(struct dentry *dentry1, struct dentry *dentry2) { write_seqlock(&rename_lock); WARN_ON(!dentry1->d_inode); WARN_ON(!dentry2->d_inode); WARN_ON(IS_ROOT(dentry1)); WARN_ON(IS_ROOT(dentry2)); __d_move(dentry1, dentry2, true); write_sequnlock(&rename_lock); } /** * d_ancestor - search for an ancestor * @p1: ancestor dentry * @p2: child dentry * * Returns the ancestor dentry of p2 which is a child of p1, if p1 is * an ancestor of p2, else NULL. */ struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2) { struct dentry *p; for (p = p2; !IS_ROOT(p); p = p->d_parent) { if (p->d_parent == p1) return p; } return NULL; } /* * This helper attempts to cope with remotely renamed directories * * It assumes that the caller is already holding * dentry->d_parent->d_inode->i_mutex, and rename_lock * * Note: If ever the locking in lock_rename() changes, then please * remember to update this too... */ static int __d_unalias(struct inode *inode, struct dentry *dentry, struct dentry *alias) { struct mutex *m1 = NULL; struct rw_semaphore *m2 = NULL; int ret = -ESTALE; /* If alias and dentry share a parent, then no extra locks required */ if (alias->d_parent == dentry->d_parent) goto out_unalias; /* See lock_rename() */ if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex)) goto out_err; m1 = &dentry->d_sb->s_vfs_rename_mutex; if (!inode_trylock_shared(alias->d_parent->d_inode)) goto out_err; m2 = &alias->d_parent->d_inode->i_rwsem; out_unalias: __d_move(alias, dentry, false); ret = 0; out_err: if (m2) up_read(m2); if (m1) mutex_unlock(m1); return ret; } /** * d_splice_alias - splice a disconnected dentry into the tree if one exists * @inode: the inode which may have a disconnected dentry * @dentry: a negative dentry which we want to point to the inode. * * If inode is a directory and has an IS_ROOT alias, then d_move that in * place of the given dentry and return it, else simply d_add the inode * to the dentry and return NULL. * * If a non-IS_ROOT directory is found, the filesystem is corrupt, and * we should error out: directories can't have multiple aliases. * * This is needed in the lookup routine of any filesystem that is exportable * (via knfsd) so that we can build dcache paths to directories effectively. * * If a dentry was found and moved, then it is returned. Otherwise NULL * is returned. 
This matches the expected return value of ->lookup. * * Cluster filesystems may call this function with a negative, hashed dentry. * In that case, we know that the inode will be a regular file, and also this * will only occur during atomic_open. So we need to check for the dentry * being already hashed only in the final case. */ struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry) { if (IS_ERR(inode)) return ERR_CAST(inode); BUG_ON(!d_unhashed(dentry)); if (!inode) goto out; security_d_instantiate(dentry, inode); spin_lock(&inode->i_lock); if (S_ISDIR(inode->i_mode)) { struct dentry *new = __d_find_any_alias(inode); if (unlikely(new)) { /* The reference to new ensures it remains an alias */ spin_unlock(&inode->i_lock); write_seqlock(&rename_lock); if (unlikely(d_ancestor(new, dentry))) { write_sequnlock(&rename_lock); dput(new); new = ERR_PTR(-ELOOP); pr_warn_ratelimited( "VFS: Lookup of '%s' in %s %s" " would have caused loop\n", dentry->d_name.name, inode->i_sb->s_type->name, inode->i_sb->s_id); } else if (!IS_ROOT(new)) { int err = __d_unalias(inode, dentry, new); write_sequnlock(&rename_lock); if (err) { dput(new); new = ERR_PTR(err); } } else { __d_move(new, dentry, false); write_sequnlock(&rename_lock); } iput(inode); return new; } } out: __d_add(dentry, inode); return NULL; } EXPORT_SYMBOL(d_splice_alias); static int prepend(char **buffer, int *buflen, const char *str, int namelen) { *buflen -= namelen; if (*buflen < 0) return -ENAMETOOLONG; *buffer -= namelen; memcpy(*buffer, str, namelen); return 0; } /** * prepend_name - prepend a pathname in front of current buffer pointer * @buffer: buffer pointer * @buflen: allocated length of the buffer * @name: name string and length qstr structure * * With RCU path tracing, it may race with d_move(). Use ACCESS_ONCE() to * make sure that either the old or the new name pointer and length are * fetched. However, there may be mismatch between length and pointer. * The length cannot be trusted, we need to copy it byte-by-byte until * the length is reached or a null byte is found. It also prepends "/" at * the beginning of the name. The sequence number check at the caller will * retry it again when a d_move() does happen. So any garbage in the buffer * due to mismatched pointer and length will be discarded. * * Data dependency barrier is needed to make sure that we see that terminating * NUL. Alpha strikes again, film at 11... */ static int prepend_name(char **buffer, int *buflen, const struct qstr *name) { const char *dname = ACCESS_ONCE(name->name); u32 dlen = ACCESS_ONCE(name->len); char *p; smp_read_barrier_depends(); *buflen -= dlen + 1; if (*buflen < 0) return -ENAMETOOLONG; p = *buffer -= dlen + 1; *p++ = '/'; while (dlen--) { char c = *dname++; if (!c) break; *p++ = c; } return 0; } /** * prepend_path - Prepend path string to a buffer * @path: the dentry/vfsmount to report * @root: root vfsmnt/dentry * @buffer: pointer to the end of the buffer * @buflen: pointer to buffer length * * The function will first try to write out the pathname without taking any * lock other than the RCU read lock to make sure that dentries won't go away. * It only checks the sequence number of the global rename_lock as any change * in the dentry's d_seq will be preceded by changes in the rename_lock * sequence number. If the sequence number had been changed, it will restart * the whole pathname back-tracing sequence again by taking the rename_lock. 
* In this case, there is no need to take the RCU read lock as the recursive * parent pointer references will keep the dentry chain alive as long as no * rename operation is performed. */ static int prepend_path(const struct path *path, const struct path *root, char **buffer, int *buflen) { struct dentry *dentry; struct vfsmount *vfsmnt; struct mount *mnt; int error = 0; unsigned seq, m_seq = 0; char *bptr; int blen; rcu_read_lock(); restart_mnt: read_seqbegin_or_lock(&mount_lock, &m_seq); seq = 0; rcu_read_lock(); restart: bptr = *buffer; blen = *buflen; error = 0; dentry = path->dentry; vfsmnt = path->mnt; mnt = real_mount(vfsmnt); read_seqbegin_or_lock(&rename_lock, &seq); while (dentry != root->dentry || vfsmnt != root->mnt) { struct dentry * parent; if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) { struct mount *parent = ACCESS_ONCE(mnt->mnt_parent); /* Escaped? */ if (dentry != vfsmnt->mnt_root) { bptr = *buffer; blen = *buflen; error = 3; break; } /* Global root? */ if (mnt != parent) { dentry = ACCESS_ONCE(mnt->mnt_mountpoint); mnt = parent; vfsmnt = &mnt->mnt; continue; } if (!error) error = is_mounted(vfsmnt) ? 1 : 2; break; } parent = dentry->d_parent; prefetch(parent); error = prepend_name(&bptr, &blen, &dentry->d_name); if (error) break; dentry = parent; } if (!(seq & 1)) rcu_read_unlock(); if (need_seqretry(&rename_lock, seq)) { seq = 1; goto restart; } done_seqretry(&rename_lock, seq); if (!(m_seq & 1)) rcu_read_unlock(); if (need_seqretry(&mount_lock, m_seq)) { m_seq = 1; goto restart_mnt; } done_seqretry(&mount_lock, m_seq); if (error >= 0 && bptr == *buffer) { if (--blen < 0) error = -ENAMETOOLONG; else *--bptr = '/'; } *buffer = bptr; *buflen = blen; return error; } /** * __d_path - return the path of a dentry * @path: the dentry/vfsmount to report * @root: root vfsmnt/dentry * @buf: buffer to return value in * @buflen: buffer length * * Convert a dentry into an ASCII path name. * * Returns a pointer into the buffer or an error code if the * path was too long. * * "buflen" should be positive. * * If the path is not reachable from the supplied root, return %NULL. */ char *__d_path(const struct path *path, const struct path *root, char *buf, int buflen) { char *res = buf + buflen; int error; prepend(&res, &buflen, "\0", 1); error = prepend_path(path, root, &res, &buflen); if (error < 0) return ERR_PTR(error); if (error > 0) return NULL; return res; } char *d_absolute_path(const struct path *path, char *buf, int buflen) { struct path root = {}; char *res = buf + buflen; int error; prepend(&res, &buflen, "\0", 1); error = prepend_path(path, &root, &res, &buflen); if (error > 1) error = -EINVAL; if (error < 0) return ERR_PTR(error); return res; } /* * same as __d_path but appends "(deleted)" for unlinked files. 
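 * e.g. an unlinked-but-still-open file that lived at /tmp/scratch comes
 * back as "/tmp/scratch (deleted)".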
*/ static int path_with_deleted(const struct path *path, const struct path *root, char **buf, int *buflen) { prepend(buf, buflen, "\0", 1); if (d_unlinked(path->dentry)) { int error = prepend(buf, buflen, " (deleted)", 10); if (error) return error; } return prepend_path(path, root, buf, buflen); } static int prepend_unreachable(char **buffer, int *buflen) { return prepend(buffer, buflen, "(unreachable)", 13); } static void get_fs_root_rcu(struct fs_struct *fs, struct path *root) { unsigned seq; do { seq = read_seqcount_begin(&fs->seq); *root = fs->root; } while (read_seqcount_retry(&fs->seq, seq)); } /** * d_path - return the path of a dentry * @path: path to report * @buf: buffer to return value in * @buflen: buffer length * * Convert a dentry into an ASCII path name. If the entry has been deleted * the string " (deleted)" is appended. Note that this is ambiguous. * * Returns a pointer into the buffer or an error code if the path was * too long. Note: Callers should use the returned pointer, not the passed * in buffer, to use the name! The implementation often starts at an offset * into the buffer, and may leave 0 bytes at the start. * * "buflen" should be positive. */ char *d_path(const struct path *path, char *buf, int buflen) { char *res = buf + buflen; struct path root; int error; /* * We have various synthetic filesystems that never get mounted. On * these filesystems dentries are never used for lookup purposes, and * thus don't need to be hashed. They also don't need a name until a * user wants to identify the object in /proc/pid/fd/. The little hack * below allows us to generate a name for these objects on demand: * * Some pseudo inodes are mountable. When they are mounted * path->dentry == path->mnt->mnt_root. In that case don't call d_dname * and instead have d_path return the mounted path. */ if (path->dentry->d_op && path->dentry->d_op->d_dname && (!IS_ROOT(path->dentry) || path->dentry != path->mnt->mnt_root)) return path->dentry->d_op->d_dname(path->dentry, buf, buflen); rcu_read_lock(); get_fs_root_rcu(current->fs, &root); error = path_with_deleted(path, &root, &res, &buflen); rcu_read_unlock(); if (error < 0) res = ERR_PTR(error); return res; } EXPORT_SYMBOL(d_path); /* * Helper function for dentry_operations.d_dname() members */ char *dynamic_dname(struct dentry *dentry, char *buffer, int buflen, const char *fmt, ...) { va_list args; char temp[64]; int sz; va_start(args, fmt); sz = vsnprintf(temp, sizeof(temp), fmt, args) + 1; va_end(args); if (sz > sizeof(temp) || sz > buflen) return ERR_PTR(-ENAMETOOLONG); buffer += buflen - sz; return memcpy(buffer, temp, sz); } char *simple_dname(struct dentry *dentry, char *buffer, int buflen) { char *end = buffer + buflen; /* these dentries are never renamed, so d_lock is not needed */ if (prepend(&end, &buflen, " (deleted)", 11) || prepend(&end, &buflen, dentry->d_name.name, dentry->d_name.len) || prepend(&end, &buflen, "/", 1)) end = ERR_PTR(-ENAMETOOLONG); return end; } EXPORT_SYMBOL(simple_dname); /* * Write full pathname from the root of the filesystem into the buffer. 
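 * The result is relative to the filesystem root rather than to any mount
 * point or process root: only d_parent is followed and no vfsmount is
 * consulted (compare d_path() above).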
*/ static char *__dentry_path(struct dentry *d, char *buf, int buflen) { struct dentry *dentry; char *end, *retval; int len, seq = 0; int error = 0; if (buflen < 2) goto Elong; rcu_read_lock(); restart: dentry = d; end = buf + buflen; len = buflen; prepend(&end, &len, "\0", 1); /* Get '/' right */ retval = end-1; *retval = '/'; read_seqbegin_or_lock(&rename_lock, &seq); while (!IS_ROOT(dentry)) { struct dentry *parent = dentry->d_parent; prefetch(parent); error = prepend_name(&end, &len, &dentry->d_name); if (error) break; retval = end; dentry = parent; } if (!(seq & 1)) rcu_read_unlock(); if (need_seqretry(&rename_lock, seq)) { seq = 1; goto restart; } done_seqretry(&rename_lock, seq); if (error) goto Elong; return retval; Elong: return ERR_PTR(-ENAMETOOLONG); } char *dentry_path_raw(struct dentry *dentry, char *buf, int buflen) { return __dentry_path(dentry, buf, buflen); } EXPORT_SYMBOL(dentry_path_raw); char *dentry_path(struct dentry *dentry, char *buf, int buflen) { char *p = NULL; char *retval; if (d_unlinked(dentry)) { p = buf + buflen; if (prepend(&p, &buflen, "//deleted", 10) != 0) goto Elong; buflen++; } retval = __dentry_path(dentry, buf, buflen); if (!IS_ERR(retval) && p) *p = '/'; /* restore '/' overriden with '\0' */ return retval; Elong: return ERR_PTR(-ENAMETOOLONG); } static void get_fs_root_and_pwd_rcu(struct fs_struct *fs, struct path *root, struct path *pwd) { unsigned seq; do { seq = read_seqcount_begin(&fs->seq); *root = fs->root; *pwd = fs->pwd; } while (read_seqcount_retry(&fs->seq, seq)); } /* * NOTE! The user-level library version returns a * character pointer. The kernel system call just * returns the length of the buffer filled (which * includes the ending '\0' character), or a negative * error value. So libc would do something like * * char *getcwd(char * buf, size_t size) * { * int retval; * * retval = sys_getcwd(buf, size); * if (retval >= 0) * return buf; * errno = -retval; * return NULL; * } */ SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size) { int error; struct path pwd, root; char *page = __getname(); if (!page) return -ENOMEM; rcu_read_lock(); get_fs_root_and_pwd_rcu(current->fs, &root, &pwd); error = -ENOENT; if (!d_unlinked(pwd.dentry)) { unsigned long len; char *cwd = page + PATH_MAX; int buflen = PATH_MAX; prepend(&cwd, &buflen, "\0", 1); error = prepend_path(&pwd, &root, &cwd, &buflen); rcu_read_unlock(); if (error < 0) goto out; /* Unreachable from current root */ if (error > 0) { error = prepend_unreachable(&cwd, &buflen); if (error) goto out; } error = -ERANGE; len = PATH_MAX + page - cwd; if (len <= size) { error = len; if (copy_to_user(buf, cwd, len)) error = -EFAULT; } } else { rcu_read_unlock(); } out: __putname(page); return error; } /* * Test whether new_dentry is a subdirectory of old_dentry. * * Trivially implemented using the dcache structure */ /** * is_subdir - is new dentry a subdirectory of old_dentry * @new_dentry: new dentry * @old_dentry: old dentry * * Returns true if new_dentry is a subdirectory of the parent (at any depth). * Returns false otherwise. 
* Caller must ensure that "new_dentry" is pinned before calling is_subdir() */ bool is_subdir(struct dentry *new_dentry, struct dentry *old_dentry) { bool result; unsigned seq; if (new_dentry == old_dentry) return true; do { /* for restarting inner loop in case of seq retry */ seq = read_seqbegin(&rename_lock); /* * Need rcu_readlock to protect against the d_parent trashing * due to d_move */ rcu_read_lock(); if (d_ancestor(old_dentry, new_dentry)) result = true; else result = false; rcu_read_unlock(); } while (read_seqretry(&rename_lock, seq)); return result; } static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry) { struct dentry *root = data; if (dentry != root) { if (d_unhashed(dentry) || !dentry->d_inode) return D_WALK_SKIP; if (!(dentry->d_flags & DCACHE_GENOCIDE)) { dentry->d_flags |= DCACHE_GENOCIDE; dentry->d_lockref.count--; } } return D_WALK_CONTINUE; } void d_genocide(struct dentry *parent) { d_walk(parent, parent, d_genocide_kill, NULL); } void d_tmpfile(struct dentry *dentry, struct inode *inode) { inode_dec_link_count(inode); BUG_ON(dentry->d_name.name != dentry->d_iname || !hlist_unhashed(&dentry->d_u.d_alias) || !d_unlinked(dentry)); spin_lock(&dentry->d_parent->d_lock); spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); dentry->d_name.len = sprintf(dentry->d_iname, "#%llu", (unsigned long long)inode->i_ino); spin_unlock(&dentry->d_lock); spin_unlock(&dentry->d_parent->d_lock); d_instantiate(dentry, inode); } EXPORT_SYMBOL(d_tmpfile); static __initdata unsigned long dhash_entries; static int __init set_dhash_entries(char *str) { if (!str) return 0; dhash_entries = simple_strtoul(str, &str, 0); return 1; } __setup("dhash_entries=", set_dhash_entries); static void __init dcache_init_early(void) { unsigned int loop; /* If hashes are distributed across NUMA nodes, defer * hash allocation until vmalloc space is available. */ if (hashdist) return; dentry_hashtable = alloc_large_system_hash("Dentry cache", sizeof(struct hlist_bl_head), dhash_entries, 13, HASH_EARLY, &d_hash_shift, &d_hash_mask, 0, 0); for (loop = 0; loop < (1U << d_hash_shift); loop++) INIT_HLIST_BL_HEAD(dentry_hashtable + loop); } static void __init dcache_init(void) { unsigned int loop; /* * A constructor could be added for stable state like the lists, * but it is probably not worth it because of the cache nature * of the dcache. */ dentry_cache = KMEM_CACHE(dentry, SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD|SLAB_ACCOUNT); /* Hash may have been set up in dcache_init_early */ if (!hashdist) return; dentry_hashtable = alloc_large_system_hash("Dentry cache", sizeof(struct hlist_bl_head), dhash_entries, 13, 0, &d_hash_shift, &d_hash_mask, 0, 0); for (loop = 0; loop < (1U << d_hash_shift); loop++) INIT_HLIST_BL_HEAD(dentry_hashtable + loop); } /* SLAB cache for __getname() consumers */ struct kmem_cache *names_cachep __read_mostly; EXPORT_SYMBOL(names_cachep); EXPORT_SYMBOL(d_genocide); void __init vfs_caches_init_early(void) { int i; for (i = 0; i < ARRAY_SIZE(in_lookup_hashtable); i++) INIT_HLIST_BL_HEAD(&in_lookup_hashtable[i]); dcache_init_early(); inode_init_early(); } void __init vfs_caches_init(void) { names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); dcache_init(); inode_init(); files_init(); files_maxfiles_init(); mnt_init(); bdev_cache_init(); chrdev_init(); }
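/*
 * Illustrative sketch, not from this file: the typical way a filesystem's
 * ->lookup() method uses d_splice_alias(), as described in the comments
 * above.  "myfs_get_inode" and MYFS_MAX_NAMELEN are made-up names standing
 * in for the filesystem's own directory search and limits; the dcache calls
 * are the interfaces defined in this file.
 */
#if 0	/* example only, never compiled */
static struct dentry *myfs_lookup(struct inode *dir, struct dentry *dentry,
				  unsigned int flags)
{
	struct inode *inode;

	if (dentry->d_name.len > MYFS_MAX_NAMELEN)
		return ERR_PTR(-ENAMETOOLONG);

	/* May return NULL (name not found), an inode, or an ERR_PTR(). */
	inode = myfs_get_inode(dir, &dentry->d_name);

	/*
	 * d_splice_alias() copes with all three cases: it adds the dentry
	 * (negatively if inode is NULL), reuses an existing IS_ROOT alias
	 * for directories, and propagates ERR_PTR() inodes.  Its return
	 * value is exactly what ->lookup() is expected to return.
	 */
	return d_splice_alias(inode, dentry);
}
#endif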
./CrossVul/dataset_final_sorted/CWE-362/c/good_3275_0
crossvul-cpp_data_good_3430_0
/* * icall.c: * * Authors: * Dietmar Maurer (dietmar@ximian.com) * Paolo Molaro (lupus@ximian.com) * Patrik Torstensson (patrik.torstensson@labs2.com) * * Copyright 2001-2003 Ximian, Inc (http://www.ximian.com) * Copyright 2004-2009 Novell, Inc (http://www.novell.com) */ #include <config.h> #include <glib.h> #include <stdarg.h> #include <string.h> #include <ctype.h> #ifdef HAVE_ALLOCA_H #include <alloca.h> #endif #ifdef HAVE_SYS_TIME_H #include <sys/time.h> #endif #ifdef HAVE_UNISTD_H #include <unistd.h> #endif #if defined (PLATFORM_WIN32) #include <stdlib.h> #endif #include "mono/utils/mono-membar.h" #include <mono/metadata/object.h> #include <mono/metadata/threads.h> #include <mono/metadata/threads-types.h> #include <mono/metadata/threadpool.h> #include <mono/metadata/monitor.h> #include <mono/metadata/reflection.h> #include <mono/metadata/assembly.h> #include <mono/metadata/tabledefs.h> #include <mono/metadata/exception.h> #include <mono/metadata/file-io.h> #include <mono/metadata/console-io.h> #include <mono/metadata/socket-io.h> #include <mono/metadata/mono-endian.h> #include <mono/metadata/tokentype.h> #include <mono/metadata/domain-internals.h> #include <mono/metadata/metadata-internals.h> #include <mono/metadata/class-internals.h> #include <mono/metadata/marshal.h> #include <mono/metadata/gc-internal.h> #include <mono/metadata/mono-gc.h> #include <mono/metadata/rand.h> #include <mono/metadata/sysmath.h> #include <mono/metadata/string-icalls.h> #include <mono/metadata/debug-helpers.h> #include <mono/metadata/process.h> #include <mono/metadata/environment.h> #include <mono/metadata/profiler-private.h> #include <mono/metadata/locales.h> #include <mono/metadata/filewatcher.h> #include <mono/metadata/char-conversions.h> #include <mono/metadata/security.h> #include <mono/metadata/mono-config.h> #include <mono/metadata/cil-coff.h> #include <mono/metadata/number-formatter.h> #include <mono/metadata/security-manager.h> #include <mono/metadata/security-core-clr.h> #include <mono/metadata/mono-perfcounters.h> #include <mono/metadata/mono-debug.h> #include <mono/metadata/verify-internals.h> #include <mono/io-layer/io-layer.h> #include <mono/utils/strtod.h> #include <mono/utils/monobitset.h> #include <mono/utils/mono-time.h> #include <mono/utils/mono-proclib.h> #include <mono/utils/mono-string.h> #include <mono/utils/mono-error-internals.h> #if defined (PLATFORM_WIN32) #include <windows.h> #include <shlobj.h> #endif #include "decimal.h" static MonoReflectionAssembly* ves_icall_System_Reflection_Assembly_GetCallingAssembly (void); static MonoArray* type_array_from_modifiers (MonoImage *image, MonoType *type, int optional); /* This is an implementation of a growable pointer array that avoids doing memory allocations for small sizes. * It works by allocating an initial small array on stack and only going to malloc'd memory if needed. */ typedef struct { void **data; int size; int capacity; } MonoPtrArray; #define MONO_PTR_ARRAY_MAX_ON_STACK (16) #define mono_ptr_array_init(ARRAY, INITIAL_SIZE) do {\ (ARRAY).size = 0; \ (ARRAY).capacity = MAX (INITIAL_SIZE, MONO_PTR_ARRAY_MAX_ON_STACK); \ (ARRAY).data = INITIAL_SIZE > MONO_PTR_ARRAY_MAX_ON_STACK ? 
mono_gc_alloc_fixed (sizeof (void*) * INITIAL_SIZE, NULL) : g_newa (void*, MONO_PTR_ARRAY_MAX_ON_STACK); \ } while (0) #define mono_ptr_array_destroy(ARRAY) do {\ if ((ARRAY).capacity > MONO_PTR_ARRAY_MAX_ON_STACK) \ mono_gc_free_fixed ((ARRAY).data); \ } while (0) #define mono_ptr_array_append(ARRAY, VALUE) do { \ if ((ARRAY).size >= (ARRAY).capacity) {\ void *__tmp = mono_gc_alloc_fixed (sizeof (void*) * (ARRAY).capacity * 2, NULL); \ memcpy (__tmp, (ARRAY).data, (ARRAY).capacity * sizeof (void*)); \ if ((ARRAY).capacity > MONO_PTR_ARRAY_MAX_ON_STACK) \ mono_gc_free_fixed ((ARRAY).data); \ (ARRAY).data = __tmp; \ (ARRAY).capacity *= 2;\ }\ ((ARRAY).data [(ARRAY).size++] = VALUE); \ } while (0) #define mono_ptr_array_set(ARRAY, IDX, VALUE) do { \ ((ARRAY).data [(IDX)] = VALUE); \ } while (0) #define mono_ptr_array_get(ARRAY, IDX) ((ARRAY).data [(IDX)]) #define mono_ptr_array_size(ARRAY) ((ARRAY).size) static inline MonoBoolean is_generic_parameter (MonoType *type) { return !type->byref && (type->type == MONO_TYPE_VAR || type->type == MONO_TYPE_MVAR); } /* * We expect a pointer to a char, not a string */ static gboolean mono_double_ParseImpl (char *ptr, double *result) { gchar *endptr = NULL; *result = 0.0; MONO_ARCH_SAVE_REGS; #ifdef __arm__ if (*ptr) *result = strtod (ptr, &endptr); #else if (*ptr){ #ifdef _EGLIB_MAJOR /* Need to lock here because EGLIB (#464316) has locking defined as no-ops, and that breaks mono_strtod */ EnterCriticalSection (&mono_strtod_mutex); *result = mono_strtod (ptr, &endptr); LeaveCriticalSection (&mono_strtod_mutex); #else *result = mono_strtod (ptr, &endptr); #endif } #endif if (!*ptr || (endptr && *endptr)) return FALSE; return TRUE; } static MonoObject * ves_icall_System_Array_GetValueImpl (MonoObject *this, guint32 pos) { MonoClass *ac; MonoArray *ao; gint32 esize; gpointer *ea; MONO_ARCH_SAVE_REGS; ao = (MonoArray *)this; ac = (MonoClass *)ao->obj.vtable->klass; esize = mono_array_element_size (ac); ea = (gpointer*)((char*)ao->vector + (pos * esize)); if (ac->element_class->valuetype) return mono_value_box (this->vtable->domain, ac->element_class, ea); else return *ea; } static MonoObject * ves_icall_System_Array_GetValue (MonoObject *this, MonoObject *idxs) { MonoClass *ac, *ic; MonoArray *ao, *io; gint32 i, pos, *ind; MONO_ARCH_SAVE_REGS; MONO_CHECK_ARG_NULL (idxs); io = (MonoArray *)idxs; ic = (MonoClass *)io->obj.vtable->klass; ao = (MonoArray *)this; ac = (MonoClass *)ao->obj.vtable->klass; g_assert (ic->rank == 1); if (io->bounds != NULL || io->max_length != ac->rank) mono_raise_exception (mono_get_exception_argument (NULL, NULL)); ind = (gint32 *)io->vector; if (ao->bounds == NULL) { if (*ind < 0 || *ind >= ao->max_length) mono_raise_exception (mono_get_exception_index_out_of_range ()); return ves_icall_System_Array_GetValueImpl (this, *ind); } for (i = 0; i < ac->rank; i++) if ((ind [i] < ao->bounds [i].lower_bound) || (ind [i] >= (mono_array_lower_bound_t)ao->bounds [i].length + ao->bounds [i].lower_bound)) mono_raise_exception (mono_get_exception_index_out_of_range ()); pos = ind [0] - ao->bounds [0].lower_bound; for (i = 1; i < ac->rank; i++) pos = pos*ao->bounds [i].length + ind [i] - ao->bounds [i].lower_bound; return ves_icall_System_Array_GetValueImpl (this, pos); } static void ves_icall_System_Array_SetValueImpl (MonoArray *this, MonoObject *value, guint32 pos) { MonoClass *ac, *vc, *ec; gint32 esize, vsize; gpointer *ea, *va; int et, vt; guint64 u64 = 0; gint64 i64 = 0; gdouble r64 = 0; MONO_ARCH_SAVE_REGS; if (value) vc = 
value->vtable->klass; else vc = NULL; ac = this->obj.vtable->klass; ec = ac->element_class; esize = mono_array_element_size (ac); ea = (gpointer*)((char*)this->vector + (pos * esize)); va = (gpointer*)((char*)value + sizeof (MonoObject)); if (mono_class_is_nullable (ec)) { mono_nullable_init ((guint8*)ea, value, ec); return; } if (!value) { memset (ea, 0, esize); return; } #define NO_WIDENING_CONVERSION G_STMT_START{\ mono_raise_exception (mono_get_exception_argument ( \ "value", "not a widening conversion")); \ }G_STMT_END #define CHECK_WIDENING_CONVERSION(extra) G_STMT_START{\ if (esize < vsize + (extra)) \ mono_raise_exception (mono_get_exception_argument ( \ "value", "not a widening conversion")); \ }G_STMT_END #define INVALID_CAST G_STMT_START{\ mono_raise_exception (mono_get_exception_invalid_cast ()); \ }G_STMT_END /* Check element (destination) type. */ switch (ec->byval_arg.type) { case MONO_TYPE_STRING: switch (vc->byval_arg.type) { case MONO_TYPE_STRING: break; default: INVALID_CAST; } break; case MONO_TYPE_BOOLEAN: switch (vc->byval_arg.type) { case MONO_TYPE_BOOLEAN: break; case MONO_TYPE_CHAR: case MONO_TYPE_U1: case MONO_TYPE_U2: case MONO_TYPE_U4: case MONO_TYPE_U8: case MONO_TYPE_I1: case MONO_TYPE_I2: case MONO_TYPE_I4: case MONO_TYPE_I8: case MONO_TYPE_R4: case MONO_TYPE_R8: NO_WIDENING_CONVERSION; default: INVALID_CAST; } break; } if (!ec->valuetype) { if (!mono_object_isinst (value, ec)) INVALID_CAST; mono_gc_wbarrier_set_arrayref (this, ea, (MonoObject*)value); return; } if (mono_object_isinst (value, ec)) { if (ec->has_references) mono_value_copy (ea, (char*)value + sizeof (MonoObject), ec); else memcpy (ea, (char *)value + sizeof (MonoObject), esize); return; } if (!vc->valuetype) INVALID_CAST; vsize = mono_class_instance_size (vc) - sizeof (MonoObject); et = ec->byval_arg.type; if (et == MONO_TYPE_VALUETYPE && ec->byval_arg.data.klass->enumtype) et = mono_class_enum_basetype (ec->byval_arg.data.klass)->type; vt = vc->byval_arg.type; if (vt == MONO_TYPE_VALUETYPE && vc->byval_arg.data.klass->enumtype) vt = mono_class_enum_basetype (vc->byval_arg.data.klass)->type; #define ASSIGN_UNSIGNED(etype) G_STMT_START{\ switch (vt) { \ case MONO_TYPE_U1: \ case MONO_TYPE_U2: \ case MONO_TYPE_U4: \ case MONO_TYPE_U8: \ case MONO_TYPE_CHAR: \ CHECK_WIDENING_CONVERSION(0); \ *(etype *) ea = (etype) u64; \ return; \ /* You can't assign a signed value to an unsigned array. */ \ case MONO_TYPE_I1: \ case MONO_TYPE_I2: \ case MONO_TYPE_I4: \ case MONO_TYPE_I8: \ /* You can't assign a floating point number to an integer array. */ \ case MONO_TYPE_R4: \ case MONO_TYPE_R8: \ NO_WIDENING_CONVERSION; \ } \ }G_STMT_END #define ASSIGN_SIGNED(etype) G_STMT_START{\ switch (vt) { \ case MONO_TYPE_I1: \ case MONO_TYPE_I2: \ case MONO_TYPE_I4: \ case MONO_TYPE_I8: \ CHECK_WIDENING_CONVERSION(0); \ *(etype *) ea = (etype) i64; \ return; \ /* You can assign an unsigned value to a signed array if the array's */ \ /* element size is larger than the value size. */ \ case MONO_TYPE_U1: \ case MONO_TYPE_U2: \ case MONO_TYPE_U4: \ case MONO_TYPE_U8: \ case MONO_TYPE_CHAR: \ CHECK_WIDENING_CONVERSION(1); \ *(etype *) ea = (etype) u64; \ return; \ /* You can't assign a floating point number to an integer array. 
*/ \ case MONO_TYPE_R4: \ case MONO_TYPE_R8: \ NO_WIDENING_CONVERSION; \ } \ }G_STMT_END #define ASSIGN_REAL(etype) G_STMT_START{\ switch (vt) { \ case MONO_TYPE_R4: \ case MONO_TYPE_R8: \ CHECK_WIDENING_CONVERSION(0); \ *(etype *) ea = (etype) r64; \ return; \ /* All integer values fit into a floating point array, so we don't */ \ /* need to CHECK_WIDENING_CONVERSION here. */ \ case MONO_TYPE_I1: \ case MONO_TYPE_I2: \ case MONO_TYPE_I4: \ case MONO_TYPE_I8: \ *(etype *) ea = (etype) i64; \ return; \ case MONO_TYPE_U1: \ case MONO_TYPE_U2: \ case MONO_TYPE_U4: \ case MONO_TYPE_U8: \ case MONO_TYPE_CHAR: \ *(etype *) ea = (etype) u64; \ return; \ } \ }G_STMT_END switch (vt) { case MONO_TYPE_U1: u64 = *(guint8 *) va; break; case MONO_TYPE_U2: u64 = *(guint16 *) va; break; case MONO_TYPE_U4: u64 = *(guint32 *) va; break; case MONO_TYPE_U8: u64 = *(guint64 *) va; break; case MONO_TYPE_I1: i64 = *(gint8 *) va; break; case MONO_TYPE_I2: i64 = *(gint16 *) va; break; case MONO_TYPE_I4: i64 = *(gint32 *) va; break; case MONO_TYPE_I8: i64 = *(gint64 *) va; break; case MONO_TYPE_R4: r64 = *(gfloat *) va; break; case MONO_TYPE_R8: r64 = *(gdouble *) va; break; case MONO_TYPE_CHAR: u64 = *(guint16 *) va; break; case MONO_TYPE_BOOLEAN: /* Boolean is only compatible with itself. */ switch (et) { case MONO_TYPE_CHAR: case MONO_TYPE_U1: case MONO_TYPE_U2: case MONO_TYPE_U4: case MONO_TYPE_U8: case MONO_TYPE_I1: case MONO_TYPE_I2: case MONO_TYPE_I4: case MONO_TYPE_I8: case MONO_TYPE_R4: case MONO_TYPE_R8: NO_WIDENING_CONVERSION; default: INVALID_CAST; } break; } /* If we can't do a direct copy, let's try a widening conversion. */ switch (et) { case MONO_TYPE_CHAR: ASSIGN_UNSIGNED (guint16); case MONO_TYPE_U1: ASSIGN_UNSIGNED (guint8); case MONO_TYPE_U2: ASSIGN_UNSIGNED (guint16); case MONO_TYPE_U4: ASSIGN_UNSIGNED (guint32); case MONO_TYPE_U8: ASSIGN_UNSIGNED (guint64); case MONO_TYPE_I1: ASSIGN_SIGNED (gint8); case MONO_TYPE_I2: ASSIGN_SIGNED (gint16); case MONO_TYPE_I4: ASSIGN_SIGNED (gint32); case MONO_TYPE_I8: ASSIGN_SIGNED (gint64); case MONO_TYPE_R4: ASSIGN_REAL (gfloat); case MONO_TYPE_R8: ASSIGN_REAL (gdouble); } INVALID_CAST; /* Not reached, INVALID_CAST does not return. Just to avoid a compiler warning ... 
*/ return; #undef INVALID_CAST #undef NO_WIDENING_CONVERSION #undef CHECK_WIDENING_CONVERSION #undef ASSIGN_UNSIGNED #undef ASSIGN_SIGNED #undef ASSIGN_REAL } static void ves_icall_System_Array_SetValue (MonoArray *this, MonoObject *value, MonoArray *idxs) { MonoClass *ac, *ic; gint32 i, pos, *ind; MONO_ARCH_SAVE_REGS; MONO_CHECK_ARG_NULL (idxs); ic = idxs->obj.vtable->klass; ac = this->obj.vtable->klass; g_assert (ic->rank == 1); if (idxs->bounds != NULL || idxs->max_length != ac->rank) mono_raise_exception (mono_get_exception_argument (NULL, NULL)); ind = (gint32 *)idxs->vector; if (this->bounds == NULL) { if (*ind < 0 || *ind >= this->max_length) mono_raise_exception (mono_get_exception_index_out_of_range ()); ves_icall_System_Array_SetValueImpl (this, value, *ind); return; } for (i = 0; i < ac->rank; i++) if ((ind [i] < this->bounds [i].lower_bound) || (ind [i] >= (mono_array_lower_bound_t)this->bounds [i].length + this->bounds [i].lower_bound)) mono_raise_exception (mono_get_exception_index_out_of_range ()); pos = ind [0] - this->bounds [0].lower_bound; for (i = 1; i < ac->rank; i++) pos = pos * this->bounds [i].length + ind [i] - this->bounds [i].lower_bound; ves_icall_System_Array_SetValueImpl (this, value, pos); } static MonoArray * ves_icall_System_Array_CreateInstanceImpl (MonoReflectionType *type, MonoArray *lengths, MonoArray *bounds) { MonoClass *aklass; MonoArray *array; mono_array_size_t *sizes, i; gboolean bounded = FALSE; MONO_ARCH_SAVE_REGS; MONO_CHECK_ARG_NULL (type); MONO_CHECK_ARG_NULL (lengths); MONO_CHECK_ARG (lengths, mono_array_length (lengths) > 0); if (bounds) MONO_CHECK_ARG (bounds, mono_array_length (lengths) == mono_array_length (bounds)); for (i = 0; i < mono_array_length (lengths); i++) if (mono_array_get (lengths, gint32, i) < 0) mono_raise_exception (mono_get_exception_argument_out_of_range (NULL)); if (bounds && (mono_array_length (bounds) == 1) && (mono_array_get (bounds, gint32, 0) != 0)) /* vectors are not the same as one dimensional arrays with no-zero bounds */ bounded = TRUE; else bounded = FALSE; aklass = mono_bounded_array_class_get (mono_class_from_mono_type (type->type), mono_array_length (lengths), bounded); sizes = alloca (aklass->rank * sizeof(mono_array_size_t) * 2); for (i = 0; i < aklass->rank; ++i) { sizes [i] = mono_array_get (lengths, guint32, i); if (bounds) sizes [i + aklass->rank] = mono_array_get (bounds, guint32, i); else sizes [i + aklass->rank] = 0; } array = mono_array_new_full (mono_object_domain (type), aklass, sizes, sizes + aklass->rank); return array; } static MonoArray * ves_icall_System_Array_CreateInstanceImpl64 (MonoReflectionType *type, MonoArray *lengths, MonoArray *bounds) { MonoClass *aklass; MonoArray *array; mono_array_size_t *sizes, i; gboolean bounded = FALSE; MONO_ARCH_SAVE_REGS; MONO_CHECK_ARG_NULL (type); MONO_CHECK_ARG_NULL (lengths); MONO_CHECK_ARG (lengths, mono_array_length (lengths) > 0); if (bounds) MONO_CHECK_ARG (bounds, mono_array_length (lengths) == mono_array_length (bounds)); for (i = 0; i < mono_array_length (lengths); i++) if ((mono_array_get (lengths, gint64, i) < 0) || (mono_array_get (lengths, gint64, i) > MONO_ARRAY_MAX_INDEX)) mono_raise_exception (mono_get_exception_argument_out_of_range (NULL)); if (bounds && (mono_array_length (bounds) == 1) && (mono_array_get (bounds, gint64, 0) != 0)) /* vectors are not the same as one dimensional arrays with no-zero bounds */ bounded = TRUE; else bounded = FALSE; aklass = mono_bounded_array_class_get (mono_class_from_mono_type (type->type), 
mono_array_length (lengths), bounded); sizes = alloca (aklass->rank * sizeof(mono_array_size_t) * 2); for (i = 0; i < aklass->rank; ++i) { sizes [i] = mono_array_get (lengths, guint64, i); if (bounds) sizes [i + aklass->rank] = (mono_array_size_t) mono_array_get (bounds, guint64, i); else sizes [i + aklass->rank] = 0; } array = mono_array_new_full (mono_object_domain (type), aklass, sizes, sizes + aklass->rank); return array; } static gint32 ves_icall_System_Array_GetRank (MonoObject *this) { MONO_ARCH_SAVE_REGS; return this->vtable->klass->rank; } static gint32 ves_icall_System_Array_GetLength (MonoArray *this, gint32 dimension) { gint32 rank = ((MonoObject *)this)->vtable->klass->rank; mono_array_size_t length; MONO_ARCH_SAVE_REGS; if ((dimension < 0) || (dimension >= rank)) mono_raise_exception (mono_get_exception_index_out_of_range ()); if (this->bounds == NULL) length = this->max_length; else length = this->bounds [dimension].length; #ifdef MONO_BIG_ARRAYS if (length > G_MAXINT32) mono_raise_exception (mono_get_exception_overflow ()); #endif return length; } static gint64 ves_icall_System_Array_GetLongLength (MonoArray *this, gint32 dimension) { gint32 rank = ((MonoObject *)this)->vtable->klass->rank; MONO_ARCH_SAVE_REGS; if ((dimension < 0) || (dimension >= rank)) mono_raise_exception (mono_get_exception_index_out_of_range ()); if (this->bounds == NULL) return this->max_length; return this->bounds [dimension].length; } static gint32 ves_icall_System_Array_GetLowerBound (MonoArray *this, gint32 dimension) { gint32 rank = ((MonoObject *)this)->vtable->klass->rank; MONO_ARCH_SAVE_REGS; if ((dimension < 0) || (dimension >= rank)) mono_raise_exception (mono_get_exception_index_out_of_range ()); if (this->bounds == NULL) return 0; return this->bounds [dimension].lower_bound; } static void ves_icall_System_Array_ClearInternal (MonoArray *arr, int idx, int length) { int sz = mono_array_element_size (mono_object_class (arr)); memset (mono_array_addr_with_size (arr, sz, idx), 0, length * sz); } static gboolean ves_icall_System_Array_FastCopy (MonoArray *source, int source_idx, MonoArray* dest, int dest_idx, int length) { int element_size; void * dest_addr; void * source_addr; MonoClass *src_class; MonoClass *dest_class; MONO_ARCH_SAVE_REGS; if (source->obj.vtable->klass->rank != dest->obj.vtable->klass->rank) return FALSE; if (source->bounds || dest->bounds) return FALSE; /* there's no integer overflow since mono_array_length returns an unsigned integer */ if ((dest_idx + length > mono_array_length (dest)) || (source_idx + length > mono_array_length (source))) return FALSE; src_class = source->obj.vtable->klass->element_class; dest_class = dest->obj.vtable->klass->element_class; /* * Handle common cases. 
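* The special cases below either bail out with FALSE so the slower managed path (with per-element type checks) runs, or, when the element types are compatible, do the copy directly with memmove / the write-barriered array helpers.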
*/ /* Case1: object[] -> valuetype[] (ArrayList::ToArray) */ if (src_class == mono_defaults.object_class && dest_class->valuetype) { // FIXME: This is racy return FALSE; /* int i; int has_refs = dest_class->has_references; for (i = source_idx; i < source_idx + length; ++i) { MonoObject *elem = mono_array_get (source, MonoObject*, i); if (elem && !mono_object_isinst (elem, dest_class)) return FALSE; } element_size = mono_array_element_size (dest->obj.vtable->klass); memset (mono_array_addr_with_size (dest, element_size, dest_idx), 0, element_size * length); for (i = 0; i < length; ++i) { MonoObject *elem = mono_array_get (source, MonoObject*, source_idx + i); void *addr = mono_array_addr_with_size (dest, element_size, dest_idx + i); if (!elem) continue; if (has_refs) mono_value_copy (addr, (char *)elem + sizeof (MonoObject), dest_class); else memcpy (addr, (char *)elem + sizeof (MonoObject), element_size); } return TRUE; */ } /* Check if we're copying a char[] <==> (u)short[] */ if (src_class != dest_class) { if (dest_class->valuetype || dest_class->enumtype || src_class->valuetype || src_class->enumtype) return FALSE; if (mono_class_is_subclass_of (src_class, dest_class, FALSE)) ; /* Case2: object[] -> reftype[] (ArrayList::ToArray) */ else if (mono_class_is_subclass_of (dest_class, src_class, FALSE)) { // FIXME: This is racy return FALSE; /* int i; for (i = source_idx; i < source_idx + length; ++i) { MonoObject *elem = mono_array_get (source, MonoObject*, i); if (elem && !mono_object_isinst (elem, dest_class)) return FALSE; } */ } else return FALSE; } if (dest_class->valuetype) { element_size = mono_array_element_size (source->obj.vtable->klass); source_addr = mono_array_addr_with_size (source, element_size, source_idx); if (dest_class->has_references) { mono_value_copy_array (dest, dest_idx, source_addr, length); } else { dest_addr = mono_array_addr_with_size (dest, element_size, dest_idx); memmove (dest_addr, source_addr, element_size * length); } } else { mono_array_memcpy_refs (dest, dest_idx, source, source_idx, length); } return TRUE; } static void ves_icall_System_Array_GetGenericValueImpl (MonoObject *this, guint32 pos, gpointer value) { MonoClass *ac; MonoArray *ao; gint32 esize; gpointer *ea; MONO_ARCH_SAVE_REGS; ao = (MonoArray *)this; ac = (MonoClass *)ao->obj.vtable->klass; esize = mono_array_element_size (ac); ea = (gpointer*)((char*)ao->vector + (pos * esize)); memcpy (value, ea, esize); } static void ves_icall_System_Array_SetGenericValueImpl (MonoObject *this, guint32 pos, gpointer value) { MonoClass *ac, *ec; MonoArray *ao; gint32 esize; gpointer *ea; MONO_ARCH_SAVE_REGS; ao = (MonoArray *)this; ac = (MonoClass *)ao->obj.vtable->klass; ec = ac->element_class; esize = mono_array_element_size (ac); ea = (gpointer*)((char*)ao->vector + (pos * esize)); if (MONO_TYPE_IS_REFERENCE (&ec->byval_arg)) { g_assert (esize == sizeof (gpointer)); mono_gc_wbarrier_generic_store (ea, *(gpointer*)value); } else { g_assert (ec->inited); if (ec->has_references) mono_gc_wbarrier_value_copy (ea, value, 1, ec); memcpy (ea, value, esize); } } static void ves_icall_System_Runtime_CompilerServices_RuntimeHelpers_InitializeArray (MonoArray *array, MonoClassField *field_handle) { MonoClass *klass = array->obj.vtable->klass; guint32 size = mono_array_element_size (klass); MonoType *type = mono_type_get_underlying_type (&klass->element_class->byval_arg); int align; const char *field_data; if (MONO_TYPE_IS_REFERENCE (type) || type->type == MONO_TYPE_VALUETYPE) { MonoException *exc = 
mono_get_exception_argument("array", "Cannot initialize array of non-primitive type."); mono_raise_exception (exc); } if (!(field_handle->type->attrs & FIELD_ATTRIBUTE_HAS_FIELD_RVA)) { MonoException *exc = mono_get_exception_argument("field_handle", "Field doesn't have an RVA"); mono_raise_exception (exc); } size *= array->max_length; field_data = mono_field_get_data (field_handle); if (size > mono_type_size (field_handle->type, &align)) { MonoException *exc = mono_get_exception_argument("field_handle", "Field not large enough to fill array"); mono_raise_exception (exc); } #if G_BYTE_ORDER != G_LITTLE_ENDIAN #define SWAP(n) {\ guint ## n *data = (guint ## n *) mono_array_addr (array, char, 0); \ guint ## n *src = (guint ## n *) field_data; \ guint ## n *end = (guint ## n *)((char*)src + size); \ \ for (; src < end; data++, src++) { \ *data = read ## n (src); \ } \ } /* printf ("Initialize array with elements of %s type\n", klass->element_class->name); */ switch (type->type) { case MONO_TYPE_CHAR: case MONO_TYPE_I2: case MONO_TYPE_U2: SWAP (16); break; case MONO_TYPE_I4: case MONO_TYPE_U4: case MONO_TYPE_R4: SWAP (32); break; case MONO_TYPE_I8: case MONO_TYPE_U8: case MONO_TYPE_R8: SWAP (64); break; default: memcpy (mono_array_addr (array, char, 0), field_data, size); break; } #else memcpy (mono_array_addr (array, char, 0), field_data, size); #ifdef ARM_FPU_FPA if (klass->element_class->byval_arg.type == MONO_TYPE_R8) { gint i; double tmp; double *data = (double*)mono_array_addr (array, double, 0); for (i = 0; i < size; i++, data++) { readr8 (data, &tmp); *data = tmp; } } #endif #endif } static gint ves_icall_System_Runtime_CompilerServices_RuntimeHelpers_GetOffsetToStringData (void) { MONO_ARCH_SAVE_REGS; return offsetof (MonoString, chars); } static MonoObject * ves_icall_System_Runtime_CompilerServices_RuntimeHelpers_GetObjectValue (MonoObject *obj) { MONO_ARCH_SAVE_REGS; if ((obj == NULL) || (! (obj->vtable->klass->valuetype))) return obj; else return mono_object_clone (obj); } static void ves_icall_System_Runtime_CompilerServices_RuntimeHelpers_RunClassConstructor (MonoType *handle) { MonoClass *klass; MonoVTable *vtable; MONO_CHECK_ARG_NULL (handle); klass = mono_class_from_mono_type (handle); MONO_CHECK_ARG (handle, klass); vtable = mono_class_vtable_full (mono_domain_get (), klass, TRUE); /* This will call the type constructor */ mono_runtime_class_init (vtable); } static void ves_icall_System_Runtime_CompilerServices_RuntimeHelpers_RunModuleConstructor (MonoImage *image) { MONO_ARCH_SAVE_REGS; mono_image_check_for_module_cctor (image); if (image->has_module_cctor) { MonoClass *module_klass = mono_class_get (image, MONO_TOKEN_TYPE_DEF | 1); /*It's fine to raise the exception here*/ mono_runtime_class_init (mono_class_vtable_full (mono_domain_get (), module_klass, TRUE)); } } static MonoObject * ves_icall_System_Object_MemberwiseClone (MonoObject *this) { MONO_ARCH_SAVE_REGS; return mono_object_clone (this); } static gint32 ves_icall_System_ValueType_InternalGetHashCode (MonoObject *this, MonoArray **fields) { MonoClass *klass; MonoObject **values = NULL; MonoObject *o; int count = 0; gint32 result = 0; MonoClassField* field; gpointer iter; MONO_ARCH_SAVE_REGS; klass = mono_object_class (this); if (mono_class_num_fields (klass) == 0) return mono_object_hash (this); /* * Compute the starting value of the hashcode for fields of primitive * types, and return the remaining fields in an array to the managed side. * This way, we can avoid costly reflection operations in managed code. 
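* Only Int32 and String fields are folded into the hash below; any other field is boxed and stored into the array returned through 'fields' for managed code to combine.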
*/ iter = NULL; while ((field = mono_class_get_fields (klass, &iter))) { if (field->type->attrs & FIELD_ATTRIBUTE_STATIC) continue; if (mono_field_is_deleted (field)) continue; /* FIXME: Add more types */ switch (field->type->type) { case MONO_TYPE_I4: result ^= *(gint32*)((guint8*)this + field->offset); break; case MONO_TYPE_STRING: { MonoString *s; s = *(MonoString**)((guint8*)this + field->offset); if (s != NULL) result ^= mono_string_hash (s); break; } default: if (!values) values = g_newa (MonoObject*, mono_class_num_fields (klass)); o = mono_field_get_value_object (mono_object_domain (this), field, this); values [count++] = o; } } if (values) { int i; mono_gc_wbarrier_generic_store (fields, (MonoObject*) mono_array_new (mono_domain_get (), mono_defaults.object_class, count)); for (i = 0; i < count; ++i) mono_array_setref (*fields, i, values [i]); } else { *fields = NULL; } return result; } static MonoBoolean ves_icall_System_ValueType_Equals (MonoObject *this, MonoObject *that, MonoArray **fields) { MonoClass *klass; MonoObject **values = NULL; MonoObject *o; MonoClassField* field; gpointer iter; int count = 0; MONO_ARCH_SAVE_REGS; MONO_CHECK_ARG_NULL (that); if (this->vtable != that->vtable) return FALSE; klass = mono_object_class (this); if (klass->enumtype && mono_class_enum_basetype (klass) && mono_class_enum_basetype (klass)->type == MONO_TYPE_I4) return (*(gint32*)((guint8*)this + sizeof (MonoObject)) == *(gint32*)((guint8*)that + sizeof (MonoObject))); /* * Do the comparison for fields of primitive type and return a result if * possible. Otherwise, return the remaining fields in an array to the * managed side. This way, we can avoid costly reflection operations in * managed code. */ *fields = NULL; iter = NULL; while ((field = mono_class_get_fields (klass, &iter))) { if (field->type->attrs & FIELD_ATTRIBUTE_STATIC) continue; if (mono_field_is_deleted (field)) continue; /* FIXME: Add more types */ switch (field->type->type) { case MONO_TYPE_U1: case MONO_TYPE_I1: case MONO_TYPE_BOOLEAN: if (*((guint8*)this + field->offset) != *((guint8*)that + field->offset)) return FALSE; break; case MONO_TYPE_U2: case MONO_TYPE_I2: case MONO_TYPE_CHAR: if (*(gint16*)((guint8*)this + field->offset) != *(gint16*)((guint8*)that + field->offset)) return FALSE; break; case MONO_TYPE_U4: case MONO_TYPE_I4: if (*(gint32*)((guint8*)this + field->offset) != *(gint32*)((guint8*)that + field->offset)) return FALSE; break; case MONO_TYPE_U8: case MONO_TYPE_I8: if (*(gint64*)((guint8*)this + field->offset) != *(gint64*)((guint8*)that + field->offset)) return FALSE; break; case MONO_TYPE_R4: if (*(float*)((guint8*)this + field->offset) != *(float*)((guint8*)that + field->offset)) return FALSE; break; case MONO_TYPE_R8: if (*(double*)((guint8*)this + field->offset) != *(double*)((guint8*)that + field->offset)) return FALSE; break; case MONO_TYPE_STRING: { MonoString *s1, *s2; guint32 s1len, s2len; s1 = *(MonoString**)((guint8*)this + field->offset); s2 = *(MonoString**)((guint8*)that + field->offset); if (s1 == s2) break; if ((s1 == NULL) || (s2 == NULL)) return FALSE; s1len = mono_string_length (s1); s2len = mono_string_length (s2); if (s1len != s2len) return FALSE; if (memcmp (mono_string_chars (s1), mono_string_chars (s2), s1len * sizeof (gunichar2)) != 0) return FALSE; break; } default: if (!values) values = g_newa (MonoObject*, mono_class_num_fields (klass) * 2); o = mono_field_get_value_object (mono_object_domain (this), field, this); values [count++] = o; o = mono_field_get_value_object 
(mono_object_domain (this), field, that); values [count++] = o; } if (klass->enumtype) /* enums only have one non-static field */ break; } if (values) { int i; mono_gc_wbarrier_generic_store (fields, (MonoObject*) mono_array_new (mono_domain_get (), mono_defaults.object_class, count)); for (i = 0; i < count; ++i) mono_array_setref (*fields, i, values [i]); return FALSE; } else { return TRUE; } } static MonoReflectionType * ves_icall_System_Object_GetType (MonoObject *obj) { MONO_ARCH_SAVE_REGS; if (obj->vtable->klass != mono_defaults.transparent_proxy_class) return mono_type_get_object (mono_object_domain (obj), &obj->vtable->klass->byval_arg); else return mono_type_get_object (mono_object_domain (obj), &((MonoTransparentProxy*)obj)->remote_class->proxy_class->byval_arg); } static void mono_type_type_from_obj (MonoReflectionType *mtype, MonoObject *obj) { MONO_ARCH_SAVE_REGS; mtype->type = &obj->vtable->klass->byval_arg; g_assert (mtype->type->type); } static gint32 ves_icall_ModuleBuilder_getToken (MonoReflectionModuleBuilder *mb, MonoObject *obj) { MONO_ARCH_SAVE_REGS; MONO_CHECK_ARG_NULL (obj); return mono_image_create_token (mb->dynamic_image, obj, TRUE, TRUE); } static gint32 ves_icall_ModuleBuilder_getMethodToken (MonoReflectionModuleBuilder *mb, MonoReflectionMethod *method, MonoArray *opt_param_types) { MONO_ARCH_SAVE_REGS; MONO_CHECK_ARG_NULL (method); return mono_image_create_method_token ( mb->dynamic_image, (MonoObject *) method, opt_param_types); } static void ves_icall_ModuleBuilder_WriteToFile (MonoReflectionModuleBuilder *mb, HANDLE file) { MONO_ARCH_SAVE_REGS; mono_image_create_pefile (mb, file); } static void ves_icall_ModuleBuilder_build_metadata (MonoReflectionModuleBuilder *mb) { MONO_ARCH_SAVE_REGS; mono_image_build_metadata (mb); } static void ves_icall_ModuleBuilder_RegisterToken (MonoReflectionModuleBuilder *mb, MonoObject *obj, guint32 token) { MONO_ARCH_SAVE_REGS; mono_image_register_token (mb->dynamic_image, token, obj); } static gboolean get_caller (MonoMethod *m, gint32 no, gint32 ilo, gboolean managed, gpointer data) { MonoMethod **dest = data; /* skip unmanaged frames */ if (!managed) return FALSE; if (m == *dest) { *dest = NULL; return FALSE; } if (!(*dest)) { *dest = m; return TRUE; } return FALSE; } static gboolean get_executing (MonoMethod *m, gint32 no, gint32 ilo, gboolean managed, gpointer data) { MonoMethod **dest = data; /* skip unmanaged frames */ if (!managed) return FALSE; if (!(*dest)) { if (!strcmp (m->klass->name_space, "System.Reflection")) return FALSE; *dest = m; return TRUE; } return FALSE; } static gboolean get_caller_no_reflection (MonoMethod *m, gint32 no, gint32 ilo, gboolean managed, gpointer data) { MonoMethod **dest = data; /* skip unmanaged frames */ if (!managed) return FALSE; if (m->wrapper_type != MONO_WRAPPER_NONE) return FALSE; if (m->klass->image == mono_defaults.corlib && !strcmp (m->klass->name_space, "System.Reflection")) return FALSE; if (m == *dest) { *dest = NULL; return FALSE; } if (!(*dest)) { *dest = m; return TRUE; } return FALSE; } static MonoReflectionType * type_from_name (const char *str, MonoBoolean ignoreCase) { MonoType *type = NULL; MonoAssembly *assembly = NULL; MonoTypeNameParse info; char *temp_str = g_strdup (str); gboolean type_resolve = FALSE; MONO_ARCH_SAVE_REGS; /* mono_reflection_parse_type() mangles the string */ if (!mono_reflection_parse_type (temp_str, &info)) { mono_reflection_free_type_info (&info); g_free (temp_str); return NULL; } if (info.assembly.name) { assembly = mono_assembly_load 
(&info.assembly, NULL, NULL); } else { MonoMethod *m = mono_method_get_last_managed (); MonoMethod *dest = m; mono_stack_walk_no_il (get_caller_no_reflection, &dest); if (!dest) dest = m; /* * FIXME: mono_method_get_last_managed() sometimes returns NULL, thus * causing ves_icall_System_Reflection_Assembly_GetCallingAssembly() * to crash. This only seems to happen in some strange remoting * scenarios and I was unable to figure out what's happening there. * Dec 10, 2005 - Martin. */ if (dest) { assembly = dest->klass->image->assembly; type_resolve = TRUE; } else { g_warning (G_STRLOC); } } if (assembly) { /* When loading from the current assembly, AppDomain.TypeResolve will not be called yet */ type = mono_reflection_get_type (assembly->image, &info, ignoreCase, &type_resolve); } if (!info.assembly.name && !type) /* try mscorlib */ type = mono_reflection_get_type (NULL, &info, ignoreCase, &type_resolve); if (assembly && !type && type_resolve) { type_resolve = FALSE; /* This will invoke TypeResolve if not done in the first 'if' */ type = mono_reflection_get_type (assembly->image, &info, ignoreCase, &type_resolve); } mono_reflection_free_type_info (&info); g_free (temp_str); if (!type) return NULL; return mono_type_get_object (mono_domain_get (), type); } #ifdef UNUSED MonoReflectionType * mono_type_get (const char *str) { char *copy = g_strdup (str); MonoReflectionType *type = type_from_name (copy, FALSE); g_free (copy); return type; } #endif static MonoReflectionType* ves_icall_type_from_name (MonoString *name, MonoBoolean throwOnError, MonoBoolean ignoreCase) { char *str = mono_string_to_utf8 (name); MonoReflectionType *type; type = type_from_name (str, ignoreCase); g_free (str); if (type == NULL){ MonoException *e = NULL; if (throwOnError) e = mono_get_exception_type_load (name, NULL); mono_loader_clear_error (); if (e != NULL) mono_raise_exception (e); } return type; } static MonoReflectionType* ves_icall_type_from_handle (MonoType *handle) { MonoDomain *domain = mono_domain_get (); MonoClass *klass = mono_class_from_mono_type (handle); MONO_ARCH_SAVE_REGS; mono_class_init (klass); return mono_type_get_object (domain, handle); } static MonoBoolean ves_icall_System_Type_EqualsInternal (MonoReflectionType *type, MonoReflectionType *c) { MONO_ARCH_SAVE_REGS; if (c && type->type && c->type) return mono_metadata_type_equal (type->type, c->type); else return (type == c) ? 
TRUE : FALSE; } /* System.TypeCode */ typedef enum { TYPECODE_EMPTY, TYPECODE_OBJECT, TYPECODE_DBNULL, TYPECODE_BOOLEAN, TYPECODE_CHAR, TYPECODE_SBYTE, TYPECODE_BYTE, TYPECODE_INT16, TYPECODE_UINT16, TYPECODE_INT32, TYPECODE_UINT32, TYPECODE_INT64, TYPECODE_UINT64, TYPECODE_SINGLE, TYPECODE_DOUBLE, TYPECODE_DECIMAL, TYPECODE_DATETIME, TYPECODE_STRING = 18 } TypeCode; static guint32 ves_icall_type_GetTypeCodeInternal (MonoReflectionType *type) { int t = type->type->type; MONO_ARCH_SAVE_REGS; if (type->type->byref) return TYPECODE_OBJECT; handle_enum: switch (t) { case MONO_TYPE_VOID: return TYPECODE_OBJECT; case MONO_TYPE_BOOLEAN: return TYPECODE_BOOLEAN; case MONO_TYPE_U1: return TYPECODE_BYTE; case MONO_TYPE_I1: return TYPECODE_SBYTE; case MONO_TYPE_U2: return TYPECODE_UINT16; case MONO_TYPE_I2: return TYPECODE_INT16; case MONO_TYPE_CHAR: return TYPECODE_CHAR; case MONO_TYPE_PTR: case MONO_TYPE_U: case MONO_TYPE_I: return TYPECODE_OBJECT; case MONO_TYPE_U4: return TYPECODE_UINT32; case MONO_TYPE_I4: return TYPECODE_INT32; case MONO_TYPE_U8: return TYPECODE_UINT64; case MONO_TYPE_I8: return TYPECODE_INT64; case MONO_TYPE_R4: return TYPECODE_SINGLE; case MONO_TYPE_R8: return TYPECODE_DOUBLE; case MONO_TYPE_VALUETYPE: if (type->type->data.klass->enumtype) { t = mono_class_enum_basetype (type->type->data.klass)->type; goto handle_enum; } else { MonoClass *k = type->type->data.klass; if (strcmp (k->name_space, "System") == 0) { if (strcmp (k->name, "Decimal") == 0) return TYPECODE_DECIMAL; else if (strcmp (k->name, "DateTime") == 0) return TYPECODE_DATETIME; } } return TYPECODE_OBJECT; case MONO_TYPE_STRING: return TYPECODE_STRING; case MONO_TYPE_SZARRAY: case MONO_TYPE_ARRAY: case MONO_TYPE_OBJECT: case MONO_TYPE_VAR: case MONO_TYPE_MVAR: case MONO_TYPE_TYPEDBYREF: return TYPECODE_OBJECT; case MONO_TYPE_CLASS: { MonoClass *k = type->type->data.klass; if (strcmp (k->name_space, "System") == 0) { if (strcmp (k->name, "DBNull") == 0) return TYPECODE_DBNULL; } } return TYPECODE_OBJECT; case MONO_TYPE_GENERICINST: return TYPECODE_OBJECT; default: g_error ("type 0x%02x not handled in GetTypeCode()", t); } return 0; } static guint32 ves_icall_type_is_subtype_of (MonoReflectionType *type, MonoReflectionType *c, MonoBoolean check_interfaces) { MonoDomain *domain; MonoClass *klass; MonoClass *klassc; MONO_ARCH_SAVE_REGS; g_assert (type != NULL); domain = ((MonoObject *)type)->vtable->domain; if (!c) /* FIXME: don't know what to do here */ return 0; klass = mono_class_from_mono_type (type->type); klassc = mono_class_from_mono_type (c->type); if (type->type->byref) return klassc == mono_defaults.object_class; return mono_class_is_subclass_of (klass, klassc, check_interfaces); } static guint32 ves_icall_type_is_assignable_from (MonoReflectionType *type, MonoReflectionType *c) { MonoDomain *domain; MonoClass *klass; MonoClass *klassc; MONO_ARCH_SAVE_REGS; g_assert (type != NULL); domain = ((MonoObject *)type)->vtable->domain; klass = mono_class_from_mono_type (type->type); klassc = mono_class_from_mono_type (c->type); if (type->type->byref && !c->type->byref) return FALSE; return mono_class_is_assignable_from (klass, klassc); } static guint32 ves_icall_type_IsInstanceOfType (MonoReflectionType *type, MonoObject *obj) { MonoClass *klass = mono_class_from_mono_type (type->type); return mono_object_isinst (obj, klass) != NULL; } static guint32 ves_icall_get_attributes (MonoReflectionType *type) { MonoClass *klass = mono_class_from_mono_type (type->type); MONO_ARCH_SAVE_REGS; return klass->flags; } static
MonoReflectionMarshal* ves_icall_System_Reflection_FieldInfo_GetUnmanagedMarshal (MonoReflectionField *field) { MonoClass *klass = field->field->parent; MonoMarshalType *info; int i; if (klass->generic_container || (klass->generic_class && klass->generic_class->context.class_inst->is_open)) return NULL; info = mono_marshal_load_type_info (klass); for (i = 0; i < info->num_fields; ++i) { if (info->fields [i].field == field->field) { if (!info->fields [i].mspec) return NULL; else return mono_reflection_marshal_from_marshal_spec (field->object.vtable->domain, klass, info->fields [i].mspec); } } return NULL; } static MonoReflectionField* ves_icall_System_Reflection_FieldInfo_internal_from_handle_type (MonoClassField *handle, MonoType *type) { gboolean found = FALSE; MonoClass *klass; MonoClass *k; g_assert (handle); if (!type) { klass = handle->parent; } else { klass = mono_class_from_mono_type (type); /* Check that the field belongs to the class */ for (k = klass; k; k = k->parent) { if (k == handle->parent) { found = TRUE; break; } } if (!found) /* The managed code will throw the exception */ return NULL; } return mono_field_get_object (mono_domain_get (), klass, handle); } static MonoArray* ves_icall_System_Reflection_FieldInfo_GetTypeModifiers (MonoReflectionField *field, MonoBoolean optional) { MonoType *type = field->field->type; return type_array_from_modifiers (field->field->parent->image, type, optional); } static void ves_icall_get_method_info (MonoMethod *method, MonoMethodInfo *info) { MonoDomain *domain = mono_domain_get (); MonoMethodSignature* sig; MONO_ARCH_SAVE_REGS; sig = mono_method_signature (method); if (!sig) { g_assert (mono_loader_get_last_error ()); mono_raise_exception (mono_loader_error_prepare_exception (mono_loader_get_last_error ())); } MONO_STRUCT_SETREF (info, parent, mono_type_get_object (domain, &method->klass->byval_arg)); MONO_STRUCT_SETREF (info, ret, mono_type_get_object (domain, sig->ret)); info->attrs = method->flags; info->implattrs = method->iflags; if (sig->call_convention == MONO_CALL_DEFAULT) info->callconv = sig->sentinelpos >= 0 ? 2 : 1; else { if (sig->call_convention == MONO_CALL_VARARG || sig->sentinelpos >= 0) info->callconv = 2; else info->callconv = 1; } info->callconv |= (sig->hasthis << 5) | (sig->explicit_this << 6); } static MonoArray* ves_icall_get_parameter_info (MonoMethod *method, MonoReflectionMethod *member) { MonoDomain *domain = mono_domain_get (); return mono_param_get_objects_internal (domain, method, member->reftype ? mono_class_from_mono_type (member->reftype->type) : NULL); } static MonoReflectionMarshal* ves_icall_System_MonoMethodInfo_get_retval_marshal (MonoMethod *method) { MonoDomain *domain = mono_domain_get (); MonoReflectionMarshal* res = NULL; MonoMarshalSpec **mspecs; int i; mspecs = g_new (MonoMarshalSpec*, mono_method_signature (method)->param_count + 1); mono_method_get_marshal_info (method, mspecs); if (mspecs [0]) res = mono_reflection_marshal_from_marshal_spec (domain, method->klass, mspecs [0]); for (i = mono_method_signature (method)->param_count; i >= 0; i--) if (mspecs [i]) mono_metadata_free_marshal_spec (mspecs [i]); g_free (mspecs); return res; } static gint32 ves_icall_MonoField_GetFieldOffset (MonoReflectionField *field) { return field->field->offset - sizeof (MonoObject); } static MonoReflectionType* ves_icall_MonoField_GetParentType (MonoReflectionField *field, MonoBoolean declaring) { MonoClass *parent; MONO_ARCH_SAVE_REGS; parent = declaring? 
field->field->parent: field->klass; return mono_type_get_object (mono_object_domain (field), &parent->byval_arg); } static MonoObject * ves_icall_MonoField_GetValueInternal (MonoReflectionField *field, MonoObject *obj) { MonoObject *o; MonoClassField *cf = field->field; MonoClass *klass; MonoVTable *vtable; MonoType *t; MonoDomain *domain = mono_object_domain (field); gchar *v; gboolean is_static = FALSE; gboolean is_ref = FALSE; MONO_ARCH_SAVE_REGS; if (field->klass->image->assembly->ref_only) mono_raise_exception (mono_get_exception_invalid_operation ( "It is illegal to get the value on a field on a type loaded using the ReflectionOnly methods.")); if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) mono_security_core_clr_ensure_reflection_access_field (cf); mono_class_init (field->klass); if (cf->type->attrs & FIELD_ATTRIBUTE_STATIC) is_static = TRUE; if (obj && !is_static) { /* Check that the field belongs to the object */ gboolean found = FALSE; MonoClass *k; for (k = obj->vtable->klass; k; k = k->parent) { if (k == cf->parent) { found = TRUE; break; } } if (!found) { char *msg = g_strdup_printf ("Field '%s' defined on type '%s' is not a field on the target object which is of type '%s'.", mono_field_get_name (cf), cf->parent->name, obj->vtable->klass->name); MonoException *ex = mono_get_exception_argument (NULL, msg); g_free (msg); mono_raise_exception (ex); } } t = mono_type_get_underlying_type (cf->type); switch (t->type) { case MONO_TYPE_STRING: case MONO_TYPE_OBJECT: case MONO_TYPE_CLASS: case MONO_TYPE_ARRAY: case MONO_TYPE_SZARRAY: is_ref = TRUE; break; case MONO_TYPE_U1: case MONO_TYPE_I1: case MONO_TYPE_BOOLEAN: case MONO_TYPE_U2: case MONO_TYPE_I2: case MONO_TYPE_CHAR: case MONO_TYPE_U: case MONO_TYPE_I: case MONO_TYPE_U4: case MONO_TYPE_I4: case MONO_TYPE_R4: case MONO_TYPE_U8: case MONO_TYPE_I8: case MONO_TYPE_R8: case MONO_TYPE_VALUETYPE: is_ref = t->byref; break; case MONO_TYPE_GENERICINST: if (mono_type_generic_inst_is_valuetype (t)) { is_ref = t->byref; } else { is_ref = TRUE; } break; default: g_error ("type 0x%x not handled in " "ves_icall_Monofield_GetValue", t->type); return NULL; } vtable = NULL; if (is_static) { vtable = mono_class_vtable_full (domain, cf->parent, TRUE); if (!vtable->initialized && !(cf->type->attrs & FIELD_ATTRIBUTE_LITERAL)) mono_runtime_class_init (vtable); } if (is_ref) { if (is_static) { mono_field_static_get_value (vtable, cf, &o); } else { mono_field_get_value (obj, cf, &o); } return o; } if (mono_class_is_nullable (mono_class_from_mono_type (cf->type))) { MonoClass *nklass = mono_class_from_mono_type (cf->type); guint8 *buf; /* Convert the Nullable structure into a boxed vtype */ if (is_static) buf = (guint8*)vtable->data + cf->offset; else buf = (guint8*)obj + cf->offset; return mono_nullable_box (buf, nklass); } /* boxed value type */ klass = mono_class_from_mono_type (cf->type); o = mono_object_new (domain, klass); v = ((gchar *) o) + sizeof (MonoObject); if (is_static) { mono_field_static_get_value (vtable, cf, v); } else { mono_field_get_value (obj, cf, v); } return o; } static void ves_icall_MonoField_SetValueInternal (MonoReflectionField *field, MonoObject *obj, MonoObject *value) { MonoClassField *cf = field->field; gchar *v; MONO_ARCH_SAVE_REGS; if (field->klass->image->assembly->ref_only) mono_raise_exception (mono_get_exception_invalid_operation ( "It is illegal to set the value on a field on a type loaded using the ReflectionOnly methods.")); if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) 
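/* Under CoreCLR security, make sure reflection is allowed to access this field before writing it, just as on the read path in ves_icall_MonoField_GetValueInternal above. */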
mono_security_core_clr_ensure_reflection_access_field (cf); v = (gchar *) value; if (!cf->type->byref) { switch (cf->type->type) { case MONO_TYPE_U1: case MONO_TYPE_I1: case MONO_TYPE_BOOLEAN: case MONO_TYPE_U2: case MONO_TYPE_I2: case MONO_TYPE_CHAR: case MONO_TYPE_U: case MONO_TYPE_I: case MONO_TYPE_U4: case MONO_TYPE_I4: case MONO_TYPE_R4: case MONO_TYPE_U8: case MONO_TYPE_I8: case MONO_TYPE_R8: case MONO_TYPE_VALUETYPE: if (v != NULL) v += sizeof (MonoObject); break; case MONO_TYPE_STRING: case MONO_TYPE_OBJECT: case MONO_TYPE_CLASS: case MONO_TYPE_ARRAY: case MONO_TYPE_SZARRAY: /* Do nothing */ break; case MONO_TYPE_GENERICINST: { MonoGenericClass *gclass = cf->type->data.generic_class; g_assert (!gclass->context.class_inst->is_open); if (mono_class_is_nullable (mono_class_from_mono_type (cf->type))) { MonoClass *nklass = mono_class_from_mono_type (cf->type); MonoObject *nullable; /* * Convert the boxed vtype into a Nullable structure. * This is complicated by the fact that Nullables have * a variable structure. */ nullable = mono_object_new (mono_domain_get (), nklass); mono_nullable_init (mono_object_unbox (nullable), value, nklass); v = mono_object_unbox (nullable); } else if (gclass->container_class->valuetype && (v != NULL)) v += sizeof (MonoObject); break; } default: g_error ("type 0x%x not handled in " "ves_icall_FieldInfo_SetValueInternal", cf->type->type); return; } } if (cf->type->attrs & FIELD_ATTRIBUTE_STATIC) { MonoVTable *vtable = mono_class_vtable_full (mono_object_domain (field), cf->parent, TRUE); if (!vtable->initialized) mono_runtime_class_init (vtable); mono_field_static_set_value (vtable, cf, v); } else { mono_field_set_value (obj, cf, v); } } static MonoObject * ves_icall_MonoField_GetRawConstantValue (MonoReflectionField *this) { MonoObject *o = NULL; MonoClassField *field = this->field; MonoClass *klass; MonoDomain *domain = mono_object_domain (this); gchar *v; MonoTypeEnum def_type; const char *def_value; MONO_ARCH_SAVE_REGS; mono_class_init (field->parent); if (!(field->type->attrs & FIELD_ATTRIBUTE_HAS_DEFAULT)) mono_raise_exception (mono_get_exception_invalid_operation (NULL)); if (field->parent->image->dynamic) { /* FIXME: */ g_assert_not_reached (); } def_value = mono_class_get_field_default_value (field, &def_type); switch (def_type) { case MONO_TYPE_U1: case MONO_TYPE_I1: case MONO_TYPE_BOOLEAN: case MONO_TYPE_U2: case MONO_TYPE_I2: case MONO_TYPE_CHAR: case MONO_TYPE_U: case MONO_TYPE_I: case MONO_TYPE_U4: case MONO_TYPE_I4: case MONO_TYPE_R4: case MONO_TYPE_U8: case MONO_TYPE_I8: case MONO_TYPE_R8: { MonoType *t; /* boxed value type */ t = g_new0 (MonoType, 1); t->type = def_type; klass = mono_class_from_mono_type (t); g_free (t); o = mono_object_new (domain, klass); v = ((gchar *) o) + sizeof (MonoObject); mono_get_constant_value_from_blob (domain, def_type, def_value, v); break; } case MONO_TYPE_STRING: case MONO_TYPE_CLASS: mono_get_constant_value_from_blob (domain, def_type, def_value, &o); break; default: g_assert_not_reached (); } return o; } static MonoReflectionType* ves_icall_MonoGenericMethod_get_ReflectedType (MonoReflectionGenericMethod *rmethod) { MonoMethod *method = rmethod->method.method; return mono_type_get_object (mono_object_domain (rmethod), &method->klass->byval_arg); } /* From MonoProperty.cs */ typedef enum { PInfo_Attributes = 1, PInfo_GetMethod = 1 << 1, PInfo_SetMethod = 1 << 2, PInfo_ReflectedType = 1 << 3, PInfo_DeclaringType = 1 << 4, PInfo_Name = 1 << 5 } PInfo; static void ves_icall_get_property_info 
(MonoReflectionProperty *property, MonoPropertyInfo *info, PInfo req_info) { MonoDomain *domain = mono_object_domain (property); MONO_ARCH_SAVE_REGS; if ((req_info & PInfo_ReflectedType) != 0) MONO_STRUCT_SETREF (info, parent, mono_type_get_object (domain, &property->klass->byval_arg)); else if ((req_info & PInfo_DeclaringType) != 0) MONO_STRUCT_SETREF (info, parent, mono_type_get_object (domain, &property->property->parent->byval_arg)); if ((req_info & PInfo_Name) != 0) MONO_STRUCT_SETREF (info, name, mono_string_new (domain, property->property->name)); if ((req_info & PInfo_Attributes) != 0) info->attrs = property->property->attrs; if ((req_info & PInfo_GetMethod) != 0) MONO_STRUCT_SETREF (info, get, property->property->get ? mono_method_get_object (domain, property->property->get, property->klass): NULL); if ((req_info & PInfo_SetMethod) != 0) MONO_STRUCT_SETREF (info, set, property->property->set ? mono_method_get_object (domain, property->property->set, property->klass): NULL); /* * There may be other methods defined for properties, though, it seems they are not exposed * in the reflection API */ } static void ves_icall_get_event_info (MonoReflectionMonoEvent *event, MonoEventInfo *info) { MonoDomain *domain = mono_object_domain (event); MONO_ARCH_SAVE_REGS; MONO_STRUCT_SETREF (info, reflected_type, mono_type_get_object (domain, &event->klass->byval_arg)); MONO_STRUCT_SETREF (info, declaring_type, mono_type_get_object (domain, &event->event->parent->byval_arg)); MONO_STRUCT_SETREF (info, name, mono_string_new (domain, event->event->name)); info->attrs = event->event->attrs; MONO_STRUCT_SETREF (info, add_method, event->event->add ? mono_method_get_object (domain, event->event->add, NULL): NULL); MONO_STRUCT_SETREF (info, remove_method, event->event->remove ? mono_method_get_object (domain, event->event->remove, NULL): NULL); MONO_STRUCT_SETREF (info, raise_method, event->event->raise ? 
mono_method_get_object (domain, event->event->raise, NULL): NULL); if (event->event->other) { int i, n = 0; while (event->event->other [n]) n++; MONO_STRUCT_SETREF (info, other_methods, mono_array_new (domain, mono_defaults.method_info_class, n)); for (i = 0; i < n; i++) mono_array_setref (info->other_methods, i, mono_method_get_object (domain, event->event->other [i], NULL)); } } static MonoArray* ves_icall_Type_GetInterfaces (MonoReflectionType* type) { MonoError error; MonoDomain *domain = mono_object_domain (type); MonoArray *intf; GPtrArray *ifaces = NULL; int i; MonoClass *class = mono_class_from_mono_type (type->type); MonoClass *parent; MonoBitSet *slots; MonoGenericContext *context = NULL; MONO_ARCH_SAVE_REGS; if (class->generic_class && class->generic_class->context.class_inst->is_open) { context = mono_class_get_context (class); class = class->generic_class->container_class; } mono_class_setup_vtable (class); slots = mono_bitset_new (class->max_interface_id + 1, 0); for (parent = class; parent; parent = parent->parent) { GPtrArray *tmp_ifaces = mono_class_get_implemented_interfaces (parent, &error); if (!mono_error_ok (&error)) { mono_bitset_free (slots); mono_error_raise_exception (&error); return NULL; } else if (tmp_ifaces) { for (i = 0; i < tmp_ifaces->len; ++i) { MonoClass *ic = g_ptr_array_index (tmp_ifaces, i); if (mono_bitset_test (slots, ic->interface_id)) continue; mono_bitset_set (slots, ic->interface_id); if (ifaces == NULL) ifaces = g_ptr_array_new (); g_ptr_array_add (ifaces, ic); } g_ptr_array_free (tmp_ifaces, TRUE); } } mono_bitset_free (slots); if (!ifaces) return mono_array_new_cached (domain, mono_defaults.monotype_class, 0); intf = mono_array_new_cached (domain, mono_defaults.monotype_class, ifaces->len); for (i = 0; i < ifaces->len; ++i) { MonoClass *ic = g_ptr_array_index (ifaces, i); MonoType *ret = &ic->byval_arg, *inflated = NULL; if (context && ic->generic_class && ic->generic_class->context.class_inst->is_open) inflated = ret = mono_class_inflate_generic_type (ret, context); mono_array_setref (intf, i, mono_type_get_object (domain, ret)); if (inflated) mono_metadata_free_type (inflated); } g_ptr_array_free (ifaces, TRUE); return intf; } static void ves_icall_Type_GetInterfaceMapData (MonoReflectionType *type, MonoReflectionType *iface, MonoArray **targets, MonoArray **methods) { MonoClass *class = mono_class_from_mono_type (type->type); MonoClass *iclass = mono_class_from_mono_type (iface->type); MonoReflectionMethod *member; MonoMethod* method; gpointer iter; int i = 0, len, ioffset; MonoDomain *domain; MONO_ARCH_SAVE_REGS; mono_class_setup_vtable (class); /* type doesn't implement iface: the exception is thrown in managed code */ if (! 
MONO_CLASS_IMPLEMENTS_INTERFACE (class, iclass->interface_id)) return; len = mono_class_num_methods (iclass); ioffset = mono_class_interface_offset (class, iclass); domain = mono_object_domain (type); mono_gc_wbarrier_generic_store (targets, (MonoObject*) mono_array_new (domain, mono_defaults.method_info_class, len)); mono_gc_wbarrier_generic_store (methods, (MonoObject*) mono_array_new (domain, mono_defaults.method_info_class, len)); iter = NULL; while ((method = mono_class_get_methods (iclass, &iter))) { member = mono_method_get_object (domain, method, iclass); mono_array_setref (*methods, i, member); member = mono_method_get_object (domain, class->vtable [i + ioffset], class); mono_array_setref (*targets, i, member); i ++; } } static void ves_icall_Type_GetPacking (MonoReflectionType *type, guint32 *packing, guint32 *size) { MonoClass *klass = mono_class_from_mono_type (type->type); if (klass->image->dynamic) { MonoReflectionTypeBuilder *tb = (MonoReflectionTypeBuilder*)type; *packing = tb->packing_size; *size = tb->class_size; } else { mono_metadata_packing_from_typedef (klass->image, klass->type_token, packing, size); } } static MonoReflectionType* ves_icall_MonoType_GetElementType (MonoReflectionType *type) { MonoClass *class; MONO_ARCH_SAVE_REGS; if (!type->type->byref && type->type->type == MONO_TYPE_SZARRAY) return mono_type_get_object (mono_object_domain (type), &type->type->data.klass->byval_arg); class = mono_class_from_mono_type (type->type); // GetElementType should only return a type for: // Array Pointer PassedByRef if (type->type->byref) return mono_type_get_object (mono_object_domain (type), &class->byval_arg); else if (class->element_class && MONO_CLASS_IS_ARRAY (class)) return mono_type_get_object (mono_object_domain (type), &class->element_class->byval_arg); else if (class->element_class && type->type->type == MONO_TYPE_PTR) return mono_type_get_object (mono_object_domain (type), &class->element_class->byval_arg); else return NULL; } static MonoReflectionType* ves_icall_get_type_parent (MonoReflectionType *type) { MonoClass *class = mono_class_from_mono_type (type->type); MONO_ARCH_SAVE_REGS; return class->parent ? 
mono_type_get_object (mono_object_domain (type), &class->parent->byval_arg): NULL; } static MonoBoolean ves_icall_type_ispointer (MonoReflectionType *type) { MONO_ARCH_SAVE_REGS; return type->type->type == MONO_TYPE_PTR; } static MonoBoolean ves_icall_type_isprimitive (MonoReflectionType *type) { MONO_ARCH_SAVE_REGS; return (!type->type->byref && (((type->type->type >= MONO_TYPE_BOOLEAN) && (type->type->type <= MONO_TYPE_R8)) || (type->type->type == MONO_TYPE_I) || (type->type->type == MONO_TYPE_U))); } static MonoBoolean ves_icall_type_isbyref (MonoReflectionType *type) { MONO_ARCH_SAVE_REGS; return type->type->byref; } static MonoBoolean ves_icall_type_iscomobject (MonoReflectionType *type) { MonoClass *klass = mono_class_from_mono_type (type->type); MONO_ARCH_SAVE_REGS; return (klass && klass->is_com_object); } static MonoReflectionModule* ves_icall_MonoType_get_Module (MonoReflectionType *type) { MonoClass *class = mono_class_from_mono_type (type->type); MONO_ARCH_SAVE_REGS; return mono_module_get_object (mono_object_domain (type), class->image); } static MonoReflectionAssembly* ves_icall_MonoType_get_Assembly (MonoReflectionType *type) { MonoDomain *domain = mono_domain_get (); MonoClass *class = mono_class_from_mono_type (type->type); MONO_ARCH_SAVE_REGS; return mono_assembly_get_object (domain, class->image->assembly); } static MonoReflectionType* ves_icall_MonoType_get_DeclaringType (MonoReflectionType *type) { MonoDomain *domain = mono_domain_get (); MonoClass *class; MONO_ARCH_SAVE_REGS; if (type->type->byref) return NULL; if (type->type->type == MONO_TYPE_VAR) class = mono_type_get_generic_param_owner (type->type)->owner.klass; else if (type->type->type == MONO_TYPE_MVAR) class = mono_type_get_generic_param_owner (type->type)->owner.method->klass; else class = mono_class_from_mono_type (type->type)->nested_in; return class ? 
mono_type_get_object (domain, &class->byval_arg) : NULL; } static MonoReflectionType* ves_icall_MonoType_get_UnderlyingSystemType (MonoReflectionType *type) { MonoDomain *domain = mono_domain_get (); MonoClass *class = mono_class_from_mono_type (type->type); MONO_ARCH_SAVE_REGS; if (class->enumtype && mono_class_enum_basetype (class)) /* types that are modified typebuilders may not have enum_basetype set */ return mono_type_get_object (domain, mono_class_enum_basetype (class)); else if (class->element_class) return mono_type_get_object (domain, &class->element_class->byval_arg); else return NULL; } static MonoString* ves_icall_MonoType_get_Name (MonoReflectionType *type) { MonoDomain *domain = mono_domain_get (); MonoClass *class = mono_class_from_mono_type (type->type); MONO_ARCH_SAVE_REGS; if (type->type->byref) { char *n = g_strdup_printf ("%s&", class->name); MonoString *res = mono_string_new (domain, n); g_free (n); return res; } else { return mono_string_new (domain, class->name); } } static MonoString* ves_icall_MonoType_get_Namespace (MonoReflectionType *type) { MonoDomain *domain = mono_domain_get (); MonoClass *class = mono_class_from_mono_type (type->type); MONO_ARCH_SAVE_REGS; while (class->nested_in) class = class->nested_in; if (class->name_space [0] == '\0') return NULL; else return mono_string_new (domain, class->name_space); } static gint32 ves_icall_MonoType_GetArrayRank (MonoReflectionType *type) { MonoClass *class; if (type->type->type != MONO_TYPE_ARRAY && type->type->type != MONO_TYPE_SZARRAY) mono_raise_exception (mono_get_exception_argument ("type", "Type must be an array type")); class = mono_class_from_mono_type (type->type); return class->rank; } static MonoArray* ves_icall_MonoType_GetGenericArguments (MonoReflectionType *type) { MonoArray *res; MonoClass *klass, *pklass; MonoDomain *domain = mono_object_domain (type); MonoVTable *array_vtable = mono_class_vtable_full (domain, mono_array_class_get_cached (mono_defaults.systemtype_class, 1), TRUE); int i; MONO_ARCH_SAVE_REGS; klass = mono_class_from_mono_type (type->type); if (klass->generic_container) { MonoGenericContainer *container = klass->generic_container; res = mono_array_new_specific (array_vtable, container->type_argc); for (i = 0; i < container->type_argc; ++i) { pklass = mono_class_from_generic_parameter (mono_generic_container_get_param (container, i), klass->image, FALSE); mono_array_setref (res, i, mono_type_get_object (domain, &pklass->byval_arg)); } } else if (klass->generic_class) { MonoGenericInst *inst = klass->generic_class->context.class_inst; res = mono_array_new_specific (array_vtable, inst->type_argc); for (i = 0; i < inst->type_argc; ++i) mono_array_setref (res, i, mono_type_get_object (domain, inst->type_argv [i])); } else { res = mono_array_new_specific (array_vtable, 0); } return res; } static gboolean ves_icall_Type_get_IsGenericTypeDefinition (MonoReflectionType *type) { MonoClass *klass; MONO_ARCH_SAVE_REGS; if (!IS_MONOTYPE (type)) return FALSE; if (type->type->byref) return FALSE; klass = mono_class_from_mono_type (type->type); return klass->generic_container != NULL; } static MonoReflectionType* ves_icall_Type_GetGenericTypeDefinition_impl (MonoReflectionType *type) { MonoClass *klass; MONO_ARCH_SAVE_REGS; if (type->type->byref) return NULL; klass = mono_class_from_mono_type (type->type); if (klass->generic_container) { return type; /* check this one */ } if (klass->generic_class) { MonoClass *generic_class = klass->generic_class->container_class; if 
(generic_class->wastypebuilder && generic_class->reflection_info) return generic_class->reflection_info; else return mono_type_get_object (mono_object_domain (type), &generic_class->byval_arg); } return NULL; } static MonoReflectionType* ves_icall_Type_MakeGenericType (MonoReflectionType *type, MonoArray *type_array) { MonoClass *class; MonoType *geninst, **types; int i, count; MONO_ARCH_SAVE_REGS; count = mono_array_length (type_array); types = g_new0 (MonoType *, count); for (i = 0; i < count; i++) { MonoReflectionType *t = mono_array_get (type_array, gpointer, i); types [i] = t->type; } geninst = mono_reflection_bind_generic_parameters (type, count, types); g_free (types); if (!geninst) return NULL; class = mono_class_from_mono_type (geninst); /*we might inflate to the GTD*/ if (class->generic_class && !mono_verifier_class_is_valid_generic_instantiation (class)) mono_raise_exception (mono_get_exception_argument ("method", "Invalid generic arguments")); return mono_type_get_object (mono_object_domain (type), geninst); } static gboolean ves_icall_Type_get_IsGenericInstance (MonoReflectionType *type) { MonoClass *klass; MONO_ARCH_SAVE_REGS; if (type->type->byref) return FALSE; klass = mono_class_from_mono_type (type->type); return klass->generic_class != NULL; } static gboolean ves_icall_Type_get_IsGenericType (MonoReflectionType *type) { MonoClass *klass; MONO_ARCH_SAVE_REGS; if (!IS_MONOTYPE (type)) return FALSE; if (type->type->byref) return FALSE; klass = mono_class_from_mono_type (type->type); return klass->generic_class != NULL || klass->generic_container != NULL; } static gint32 ves_icall_Type_GetGenericParameterPosition (MonoReflectionType *type) { MONO_ARCH_SAVE_REGS; if (!IS_MONOTYPE (type)) return -1; if (is_generic_parameter (type->type)) return mono_type_get_generic_param_num (type->type); return -1; } static GenericParameterAttributes ves_icall_Type_GetGenericParameterAttributes (MonoReflectionType *type) { MONO_ARCH_SAVE_REGS; g_assert (IS_MONOTYPE (type)); g_assert (is_generic_parameter (type->type)); return mono_generic_param_info (type->type->data.generic_param)->flags; } static MonoArray * ves_icall_Type_GetGenericParameterConstraints (MonoReflectionType *type) { MonoGenericParamInfo *param_info; MonoDomain *domain; MonoClass **ptr; MonoArray *res; int i, count; MONO_ARCH_SAVE_REGS; g_assert (IS_MONOTYPE (type)); domain = mono_object_domain (type); param_info = mono_generic_param_info (type->type->data.generic_param); for (count = 0, ptr = param_info->constraints; ptr && *ptr; ptr++, count++) ; res = mono_array_new (domain, mono_defaults.monotype_class, count); for (i = 0; i < count; i++) mono_array_setref (res, i, mono_type_get_object (domain, &param_info->constraints [i]->byval_arg)); return res; } static MonoBoolean ves_icall_MonoType_get_IsGenericParameter (MonoReflectionType *type) { MONO_ARCH_SAVE_REGS; return is_generic_parameter (type->type); } static MonoBoolean ves_icall_TypeBuilder_get_IsGenericParameter (MonoReflectionTypeBuilder *tb) { MONO_ARCH_SAVE_REGS; return is_generic_parameter (tb->type.type); } static void ves_icall_EnumBuilder_setup_enum_type (MonoReflectionType *enumtype, MonoReflectionType *t) { enumtype->type = t->type; } static MonoReflectionMethod* ves_icall_MonoType_GetCorrespondingInflatedMethod (MonoReflectionType *type, MonoReflectionMethod* generic) { MonoDomain *domain; MonoClass *klass; MonoMethod *method; gpointer iter; MONO_ARCH_SAVE_REGS; domain = ((MonoObject *)type)->vtable->domain; klass = mono_class_from_mono_type (type->type); 
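/* Walk the methods of the inflated class and match by metadata token against the supplied generic method definition. */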
iter = NULL; while ((method = mono_class_get_methods (klass, &iter))) { if (method->token == generic->method->token) return mono_method_get_object (domain, method, klass); } return NULL; } static MonoReflectionMethod * ves_icall_MonoType_get_DeclaringMethod (MonoReflectionType *ref_type) { MonoMethod *method; MonoType *type = ref_type->type; MONO_ARCH_SAVE_REGS; if (type->byref || (type->type != MONO_TYPE_MVAR && type->type != MONO_TYPE_VAR)) mono_raise_exception (mono_get_exception_invalid_operation ("DeclaringMethod can only be used on generic arguments")); if (type->type == MONO_TYPE_VAR) return NULL; method = mono_type_get_generic_param_owner (type)->owner.method; g_assert (method); return mono_method_get_object (mono_object_domain (ref_type), method, method->klass); } static MonoReflectionDllImportAttribute* ves_icall_MonoMethod_GetDllImportAttribute (MonoMethod *method) { static MonoClass *DllImportAttributeClass = NULL; MonoDomain *domain = mono_domain_get (); MonoReflectionDllImportAttribute *attr; MonoImage *image = method->klass->image; MonoMethodPInvoke *piinfo = (MonoMethodPInvoke *)method; MonoTableInfo *tables = image->tables; MonoTableInfo *im = &tables [MONO_TABLE_IMPLMAP]; MonoTableInfo *mr = &tables [MONO_TABLE_MODULEREF]; guint32 im_cols [MONO_IMPLMAP_SIZE]; guint32 scope_token; const char *import = NULL; const char *scope = NULL; guint32 flags; if (!(method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) return NULL; if (!DllImportAttributeClass) { DllImportAttributeClass = mono_class_from_name (mono_defaults.corlib, "System.Runtime.InteropServices", "DllImportAttribute"); g_assert (DllImportAttributeClass); } if (method->klass->image->dynamic) { MonoReflectionMethodAux *method_aux = g_hash_table_lookup ( ((MonoDynamicImage*)method->klass->image)->method_aux_hash, method); if (method_aux) { import = method_aux->dllentry; scope = method_aux->dll; } if (!import || !scope) { mono_raise_exception (mono_get_exception_argument ("method", "System.Reflection.Emit method with invalid pinvoke information")); return NULL; } } else { if (piinfo->implmap_idx) { mono_metadata_decode_row (im, piinfo->implmap_idx - 1, im_cols, MONO_IMPLMAP_SIZE); piinfo->piflags = im_cols [MONO_IMPLMAP_FLAGS]; import = mono_metadata_string_heap (image, im_cols [MONO_IMPLMAP_NAME]); scope_token = mono_metadata_decode_row_col (mr, im_cols [MONO_IMPLMAP_SCOPE] - 1, MONO_MODULEREF_NAME); scope = mono_metadata_string_heap (image, scope_token); } } flags = piinfo->piflags; attr = (MonoReflectionDllImportAttribute*)mono_object_new (domain, DllImportAttributeClass); MONO_OBJECT_SETREF (attr, dll, mono_string_new (domain, scope)); MONO_OBJECT_SETREF (attr, entry_point, mono_string_new (domain, import)); attr->call_conv = (flags & 0x700) >> 8; attr->charset = ((flags & 0x6) >> 1) + 1; if (attr->charset == 1) attr->charset = 2; attr->exact_spelling = (flags & 0x1) != 0; attr->set_last_error = (flags & 0x40) != 0; attr->best_fit_mapping = (flags & 0x30) == 0x10; attr->throw_on_unmappable = (flags & 0x3000) == 0x1000; attr->preserve_sig = FALSE; return attr; } static MonoReflectionMethod * ves_icall_MonoMethod_GetGenericMethodDefinition (MonoReflectionMethod *method) { MonoMethodInflated *imethod; MonoMethod *result; MONO_ARCH_SAVE_REGS; if (method->method->is_generic) return method; if (!method->method->is_inflated) return NULL; imethod = (MonoMethodInflated *) method->method; result = imethod->declaring; /* Not a generic method. 
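* The declaring method is not generic itself when only the containing class was inflated, so in that case there is no generic method definition to return.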
*/ if (!result->is_generic) return NULL; if (method->method->klass->image->dynamic) { MonoDynamicImage *image = (MonoDynamicImage*)method->method->klass->image; MonoReflectionMethod *res; /* * FIXME: Why is this stuff needed at all ? Why can't the code below work for * the dynamic case as well ? */ mono_loader_lock (); res = mono_g_hash_table_lookup (image->generic_def_objects, imethod); mono_loader_unlock (); if (res) return res; } if (imethod->context.class_inst) { MonoClass *klass = ((MonoMethod *) imethod)->klass; /*Generic methods gets the context of the GTD.*/ if (mono_class_get_context (klass)) result = mono_class_inflate_generic_method_full (result, klass, mono_class_get_context (klass)); } return mono_method_get_object (mono_object_domain (method), result, NULL); } static gboolean ves_icall_MonoMethod_get_IsGenericMethod (MonoReflectionMethod *method) { MONO_ARCH_SAVE_REGS; return mono_method_signature (method->method)->generic_param_count != 0; } static gboolean ves_icall_MonoMethod_get_IsGenericMethodDefinition (MonoReflectionMethod *method) { MONO_ARCH_SAVE_REGS; return method->method->is_generic; } static MonoArray* ves_icall_MonoMethod_GetGenericArguments (MonoReflectionMethod *method) { MonoArray *res; MonoDomain *domain; int count, i; MONO_ARCH_SAVE_REGS; domain = mono_object_domain (method); if (method->method->is_inflated) { MonoGenericInst *inst = mono_method_get_context (method->method)->method_inst; if (inst) { count = inst->type_argc; res = mono_array_new (domain, mono_defaults.systemtype_class, count); for (i = 0; i < count; i++) mono_array_setref (res, i, mono_type_get_object (domain, inst->type_argv [i])); return res; } } count = mono_method_signature (method->method)->generic_param_count; res = mono_array_new (domain, mono_defaults.systemtype_class, count); for (i = 0; i < count; i++) { MonoGenericContainer *container = mono_method_get_generic_container (method->method); MonoGenericParam *param = mono_generic_container_get_param (container, i); MonoClass *pklass = mono_class_from_generic_parameter ( param, method->method->klass->image, TRUE); mono_array_setref (res, i, mono_type_get_object (domain, &pklass->byval_arg)); } return res; } static MonoObject * ves_icall_InternalInvoke (MonoReflectionMethod *method, MonoObject *this, MonoArray *params, MonoException **exc) { /* * Invoke from reflection is supposed to always be a virtual call (the API * is stupid), mono_runtime_invoke_*() calls the provided method, allowing * greater flexibility. 
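* When a target object is passed in, that virtual dispatch is done explicitly below via mono_object_get_virtual_method ().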
*/ MonoMethod *m = method->method; int pcount; void *obj = this; MONO_ARCH_SAVE_REGS; *exc = NULL; if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) mono_security_core_clr_ensure_reflection_access_method (m); if (!(m->flags & METHOD_ATTRIBUTE_STATIC)) { if (!mono_class_vtable_full (mono_object_domain (method), m->klass, FALSE)) { mono_gc_wbarrier_generic_store (exc, (MonoObject*) mono_class_get_exception_for_failure (m->klass)); return NULL; } if (this) { if (!mono_object_isinst (this, m->klass)) { mono_gc_wbarrier_generic_store (exc, (MonoObject*) mono_exception_from_name_msg (mono_defaults.corlib, "System.Reflection", "TargetException", "Object does not match target type.")); return NULL; } m = mono_object_get_virtual_method (this, m); /* must pass the pointer to the value for valuetype methods */ if (m->klass->valuetype) obj = mono_object_unbox (this); } else if (strcmp (m->name, ".ctor") && !m->wrapper_type) { mono_gc_wbarrier_generic_store (exc, (MonoObject*) mono_exception_from_name_msg (mono_defaults.corlib, "System.Reflection", "TargetException", "Non-static method requires a target.")); return NULL; } } pcount = params? mono_array_length (params): 0; if (pcount != mono_method_signature (m)->param_count) { mono_gc_wbarrier_generic_store (exc, (MonoObject*) mono_exception_from_name (mono_defaults.corlib, "System.Reflection", "TargetParameterCountException")); return NULL; } if ((m->klass->flags & TYPE_ATTRIBUTE_ABSTRACT) && !strcmp (m->name, ".ctor") && !this) { mono_gc_wbarrier_generic_store (exc, (MonoObject*) mono_exception_from_name_msg (mono_defaults.corlib, "System.Reflection", "TargetException", "Cannot invoke constructor of an abstract class.")); return NULL; } if (m->klass->image->assembly->ref_only) { mono_gc_wbarrier_generic_store (exc, (MonoObject*) mono_get_exception_invalid_operation ("It is illegal to invoke a method on a type loaded using the ReflectionOnly api.")); return NULL; } if (m->klass->rank && !strcmp (m->name, ".ctor")) { int i; mono_array_size_t *lengths; mono_array_size_t *lower_bounds; pcount = mono_array_length (params); lengths = alloca (sizeof (mono_array_size_t) * pcount); for (i = 0; i < pcount; ++i) lengths [i] = *(mono_array_size_t*) ((char*)mono_array_get (params, gpointer, i) + sizeof (MonoObject)); if (m->klass->rank == pcount) { /* Only lengths provided. */ lower_bounds = NULL; } else { g_assert (pcount == (m->klass->rank * 2)); /* lower bounds are first. 
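* i.e. with rank * 2 constructor arguments the first 'rank' values are taken as the lower bounds and the remaining 'rank' values as the lengths.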
*/ lower_bounds = lengths; lengths += m->klass->rank; } return (MonoObject*)mono_array_new_full (mono_object_domain (params), m->klass, lengths, lower_bounds); } return mono_runtime_invoke_array (m, obj, params, NULL); } static MonoObject * ves_icall_InternalExecute (MonoReflectionMethod *method, MonoObject *this, MonoArray *params, MonoArray **outArgs) { MonoDomain *domain = mono_object_domain (method); MonoMethod *m = method->method; MonoMethodSignature *sig = mono_method_signature (m); MonoArray *out_args; MonoObject *result; int i, j, outarg_count = 0; MONO_ARCH_SAVE_REGS; if (m->klass == mono_defaults.object_class) { if (!strcmp (m->name, "FieldGetter")) { MonoClass *k = this->vtable->klass; MonoString *name; char *str; /* If this is a proxy, then it must be a CBO */ if (k == mono_defaults.transparent_proxy_class) { MonoTransparentProxy *tp = (MonoTransparentProxy*) this; this = tp->rp->unwrapped_server; g_assert (this); k = this->vtable->klass; } name = mono_array_get (params, MonoString *, 1); str = mono_string_to_utf8 (name); do { MonoClassField* field = mono_class_get_field_from_name (k, str); if (field) { MonoClass *field_klass = mono_class_from_mono_type (field->type); if (field_klass->valuetype) result = mono_value_box (domain, field_klass, (char *)this + field->offset); else result = *((gpointer *)((char *)this + field->offset)); out_args = mono_array_new (domain, mono_defaults.object_class, 1); mono_gc_wbarrier_generic_store (outArgs, (MonoObject*) out_args); mono_array_setref (out_args, 0, result); g_free (str); return NULL; } k = k->parent; } while (k); g_free (str); g_assert_not_reached (); } else if (!strcmp (m->name, "FieldSetter")) { MonoClass *k = this->vtable->klass; MonoString *name; guint32 size; gint32 align; char *str; /* If this is a proxy, then it must be a CBO */ if (k == mono_defaults.transparent_proxy_class) { MonoTransparentProxy *tp = (MonoTransparentProxy*) this; this = tp->rp->unwrapped_server; g_assert (this); k = this->vtable->klass; } name = mono_array_get (params, MonoString *, 1); str = mono_string_to_utf8 (name); do { MonoClassField* field = mono_class_get_field_from_name (k, str); if (field) { MonoClass *field_klass = mono_class_from_mono_type (field->type); MonoObject *val = mono_array_get (params, gpointer, 2); if (field_klass->valuetype) { size = mono_type_size (field->type, &align); #ifdef HAVE_SGEN_GC mono_gc_wbarrier_value_copy ((char *)this + field->offset, (char*)val + sizeof (MonoObject), 1, field_klass); #endif memcpy ((char *)this + field->offset, ((char *)val) + sizeof (MonoObject), size); } else { mono_gc_wbarrier_set_field (this, (char*)this + field->offset, val); } out_args = mono_array_new (domain, mono_defaults.object_class, 0); mono_gc_wbarrier_generic_store (outArgs, (MonoObject*) out_args); g_free (str); return NULL; } k = k->parent; } while (k); g_free (str); g_assert_not_reached (); } } for (i = 0; i < mono_array_length (params); i++) { if (sig->params [i]->byref) outarg_count++; } out_args = mono_array_new (domain, mono_defaults.object_class, outarg_count); /* handle constructors only for objects already allocated */ if (!strcmp (method->method->name, ".ctor")) g_assert (this); /* This can be called only on MBR objects, so no need to unbox for valuetypes. 
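 * The assertion below enforces that; after the call, byref arguments are copied back into the out_args array.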
*/ g_assert (!method->method->klass->valuetype); result = mono_runtime_invoke_array (method->method, this, params, NULL); for (i = 0, j = 0; i < mono_array_length (params); i++) { if (sig->params [i]->byref) { gpointer arg; arg = mono_array_get (params, gpointer, i); mono_array_setref (out_args, j, arg); j++; } } mono_gc_wbarrier_generic_store (outArgs, (MonoObject*) out_args); return result; } static guint64 read_enum_value (char *mem, int type) { switch (type) { case MONO_TYPE_U1: return *(guint8*)mem; case MONO_TYPE_I1: return *(gint8*)mem; case MONO_TYPE_U2: return *(guint16*)mem; case MONO_TYPE_I2: return *(gint16*)mem; case MONO_TYPE_U4: return *(guint32*)mem; case MONO_TYPE_I4: return *(gint32*)mem; case MONO_TYPE_U8: return *(guint64*)mem; case MONO_TYPE_I8: return *(gint64*)mem; default: g_assert_not_reached (); } return 0; } static void write_enum_value (char *mem, int type, guint64 value) { switch (type) { case MONO_TYPE_U1: case MONO_TYPE_I1: { guint8 *p = (guint8*)mem; *p = value; break; } case MONO_TYPE_U2: case MONO_TYPE_I2: { guint16 *p = (void*)mem; *p = value; break; } case MONO_TYPE_U4: case MONO_TYPE_I4: { guint32 *p = (void*)mem; *p = value; break; } case MONO_TYPE_U8: case MONO_TYPE_I8: { guint64 *p = (void*)mem; *p = value; break; } default: g_assert_not_reached (); } return; } static MonoObject * ves_icall_System_Enum_ToObject (MonoReflectionType *enumType, MonoObject *value) { MonoDomain *domain; MonoClass *enumc, *objc; MonoObject *res; MonoType *etype; guint64 val; MONO_ARCH_SAVE_REGS; MONO_CHECK_ARG_NULL (enumType); MONO_CHECK_ARG_NULL (value); domain = mono_object_domain (enumType); enumc = mono_class_from_mono_type (enumType->type); objc = value->vtable->klass; if (!enumc->enumtype) mono_raise_exception (mono_get_exception_argument ("enumType", "Type provided must be an Enum.")); if (!((objc->enumtype) || (objc->byval_arg.type >= MONO_TYPE_I1 && objc->byval_arg.type <= MONO_TYPE_U8))) mono_raise_exception (mono_get_exception_argument ("value", "The value passed in must be an enum base or an underlying type for an enum, such as an Int32.")); etype = mono_class_enum_basetype (enumc); if (!etype) /* MS throws this for typebuilders */ mono_raise_exception (mono_get_exception_argument ("Type must be a type provided by the runtime.", "enumType")); res = mono_object_new (domain, enumc); val = read_enum_value ((char *)value + sizeof (MonoObject), objc->enumtype? 
mono_class_enum_basetype (objc)->type: objc->byval_arg.type); write_enum_value ((char *)res + sizeof (MonoObject), etype->type, val); return res; } static MonoObject * ves_icall_System_Enum_get_value (MonoObject *this) { MonoObject *res; MonoClass *enumc; gpointer dst; gpointer src; int size; MONO_ARCH_SAVE_REGS; if (!this) return NULL; g_assert (this->vtable->klass->enumtype); enumc = mono_class_from_mono_type (mono_class_enum_basetype (this->vtable->klass)); res = mono_object_new (mono_object_domain (this), enumc); dst = (char *)res + sizeof (MonoObject); src = (char *)this + sizeof (MonoObject); size = mono_class_value_size (enumc, NULL); memcpy (dst, src, size); return res; } static MonoReflectionType * ves_icall_System_Enum_get_underlying_type (MonoReflectionType *type) { MonoType *etype; MONO_ARCH_SAVE_REGS; etype = mono_class_enum_basetype (mono_class_from_mono_type (type->type)); if (!etype) /* MS throws this for typebuilders */ mono_raise_exception (mono_get_exception_argument ("Type must be a type provided by the runtime.", "enumType")); return mono_type_get_object (mono_object_domain (type), etype); } static int ves_icall_System_Enum_compare_value_to (MonoObject *this, MonoObject *other) { gpointer tdata = (char *)this + sizeof (MonoObject); gpointer odata = (char *)other + sizeof (MonoObject); MonoType *basetype = mono_class_enum_basetype (this->vtable->klass); g_assert (basetype); #define COMPARE_ENUM_VALUES(ENUM_TYPE) do { \ ENUM_TYPE me = *((ENUM_TYPE*)tdata); \ ENUM_TYPE other = *((ENUM_TYPE*)odata); \ if (me == other) \ return 0; \ return me > other ? 1 : -1; \ } while (0) #define COMPARE_ENUM_VALUES_RANGE(ENUM_TYPE) do { \ ENUM_TYPE me = *((ENUM_TYPE*)tdata); \ ENUM_TYPE other = *((ENUM_TYPE*)odata); \ if (me == other) \ return 0; \ return me - other; \ } while (0) switch (basetype->type) { case MONO_TYPE_U1: COMPARE_ENUM_VALUES (guint8); case MONO_TYPE_I1: COMPARE_ENUM_VALUES (gint8); case MONO_TYPE_CHAR: case MONO_TYPE_U2: COMPARE_ENUM_VALUES_RANGE (guint16); case MONO_TYPE_I2: COMPARE_ENUM_VALUES (gint16); case MONO_TYPE_U4: COMPARE_ENUM_VALUES (guint32); case MONO_TYPE_I4: COMPARE_ENUM_VALUES (gint32); case MONO_TYPE_U8: COMPARE_ENUM_VALUES (guint64); case MONO_TYPE_I8: COMPARE_ENUM_VALUES (gint64); default: g_error ("Implement type 0x%02x in get_hashcode", basetype->type); } #undef COMPARE_ENUM_VALUES_RANGE #undef COMPARE_ENUM_VALUES return 0; } static int ves_icall_System_Enum_get_hashcode (MonoObject *this) { gpointer data = (char *)this + sizeof (MonoObject); MonoType *basetype = mono_class_enum_basetype (this->vtable->klass); g_assert (basetype); switch (basetype->type) { case MONO_TYPE_I1: return *((gint8*)data); case MONO_TYPE_U1: return *((guint8*)data); case MONO_TYPE_CHAR: case MONO_TYPE_U2: return *((guint16*)data); case MONO_TYPE_I2: return *((gint16*)data); case MONO_TYPE_U4: return *((guint32*)data); case MONO_TYPE_I4: return *((gint32*)data); case MONO_TYPE_U8: case MONO_TYPE_I8: { gint64 value = *((gint64*)data); return (gint)(value & 0xffffffff) ^ (int)(value >> 32); } default: g_error ("Implement type 0x%02x in get_hashcode", basetype->type); } return 0; } static void ves_icall_get_enum_info (MonoReflectionType *type, MonoEnumInfo *info) { MonoDomain *domain = mono_object_domain (type); MonoClass *enumc = mono_class_from_mono_type (type->type); guint j = 0, nvalues, crow; gpointer iter; MonoClassField *field; MONO_ARCH_SAVE_REGS; MONO_STRUCT_SETREF (info, utype, mono_type_get_object (domain, mono_class_enum_basetype (enumc))); nvalues = 
mono_class_num_fields (enumc) ? mono_class_num_fields (enumc) - 1 : 0; MONO_STRUCT_SETREF (info, names, mono_array_new (domain, mono_defaults.string_class, nvalues)); MONO_STRUCT_SETREF (info, values, mono_array_new (domain, enumc, nvalues)); crow = -1; iter = NULL; while ((field = mono_class_get_fields (enumc, &iter))) { const char *p; int len; MonoTypeEnum def_type; if (strcmp ("value__", mono_field_get_name (field)) == 0) continue; if (mono_field_is_deleted (field)) continue; mono_array_setref (info->names, j, mono_string_new (domain, mono_field_get_name (field))); p = mono_class_get_field_default_value (field, &def_type); len = mono_metadata_decode_blob_size (p, &p); switch (mono_class_enum_basetype (enumc)->type) { case MONO_TYPE_U1: case MONO_TYPE_I1: mono_array_set (info->values, gchar, j, *p); break; case MONO_TYPE_CHAR: case MONO_TYPE_U2: case MONO_TYPE_I2: mono_array_set (info->values, gint16, j, read16 (p)); break; case MONO_TYPE_U4: case MONO_TYPE_I4: mono_array_set (info->values, gint32, j, read32 (p)); break; case MONO_TYPE_U8: case MONO_TYPE_I8: mono_array_set (info->values, gint64, j, read64 (p)); break; default: g_error ("Implement type 0x%02x in get_enum_info", mono_class_enum_basetype (enumc)->type); } ++j; } } enum { BFLAGS_IgnoreCase = 1, BFLAGS_DeclaredOnly = 2, BFLAGS_Instance = 4, BFLAGS_Static = 8, BFLAGS_Public = 0x10, BFLAGS_NonPublic = 0x20, BFLAGS_FlattenHierarchy = 0x40, BFLAGS_InvokeMethod = 0x100, BFLAGS_CreateInstance = 0x200, BFLAGS_GetField = 0x400, BFLAGS_SetField = 0x800, BFLAGS_GetProperty = 0x1000, BFLAGS_SetProperty = 0x2000, BFLAGS_ExactBinding = 0x10000, BFLAGS_SuppressChangeType = 0x20000, BFLAGS_OptionalParamBinding = 0x40000 }; static MonoReflectionField * ves_icall_Type_GetField (MonoReflectionType *type, MonoString *name, guint32 bflags) { MonoDomain *domain; MonoClass *startklass, *klass; int match; MonoClassField *field; gpointer iter; char *utf8_name; int (*compare_func) (const char *s1, const char *s2) = NULL; domain = ((MonoObject *)type)->vtable->domain; klass = startklass = mono_class_from_mono_type (type->type); MONO_ARCH_SAVE_REGS; if (!name) mono_raise_exception (mono_get_exception_argument_null ("name")); if (type->type->byref) return NULL; compare_func = (bflags & BFLAGS_IgnoreCase) ? 
mono_utf8_strcasecmp : strcmp; handle_parent: if (klass->exception_type != MONO_EXCEPTION_NONE) mono_raise_exception (mono_class_get_exception_for_failure (klass)); iter = NULL; while ((field = mono_class_get_fields (klass, &iter))) { match = 0; if (field->type == NULL) continue; if (mono_field_is_deleted (field)) continue; if ((field->type->attrs & FIELD_ATTRIBUTE_FIELD_ACCESS_MASK) == FIELD_ATTRIBUTE_PUBLIC) { if (bflags & BFLAGS_Public) match++; } else if ((klass == startklass) || (field->type->attrs & FIELD_ATTRIBUTE_FIELD_ACCESS_MASK) != FIELD_ATTRIBUTE_PRIVATE) { if (bflags & BFLAGS_NonPublic) { match++; } } if (!match) continue; match = 0; if (field->type->attrs & FIELD_ATTRIBUTE_STATIC) { if (bflags & BFLAGS_Static) if ((bflags & BFLAGS_FlattenHierarchy) || (klass == startklass)) match++; } else { if (bflags & BFLAGS_Instance) match++; } if (!match) continue; utf8_name = mono_string_to_utf8 (name); if (compare_func (mono_field_get_name (field), utf8_name)) { g_free (utf8_name); continue; } g_free (utf8_name); return mono_field_get_object (domain, klass, field); } if (!(bflags & BFLAGS_DeclaredOnly) && (klass = klass->parent)) goto handle_parent; return NULL; } static MonoArray* ves_icall_Type_GetFields_internal (MonoReflectionType *type, guint32 bflags, MonoReflectionType *reftype) { MonoDomain *domain; MonoClass *startklass, *klass, *refklass; MonoArray *res; MonoObject *member; int i, match; gpointer iter; MonoClassField *field; MonoPtrArray tmp_array; MONO_ARCH_SAVE_REGS; domain = ((MonoObject *)type)->vtable->domain; if (type->type->byref) return mono_array_new (domain, mono_defaults.field_info_class, 0); klass = startklass = mono_class_from_mono_type (type->type); refklass = mono_class_from_mono_type (reftype->type); mono_ptr_array_init (tmp_array, 2); handle_parent: if (klass->exception_type != MONO_EXCEPTION_NONE) mono_raise_exception (mono_class_get_exception_for_failure (klass)); iter = NULL; while ((field = mono_class_get_fields (klass, &iter))) { match = 0; if (mono_field_is_deleted (field)) continue; if ((field->type->attrs & FIELD_ATTRIBUTE_FIELD_ACCESS_MASK) == FIELD_ATTRIBUTE_PUBLIC) { if (bflags & BFLAGS_Public) match++; } else if ((klass == startklass) || (field->type->attrs & FIELD_ATTRIBUTE_FIELD_ACCESS_MASK) != FIELD_ATTRIBUTE_PRIVATE) { if (bflags & BFLAGS_NonPublic) { match++; } } if (!match) continue; match = 0; if (field->type->attrs & FIELD_ATTRIBUTE_STATIC) { if (bflags & BFLAGS_Static) if ((bflags & BFLAGS_FlattenHierarchy) || (klass == startklass)) match++; } else { if (bflags & BFLAGS_Instance) match++; } if (!match) continue; member = (MonoObject*)mono_field_get_object (domain, refklass, field); mono_ptr_array_append (tmp_array, member); } if (!(bflags & BFLAGS_DeclaredOnly) && (klass = klass->parent)) goto handle_parent; res = mono_array_new_cached (domain, mono_defaults.field_info_class, mono_ptr_array_size (tmp_array)); for (i = 0; i < mono_ptr_array_size (tmp_array); ++i) mono_array_setref (res, i, mono_ptr_array_get (tmp_array, i)); mono_ptr_array_destroy (tmp_array); return res; } static gboolean method_nonpublic (MonoMethod* method, gboolean start_klass) { switch (method->flags & METHOD_ATTRIBUTE_MEMBER_ACCESS_MASK) { case METHOD_ATTRIBUTE_ASSEM: return (start_klass || mono_defaults.generic_ilist_class); case METHOD_ATTRIBUTE_PRIVATE: return start_klass; case METHOD_ATTRIBUTE_PUBLIC: return FALSE; default: return TRUE; } } static MonoArray* ves_icall_Type_GetMethodsByName (MonoReflectionType *type, MonoString *name, guint32 bflags, MonoBoolean 
ignore_case, MonoReflectionType *reftype) { static MonoClass *MethodInfo_array; MonoDomain *domain; MonoClass *startklass, *klass, *refklass; MonoArray *res; MonoMethod *method; gpointer iter; MonoObject *member; int i, len, match, nslots; /*FIXME, use MonoBitSet*/ guint32 method_slots_default [8]; guint32 *method_slots = NULL; gchar *mname = NULL; int (*compare_func) (const char *s1, const char *s2) = NULL; MonoVTable *array_vtable; MonoException *ex; MonoPtrArray tmp_array; MONO_ARCH_SAVE_REGS; mono_ptr_array_init (tmp_array, 4); if (!MethodInfo_array) { MonoClass *klass = mono_array_class_get (mono_defaults.method_info_class, 1); mono_memory_barrier (); MethodInfo_array = klass; } domain = ((MonoObject *)type)->vtable->domain; array_vtable = mono_class_vtable_full (domain, MethodInfo_array, TRUE); if (type->type->byref) return mono_array_new_specific (array_vtable, 0); klass = startklass = mono_class_from_mono_type (type->type); refklass = mono_class_from_mono_type (reftype->type); len = 0; if (name != NULL) { mname = mono_string_to_utf8 (name); compare_func = (ignore_case) ? mono_utf8_strcasecmp : strcmp; } /* An optimization for calls made from Delegate:CreateDelegate () */ if (klass->delegate && mname && !strcmp (mname, "Invoke") && (bflags == (BFLAGS_Public | BFLAGS_Static | BFLAGS_Instance))) { method = mono_get_delegate_invoke (klass); if (mono_loader_get_last_error ()) goto loader_error; member = (MonoObject*)mono_method_get_object (domain, method, refklass); res = mono_array_new_specific (array_vtable, 1); mono_array_setref (res, 0, member); g_free (mname); return res; } mono_class_setup_vtable (klass); if (klass->exception_type != MONO_EXCEPTION_NONE || mono_loader_get_last_error ()) goto loader_error; if (is_generic_parameter (type->type)) nslots = mono_class_get_vtable_size (klass->parent); else nslots = MONO_CLASS_IS_INTERFACE (klass) ? mono_class_num_methods (klass) : mono_class_get_vtable_size (klass); if (nslots >= sizeof (method_slots_default) * 8) { method_slots = g_new0 (guint32, nslots / 32 + 1); } else { method_slots = method_slots_default; memset (method_slots, 0, sizeof (method_slots_default)); } handle_parent: mono_class_setup_vtable (klass); if (klass->exception_type != MONO_EXCEPTION_NONE || mono_loader_get_last_error ()) goto loader_error; iter = NULL; while ((method = mono_class_get_methods (klass, &iter))) { match = 0; if (method->slot != -1) { g_assert (method->slot < nslots); if (method_slots [method->slot >> 5] & (1 << (method->slot & 0x1f))) continue; if (!(method->flags & METHOD_ATTRIBUTE_NEW_SLOT)) method_slots [method->slot >> 5] |= 1 << (method->slot & 0x1f); } if (method->name [0] == '.' 
&& (strcmp (method->name, ".ctor") == 0 || strcmp (method->name, ".cctor") == 0)) continue; if ((method->flags & METHOD_ATTRIBUTE_MEMBER_ACCESS_MASK) == METHOD_ATTRIBUTE_PUBLIC) { if (bflags & BFLAGS_Public) match++; } else if ((bflags & BFLAGS_NonPublic) && method_nonpublic (method, (klass == startklass))) { match++; } if (!match) continue; match = 0; if (method->flags & METHOD_ATTRIBUTE_STATIC) { if (bflags & BFLAGS_Static) if ((bflags & BFLAGS_FlattenHierarchy) || (klass == startklass)) match++; } else { if (bflags & BFLAGS_Instance) match++; } if (!match) continue; if (name != NULL) { if (compare_func (mname, method->name)) continue; } match = 0; member = (MonoObject*)mono_method_get_object (domain, method, refklass); mono_ptr_array_append (tmp_array, member); } if (!(bflags & BFLAGS_DeclaredOnly) && (klass = klass->parent)) goto handle_parent; g_free (mname); if (method_slots != method_slots_default) g_free (method_slots); res = mono_array_new_specific (array_vtable, mono_ptr_array_size (tmp_array)); for (i = 0; i < mono_ptr_array_size (tmp_array); ++i) mono_array_setref (res, i, mono_ptr_array_get (tmp_array, i)); mono_ptr_array_destroy (tmp_array); return res; loader_error: g_free (mname); if (method_slots != method_slots_default) g_free (method_slots); mono_ptr_array_destroy (tmp_array); if (klass->exception_type != MONO_EXCEPTION_NONE) { ex = mono_class_get_exception_for_failure (klass); } else { ex = mono_loader_error_prepare_exception (mono_loader_get_last_error ()); mono_loader_clear_error (); } mono_raise_exception (ex); return NULL; } static MonoArray* ves_icall_Type_GetConstructors_internal (MonoReflectionType *type, guint32 bflags, MonoReflectionType *reftype) { MonoDomain *domain; static MonoClass *System_Reflection_ConstructorInfo; MonoClass *startklass, *klass, *refklass; MonoArray *res; MonoMethod *method; MonoObject *member; int i, match; gpointer iter = NULL; MonoPtrArray tmp_array; MONO_ARCH_SAVE_REGS; mono_ptr_array_init (tmp_array, 4); /*FIXME, guestimating*/ domain = ((MonoObject *)type)->vtable->domain; if (type->type->byref) return mono_array_new_cached (domain, mono_defaults.method_info_class, 0); klass = startklass = mono_class_from_mono_type (type->type); refklass = mono_class_from_mono_type (reftype->type); if (klass->exception_type != MONO_EXCEPTION_NONE) mono_raise_exception (mono_class_get_exception_for_failure (klass)); if (!System_Reflection_ConstructorInfo) System_Reflection_ConstructorInfo = mono_class_from_name ( mono_defaults.corlib, "System.Reflection", "ConstructorInfo"); iter = NULL; while ((method = mono_class_get_methods (klass, &iter))) { match = 0; if (strcmp (method->name, ".ctor") && strcmp (method->name, ".cctor")) continue; if ((method->flags & METHOD_ATTRIBUTE_MEMBER_ACCESS_MASK) == METHOD_ATTRIBUTE_PUBLIC) { if (bflags & BFLAGS_Public) match++; } else { if (bflags & BFLAGS_NonPublic) match++; } if (!match) continue; match = 0; if (method->flags & METHOD_ATTRIBUTE_STATIC) { if (bflags & BFLAGS_Static) if ((bflags & BFLAGS_FlattenHierarchy) || (klass == startklass)) match++; } else { if (bflags & BFLAGS_Instance) match++; } if (!match) continue; member = (MonoObject*)mono_method_get_object (domain, method, refklass); mono_ptr_array_append (tmp_array, member); } res = mono_array_new_cached (domain, System_Reflection_ConstructorInfo, mono_ptr_array_size (tmp_array)); for (i = 0; i < mono_ptr_array_size (tmp_array); ++i) mono_array_setref (res, i, mono_ptr_array_get (tmp_array, i)); mono_ptr_array_destroy (tmp_array); return res; } static 
guint property_hash (gconstpointer data) { MonoProperty *prop = (MonoProperty*)data; return g_str_hash (prop->name); } static gboolean property_equal (MonoProperty *prop1, MonoProperty *prop2) { // Properties are hide-by-name-and-signature if (!g_str_equal (prop1->name, prop2->name)) return FALSE; if (prop1->get && prop2->get && !mono_metadata_signature_equal (mono_method_signature (prop1->get), mono_method_signature (prop2->get))) return FALSE; if (prop1->set && prop2->set && !mono_metadata_signature_equal (mono_method_signature (prop1->set), mono_method_signature (prop2->set))) return FALSE; return TRUE; } static gboolean property_accessor_nonpublic (MonoMethod* accessor, gboolean start_klass) { if (!accessor) return FALSE; return method_nonpublic (accessor, start_klass); } static MonoArray* ves_icall_Type_GetPropertiesByName (MonoReflectionType *type, MonoString *name, guint32 bflags, MonoBoolean ignore_case, MonoReflectionType *reftype) { MonoDomain *domain; static MonoClass *System_Reflection_PropertyInfo; MonoClass *startklass, *klass; MonoArray *res; MonoMethod *method; MonoProperty *prop; int i, match; guint32 flags; gchar *propname = NULL; int (*compare_func) (const char *s1, const char *s2) = NULL; gpointer iter; GHashTable *properties; MonoPtrArray tmp_array; MONO_ARCH_SAVE_REGS; mono_ptr_array_init (tmp_array, 8); /*This the average for ASP.NET types*/ if (!System_Reflection_PropertyInfo) System_Reflection_PropertyInfo = mono_class_from_name ( mono_defaults.corlib, "System.Reflection", "PropertyInfo"); domain = ((MonoObject *)type)->vtable->domain; if (type->type->byref) return mono_array_new_cached (domain, System_Reflection_PropertyInfo, 0); klass = startklass = mono_class_from_mono_type (type->type); if (name != NULL) { propname = mono_string_to_utf8 (name); compare_func = (ignore_case) ? 
mono_utf8_strcasecmp : strcmp; } mono_class_setup_vtable (klass); properties = g_hash_table_new (property_hash, (GEqualFunc)property_equal); handle_parent: mono_class_setup_vtable (klass); if (klass->exception_type != MONO_EXCEPTION_NONE) { g_hash_table_destroy (properties); if (name != NULL) g_free (propname); mono_raise_exception (mono_class_get_exception_for_failure (klass)); } iter = NULL; while ((prop = mono_class_get_properties (klass, &iter))) { match = 0; method = prop->get; if (!method) method = prop->set; if (method) flags = method->flags; else flags = 0; if ((prop->get && ((prop->get->flags & METHOD_ATTRIBUTE_MEMBER_ACCESS_MASK) == METHOD_ATTRIBUTE_PUBLIC)) || (prop->set && ((prop->set->flags & METHOD_ATTRIBUTE_MEMBER_ACCESS_MASK) == METHOD_ATTRIBUTE_PUBLIC))) { if (bflags & BFLAGS_Public) match++; } else if (bflags & BFLAGS_NonPublic) { if (property_accessor_nonpublic(prop->get, startklass == klass) || property_accessor_nonpublic(prop->set, startklass == klass)) { match++; } } if (!match) continue; match = 0; if (flags & METHOD_ATTRIBUTE_STATIC) { if (bflags & BFLAGS_Static) if ((bflags & BFLAGS_FlattenHierarchy) || (klass == startklass)) match++; } else { if (bflags & BFLAGS_Instance) match++; } if (!match) continue; match = 0; if (name != NULL) { if (compare_func (propname, prop->name)) continue; } if (g_hash_table_lookup (properties, prop)) continue; mono_ptr_array_append (tmp_array, mono_property_get_object (domain, startklass, prop)); g_hash_table_insert (properties, prop, prop); } if ((!(bflags & BFLAGS_DeclaredOnly) && (klass = klass->parent))) goto handle_parent; g_hash_table_destroy (properties); g_free (propname); res = mono_array_new_cached (domain, System_Reflection_PropertyInfo, mono_ptr_array_size (tmp_array)); for (i = 0; i < mono_ptr_array_size (tmp_array); ++i) mono_array_setref (res, i, mono_ptr_array_get (tmp_array, i)); mono_ptr_array_destroy (tmp_array); return res; } static MonoReflectionEvent * ves_icall_MonoType_GetEvent (MonoReflectionType *type, MonoString *name, guint32 bflags) { MonoDomain *domain; MonoClass *klass, *startklass; gpointer iter; MonoEvent *event; MonoMethod *method; gchar *event_name; int (*compare_func) (const char *s1, const char *s2) = NULL; MONO_ARCH_SAVE_REGS; event_name = mono_string_to_utf8 (name); if (type->type->byref) return NULL; klass = startklass = mono_class_from_mono_type (type->type); domain = mono_object_domain (type); compare_func = (bflags & BFLAGS_IgnoreCase) ? 
mono_utf8_strcasecmp : strcmp; handle_parent: if (klass->exception_type != MONO_EXCEPTION_NONE) mono_raise_exception (mono_class_get_exception_for_failure (klass)); iter = NULL; while ((event = mono_class_get_events (klass, &iter))) { if (compare_func (event->name, event_name)) continue; method = event->add; if (!method) method = event->remove; if (!method) method = event->raise; if (method) { if ((method->flags & METHOD_ATTRIBUTE_MEMBER_ACCESS_MASK) == METHOD_ATTRIBUTE_PUBLIC) { if (!(bflags & BFLAGS_Public)) continue; } else { if (!(bflags & BFLAGS_NonPublic)) continue; if ((klass != startklass) && (method->flags & METHOD_ATTRIBUTE_MEMBER_ACCESS_MASK) == METHOD_ATTRIBUTE_PRIVATE) continue; } if (method->flags & METHOD_ATTRIBUTE_STATIC) { if (!(bflags & BFLAGS_Static)) continue; if (!(bflags & BFLAGS_FlattenHierarchy) && (klass != startklass)) continue; } else { if (!(bflags & BFLAGS_Instance)) continue; } } else if (!(bflags & BFLAGS_NonPublic)) continue; g_free (event_name); return mono_event_get_object (domain, startklass, event); } if (!(bflags & BFLAGS_DeclaredOnly) && (klass = klass->parent)) goto handle_parent; g_free (event_name); return NULL; } static MonoArray* ves_icall_Type_GetEvents_internal (MonoReflectionType *type, guint32 bflags, MonoReflectionType *reftype) { MonoDomain *domain; static MonoClass *System_Reflection_EventInfo; MonoClass *startklass, *klass; MonoArray *res; MonoMethod *method; MonoEvent *event; int i, match; gpointer iter; MonoPtrArray tmp_array; MONO_ARCH_SAVE_REGS; mono_ptr_array_init (tmp_array, 4); if (!System_Reflection_EventInfo) System_Reflection_EventInfo = mono_class_from_name ( mono_defaults.corlib, "System.Reflection", "EventInfo"); domain = mono_object_domain (type); if (type->type->byref) return mono_array_new_cached (domain, System_Reflection_EventInfo, 0); klass = startklass = mono_class_from_mono_type (type->type); handle_parent: if (klass->exception_type != MONO_EXCEPTION_NONE) mono_raise_exception (mono_class_get_exception_for_failure (klass)); iter = NULL; while ((event = mono_class_get_events (klass, &iter))) { match = 0; method = event->add; if (!method) method = event->remove; if (!method) method = event->raise; if (method) { if ((method->flags & METHOD_ATTRIBUTE_MEMBER_ACCESS_MASK) == METHOD_ATTRIBUTE_PUBLIC) { if (bflags & BFLAGS_Public) match++; } else if ((klass == startklass) || (method->flags & METHOD_ATTRIBUTE_MEMBER_ACCESS_MASK) != METHOD_ATTRIBUTE_PRIVATE) { if (bflags & BFLAGS_NonPublic) match++; } } else if (bflags & BFLAGS_NonPublic) match ++; if (!match) continue; match = 0; if (method) { if (method->flags & METHOD_ATTRIBUTE_STATIC) { if (bflags & BFLAGS_Static) if ((bflags & BFLAGS_FlattenHierarchy) || (klass == startklass)) match++; } else { if (bflags & BFLAGS_Instance) match++; } } else if (bflags & BFLAGS_Instance) match ++; if (!match) continue; mono_ptr_array_append (tmp_array, mono_event_get_object (domain, startklass, event)); } if (!(bflags & BFLAGS_DeclaredOnly) && (klass = klass->parent)) goto handle_parent; res = mono_array_new_cached (domain, System_Reflection_EventInfo, mono_ptr_array_size (tmp_array)); for (i = 0; i < mono_ptr_array_size (tmp_array); ++i) mono_array_setref (res, i, mono_ptr_array_get (tmp_array, i)); mono_ptr_array_destroy (tmp_array); return res; } static MonoReflectionType * ves_icall_Type_GetNestedType (MonoReflectionType *type, MonoString *name, guint32 bflags) { MonoDomain *domain; MonoClass *klass; MonoClass *nested; char *str; gpointer iter; MONO_ARCH_SAVE_REGS; if (name == NULL) 
mono_raise_exception (mono_get_exception_argument_null ("name")); domain = ((MonoObject *)type)->vtable->domain; if (type->type->byref) return NULL; klass = mono_class_from_mono_type (type->type); str = mono_string_to_utf8 (name); handle_parent: if (klass->exception_type != MONO_EXCEPTION_NONE) mono_raise_exception (mono_class_get_exception_for_failure (klass)); /* * If a nested type is generic, return its generic type definition. * Note that this means that the return value is essentially a * nested type of the generic type definition of @klass. * * A note in MSDN claims that a generic type definition can have * nested types that aren't generic. In any case, the container of that * nested type would be the generic type definition. */ if (klass->generic_class) klass = klass->generic_class->container_class; iter = NULL; while ((nested = mono_class_get_nested_types (klass, &iter))) { int match = 0; if ((nested->flags & TYPE_ATTRIBUTE_VISIBILITY_MASK) == TYPE_ATTRIBUTE_NESTED_PUBLIC) { if (bflags & BFLAGS_Public) match++; } else { if (bflags & BFLAGS_NonPublic) match++; } if (!match) continue; if (strcmp (nested->name, str) == 0){ g_free (str); return mono_type_get_object (domain, &nested->byval_arg); } } if (!(bflags & BFLAGS_DeclaredOnly) && (klass = klass->parent)) goto handle_parent; g_free (str); return NULL; } static MonoArray* ves_icall_Type_GetNestedTypes (MonoReflectionType *type, guint32 bflags) { MonoDomain *domain; MonoClass *klass; MonoArray *res; MonoObject *member; int i, match; MonoClass *nested; gpointer iter; MonoPtrArray tmp_array; MONO_ARCH_SAVE_REGS; domain = ((MonoObject *)type)->vtable->domain; if (type->type->byref) return mono_array_new (domain, mono_defaults.monotype_class, 0); klass = mono_class_from_mono_type (type->type); if (klass->exception_type != MONO_EXCEPTION_NONE) mono_raise_exception (mono_class_get_exception_for_failure (klass)); /* * If a nested type is generic, return its generic type definition. * Note that this means that the return value is essentially the set * of nested types of the generic type definition of @klass. * * A note in MSDN claims that a generic type definition can have * nested types that aren't generic. In any case, the container of that * nested type would be the generic type definition. 
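 * That is why the code below replaces klass with generic_class->container_class before walking its nested types.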
*/ if (klass->generic_class) klass = klass->generic_class->container_class; mono_ptr_array_init (tmp_array, 1); iter = NULL; while ((nested = mono_class_get_nested_types (klass, &iter))) { match = 0; if ((nested->flags & TYPE_ATTRIBUTE_VISIBILITY_MASK) == TYPE_ATTRIBUTE_NESTED_PUBLIC) { if (bflags & BFLAGS_Public) match++; } else { if (bflags & BFLAGS_NonPublic) match++; } if (!match) continue; member = (MonoObject*)mono_type_get_object (domain, &nested->byval_arg); mono_ptr_array_append (tmp_array, member); } res = mono_array_new_cached (domain, mono_defaults.monotype_class, mono_ptr_array_size (tmp_array)); for (i = 0; i < mono_ptr_array_size (tmp_array); ++i) mono_array_setref (res, i, mono_ptr_array_get (tmp_array, i)); mono_ptr_array_destroy (tmp_array); return res; } static MonoReflectionType* ves_icall_System_Reflection_Assembly_InternalGetType (MonoReflectionAssembly *assembly, MonoReflectionModule *module, MonoString *name, MonoBoolean throwOnError, MonoBoolean ignoreCase) { gchar *str; MonoType *type = NULL; MonoTypeNameParse info; gboolean type_resolve; MONO_ARCH_SAVE_REGS; /* On MS.NET, this does not fire a TypeResolve event */ type_resolve = TRUE; str = mono_string_to_utf8 (name); /*g_print ("requested type %s in %s\n", str, assembly->assembly->aname.name);*/ if (!mono_reflection_parse_type (str, &info)) { g_free (str); mono_reflection_free_type_info (&info); if (throwOnError) /* uhm: this is a parse error, though... */ mono_raise_exception (mono_get_exception_type_load (name, NULL)); /*g_print ("failed parse\n");*/ return NULL; } if (info.assembly.name) { g_free (str); mono_reflection_free_type_info (&info); if (throwOnError) { /* 1.0 and 2.0 throw different exceptions */ if (mono_defaults.generic_ilist_class) mono_raise_exception (mono_get_exception_argument (NULL, "Type names passed to Assembly.GetType() must not specify an assembly.")); else mono_raise_exception (mono_get_exception_type_load (name, NULL)); } return NULL; } if (module != NULL) { if (module->image) type = mono_reflection_get_type (module->image, &info, ignoreCase, &type_resolve); else type = NULL; } else if (assembly->assembly->dynamic) { /* Enumerate all modules */ MonoReflectionAssemblyBuilder *abuilder = (MonoReflectionAssemblyBuilder*)assembly; int i; type = NULL; if (abuilder->modules) { for (i = 0; i < mono_array_length (abuilder->modules); ++i) { MonoReflectionModuleBuilder *mb = mono_array_get (abuilder->modules, MonoReflectionModuleBuilder*, i); type = mono_reflection_get_type (&mb->dynamic_image->image, &info, ignoreCase, &type_resolve); if (type) break; } } if (!type && abuilder->loaded_modules) { for (i = 0; i < mono_array_length (abuilder->loaded_modules); ++i) { MonoReflectionModule *mod = mono_array_get (abuilder->loaded_modules, MonoReflectionModule*, i); type = mono_reflection_get_type (mod->image, &info, ignoreCase, &type_resolve); if (type) break; } } } else type = mono_reflection_get_type (assembly->assembly->image, &info, ignoreCase, &type_resolve); g_free (str); mono_reflection_free_type_info (&info); if (!type) { MonoException *e = NULL; if (throwOnError) e = mono_get_exception_type_load (name, NULL); if (mono_loader_get_last_error () && mono_defaults.generic_ilist_class) e = mono_loader_error_prepare_exception (mono_loader_get_last_error ()); mono_loader_clear_error (); if (e != NULL) mono_raise_exception (e); return NULL; } if (type->type == MONO_TYPE_CLASS) { MonoClass *klass = mono_type_get_class (type); if (mono_is_security_manager_active () && !klass->exception_type) /* Some 
security problems are detected during generic vtable construction */ mono_class_setup_vtable (klass); /* need to report exceptions ? */ if (throwOnError && klass->exception_type) { /* report SecurityException (or others) that occured when loading the assembly */ MonoException *exc = mono_class_get_exception_for_failure (klass); mono_loader_clear_error (); mono_raise_exception (exc); } else if (klass->exception_type == MONO_EXCEPTION_SECURITY_INHERITANCEDEMAND) { return NULL; } } /* g_print ("got it\n"); */ return mono_type_get_object (mono_object_domain (assembly), type); } static gboolean replace_shadow_path (MonoDomain *domain, gchar *dirname, gchar **filename) { gchar *content; gchar *shadow_ini_file; gsize len; /* Check for shadow-copied assembly */ if (mono_is_shadow_copy_enabled (domain, dirname)) { shadow_ini_file = g_build_filename (dirname, "__AssemblyInfo__.ini", NULL); content = NULL; if (!g_file_get_contents (shadow_ini_file, &content, &len, NULL) || !g_file_test (content, G_FILE_TEST_IS_REGULAR)) { if (content) { g_free (content); content = NULL; } } g_free (shadow_ini_file); if (content != NULL) { if (*filename) g_free (*filename); *filename = content; return TRUE; } } return FALSE; } static MonoString * ves_icall_System_Reflection_Assembly_get_code_base (MonoReflectionAssembly *assembly, MonoBoolean escaped) { MonoDomain *domain = mono_object_domain (assembly); MonoAssembly *mass = assembly->assembly; MonoString *res = NULL; gchar *uri; gchar *absolute; gchar *dirname; MONO_ARCH_SAVE_REGS; if (g_path_is_absolute (mass->image->name)) { absolute = g_strdup (mass->image->name); dirname = g_path_get_dirname (absolute); } else { absolute = g_build_filename (mass->basedir, mass->image->name, NULL); dirname = g_strdup (mass->basedir); } replace_shadow_path (domain, dirname, &absolute); g_free (dirname); #if PLATFORM_WIN32 { gint i; for (i = strlen (absolute) - 1; i >= 0; i--) if (absolute [i] == '\\') absolute [i] = '/'; } #endif if (escaped) { uri = g_filename_to_uri (absolute, NULL, NULL); } else { const char *prepend = "file://"; #if PLATFORM_WIN32 if (*absolute == '/' && *(absolute + 1) == '/') { prepend = "file:"; } else { prepend = "file:///"; } #endif uri = g_strconcat (prepend, absolute, NULL); } if (uri) { res = mono_string_new (domain, uri); g_free (uri); } g_free (absolute); return res; } static MonoBoolean ves_icall_System_Reflection_Assembly_get_global_assembly_cache (MonoReflectionAssembly *assembly) { MonoAssembly *mass = assembly->assembly; MONO_ARCH_SAVE_REGS; return mass->in_gac; } static MonoReflectionAssembly* ves_icall_System_Reflection_Assembly_load_with_partial_name (MonoString *mname, MonoObject *evidence) { gchar *name; MonoAssembly *res; MonoImageOpenStatus status; MONO_ARCH_SAVE_REGS; name = mono_string_to_utf8 (mname); res = mono_assembly_load_with_partial_name (name, &status); g_free (name); if (res == NULL) return NULL; return mono_assembly_get_object (mono_domain_get (), res); } static MonoString * ves_icall_System_Reflection_Assembly_get_location (MonoReflectionAssembly *assembly) { MonoDomain *domain = mono_object_domain (assembly); MonoString *res; MONO_ARCH_SAVE_REGS; res = mono_string_new (domain, mono_image_get_filename (assembly->assembly->image)); return res; } static MonoBoolean ves_icall_System_Reflection_Assembly_get_ReflectionOnly (MonoReflectionAssembly *assembly) { MONO_ARCH_SAVE_REGS; return assembly->assembly->ref_only; } static MonoString * ves_icall_System_Reflection_Assembly_InternalImageRuntimeVersion (MonoReflectionAssembly 
*assembly) { MonoDomain *domain = mono_object_domain (assembly); MONO_ARCH_SAVE_REGS; return mono_string_new (domain, assembly->assembly->image->version); } static MonoReflectionMethod* ves_icall_System_Reflection_Assembly_get_EntryPoint (MonoReflectionAssembly *assembly) { guint32 token = mono_image_get_entry_point (assembly->assembly->image); MONO_ARCH_SAVE_REGS; if (!token) return NULL; return mono_method_get_object (mono_object_domain (assembly), mono_get_method (assembly->assembly->image, token, NULL), NULL); } static MonoReflectionModule* ves_icall_System_Reflection_Assembly_GetManifestModuleInternal (MonoReflectionAssembly *assembly) { return mono_module_get_object (mono_object_domain (assembly), assembly->assembly->image); } static MonoArray* ves_icall_System_Reflection_Assembly_GetManifestResourceNames (MonoReflectionAssembly *assembly) { MonoTableInfo *table = &assembly->assembly->image->tables [MONO_TABLE_MANIFESTRESOURCE]; MonoArray *result = mono_array_new (mono_object_domain (assembly), mono_defaults.string_class, table->rows); int i; const char *val; MONO_ARCH_SAVE_REGS; for (i = 0; i < table->rows; ++i) { val = mono_metadata_string_heap (assembly->assembly->image, mono_metadata_decode_row_col (table, i, MONO_MANIFEST_NAME)); mono_array_setref (result, i, mono_string_new (mono_object_domain (assembly), val)); } return result; } static MonoObject* create_version (MonoDomain *domain, guint32 major, guint32 minor, guint32 build, guint32 revision) { static MonoClass *System_Version = NULL; static MonoMethod *create_version = NULL; MonoObject *result; gpointer args [4]; if (!System_Version) { System_Version = mono_class_from_name (mono_defaults.corlib, "System", "Version"); g_assert (System_Version); } if (!create_version) { MonoMethodDesc *desc = mono_method_desc_new (":.ctor(int,int,int,int)", FALSE); create_version = mono_method_desc_search_in_class (desc, System_Version); g_assert (create_version); mono_method_desc_free (desc); } args [0] = &major; args [1] = &minor; args [2] = &build; args [3] = &revision; result = mono_object_new (domain, System_Version); mono_runtime_invoke (create_version, result, args, NULL); return result; } static MonoArray* ves_icall_System_Reflection_Assembly_GetReferencedAssemblies (MonoReflectionAssembly *assembly) { static MonoClass *System_Reflection_AssemblyName; MonoArray *result; MonoDomain *domain = mono_object_domain (assembly); int i, count = 0; static MonoMethod *create_culture = NULL; MonoImage *image = assembly->assembly->image; MonoTableInfo *t; MONO_ARCH_SAVE_REGS; if (!System_Reflection_AssemblyName) System_Reflection_AssemblyName = mono_class_from_name ( mono_defaults.corlib, "System.Reflection", "AssemblyName"); t = &assembly->assembly->image->tables [MONO_TABLE_ASSEMBLYREF]; count = t->rows; result = mono_array_new (domain, System_Reflection_AssemblyName, count); if (count > 0 && !create_culture) { MonoMethodDesc *desc = mono_method_desc_new ( "System.Globalization.CultureInfo:CreateCulture(string,bool)", TRUE); create_culture = mono_method_desc_search_in_image (desc, mono_defaults.corlib); g_assert (create_culture); mono_method_desc_free (desc); } for (i = 0; i < count; i++) { MonoReflectionAssemblyName *aname; guint32 cols [MONO_ASSEMBLYREF_SIZE]; mono_metadata_decode_row (t, i, cols, MONO_ASSEMBLYREF_SIZE); aname = (MonoReflectionAssemblyName *) mono_object_new ( domain, System_Reflection_AssemblyName); MONO_OBJECT_SETREF (aname, name, mono_string_new (domain, mono_metadata_string_heap (image, cols [MONO_ASSEMBLYREF_NAME]))); 
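/* The remaining AssemblyName fields are copied straight from the AssemblyRef row decoded above; the Version and CultureInfo objects are built through the managed helpers resolved earlier. */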
aname->major = cols [MONO_ASSEMBLYREF_MAJOR_VERSION]; aname->minor = cols [MONO_ASSEMBLYREF_MINOR_VERSION]; aname->build = cols [MONO_ASSEMBLYREF_BUILD_NUMBER]; aname->revision = cols [MONO_ASSEMBLYREF_REV_NUMBER]; aname->flags = cols [MONO_ASSEMBLYREF_FLAGS]; aname->versioncompat = 1; /* SameMachine (default) */ aname->hashalg = ASSEMBLY_HASH_SHA1; /* SHA1 (default) */ MONO_OBJECT_SETREF (aname, version, create_version (domain, aname->major, aname->minor, aname->build, aname->revision)); if (create_culture) { gpointer args [2]; MonoBoolean assembly_ref = 1; args [0] = mono_string_new (domain, mono_metadata_string_heap (image, cols [MONO_ASSEMBLYREF_CULTURE])); args [1] = &assembly_ref; MONO_OBJECT_SETREF (aname, cultureInfo, mono_runtime_invoke (create_culture, NULL, args, NULL)); } if (cols [MONO_ASSEMBLYREF_PUBLIC_KEY]) { const gchar *pkey_ptr = mono_metadata_blob_heap (image, cols [MONO_ASSEMBLYREF_PUBLIC_KEY]); guint32 pkey_len = mono_metadata_decode_blob_size (pkey_ptr, &pkey_ptr); if ((cols [MONO_ASSEMBLYREF_FLAGS] & ASSEMBLYREF_FULL_PUBLIC_KEY_FLAG)) { /* public key token isn't copied - the class library will automatically generate it from the public key if required */ MONO_OBJECT_SETREF (aname, publicKey, mono_array_new (domain, mono_defaults.byte_class, pkey_len)); memcpy (mono_array_addr (aname->publicKey, guint8, 0), pkey_ptr, pkey_len); } else { MONO_OBJECT_SETREF (aname, keyToken, mono_array_new (domain, mono_defaults.byte_class, pkey_len)); memcpy (mono_array_addr (aname->keyToken, guint8, 0), pkey_ptr, pkey_len); } } else { MONO_OBJECT_SETREF (aname, keyToken, mono_array_new (domain, mono_defaults.byte_class, 0)); } /* note: this function doesn't return the codebase on purpose (i.e. it can be used under partial trust as path information isn't present). 
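 * Only information stored in the AssemblyRef row itself (name, version, culture, flags and key data) is exposed.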
*/ mono_array_setref (result, i, aname); } return result; } typedef struct { MonoArray *res; int idx; } NameSpaceInfo; static void foreach_namespace (const char* key, gconstpointer val, NameSpaceInfo *info) { MonoString *name = mono_string_new (mono_object_domain (info->res), key); mono_array_setref (info->res, info->idx, name); info->idx++; } static MonoArray* ves_icall_System_Reflection_Assembly_GetNamespaces (MonoReflectionAssembly *assembly) { MonoImage *img = assembly->assembly->image; MonoArray *res; NameSpaceInfo info; int len; MONO_ARCH_SAVE_REGS; mono_image_lock (img); mono_image_init_name_cache (img); RETRY_LEN: len = g_hash_table_size (img->name_cache); mono_image_unlock (img); /*we can't create objects holding the image lock */ res = mono_array_new (mono_object_domain (assembly), mono_defaults.string_class, len); mono_image_lock (img); /*len might have changed, create a new array*/ if (len != g_hash_table_size (img->name_cache)) goto RETRY_LEN; info.res = res; info.idx = 0; g_hash_table_foreach (img->name_cache, (GHFunc)foreach_namespace, &info); mono_image_unlock (img); return res; } /* move this in some file in mono/util/ */ static char * g_concat_dir_and_file (const char *dir, const char *file) { g_return_val_if_fail (dir != NULL, NULL); g_return_val_if_fail (file != NULL, NULL); /* * If the directory name doesn't have a / on the end, we need * to add one so we get a proper path to the file */ if (dir [strlen(dir) - 1] != G_DIR_SEPARATOR) return g_strconcat (dir, G_DIR_SEPARATOR_S, file, NULL); else return g_strconcat (dir, file, NULL); } static void * ves_icall_System_Reflection_Assembly_GetManifestResourceInternal (MonoReflectionAssembly *assembly, MonoString *name, gint32 *size, MonoReflectionModule **ref_module) { char *n = mono_string_to_utf8 (name); MonoTableInfo *table = &assembly->assembly->image->tables [MONO_TABLE_MANIFESTRESOURCE]; guint32 i; guint32 cols [MONO_MANIFEST_SIZE]; guint32 impl, file_idx; const char *val; MonoImage *module; MONO_ARCH_SAVE_REGS; for (i = 0; i < table->rows; ++i) { mono_metadata_decode_row (table, i, cols, MONO_MANIFEST_SIZE); val = mono_metadata_string_heap (assembly->assembly->image, cols [MONO_MANIFEST_NAME]); if (strcmp (val, n) == 0) break; } g_free (n); if (i == table->rows) return NULL; /* FIXME */ impl = cols [MONO_MANIFEST_IMPLEMENTATION]; if (impl) { /* * this code should only be called after obtaining the * ResourceInfo and handling the other cases. 
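 * By the time this runs only the FILE implementation case is left, which the assertion below verifies before loading the referenced module image.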
*/ g_assert ((impl & MONO_IMPLEMENTATION_MASK) == MONO_IMPLEMENTATION_FILE); file_idx = impl >> MONO_IMPLEMENTATION_BITS; module = mono_image_load_file_for_image (assembly->assembly->image, file_idx); if (!module) return NULL; } else module = assembly->assembly->image; mono_gc_wbarrier_generic_store (ref_module, (MonoObject*) mono_module_get_object (mono_domain_get (), module)); return (void*)mono_image_get_resource (module, cols [MONO_MANIFEST_OFFSET], (guint32*)size); } static gboolean ves_icall_System_Reflection_Assembly_GetManifestResourceInfoInternal (MonoReflectionAssembly *assembly, MonoString *name, MonoManifestResourceInfo *info) { MonoTableInfo *table = &assembly->assembly->image->tables [MONO_TABLE_MANIFESTRESOURCE]; int i; guint32 cols [MONO_MANIFEST_SIZE]; guint32 file_cols [MONO_FILE_SIZE]; const char *val; char *n; MONO_ARCH_SAVE_REGS; n = mono_string_to_utf8 (name); for (i = 0; i < table->rows; ++i) { mono_metadata_decode_row (table, i, cols, MONO_MANIFEST_SIZE); val = mono_metadata_string_heap (assembly->assembly->image, cols [MONO_MANIFEST_NAME]); if (strcmp (val, n) == 0) break; } g_free (n); if (i == table->rows) return FALSE; if (!cols [MONO_MANIFEST_IMPLEMENTATION]) { info->location = RESOURCE_LOCATION_EMBEDDED | RESOURCE_LOCATION_IN_MANIFEST; } else { switch (cols [MONO_MANIFEST_IMPLEMENTATION] & MONO_IMPLEMENTATION_MASK) { case MONO_IMPLEMENTATION_FILE: i = cols [MONO_MANIFEST_IMPLEMENTATION] >> MONO_IMPLEMENTATION_BITS; table = &assembly->assembly->image->tables [MONO_TABLE_FILE]; mono_metadata_decode_row (table, i - 1, file_cols, MONO_FILE_SIZE); val = mono_metadata_string_heap (assembly->assembly->image, file_cols [MONO_FILE_NAME]); MONO_OBJECT_SETREF (info, filename, mono_string_new (mono_object_domain (assembly), val)); if (file_cols [MONO_FILE_FLAGS] && FILE_CONTAINS_NO_METADATA) info->location = 0; else info->location = RESOURCE_LOCATION_EMBEDDED; break; case MONO_IMPLEMENTATION_ASSEMBLYREF: i = cols [MONO_MANIFEST_IMPLEMENTATION] >> MONO_IMPLEMENTATION_BITS; mono_assembly_load_reference (assembly->assembly->image, i - 1); if (assembly->assembly->image->references [i - 1] == (gpointer)-1) { char *msg = g_strdup_printf ("Assembly %d referenced from assembly %s not found ", i - 1, assembly->assembly->image->name); MonoException *ex = mono_get_exception_file_not_found2 (msg, NULL); g_free (msg); mono_raise_exception (ex); } MONO_OBJECT_SETREF (info, assembly, mono_assembly_get_object (mono_domain_get (), assembly->assembly->image->references [i - 1])); /* Obtain info recursively */ ves_icall_System_Reflection_Assembly_GetManifestResourceInfoInternal (info->assembly, name, info); info->location |= RESOURCE_LOCATION_ANOTHER_ASSEMBLY; break; case MONO_IMPLEMENTATION_EXP_TYPE: g_assert_not_reached (); break; } } return TRUE; } static MonoObject* ves_icall_System_Reflection_Assembly_GetFilesInternal (MonoReflectionAssembly *assembly, MonoString *name, MonoBoolean resource_modules) { MonoTableInfo *table = &assembly->assembly->image->tables [MONO_TABLE_FILE]; MonoArray *result = NULL; int i, count; const char *val; char *n; MONO_ARCH_SAVE_REGS; /* check hash if needed */ if (name) { n = mono_string_to_utf8 (name); for (i = 0; i < table->rows; ++i) { val = mono_metadata_string_heap (assembly->assembly->image, mono_metadata_decode_row_col (table, i, MONO_FILE_NAME)); if (strcmp (val, n) == 0) { MonoString *fn; g_free (n); n = g_concat_dir_and_file (assembly->assembly->basedir, val); fn = mono_string_new (mono_object_domain (assembly), n); g_free (n); return 
(MonoObject*)fn; } } g_free (n); return NULL; } count = 0; for (i = 0; i < table->rows; ++i) { if (resource_modules || !(mono_metadata_decode_row_col (table, i, MONO_FILE_FLAGS) & FILE_CONTAINS_NO_METADATA)) count ++; } result = mono_array_new (mono_object_domain (assembly), mono_defaults.string_class, count); count = 0; for (i = 0; i < table->rows; ++i) { if (resource_modules || !(mono_metadata_decode_row_col (table, i, MONO_FILE_FLAGS) & FILE_CONTAINS_NO_METADATA)) { val = mono_metadata_string_heap (assembly->assembly->image, mono_metadata_decode_row_col (table, i, MONO_FILE_NAME)); n = g_concat_dir_and_file (assembly->assembly->basedir, val); mono_array_setref (result, count, mono_string_new (mono_object_domain (assembly), n)); g_free (n); count ++; } } return (MonoObject*)result; } static MonoArray* ves_icall_System_Reflection_Assembly_GetModulesInternal (MonoReflectionAssembly *assembly) { MonoDomain *domain = mono_domain_get(); MonoArray *res; MonoClass *klass; int i, j, file_count = 0; MonoImage **modules; guint32 module_count, real_module_count; MonoTableInfo *table; guint32 cols [MONO_FILE_SIZE]; MonoImage *image = assembly->assembly->image; g_assert (image != NULL); g_assert (!assembly->assembly->dynamic); table = &image->tables [MONO_TABLE_FILE]; file_count = table->rows; modules = image->modules; module_count = image->module_count; real_module_count = 0; for (i = 0; i < module_count; ++i) if (modules [i]) real_module_count ++; klass = mono_class_from_name (mono_defaults.corlib, "System.Reflection", "Module"); res = mono_array_new (domain, klass, 1 + real_module_count + file_count); mono_array_setref (res, 0, mono_module_get_object (domain, image)); j = 1; for (i = 0; i < module_count; ++i) if (modules [i]) { mono_array_setref (res, j, mono_module_get_object (domain, modules[i])); ++j; } for (i = 0; i < file_count; ++i, ++j) { mono_metadata_decode_row (table, i, cols, MONO_FILE_SIZE); if (cols [MONO_FILE_FLAGS] && FILE_CONTAINS_NO_METADATA) mono_array_setref (res, j, mono_module_file_get_object (domain, image, i)); else { MonoImage *m = mono_image_load_file_for_image (image, i + 1); if (!m) { MonoString *fname = mono_string_new (mono_domain_get (), mono_metadata_string_heap (image, cols [MONO_FILE_NAME])); mono_raise_exception (mono_get_exception_file_not_found2 (NULL, fname)); } mono_array_setref (res, j, mono_module_get_object (domain, m)); } } return res; } static MonoReflectionMethod* ves_icall_GetCurrentMethod (void) { MonoMethod *m = mono_method_get_last_managed (); MONO_ARCH_SAVE_REGS; return mono_method_get_object (mono_domain_get (), m, NULL); } static MonoMethod* mono_method_get_equivalent_method (MonoMethod *method, MonoClass *klass) { int offset = -1, i; if (method->is_inflated && ((MonoMethodInflated*)method)->context.method_inst) { MonoMethodInflated *inflated = (MonoMethodInflated*)method; //method is inflated, we should inflate it on the other class MonoGenericContext ctx; ctx.method_inst = inflated->context.method_inst; ctx.class_inst = inflated->context.class_inst; if (klass->generic_class) ctx.class_inst = klass->generic_class->context.class_inst; else if (klass->generic_container) ctx.class_inst = klass->generic_container->context.class_inst; return mono_class_inflate_generic_method_full (inflated->declaring, klass, &ctx); } mono_class_setup_methods (method->klass); if (method->klass->exception_type) return NULL; for (i = 0; i < method->klass->method.count; ++i) { if (method->klass->methods [i] == method) { offset = i; break; } } mono_class_setup_methods 
(klass); if (klass->exception_type) return NULL; g_assert (offset >= 0 && offset < klass->method.count); return klass->methods [offset]; } static MonoReflectionMethod* ves_icall_System_Reflection_MethodBase_GetMethodFromHandleInternalType (MonoMethod *method, MonoType *type) { MonoClass *klass; if (type) { klass = mono_class_from_mono_type (type); if (mono_class_get_generic_type_definition (method->klass) != mono_class_get_generic_type_definition (klass)) return NULL; if (method->klass != klass) { method = mono_method_get_equivalent_method (method, klass); if (!method) return NULL; } } else klass = method->klass; return mono_method_get_object (mono_domain_get (), method, klass); } static MonoReflectionMethod* ves_icall_System_Reflection_MethodBase_GetMethodFromHandleInternal (MonoMethod *method) { return mono_method_get_object (mono_domain_get (), method, NULL); } static MonoReflectionMethodBody* ves_icall_System_Reflection_MethodBase_GetMethodBodyInternal (MonoMethod *method) { return mono_method_body_get_object (mono_domain_get (), method); } static MonoReflectionAssembly* ves_icall_System_Reflection_Assembly_GetExecutingAssembly (void) { MonoMethod *dest = NULL; MONO_ARCH_SAVE_REGS; mono_stack_walk_no_il (get_executing, &dest); return mono_assembly_get_object (mono_domain_get (), dest->klass->image->assembly); } static MonoReflectionAssembly* ves_icall_System_Reflection_Assembly_GetEntryAssembly (void) { MonoDomain* domain = mono_domain_get (); MONO_ARCH_SAVE_REGS; if (!domain->entry_assembly) return NULL; return mono_assembly_get_object (domain, domain->entry_assembly); } static MonoReflectionAssembly* ves_icall_System_Reflection_Assembly_GetCallingAssembly (void) { MonoMethod *m; MonoMethod *dest; MONO_ARCH_SAVE_REGS; dest = NULL; mono_stack_walk_no_il (get_executing, &dest); m = dest; mono_stack_walk_no_il (get_caller, &dest); if (!dest) dest = m; return mono_assembly_get_object (mono_domain_get (), dest->klass->image->assembly); } static MonoString * ves_icall_System_MonoType_getFullName (MonoReflectionType *object, gboolean full_name, gboolean assembly_qualified) { MonoDomain *domain = mono_object_domain (object); MonoTypeNameFormat format; MonoString *res; gchar *name; MONO_ARCH_SAVE_REGS; if (full_name) format = assembly_qualified ? 
MONO_TYPE_NAME_FORMAT_ASSEMBLY_QUALIFIED : MONO_TYPE_NAME_FORMAT_FULL_NAME; else format = MONO_TYPE_NAME_FORMAT_REFLECTION; name = mono_type_get_name_full (object->type, format); if (!name) return NULL; if (full_name && (object->type->type == MONO_TYPE_VAR || object->type->type == MONO_TYPE_MVAR)) { g_free (name); return NULL; } res = mono_string_new (domain, name); g_free (name); return res; } static void fill_reflection_assembly_name (MonoDomain *domain, MonoReflectionAssemblyName *aname, MonoAssemblyName *name, const char *absolute, gboolean by_default_version, gboolean default_publickey, gboolean default_token) { static MonoMethod *create_culture = NULL; gpointer args [2]; guint32 pkey_len; const char *pkey_ptr; gchar *codebase; MonoBoolean assembly_ref = 0; MONO_ARCH_SAVE_REGS; MONO_OBJECT_SETREF (aname, name, mono_string_new (domain, name->name)); aname->major = name->major; aname->minor = name->minor; aname->build = name->build; aname->flags = name->flags; aname->revision = name->revision; aname->hashalg = name->hash_alg; aname->versioncompat = 1; /* SameMachine (default) */ if (by_default_version) MONO_OBJECT_SETREF (aname, version, create_version (domain, name->major, name->minor, name->build, name->revision)); codebase = NULL; if (absolute != NULL && *absolute != '\0') { const gchar *prepend = "file://"; gchar *result; codebase = g_strdup (absolute); #if PLATFORM_WIN32 { gint i; for (i = strlen (codebase) - 1; i >= 0; i--) if (codebase [i] == '\\') codebase [i] = '/'; if (*codebase == '/' && *(codebase + 1) == '/') { prepend = "file:"; } else { prepend = "file:///"; } } #endif result = g_strconcat (prepend, codebase, NULL); g_free (codebase); codebase = result; } if (codebase) { MONO_OBJECT_SETREF (aname, codebase, mono_string_new (domain, codebase)); g_free (codebase); } if (!create_culture) { MonoMethodDesc *desc = mono_method_desc_new ("System.Globalization.CultureInfo:CreateCulture(string,bool)", TRUE); create_culture = mono_method_desc_search_in_image (desc, mono_defaults.corlib); g_assert (create_culture); mono_method_desc_free (desc); } if (name->culture) { args [0] = mono_string_new (domain, name->culture); args [1] = &assembly_ref; MONO_OBJECT_SETREF (aname, cultureInfo, mono_runtime_invoke (create_culture, NULL, args, NULL)); } if (name->public_key) { pkey_ptr = (char*)name->public_key; pkey_len = mono_metadata_decode_blob_size (pkey_ptr, &pkey_ptr); MONO_OBJECT_SETREF (aname, publicKey, mono_array_new (domain, mono_defaults.byte_class, pkey_len)); memcpy (mono_array_addr (aname->publicKey, guint8, 0), pkey_ptr, pkey_len); aname->flags |= ASSEMBLYREF_FULL_PUBLIC_KEY_FLAG; } else if (default_publickey) { MONO_OBJECT_SETREF (aname, publicKey, mono_array_new (domain, mono_defaults.byte_class, 0)); aname->flags |= ASSEMBLYREF_FULL_PUBLIC_KEY_FLAG; } /* MonoAssemblyName keeps the public key token as an hexadecimal string */ if (name->public_key_token [0]) { int i, j; char *p; MONO_OBJECT_SETREF (aname, keyToken, mono_array_new (domain, mono_defaults.byte_class, 8)); p = mono_array_addr (aname->keyToken, char, 0); for (i = 0, j = 0; i < 8; i++) { *p = g_ascii_xdigit_value (name->public_key_token [j++]) << 4; *p |= g_ascii_xdigit_value (name->public_key_token [j++]); p++; } } else if (default_token) { MONO_OBJECT_SETREF (aname, keyToken, mono_array_new (domain, mono_defaults.byte_class, 0)); } } static MonoString * ves_icall_System_Reflection_Assembly_get_fullName (MonoReflectionAssembly *assembly) { MonoDomain *domain = mono_object_domain (assembly); MonoAssembly *mass = 
assembly->assembly;
	MonoString *res;
	gchar *name;

	name = g_strdup_printf (
		"%s, Version=%d.%d.%d.%d, Culture=%s, PublicKeyToken=%s%s",
		mass->aname.name,
		mass->aname.major, mass->aname.minor, mass->aname.build, mass->aname.revision,
		mass->aname.culture && *mass->aname.culture? mass->aname.culture: "neutral",
		mass->aname.public_key_token [0] ? (char *)mass->aname.public_key_token : "null",
		(mass->aname.flags & ASSEMBLYREF_RETARGETABLE_FLAG) ? ", Retargetable=Yes" : "");

	res = mono_string_new (domain, name);
	g_free (name);

	return res;
}

static void
ves_icall_System_Reflection_Assembly_FillName (MonoReflectionAssembly *assembly, MonoReflectionAssemblyName *aname)
{
	gchar *absolute;
	MonoAssembly *mass = assembly->assembly;

	MONO_ARCH_SAVE_REGS;

	if (g_path_is_absolute (mass->image->name)) {
		fill_reflection_assembly_name (mono_object_domain (assembly), aname, &mass->aname, mass->image->name, TRUE, TRUE, mono_framework_version () >= 2);
		return;
	}
	absolute = g_build_filename (mass->basedir, mass->image->name, NULL);

	fill_reflection_assembly_name (mono_object_domain (assembly), aname, &mass->aname, absolute, TRUE, TRUE, mono_framework_version () >= 2);

	g_free (absolute);
}

static void
ves_icall_System_Reflection_Assembly_InternalGetAssemblyName (MonoString *fname, MonoReflectionAssemblyName *aname)
{
	char *filename;
	MonoImageOpenStatus status = MONO_IMAGE_OK;
	gboolean res;
	MonoImage *image;
	MonoAssemblyName name;
	char *dirname;

	MONO_ARCH_SAVE_REGS;

	filename = mono_string_to_utf8 (fname);

	dirname = g_path_get_dirname (filename);
	replace_shadow_path (mono_domain_get (), dirname, &filename);
	g_free (dirname);

	image = mono_image_open (filename, &status);

	if (!image){
		MonoException *exc;

		g_free (filename);
		if (status == MONO_IMAGE_IMAGE_INVALID)
			exc = mono_get_exception_bad_image_format2 (NULL, fname);
		else
			exc = mono_get_exception_file_not_found2 (NULL, fname);
		mono_raise_exception (exc);
	}

	res = mono_assembly_fill_assembly_name (image, &name);
	if (!res) {
		mono_image_close (image);
		g_free (filename);
		mono_raise_exception (mono_get_exception_argument ("assemblyFile", "The file does not contain a manifest"));
	}

	fill_reflection_assembly_name (mono_domain_get (), aname, &name, filename, TRUE, mono_framework_version () == 1, mono_framework_version () >= 2);

	g_free (filename);
	mono_image_close (image);
}

static MonoBoolean
ves_icall_System_Reflection_Assembly_LoadPermissions (MonoReflectionAssembly *assembly,
	char **minimum, guint32 *minLength, char **optional, guint32 *optLength, char **refused, guint32 *refLength)
{
	MonoBoolean result = FALSE;
	MonoDeclSecurityEntry entry;

	/* SecurityAction.RequestMinimum */
	if (mono_declsec_get_assembly_action (assembly->assembly, SECURITY_ACTION_REQMIN, &entry)) {
		*minimum = entry.blob;
		*minLength = entry.size;
		result = TRUE;
	}
	/* SecurityAction.RequestOptional */
	if (mono_declsec_get_assembly_action (assembly->assembly, SECURITY_ACTION_REQOPT, &entry)) {
		*optional = entry.blob;
		*optLength = entry.size;
		result = TRUE;
	}
	/* SecurityAction.RequestRefuse */
	if (mono_declsec_get_assembly_action (assembly->assembly, SECURITY_ACTION_REQREFUSE, &entry)) {
		*refused = entry.blob;
		*refLength = entry.size;
		result = TRUE;
	}

	return result;
}

static MonoArray*
mono_module_get_types (MonoDomain *domain, MonoImage *image, MonoArray **exceptions, MonoBoolean exportedOnly)
{
	MonoArray *res;
	MonoClass *klass;
	MonoTableInfo *tdef = &image->tables [MONO_TABLE_TYPEDEF];
	int i, count;
	guint32 attrs, visibility;

	/* we start the count from 1 because we skip the special type <Module> */
	if
(exportedOnly) { count = 0; for (i = 1; i < tdef->rows; ++i) { attrs = mono_metadata_decode_row_col (tdef, i, MONO_TYPEDEF_FLAGS); visibility = attrs & TYPE_ATTRIBUTE_VISIBILITY_MASK; if (visibility == TYPE_ATTRIBUTE_PUBLIC || visibility == TYPE_ATTRIBUTE_NESTED_PUBLIC) count++; } } else { count = tdef->rows - 1; } res = mono_array_new (domain, mono_defaults.monotype_class, count); *exceptions = mono_array_new (domain, mono_defaults.exception_class, count); count = 0; for (i = 1; i < tdef->rows; ++i) { attrs = mono_metadata_decode_row_col (tdef, i, MONO_TYPEDEF_FLAGS); visibility = attrs & TYPE_ATTRIBUTE_VISIBILITY_MASK; if (!exportedOnly || (visibility == TYPE_ATTRIBUTE_PUBLIC || visibility == TYPE_ATTRIBUTE_NESTED_PUBLIC)) { klass = mono_class_get (image, (i + 1) | MONO_TOKEN_TYPE_DEF); if (klass) { mono_array_setref (res, count, mono_type_get_object (domain, &klass->byval_arg)); } else { MonoLoaderError *error; MonoException *ex; error = mono_loader_get_last_error (); g_assert (error != NULL); ex = mono_loader_error_prepare_exception (error); mono_array_setref (*exceptions, count, ex); } if (mono_loader_get_last_error ()) mono_loader_clear_error (); count++; } } return res; } static MonoArray* ves_icall_System_Reflection_Assembly_GetTypes (MonoReflectionAssembly *assembly, MonoBoolean exportedOnly) { MonoArray *res = NULL; MonoArray *exceptions = NULL; MonoImage *image = NULL; MonoTableInfo *table = NULL; MonoDomain *domain; GList *list = NULL; int i, len, ex_count; MONO_ARCH_SAVE_REGS; domain = mono_object_domain (assembly); g_assert (!assembly->assembly->dynamic); image = assembly->assembly->image; table = &image->tables [MONO_TABLE_FILE]; res = mono_module_get_types (domain, image, &exceptions, exportedOnly); /* Append data from all modules in the assembly */ for (i = 0; i < table->rows; ++i) { if (!(mono_metadata_decode_row_col (table, i, MONO_FILE_FLAGS) & FILE_CONTAINS_NO_METADATA)) { MonoImage *loaded_image = mono_assembly_load_module (image->assembly, i + 1); if (loaded_image) { MonoArray *ex2; MonoArray *res2 = mono_module_get_types (domain, loaded_image, &ex2, exportedOnly); /* Append the new types to the end of the array */ if (mono_array_length (res2) > 0) { guint32 len1, len2; MonoArray *res3, *ex3; len1 = mono_array_length (res); len2 = mono_array_length (res2); res3 = mono_array_new (domain, mono_defaults.monotype_class, len1 + len2); mono_array_memcpy_refs (res3, 0, res, 0, len1); mono_array_memcpy_refs (res3, len1, res2, 0, len2); res = res3; ex3 = mono_array_new (domain, mono_defaults.monotype_class, len1 + len2); mono_array_memcpy_refs (ex3, 0, exceptions, 0, len1); mono_array_memcpy_refs (ex3, len1, ex2, 0, len2); exceptions = ex3; } } } } /* the ReflectionTypeLoadException must have all the types (Types property), * NULL replacing types which throws an exception. The LoaderException must * contain all exceptions for NULL items. 
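 * In practice the exceptions array built below gets one entry per failed
 * type: first the classes that did load but carry an exception_type (these
 * are collected in 'list' while their Type slot is nulled out), then the
 * loader errors already recorded by mono_module_get_types () for types that
 * could not be loaded at all.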
*/ len = mono_array_length (res); ex_count = 0; for (i = 0; i < len; i++) { MonoReflectionType *t = mono_array_get (res, gpointer, i); MonoClass *klass; if (t) { klass = mono_type_get_class (t->type); if ((klass != NULL) && klass->exception_type) { /* keep the class in the list */ list = g_list_append (list, klass); /* and replace Type with NULL */ mono_array_setref (res, i, NULL); } } else { ex_count ++; } } if (list || ex_count) { GList *tmp = NULL; MonoException *exc = NULL; MonoArray *exl = NULL; int j, length = g_list_length (list) + ex_count; mono_loader_clear_error (); exl = mono_array_new (domain, mono_defaults.exception_class, length); /* Types for which mono_class_get () succeeded */ for (i = 0, tmp = list; tmp; i++, tmp = tmp->next) { MonoException *exc = mono_class_get_exception_for_failure (tmp->data); mono_array_setref (exl, i, exc); } /* Types for which it don't */ for (j = 0; j < mono_array_length (exceptions); ++j) { MonoException *exc = mono_array_get (exceptions, MonoException*, j); if (exc) { g_assert (i < length); mono_array_setref (exl, i, exc); i ++; } } g_list_free (list); list = NULL; exc = mono_get_exception_reflection_type_load (res, exl); mono_loader_clear_error (); mono_raise_exception (exc); } return res; } static gboolean ves_icall_System_Reflection_AssemblyName_ParseName (MonoReflectionAssemblyName *name, MonoString *assname) { MonoAssemblyName aname; MonoDomain *domain = mono_object_domain (name); char *val; gboolean is_version_defined; gboolean is_token_defined; aname.public_key = NULL; val = mono_string_to_utf8 (assname); if (!mono_assembly_name_parse_full (val, &aname, TRUE, &is_version_defined, &is_token_defined)) { g_free ((guint8*) aname.public_key); g_free (val); return FALSE; } fill_reflection_assembly_name (domain, name, &aname, "", is_version_defined, FALSE, is_token_defined); mono_assembly_name_free (&aname); g_free ((guint8*) aname.public_key); g_free (val); return TRUE; } static MonoReflectionType* ves_icall_System_Reflection_Module_GetGlobalType (MonoReflectionModule *module) { MonoDomain *domain = mono_object_domain (module); MonoClass *klass; MONO_ARCH_SAVE_REGS; g_assert (module->image); if (module->image->dynamic && ((MonoDynamicImage*)(module->image))->initial_image) /* These images do not have a global type */ return NULL; klass = mono_class_get (module->image, 1 | MONO_TOKEN_TYPE_DEF); return mono_type_get_object (domain, &klass->byval_arg); } static void ves_icall_System_Reflection_Module_Close (MonoReflectionModule *module) { /*if (module->image) mono_image_close (module->image);*/ } static MonoString* ves_icall_System_Reflection_Module_GetGuidInternal (MonoReflectionModule *module) { MonoDomain *domain = mono_object_domain (module); MONO_ARCH_SAVE_REGS; g_assert (module->image); return mono_string_new (domain, module->image->guid); } static gpointer ves_icall_System_Reflection_Module_GetHINSTANCE (MonoReflectionModule *module) { #ifdef PLATFORM_WIN32 if (module->image && module->image->is_module_handle) return module->image->raw_data; #endif return (gpointer) (-1); } static void ves_icall_System_Reflection_Module_GetPEKind (MonoImage *image, gint32 *pe_kind, gint32 *machine) { if (image->dynamic) { MonoDynamicImage *dyn = (MonoDynamicImage*)image; *pe_kind = dyn->pe_kind; *machine = dyn->machine; } else { *pe_kind = ((MonoCLIImageInfo*)(image->image_info))->cli_cli_header.ch_flags & 0x3; *machine = ((MonoCLIImageInfo*)(image->image_info))->cli_header.coff.coff_machine; } } static gint32 
ves_icall_System_Reflection_Module_GetMDStreamVersion (MonoImage *image) { return (image->md_version_major << 16) | (image->md_version_minor); } static MonoArray* ves_icall_System_Reflection_Module_InternalGetTypes (MonoReflectionModule *module) { MonoArray *exceptions; int i; MONO_ARCH_SAVE_REGS; if (!module->image) return mono_array_new (mono_object_domain (module), mono_defaults.monotype_class, 0); else { MonoArray *res = mono_module_get_types (mono_object_domain (module), module->image, &exceptions, FALSE); for (i = 0; i < mono_array_length (exceptions); ++i) { MonoException *ex = mono_array_get (exceptions, MonoException *, i); if (ex) mono_raise_exception (ex); } return res; } } static gboolean mono_metadata_memberref_is_method (MonoImage *image, guint32 token) { guint32 cols [MONO_MEMBERREF_SIZE]; const char *sig; mono_metadata_decode_row (&image->tables [MONO_TABLE_MEMBERREF], mono_metadata_token_index (token) - 1, cols, MONO_MEMBERREF_SIZE); sig = mono_metadata_blob_heap (image, cols [MONO_MEMBERREF_SIGNATURE]); mono_metadata_decode_blob_size (sig, &sig); return (*sig != 0x6); } static void init_generic_context_from_args (MonoGenericContext *context, MonoArray *type_args, MonoArray *method_args) { if (type_args) context->class_inst = mono_metadata_get_generic_inst (mono_array_length (type_args), mono_array_addr (type_args, MonoType*, 0)); else context->class_inst = NULL; if (method_args) context->method_inst = mono_metadata_get_generic_inst (mono_array_length (method_args), mono_array_addr (method_args, MonoType*, 0)); else context->method_inst = NULL; } static MonoType* ves_icall_System_Reflection_Module_ResolveTypeToken (MonoImage *image, guint32 token, MonoArray *type_args, MonoArray *method_args, MonoResolveTokenError *error) { MonoClass *klass; int table = mono_metadata_token_table (token); int index = mono_metadata_token_index (token); MonoGenericContext context; *error = ResolveTokenError_Other; /* Validate token */ if ((table != MONO_TABLE_TYPEDEF) && (table != MONO_TABLE_TYPEREF) && (table != MONO_TABLE_TYPESPEC)) { *error = ResolveTokenError_BadTable; return NULL; } if (image->dynamic) { if (type_args || method_args) mono_raise_exception (mono_get_exception_not_implemented (NULL)); klass = mono_lookup_dynamic_token_class (image, token, FALSE, NULL, NULL); if (!klass) return NULL; return &klass->byval_arg; } if ((index <= 0) || (index > image->tables [table].rows)) { *error = ResolveTokenError_OutOfRange; return NULL; } init_generic_context_from_args (&context, type_args, method_args); klass = mono_class_get_full (image, token, &context); if (mono_loader_get_last_error ()) mono_raise_exception (mono_loader_error_prepare_exception (mono_loader_get_last_error ())); if (klass) return &klass->byval_arg; else return NULL; } static MonoMethod* ves_icall_System_Reflection_Module_ResolveMethodToken (MonoImage *image, guint32 token, MonoArray *type_args, MonoArray *method_args, MonoResolveTokenError *error) { int table = mono_metadata_token_table (token); int index = mono_metadata_token_index (token); MonoGenericContext context; MonoMethod *method; *error = ResolveTokenError_Other; /* Validate token */ if ((table != MONO_TABLE_METHOD) && (table != MONO_TABLE_METHODSPEC) && (table != MONO_TABLE_MEMBERREF)) { *error = ResolveTokenError_BadTable; return NULL; } if (image->dynamic) { if (type_args || method_args) mono_raise_exception (mono_get_exception_not_implemented (NULL)); /* FIXME: validate memberref token type */ return mono_lookup_dynamic_token_class (image, token, FALSE, 
NULL, NULL); } if ((index <= 0) || (index > image->tables [table].rows)) { *error = ResolveTokenError_OutOfRange; return NULL; } if ((table == MONO_TABLE_MEMBERREF) && (!mono_metadata_memberref_is_method (image, token))) { *error = ResolveTokenError_BadTable; return NULL; } init_generic_context_from_args (&context, type_args, method_args); method = mono_get_method_full (image, token, NULL, &context); if (mono_loader_get_last_error ()) mono_raise_exception (mono_loader_error_prepare_exception (mono_loader_get_last_error ())); return method; } static MonoString* ves_icall_System_Reflection_Module_ResolveStringToken (MonoImage *image, guint32 token, MonoResolveTokenError *error) { int index = mono_metadata_token_index (token); *error = ResolveTokenError_Other; /* Validate token */ if (mono_metadata_token_code (token) != MONO_TOKEN_STRING) { *error = ResolveTokenError_BadTable; return NULL; } if (image->dynamic) return mono_lookup_dynamic_token_class (image, token, FALSE, NULL, NULL); if ((index <= 0) || (index >= image->heap_us.size)) { *error = ResolveTokenError_OutOfRange; return NULL; } /* FIXME: What to do if the index points into the middle of a string ? */ return mono_ldstr (mono_domain_get (), image, index); } static MonoClassField* ves_icall_System_Reflection_Module_ResolveFieldToken (MonoImage *image, guint32 token, MonoArray *type_args, MonoArray *method_args, MonoResolveTokenError *error) { MonoClass *klass; int table = mono_metadata_token_table (token); int index = mono_metadata_token_index (token); MonoGenericContext context; MonoClassField *field; *error = ResolveTokenError_Other; /* Validate token */ if ((table != MONO_TABLE_FIELD) && (table != MONO_TABLE_MEMBERREF)) { *error = ResolveTokenError_BadTable; return NULL; } if (image->dynamic) { if (type_args || method_args) mono_raise_exception (mono_get_exception_not_implemented (NULL)); /* FIXME: validate memberref token type */ return mono_lookup_dynamic_token_class (image, token, FALSE, NULL, NULL); } if ((index <= 0) || (index > image->tables [table].rows)) { *error = ResolveTokenError_OutOfRange; return NULL; } if ((table == MONO_TABLE_MEMBERREF) && (mono_metadata_memberref_is_method (image, token))) { *error = ResolveTokenError_BadTable; return NULL; } init_generic_context_from_args (&context, type_args, method_args); field = mono_field_from_token (image, token, &klass, &context); if (mono_loader_get_last_error ()) mono_raise_exception (mono_loader_error_prepare_exception (mono_loader_get_last_error ())); return field; } static MonoObject* ves_icall_System_Reflection_Module_ResolveMemberToken (MonoImage *image, guint32 token, MonoArray *type_args, MonoArray *method_args, MonoResolveTokenError *error) { int table = mono_metadata_token_table (token); *error = ResolveTokenError_Other; switch (table) { case MONO_TABLE_TYPEDEF: case MONO_TABLE_TYPEREF: case MONO_TABLE_TYPESPEC: { MonoType *t = ves_icall_System_Reflection_Module_ResolveTypeToken (image, token, type_args, method_args, error); if (t) return (MonoObject*)mono_type_get_object (mono_domain_get (), t); else return NULL; } case MONO_TABLE_METHOD: case MONO_TABLE_METHODSPEC: { MonoMethod *m = ves_icall_System_Reflection_Module_ResolveMethodToken (image, token, type_args, method_args, error); if (m) return (MonoObject*)mono_method_get_object (mono_domain_get (), m, m->klass); else return NULL; } case MONO_TABLE_FIELD: { MonoClassField *f = ves_icall_System_Reflection_Module_ResolveFieldToken (image, token, type_args, method_args, error); if (f) return 
(MonoObject*)mono_field_get_object (mono_domain_get (), f->parent, f); else return NULL; } case MONO_TABLE_MEMBERREF: if (mono_metadata_memberref_is_method (image, token)) { MonoMethod *m = ves_icall_System_Reflection_Module_ResolveMethodToken (image, token, type_args, method_args, error); if (m) return (MonoObject*)mono_method_get_object (mono_domain_get (), m, m->klass); else return NULL; } else { MonoClassField *f = ves_icall_System_Reflection_Module_ResolveFieldToken (image, token, type_args, method_args, error); if (f) return (MonoObject*)mono_field_get_object (mono_domain_get (), f->parent, f); else return NULL; } break; default: *error = ResolveTokenError_BadTable; } return NULL; } static MonoArray* ves_icall_System_Reflection_Module_ResolveSignature (MonoImage *image, guint32 token, MonoResolveTokenError *error) { int table = mono_metadata_token_table (token); int idx = mono_metadata_token_index (token); MonoTableInfo *tables = image->tables; guint32 sig, len; const char *ptr; MonoArray *res; *error = ResolveTokenError_OutOfRange; /* FIXME: Support other tables ? */ if (table != MONO_TABLE_STANDALONESIG) return NULL; if (image->dynamic) return NULL; if ((idx == 0) || (idx > tables [MONO_TABLE_STANDALONESIG].rows)) return NULL; sig = mono_metadata_decode_row_col (&tables [MONO_TABLE_STANDALONESIG], idx - 1, 0); ptr = mono_metadata_blob_heap (image, sig); len = mono_metadata_decode_blob_size (ptr, &ptr); res = mono_array_new (mono_domain_get (), mono_defaults.byte_class, len); memcpy (mono_array_addr (res, guint8, 0), ptr, len); return res; } static MonoReflectionType* ves_icall_ModuleBuilder_create_modified_type (MonoReflectionTypeBuilder *tb, MonoString *smodifiers) { MonoClass *klass; int isbyref = 0, rank; char *str = mono_string_to_utf8 (smodifiers); char *p; MONO_ARCH_SAVE_REGS; klass = mono_class_from_mono_type (tb->type.type); p = str; /* logic taken from mono_reflection_parse_type(): keep in sync */ while (*p) { switch (*p) { case '&': if (isbyref) { /* only one level allowed by the spec */ g_free (str); return NULL; } isbyref = 1; p++; g_free (str); return mono_type_get_object (mono_object_domain (tb), &klass->this_arg); break; case '*': klass = mono_ptr_class_get (&klass->byval_arg); mono_class_init (klass); p++; break; case '[': rank = 1; p++; while (*p) { if (*p == ']') break; if (*p == ',') rank++; else if (*p != '*') { /* '*' means unknown lower bound */ g_free (str); return NULL; } ++p; } if (*p != ']') { g_free (str); return NULL; } p++; klass = mono_array_class_get (klass, rank); mono_class_init (klass); break; default: break; } } g_free (str); return mono_type_get_object (mono_object_domain (tb), &klass->byval_arg); } static MonoBoolean ves_icall_Type_IsArrayImpl (MonoReflectionType *t) { MonoType *type; MonoBoolean res; MONO_ARCH_SAVE_REGS; type = t->type; res = !type->byref && (type->type == MONO_TYPE_ARRAY || type->type == MONO_TYPE_SZARRAY); return res; } static void check_for_invalid_type (MonoClass *klass) { char *name; MonoString *str; if (klass->byval_arg.type != MONO_TYPE_TYPEDBYREF) return; name = mono_type_get_full_name (klass); str = mono_string_new (mono_domain_get (), name); g_free (name); mono_raise_exception ((MonoException*)mono_get_exception_type_load (str, NULL)); } static MonoReflectionType * ves_icall_Type_make_array_type (MonoReflectionType *type, int rank) { MonoClass *klass, *aklass; MONO_ARCH_SAVE_REGS; klass = mono_class_from_mono_type (type->type); check_for_invalid_type (klass); if (rank == 0) //single dimentional array aklass = 
mono_array_class_get (klass, 1); else aklass = mono_bounded_array_class_get (klass, rank, TRUE); return mono_type_get_object (mono_object_domain (type), &aklass->byval_arg); } static MonoReflectionType * ves_icall_Type_make_byref_type (MonoReflectionType *type) { MonoClass *klass; MONO_ARCH_SAVE_REGS; klass = mono_class_from_mono_type (type->type); check_for_invalid_type (klass); return mono_type_get_object (mono_object_domain (type), &klass->this_arg); } static MonoReflectionType * ves_icall_Type_MakePointerType (MonoReflectionType *type) { MonoClass *klass, *pklass; MONO_ARCH_SAVE_REGS; klass = mono_class_from_mono_type (type->type); check_for_invalid_type (klass); pklass = mono_ptr_class_get (type->type); return mono_type_get_object (mono_object_domain (type), &pklass->byval_arg); } static MonoObject * ves_icall_System_Delegate_CreateDelegate_internal (MonoReflectionType *type, MonoObject *target, MonoReflectionMethod *info, MonoBoolean throwOnBindFailure) { MonoClass *delegate_class = mono_class_from_mono_type (type->type); MonoObject *delegate; gpointer func; MonoMethod *method = info->method; MONO_ARCH_SAVE_REGS; mono_assert (delegate_class->parent == mono_defaults.multicastdelegate_class); if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) { if (!mono_security_core_clr_ensure_delegate_creation (method, throwOnBindFailure)) return NULL; } delegate = mono_object_new (mono_object_domain (type), delegate_class); if (method->dynamic) { /* Creating a trampoline would leak memory */ func = mono_compile_method (method); } else { func = mono_create_ftnptr (mono_domain_get (), mono_runtime_create_jump_trampoline (mono_domain_get (), method, TRUE)); } mono_delegate_ctor_with_method (delegate, target, func, method); return delegate; } static void ves_icall_System_Delegate_SetMulticastInvoke (MonoDelegate *this) { /* Reset the invoke impl to the default one */ this->invoke_impl = mono_runtime_create_delegate_trampoline (this->object.vtable->klass); } /* * Magic number to convert a time which is relative to * Jan 1, 1970 into a value which is relative to Jan 1, 0001. */ #define EPOCH_ADJUST ((guint64)62135596800LL) /* * Magic number to convert FILETIME base Jan 1, 1601 to DateTime - base Jan, 1, 0001 */ #define FILETIME_ADJUST ((guint64)504911232000000000LL) #ifdef PLATFORM_WIN32 /* convert a SYSTEMTIME which is of the form "last thursday in october" to a real date */ static void convert_to_absolute_date(SYSTEMTIME *date) { #define IS_LEAP(y) ((y % 4) == 0 && ((y % 100) != 0 || (y % 400) == 0)) static int days_in_month[] = { 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}; static int leap_days_in_month[] = { 31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}; /* from the calendar FAQ */ int a = (14 - date->wMonth) / 12; int y = date->wYear - a; int m = date->wMonth + 12 * a - 2; int d = (1 + y + y/4 - y/100 + y/400 + (31*m)/12) % 7; /* d is now the day of the week for the first of the month (0 == Sunday) */ int day_of_week = date->wDayOfWeek; /* set day_in_month to the first day in the month which falls on day_of_week */ int day_in_month = 1 + (day_of_week - d); if (day_in_month <= 0) day_in_month += 7; /* wDay is 1 for first weekday in month, 2 for 2nd ... 5 means last - so work that out allowing for days in the month */ date->wDay = day_in_month + (date->wDay - 1) * 7; if (date->wDay > (IS_LEAP(date->wYear) ? leap_days_in_month[date->wMonth - 1] : days_in_month[date->wMonth - 1])) date->wDay -= 7; } #endif #ifndef PLATFORM_WIN32 /* * Return's the offset from GMT of a local time. 
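 * (Where tm_gmtoff is unavailable the offset is recovered by pushing the UTC
 * broken-down time back through mktime(): interpreting the UTC fields as
 * local time yields t minus the offset, so difftime (t, t2) is the offset.
 * For example, at 12:00 UTC in a UTC+2 zone gmtime() reports 12:00, mktime()
 * maps that back to 10:00 UTC, and difftime() returns +7200 seconds.)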
* * tm is a local time * t is the same local time as seconds. */ static int gmt_offset(struct tm *tm, time_t t) { #if defined (HAVE_TM_GMTOFF) return tm->tm_gmtoff; #else struct tm g; time_t t2; g = *gmtime(&t); g.tm_isdst = tm->tm_isdst; t2 = mktime(&g); return (int)difftime(t, t2); #endif } #endif /* * This is heavily based on zdump.c from glibc 2.2. * * * data[0]: start of daylight saving time (in DateTime ticks). * * data[1]: end of daylight saving time (in DateTime ticks). * * data[2]: utcoffset (in TimeSpan ticks). * * data[3]: additional offset when daylight saving (in TimeSpan ticks). * * name[0]: name of this timezone when not daylight saving. * * name[1]: name of this timezone when daylight saving. * * FIXME: This only works with "standard" Unix dates (years between 1900 and 2100) while * the class library allows years between 1 and 9999. * * Returns true on success and zero on failure. */ static guint32 ves_icall_System_CurrentSystemTimeZone_GetTimeZoneData (guint32 year, MonoArray **data, MonoArray **names) { #ifndef PLATFORM_WIN32 MonoDomain *domain = mono_domain_get (); struct tm start, tt; time_t t; long int gmtoff; int is_daylight = 0, day; char tzone [64]; MONO_ARCH_SAVE_REGS; MONO_CHECK_ARG_NULL (data); MONO_CHECK_ARG_NULL (names); mono_gc_wbarrier_generic_store (data, (MonoObject*) mono_array_new (domain, mono_defaults.int64_class, 4)); mono_gc_wbarrier_generic_store (names, (MonoObject*) mono_array_new (domain, mono_defaults.string_class, 2)); /* * no info is better than crashing: we'll need our own tz data * to make this work properly, anyway. The range is probably * reduced to 1970 .. 2037 because that is what mktime is * guaranteed to support (we get into an infinite loop * otherwise). */ memset (&start, 0, sizeof (start)); start.tm_mday = 1; start.tm_year = year-1900; t = mktime (&start); if ((year < 1970) || (year > 2037) || (t == -1)) { t = time (NULL); tt = *localtime (&t); strftime (tzone, sizeof (tzone), "%Z", &tt); mono_array_setref ((*names), 0, mono_string_new (domain, tzone)); mono_array_setref ((*names), 1, mono_string_new (domain, tzone)); return 1; } gmtoff = gmt_offset (&start, t); /* For each day of the year, calculate the tm_gmtoff. */ for (day = 0; day < 365; day++) { t += 3600*24; tt = *localtime (&t); /* Daylight saving starts or ends here. */ if (gmt_offset (&tt, t) != gmtoff) { struct tm tt1; time_t t1; /* Try to find the exact hour when daylight saving starts/ends. */ t1 = t; do { t1 -= 3600; tt1 = *localtime (&t1); } while (gmt_offset (&tt1, t1) != gmtoff); /* Try to find the exact minute when daylight saving starts/ends. */ do { t1 += 60; tt1 = *localtime (&t1); } while (gmt_offset (&tt1, t1) == gmtoff); t1+=gmtoff; strftime (tzone, sizeof (tzone), "%Z", &tt); /* Write data, if we're already in daylight saving, we're done. */ if (is_daylight) { mono_array_setref ((*names), 0, mono_string_new (domain, tzone)); mono_array_set ((*data), gint64, 1, ((gint64)t1 + EPOCH_ADJUST) * 10000000L); return 1; } else { mono_array_setref ((*names), 1, mono_string_new (domain, tzone)); mono_array_set ((*data), gint64, 0, ((gint64)t1 + EPOCH_ADJUST) * 10000000L); is_daylight = 1; } /* This is only set once when we enter daylight saving. 
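 * At that point gmtoff still holds the standard (non-DST) offset, so data [2]
 * ends up as the standard offset and data [3] as the DST delta, both scaled
 * from seconds to 100-nanosecond TimeSpan ticks by the 10000000L factor.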
*/ mono_array_set ((*data), gint64, 2, (gint64)gmtoff * 10000000L); mono_array_set ((*data), gint64, 3, (gint64)(gmt_offset (&tt, t) - gmtoff) * 10000000L); gmtoff = gmt_offset (&tt, t); } } if (!is_daylight) { strftime (tzone, sizeof (tzone), "%Z", &tt); mono_array_setref ((*names), 0, mono_string_new (domain, tzone)); mono_array_setref ((*names), 1, mono_string_new (domain, tzone)); mono_array_set ((*data), gint64, 0, 0); mono_array_set ((*data), gint64, 1, 0); mono_array_set ((*data), gint64, 2, (gint64) gmtoff * 10000000L); mono_array_set ((*data), gint64, 3, 0); } return 1; #else MonoDomain *domain = mono_domain_get (); TIME_ZONE_INFORMATION tz_info; FILETIME ft; int i; int err, tz_id; tz_id = GetTimeZoneInformation (&tz_info); if (tz_id == TIME_ZONE_ID_INVALID) return 0; MONO_CHECK_ARG_NULL (data); MONO_CHECK_ARG_NULL (names); mono_gc_wbarrier_generic_store (data, mono_array_new (domain, mono_defaults.int64_class, 4)); mono_gc_wbarrier_generic_store (names, mono_array_new (domain, mono_defaults.string_class, 2)); for (i = 0; i < 32; ++i) if (!tz_info.DaylightName [i]) break; mono_array_setref ((*names), 1, mono_string_new_utf16 (domain, tz_info.DaylightName, i)); for (i = 0; i < 32; ++i) if (!tz_info.StandardName [i]) break; mono_array_setref ((*names), 0, mono_string_new_utf16 (domain, tz_info.StandardName, i)); if ((year <= 1601) || (year > 30827)) { /* * According to MSDN, the MS time functions can't handle dates outside * this interval. */ return 1; } /* even if the timezone has no daylight savings it may have Bias (e.g. GMT+13 it seems) */ if (tz_id != TIME_ZONE_ID_UNKNOWN) { tz_info.StandardDate.wYear = year; convert_to_absolute_date(&tz_info.StandardDate); err = SystemTimeToFileTime (&tz_info.StandardDate, &ft); //g_assert(err); if (err == 0) return 0; mono_array_set ((*data), gint64, 1, FILETIME_ADJUST + (((guint64)ft.dwHighDateTime<<32) | ft.dwLowDateTime)); tz_info.DaylightDate.wYear = year; convert_to_absolute_date(&tz_info.DaylightDate); err = SystemTimeToFileTime (&tz_info.DaylightDate, &ft); //g_assert(err); if (err == 0) return 0; mono_array_set ((*data), gint64, 0, FILETIME_ADJUST + (((guint64)ft.dwHighDateTime<<32) | ft.dwLowDateTime)); } mono_array_set ((*data), gint64, 2, (tz_info.Bias + tz_info.StandardBias) * -600000000LL); mono_array_set ((*data), gint64, 3, (tz_info.DaylightBias - tz_info.StandardBias) * -600000000LL); return 1; #endif } static gpointer ves_icall_System_Object_obj_address (MonoObject *this) { MONO_ARCH_SAVE_REGS; return this; } /* System.Buffer */ static inline gint32 mono_array_get_byte_length (MonoArray *array) { MonoClass *klass; int length; int i; klass = array->obj.vtable->klass; if (array->bounds == NULL) length = array->max_length; else { length = 1; for (i = 0; i < klass->rank; ++ i) length *= array->bounds [i].length; } switch (klass->element_class->byval_arg.type) { case MONO_TYPE_I1: case MONO_TYPE_U1: case MONO_TYPE_BOOLEAN: return length; case MONO_TYPE_I2: case MONO_TYPE_U2: case MONO_TYPE_CHAR: return length << 1; case MONO_TYPE_I4: case MONO_TYPE_U4: case MONO_TYPE_R4: return length << 2; case MONO_TYPE_I: case MONO_TYPE_U: return length * sizeof (gpointer); case MONO_TYPE_I8: case MONO_TYPE_U8: case MONO_TYPE_R8: return length << 3; default: return -1; } } static gint32 ves_icall_System_Buffer_ByteLengthInternal (MonoArray *array) { MONO_ARCH_SAVE_REGS; return mono_array_get_byte_length (array); } static gint8 ves_icall_System_Buffer_GetByteInternal (MonoArray *array, gint32 idx) { MONO_ARCH_SAVE_REGS; return mono_array_get 
(array, gint8, idx); } static void ves_icall_System_Buffer_SetByteInternal (MonoArray *array, gint32 idx, gint8 value) { MONO_ARCH_SAVE_REGS; mono_array_set (array, gint8, idx, value); } static MonoBoolean ves_icall_System_Buffer_BlockCopyInternal (MonoArray *src, gint32 src_offset, MonoArray *dest, gint32 dest_offset, gint32 count) { guint8 *src_buf, *dest_buf; MONO_ARCH_SAVE_REGS; /* watch out for integer overflow */ if ((src_offset > mono_array_get_byte_length (src) - count) || (dest_offset > mono_array_get_byte_length (dest) - count)) return FALSE; src_buf = (guint8 *)src->vector + src_offset; dest_buf = (guint8 *)dest->vector + dest_offset; if (src != dest) memcpy (dest_buf, src_buf, count); else memmove (dest_buf, src_buf, count); /* Source and dest are the same array */ return TRUE; } static MonoObject * ves_icall_Remoting_RealProxy_GetTransparentProxy (MonoObject *this, MonoString *class_name) { MonoDomain *domain = mono_object_domain (this); MonoObject *res; MonoRealProxy *rp = ((MonoRealProxy *)this); MonoTransparentProxy *tp; MonoType *type; MonoClass *klass; MONO_ARCH_SAVE_REGS; res = mono_object_new (domain, mono_defaults.transparent_proxy_class); tp = (MonoTransparentProxy*) res; MONO_OBJECT_SETREF (tp, rp, rp); type = ((MonoReflectionType *)rp->class_to_proxy)->type; klass = mono_class_from_mono_type (type); tp->custom_type_info = (mono_object_isinst (this, mono_defaults.iremotingtypeinfo_class) != NULL); tp->remote_class = mono_remote_class (domain, class_name, klass); res->vtable = mono_remote_class_vtable (domain, tp->remote_class, rp); return res; } static MonoReflectionType * ves_icall_Remoting_RealProxy_InternalGetProxyType (MonoTransparentProxy *tp) { return mono_type_get_object (mono_object_domain (tp), &tp->remote_class->proxy_class->byval_arg); } /* System.Environment */ MonoString* ves_icall_System_Environment_get_UserName (void) { MONO_ARCH_SAVE_REGS; /* using glib is more portable */ return mono_string_new (mono_domain_get (), g_get_user_name ()); } static MonoString * ves_icall_System_Environment_get_MachineName (void) { #if defined (PLATFORM_WIN32) gunichar2 *buf; guint32 len; MonoString *result; len = MAX_COMPUTERNAME_LENGTH + 1; buf = g_new (gunichar2, len); result = NULL; if (GetComputerName (buf, (PDWORD) &len)) result = mono_string_new_utf16 (mono_domain_get (), buf, len); g_free (buf); return result; #elif !defined(DISABLE_SOCKETS) gchar buf [256]; MonoString *result; if (gethostname (buf, sizeof (buf)) == 0) result = mono_string_new (mono_domain_get (), buf); else result = NULL; return result; #else return mono_string_new (mono_domain_get (), "mono"); #endif } static int ves_icall_System_Environment_get_Platform (void) { #if defined (PLATFORM_WIN32) /* Win32NT */ return 2; #elif defined(__MACH__) /* OSX */ if (mono_framework_version () < 2) return 128; // // For compatibility with our client code, this will be 4 for a while. // We will eventually move to 6 to match .NET, but it requires all client // code to be updated and the documentation everywhere to be updated // first. 
// return 4; #else /* Unix */ if (mono_framework_version () < 2) return 128; return 4; #endif } static MonoString * ves_icall_System_Environment_get_NewLine (void) { MONO_ARCH_SAVE_REGS; #if defined (PLATFORM_WIN32) return mono_string_new (mono_domain_get (), "\r\n"); #else return mono_string_new (mono_domain_get (), "\n"); #endif } static MonoString * ves_icall_System_Environment_GetEnvironmentVariable (MonoString *name) { const gchar *value; gchar *utf8_name; MONO_ARCH_SAVE_REGS; if (name == NULL) return NULL; utf8_name = mono_string_to_utf8 (name); /* FIXME: this should be ascii */ value = g_getenv (utf8_name); g_free (utf8_name); if (value == 0) return NULL; return mono_string_new (mono_domain_get (), value); } /* * There is no standard way to get at environ. */ #ifndef _MSC_VER #ifndef __MINGW32_VERSION #ifdef __APPLE__ /* Apple defines this in crt_externs.h but doesn't provide that header for * arm-apple-darwin9. We'll manually define the symbol on Apple as it does * in fact exist on all implementations (so far) */ gchar ***_NSGetEnviron(void); #define environ (*_NSGetEnviron()) #else extern char **environ; #endif #endif #endif static MonoArray * ves_icall_System_Environment_GetEnvironmentVariableNames (void) { #ifdef PLATFORM_WIN32 MonoArray *names; MonoDomain *domain; MonoString *str; WCHAR* env_strings; WCHAR* env_string; WCHAR* equal_str; int n = 0; env_strings = GetEnvironmentStrings(); if (env_strings) { env_string = env_strings; while (*env_string != '\0') { /* weird case that MS seems to skip */ if (*env_string != '=') n++; while (*env_string != '\0') env_string++; env_string++; } } domain = mono_domain_get (); names = mono_array_new (domain, mono_defaults.string_class, n); if (env_strings) { n = 0; env_string = env_strings; while (*env_string != '\0') { /* weird case that MS seems to skip */ if (*env_string != '=') { equal_str = wcschr(env_string, '='); g_assert(equal_str); str = mono_string_new_utf16 (domain, env_string, equal_str-env_string); mono_array_setref (names, n, str); n++; } while (*env_string != '\0') env_string++; env_string++; } FreeEnvironmentStrings (env_strings); } return names; #else MonoArray *names; MonoDomain *domain; MonoString *str; gchar **e, **parts; int n; MONO_ARCH_SAVE_REGS; n = 0; for (e = environ; *e != 0; ++ e) ++ n; domain = mono_domain_get (); names = mono_array_new (domain, mono_defaults.string_class, n); n = 0; for (e = environ; *e != 0; ++ e) { parts = g_strsplit (*e, "=", 2); if (*parts != 0) { str = mono_string_new (domain, *parts); mono_array_setref (names, n, str); } g_strfreev (parts); ++ n; } return names; #endif } /* * If your platform lacks setenv/unsetenv, you must upgrade your glib. 
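 * (The fallback below only covers glib releases older than 2.4, mapping
 * g_setenv/g_unsetenv directly onto the POSIX setenv/unsetenv calls.)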
*/ #if !GLIB_CHECK_VERSION(2,4,0) #define g_setenv(a,b,c) setenv(a,b,c) #define g_unsetenv(a) unsetenv(a) #endif static void ves_icall_System_Environment_InternalSetEnvironmentVariable (MonoString *name, MonoString *value) { MonoError error; #ifdef PLATFORM_WIN32 gunichar2 *utf16_name, *utf16_value; #else gchar *utf8_name, *utf8_value; #endif MONO_ARCH_SAVE_REGS; #ifdef PLATFORM_WIN32 utf16_name = mono_string_to_utf16 (name); if ((value == NULL) || (mono_string_length (value) == 0) || (mono_string_chars (value)[0] == 0)) { SetEnvironmentVariable (utf16_name, NULL); g_free (utf16_name); return; } utf16_value = mono_string_to_utf16 (value); SetEnvironmentVariable (utf16_name, utf16_value); g_free (utf16_name); g_free (utf16_value); #else utf8_name = mono_string_to_utf8 (name); /* FIXME: this should be ascii */ if ((value == NULL) || (mono_string_length (value) == 0) || (mono_string_chars (value)[0] == 0)) { g_unsetenv (utf8_name); g_free (utf8_name); return; } utf8_value = mono_string_to_utf8_checked (value, &error); if (!mono_error_ok (&error)) { g_free (utf8_name); mono_error_raise_exception (&error); } g_setenv (utf8_name, utf8_value, TRUE); g_free (utf8_name); g_free (utf8_value); #endif } static void ves_icall_System_Environment_Exit (int result) { MONO_ARCH_SAVE_REGS; mono_threads_set_shutting_down (); mono_runtime_set_shutting_down (); /* This will kill the tp threads which cannot be suspended */ mono_thread_pool_cleanup (); /* Suspend all managed threads since the runtime is going away */ mono_thread_suspend_all_other_threads (); mono_runtime_quit (); /* we may need to do some cleanup here... */ exit (result); } static MonoString* ves_icall_System_Environment_GetGacPath (void) { return mono_string_new (mono_domain_get (), mono_assembly_getrootdir ()); } static MonoString* ves_icall_System_Environment_GetWindowsFolderPath (int folder) { #if defined (PLATFORM_WIN32) #ifndef CSIDL_FLAG_CREATE #define CSIDL_FLAG_CREATE 0x8000 #endif WCHAR path [MAX_PATH]; /* Create directory if no existing */ if (SUCCEEDED (SHGetFolderPathW (NULL, folder | CSIDL_FLAG_CREATE, NULL, 0, path))) { int len = 0; while (path [len]) ++ len; return mono_string_new_utf16 (mono_domain_get (), path, len); } #else g_warning ("ves_icall_System_Environment_GetWindowsFolderPath should only be called on Windows!"); #endif return mono_string_new (mono_domain_get (), ""); } static MonoArray * ves_icall_System_Environment_GetLogicalDrives (void) { gunichar2 buf [256], *ptr, *dname; gunichar2 *u16; guint initial_size = 127, size = 128; gint ndrives; MonoArray *result; MonoString *drivestr; MonoDomain *domain = mono_domain_get (); gint len; MONO_ARCH_SAVE_REGS; buf [0] = '\0'; ptr = buf; while (size > initial_size) { size = (guint) GetLogicalDriveStrings (initial_size, ptr); if (size > initial_size) { if (ptr != buf) g_free (ptr); ptr = g_malloc0 ((size + 1) * sizeof (gunichar2)); initial_size = size; size++; } } /* Count strings */ dname = ptr; ndrives = 0; do { while (*dname++); ndrives++; } while (*dname); dname = ptr; result = mono_array_new (domain, mono_defaults.string_class, ndrives); ndrives = 0; do { len = 0; u16 = dname; while (*u16) { u16++; len ++; } drivestr = mono_string_new_utf16 (domain, dname, len); mono_array_setref (result, ndrives++, drivestr); while (*dname++); } while (*dname); if (ptr != buf) g_free (ptr); return result; } static MonoString * ves_icall_System_Environment_InternalGetHome (void) { MONO_ARCH_SAVE_REGS; return mono_string_new (mono_domain_get (), g_get_home_dir ()); } static const char 
*encodings [] = { (char *) 1, "ascii", "us_ascii", "us", "ansi_x3.4_1968", "ansi_x3.4_1986", "cp367", "csascii", "ibm367", "iso_ir_6", "iso646_us", "iso_646.irv:1991", (char *) 2, "utf_7", "csunicode11utf7", "unicode_1_1_utf_7", "unicode_2_0_utf_7", "x_unicode_1_1_utf_7", "x_unicode_2_0_utf_7", (char *) 3, "utf_8", "unicode_1_1_utf_8", "unicode_2_0_utf_8", "x_unicode_1_1_utf_8", "x_unicode_2_0_utf_8", (char *) 4, "utf_16", "UTF_16LE", "ucs_2", "unicode", "iso_10646_ucs2", (char *) 5, "unicodefffe", "utf_16be", (char *) 6, "iso_8859_1", (char *) 0 }; /* * Returns the internal codepage, if the value of "int_code_page" is * 1 at entry, and we can not compute a suitable code page number, * returns the code page as a string */ static MonoString* ves_icall_System_Text_Encoding_InternalCodePage (gint32 *int_code_page) { const char *cset; const char *p; char *c; char *codepage = NULL; int code; int want_name = *int_code_page; int i; *int_code_page = -1; MONO_ARCH_SAVE_REGS; g_get_charset (&cset); c = codepage = strdup (cset); for (c = codepage; *c; c++){ if (isascii (*c) && isalpha (*c)) *c = tolower (*c); if (*c == '-') *c = '_'; } /* g_print ("charset: %s\n", cset); */ /* handle some common aliases */ p = encodings [0]; code = 0; for (i = 0; p != 0; ){ if ((gssize) p < 7){ code = (gssize) p; p = encodings [++i]; continue; } if (strcmp (p, codepage) == 0){ *int_code_page = code; break; } p = encodings [++i]; } if (strstr (codepage, "utf_8") != NULL) *int_code_page |= 0x10000000; free (codepage); if (want_name && *int_code_page == -1) return mono_string_new (mono_domain_get (), cset); else return NULL; } static MonoBoolean ves_icall_System_Environment_get_HasShutdownStarted (void) { if (mono_runtime_is_shutting_down ()) return TRUE; if (mono_domain_is_unloading (mono_domain_get ())) return TRUE; return FALSE; } static void ves_icall_System_Environment_BroadcastSettingChange (void) { #ifdef PLATFORM_WIN32 SendMessageTimeout (HWND_BROADCAST, WM_SETTINGCHANGE, NULL, L"Environment", SMTO_ABORTIFHUNG, 2000, 0); #endif } static void ves_icall_MonoMethodMessage_InitMessage (MonoMethodMessage *this, MonoReflectionMethod *method, MonoArray *out_args) { MONO_ARCH_SAVE_REGS; mono_message_init (mono_object_domain (this), this, method, out_args); } static MonoBoolean ves_icall_IsTransparentProxy (MonoObject *proxy) { MONO_ARCH_SAVE_REGS; if (!proxy) return 0; if (proxy->vtable->klass == mono_defaults.transparent_proxy_class) return 1; return 0; } static MonoReflectionMethod * ves_icall_Remoting_RemotingServices_GetVirtualMethod ( MonoReflectionType *rtype, MonoReflectionMethod *rmethod) { MonoClass *klass; MonoMethod *method; MonoMethod **vtable; MonoMethod *res = NULL; MONO_CHECK_ARG_NULL (rtype); MONO_CHECK_ARG_NULL (rmethod); method = rmethod->method; klass = mono_class_from_mono_type (rtype->type); if (MONO_CLASS_IS_INTERFACE (klass)) return NULL; if (method->flags & METHOD_ATTRIBUTE_STATIC) return NULL; if ((method->flags & METHOD_ATTRIBUTE_FINAL) || !(method->flags & METHOD_ATTRIBUTE_VIRTUAL)) { if (klass == method->klass || mono_class_is_subclass_of (klass, method->klass, FALSE)) return rmethod; else return NULL; } mono_class_setup_vtable (klass); vtable = klass->vtable; if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) { int offs = mono_class_interface_offset (klass, method->klass); if (offs >= 0) res = vtable [offs + method->slot]; } else { if (!(klass == method->klass || mono_class_is_subclass_of (klass, method->klass, FALSE))) return NULL; if (method->slot != -1) res = vtable [method->slot]; } if 
(!res) return NULL; return mono_method_get_object (mono_domain_get (), res, NULL); } static void ves_icall_System_Runtime_Activation_ActivationServices_EnableProxyActivation (MonoReflectionType *type, MonoBoolean enable) { MonoClass *klass; MonoVTable* vtable; MONO_ARCH_SAVE_REGS; klass = mono_class_from_mono_type (type->type); vtable = mono_class_vtable_full (mono_domain_get (), klass, TRUE); if (enable) vtable->remote = 1; else vtable->remote = 0; } static MonoObject * ves_icall_System_Runtime_Activation_ActivationServices_AllocateUninitializedClassInstance (MonoReflectionType *type) { MonoClass *klass; MonoDomain *domain; MONO_ARCH_SAVE_REGS; domain = mono_object_domain (type); klass = mono_class_from_mono_type (type->type); if (klass->rank >= 1) { g_assert (klass->rank == 1); return (MonoObject *) mono_array_new (domain, klass->element_class, 0); } else { /* Bypass remoting object creation check */ return mono_object_new_alloc_specific (mono_class_vtable_full (domain, klass, TRUE)); } } static MonoString * ves_icall_System_IO_get_temp_path (void) { MONO_ARCH_SAVE_REGS; return mono_string_new (mono_domain_get (), g_get_tmp_dir ()); } #ifndef PLATFORM_NO_DRIVEINFO static MonoBoolean ves_icall_System_IO_DriveInfo_GetDiskFreeSpace (MonoString *path_name, guint64 *free_bytes_avail, guint64 *total_number_of_bytes, guint64 *total_number_of_free_bytes, gint32 *error) { gboolean result; ULARGE_INTEGER wapi_free_bytes_avail; ULARGE_INTEGER wapi_total_number_of_bytes; ULARGE_INTEGER wapi_total_number_of_free_bytes; MONO_ARCH_SAVE_REGS; *error = ERROR_SUCCESS; result = GetDiskFreeSpaceEx (mono_string_chars (path_name), &wapi_free_bytes_avail, &wapi_total_number_of_bytes, &wapi_total_number_of_free_bytes); if (result) { *free_bytes_avail = wapi_free_bytes_avail.QuadPart; *total_number_of_bytes = wapi_total_number_of_bytes.QuadPart; *total_number_of_free_bytes = wapi_total_number_of_free_bytes.QuadPart; } else { *free_bytes_avail = 0; *total_number_of_bytes = 0; *total_number_of_free_bytes = 0; *error = GetLastError (); } return result; } static guint32 ves_icall_System_IO_DriveInfo_GetDriveType (MonoString *root_path_name) { MONO_ARCH_SAVE_REGS; return GetDriveType (mono_string_chars (root_path_name)); } #endif static gpointer ves_icall_RuntimeMethod_GetFunctionPointer (MonoMethod *method) { MONO_ARCH_SAVE_REGS; return mono_compile_method (method); } static MonoString * ves_icall_System_Configuration_DefaultConfig_get_machine_config_path (void) { MonoString *mcpath; gchar *path; MONO_ARCH_SAVE_REGS; path = g_build_path (G_DIR_SEPARATOR_S, mono_get_config_dir (), "mono", mono_get_runtime_info ()->framework_version, "machine.config", NULL); #if defined (PLATFORM_WIN32) /* Avoid mixing '/' and '\\' */ { gint i; for (i = strlen (path) - 1; i >= 0; i--) if (path [i] == '/') path [i] = '\\'; } #endif mcpath = mono_string_new (mono_domain_get (), path); g_free (path); return mcpath; } static MonoString * get_bundled_machine_config (void) { const gchar *machine_config; MONO_ARCH_SAVE_REGS; machine_config = mono_get_machine_config (); if (!machine_config) return NULL; return mono_string_new (mono_domain_get (), machine_config); } static MonoString * ves_icall_System_Web_Util_ICalls_get_machine_install_dir (void) { MonoString *ipath; gchar *path; MONO_ARCH_SAVE_REGS; path = g_path_get_dirname (mono_get_config_dir ()); #if defined (PLATFORM_WIN32) /* Avoid mixing '/' and '\\' */ { gint i; for (i = strlen (path) - 1; i >= 0; i--) if (path [i] == '/') path [i] = '\\'; } #endif ipath = mono_string_new 
(mono_domain_get (), path); g_free (path); return ipath; } static gboolean ves_icall_get_resources_ptr (MonoReflectionAssembly *assembly, gpointer *result, gint32 *size) { MonoPEResourceDataEntry *entry; MonoImage *image; MONO_ARCH_SAVE_REGS; if (!assembly || !result || !size) return FALSE; *result = NULL; *size = 0; image = assembly->assembly->image; entry = mono_image_lookup_resource (image, MONO_PE_RESOURCE_ID_ASPNET_STRING, 0, NULL); if (!entry) return FALSE; *result = mono_image_rva_map (image, entry->rde_data_offset); if (!(*result)) { g_free (entry); return FALSE; } *size = entry->rde_size; g_free (entry); return TRUE; } static MonoBoolean ves_icall_System_Diagnostics_Debugger_IsAttached_internal (void) { return mono_debug_using_mono_debugger () || mono_is_debugger_attached (); } static void ves_icall_System_Diagnostics_DefaultTraceListener_WriteWindowsDebugString (MonoString *message) { #if defined (PLATFORM_WIN32) OutputDebugString (mono_string_chars (message)); #else g_warning ("WriteWindowsDebugString called and PLATFORM_WIN32 not defined!\n"); #endif } /* Only used for value types */ static MonoObject * ves_icall_System_Activator_CreateInstanceInternal (MonoReflectionType *type) { MonoClass *klass; MonoDomain *domain; MONO_ARCH_SAVE_REGS; domain = mono_object_domain (type); klass = mono_class_from_mono_type (type->type); if (mono_class_is_nullable (klass)) /* No arguments -> null */ return NULL; return mono_object_new (domain, klass); } static MonoReflectionMethod * ves_icall_MonoMethod_get_base_definition (MonoReflectionMethod *m) { MonoClass *klass, *parent; MonoMethod *method = m->method; MonoMethod *result = NULL; MONO_ARCH_SAVE_REGS; if (method->klass == NULL) return m; if (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_CLASS_IS_INTERFACE (method->klass) || method->flags & METHOD_ATTRIBUTE_NEW_SLOT) return m; klass = method->klass; if (klass->generic_class) klass = klass->generic_class->container_class; /* At the end of the loop, klass points to the eldest class that has this virtual function slot. */ for (parent = klass->parent; parent != NULL; parent = parent->parent) { mono_class_setup_vtable (parent); if (parent->vtable_size <= method->slot) break; klass = parent; } if (klass == method->klass) return m; result = klass->vtable [method->slot]; if (result == NULL) { /* It is an abstract method */ gpointer iter = NULL; while ((result = mono_class_get_methods (klass, &iter))) if (result->slot == method->slot) break; } if (result == NULL) return m; return mono_method_get_object (mono_domain_get (), result, NULL); } static MonoString* ves_icall_MonoMethod_get_name (MonoReflectionMethod *m) { MonoMethod *method = m->method; MONO_OBJECT_SETREF (m, name, mono_string_new (mono_object_domain (m), method->name)); return m->name; } static void mono_ArgIterator_Setup (MonoArgIterator *iter, char* argsp, char* start) { MONO_ARCH_SAVE_REGS; iter->sig = *(MonoMethodSignature**)argsp; g_assert (iter->sig->sentinelpos <= iter->sig->param_count); g_assert (iter->sig->call_convention == MONO_CALL_VARARG); iter->next_arg = 0; /* FIXME: it's not documented what start is exactly... 
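 * When start is NULL we fall back to argsp + sizeof (gpointer), i.e. just
 * past the stored signature pointer, and (on non-regparm targets) step over
 * the fixed parameters with mono_type_stack_size () so that iter->args ends
 * up pointing at the first variadic argument.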
*/ if (start) { iter->args = start; } else { iter->args = argsp + sizeof (gpointer); #ifndef MONO_ARCH_REGPARMS { guint32 i, arg_size; gint32 align; for (i = 0; i < iter->sig->sentinelpos; ++i) { arg_size = mono_type_stack_size (iter->sig->params [i], &align); iter->args = (char*)iter->args + arg_size; } } #endif } iter->num_args = iter->sig->param_count - iter->sig->sentinelpos; /* g_print ("sig %p, param_count: %d, sent: %d\n", iter->sig, iter->sig->param_count, iter->sig->sentinelpos); */ } static MonoTypedRef mono_ArgIterator_IntGetNextArg (MonoArgIterator *iter) { guint32 i, arg_size; gint32 align; MonoTypedRef res; MONO_ARCH_SAVE_REGS; i = iter->sig->sentinelpos + iter->next_arg; g_assert (i < iter->sig->param_count); res.type = iter->sig->params [i]; res.klass = mono_class_from_mono_type (res.type); res.value = iter->args; arg_size = mono_type_stack_size (res.type, &align); #if G_BYTE_ORDER != G_LITTLE_ENDIAN if (arg_size <= sizeof (gpointer)) { int dummy; int padding = arg_size - mono_type_size (res.type, &dummy); res.value = (guint8*)res.value + padding; } #endif iter->args = (char*)iter->args + arg_size; iter->next_arg++; /* g_print ("returning arg %d, type 0x%02x of size %d at %p\n", i, res.type->type, arg_size, res.value); */ return res; } static MonoTypedRef mono_ArgIterator_IntGetNextArgT (MonoArgIterator *iter, MonoType *type) { guint32 i, arg_size; gint32 align; MonoTypedRef res; MONO_ARCH_SAVE_REGS; i = iter->sig->sentinelpos + iter->next_arg; g_assert (i < iter->sig->param_count); while (i < iter->sig->param_count) { if (!mono_metadata_type_equal (type, iter->sig->params [i])) continue; res.type = iter->sig->params [i]; res.klass = mono_class_from_mono_type (res.type); /* FIXME: endianess issue... */ res.value = iter->args; arg_size = mono_type_stack_size (res.type, &align); iter->args = (char*)iter->args + arg_size; iter->next_arg++; /* g_print ("returning arg %d, type 0x%02x of size %d at %p\n", i, res.type->type, arg_size, res.value); */ return res; } /* g_print ("arg type 0x%02x not found\n", res.type->type); */ res.type = NULL; res.value = NULL; res.klass = NULL; return res; } static MonoType* mono_ArgIterator_IntGetNextArgType (MonoArgIterator *iter) { gint i; MONO_ARCH_SAVE_REGS; i = iter->sig->sentinelpos + iter->next_arg; g_assert (i < iter->sig->param_count); return iter->sig->params [i]; } static MonoObject* mono_TypedReference_ToObject (MonoTypedRef tref) { MONO_ARCH_SAVE_REGS; if (MONO_TYPE_IS_REFERENCE (tref.type)) { MonoObject** objp = tref.value; return *objp; } return mono_value_box (mono_domain_get (), tref.klass, tref.value); } static MonoObject* mono_TypedReference_ToObjectInternal (MonoType *type, gpointer value, MonoClass *klass) { MONO_ARCH_SAVE_REGS; if (MONO_TYPE_IS_REFERENCE (type)) { MonoObject** objp = value; return *objp; } return mono_value_box (mono_domain_get (), klass, value); } static void prelink_method (MonoMethod *method) { const char *exc_class, *exc_arg; if (!(method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) return; mono_lookup_pinvoke_call (method, &exc_class, &exc_arg); if (exc_class) { mono_raise_exception( mono_exception_from_name_msg (mono_defaults.corlib, "System", exc_class, exc_arg ) ); } /* create the wrapper, too? 
*/ } static void ves_icall_System_Runtime_InteropServices_Marshal_Prelink (MonoReflectionMethod *method) { MONO_ARCH_SAVE_REGS; prelink_method (method->method); } static void ves_icall_System_Runtime_InteropServices_Marshal_PrelinkAll (MonoReflectionType *type) { MonoClass *klass = mono_class_from_mono_type (type->type); MonoMethod* m; gpointer iter = NULL; MONO_ARCH_SAVE_REGS; while ((m = mono_class_get_methods (klass, &iter))) prelink_method (m); } /* These parameters are "readonly" in corlib/System/NumberFormatter.cs */ static void ves_icall_System_NumberFormatter_GetFormatterTables (guint64 const **mantissas, gint32 const **exponents, gunichar2 const **digitLowerTable, gunichar2 const **digitUpperTable, gint64 const **tenPowersList, gint32 const **decHexDigits) { *mantissas = Formatter_MantissaBitsTable; *exponents = Formatter_TensExponentTable; *digitLowerTable = Formatter_DigitLowerTable; *digitUpperTable = Formatter_DigitUpperTable; *tenPowersList = Formatter_TenPowersList; *decHexDigits = Formatter_DecHexDigits; } /* These parameters are "readonly" in corlib/System/Char.cs */ static void ves_icall_System_Char_GetDataTablePointers (guint8 const **category_data, guint8 const **numeric_data, gdouble const **numeric_data_values, guint16 const **to_lower_data_low, guint16 const **to_lower_data_high, guint16 const **to_upper_data_low, guint16 const **to_upper_data_high) { *category_data = CategoryData; *numeric_data = NumericData; *numeric_data_values = NumericDataValues; *to_lower_data_low = ToLowerDataLow; *to_lower_data_high = ToLowerDataHigh; *to_upper_data_low = ToUpperDataLow; *to_upper_data_high = ToUpperDataHigh; } static gint32 ves_icall_MonoDebugger_GetMethodToken (MonoReflectionMethod *method) { return method->method->token; } /* * We return NULL for no modifiers so the corlib code can return Type.EmptyTypes * and avoid useless allocations. 
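 * Callers of type_array_from_modifiers () below must therefore treat a NULL
 * return as "no custom modifiers", not as an error.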
*/ static MonoArray* type_array_from_modifiers (MonoImage *image, MonoType *type, int optional) { MonoArray *res; int i, count = 0; for (i = 0; i < type->num_mods; ++i) { if ((optional && !type->modifiers [i].required) || (!optional && type->modifiers [i].required)) count++; } if (!count) return NULL; res = mono_array_new (mono_domain_get (), mono_defaults.systemtype_class, count); count = 0; for (i = 0; i < type->num_mods; ++i) { if ((optional && !type->modifiers [i].required) || (!optional && type->modifiers [i].required)) { MonoClass *klass = mono_class_get (image, type->modifiers [i].token); mono_array_setref (res, count, mono_type_get_object (mono_domain_get (), &klass->byval_arg)); count++; } } return res; } static MonoArray* param_info_get_type_modifiers (MonoReflectionParameter *param, MonoBoolean optional) { MonoType *type = param->ClassImpl->type; MonoClass *member_class = mono_object_class (param->MemberImpl); MonoMethod *method = NULL; MonoImage *image; int pos; MonoMethodSignature *sig; if (mono_class_is_reflection_method_or_constructor (member_class)) { MonoReflectionMethod *rmethod = (MonoReflectionMethod*)param->MemberImpl; method = rmethod->method; } else if (member_class->image == mono_defaults.corlib && !strcmp ("MonoProperty", member_class->name)) { MonoReflectionProperty *prop = (MonoReflectionProperty *)param->MemberImpl; if (!(method = prop->property->get)) method = prop->property->set; g_assert (method); } else { char *type_name = mono_type_get_full_name (member_class); char *msg = g_strdup_printf ("Custom modifiers on a ParamInfo with member %s are not supported", type_name); MonoException *ex = mono_get_exception_not_supported (msg); g_free (type_name); g_free (msg); mono_raise_exception (ex); } image = method->klass->image; pos = param->PositionImpl; sig = mono_method_signature (method); if (pos == -1) type = sig->ret; else type = sig->params [pos]; return type_array_from_modifiers (image, type, optional); } static MonoType* get_property_type (MonoProperty *prop) { MonoMethodSignature *sig; if (prop->get) { sig = mono_method_signature (prop->get); return sig->ret; } else if (prop->set) { sig = mono_method_signature (prop->set); return sig->params [sig->param_count - 1]; } return NULL; } static MonoArray* property_info_get_type_modifiers (MonoReflectionProperty *property, MonoBoolean optional) { MonoType *type = get_property_type (property->property); MonoImage *image = property->klass->image; if (!type) return NULL; return type_array_from_modifiers (image, type, optional); } static MonoBoolean custom_attrs_defined_internal (MonoObject *obj, MonoReflectionType *attr_type) { MonoCustomAttrInfo *cinfo; gboolean found; cinfo = mono_reflection_get_custom_attrs_info (obj); if (!cinfo) return FALSE; found = mono_custom_attrs_has_attr (cinfo, mono_class_from_mono_type (attr_type->type)); if (!cinfo->cached) mono_custom_attrs_free (cinfo); return found; } static MonoArray* custom_attrs_get_by_type (MonoObject *obj, MonoReflectionType *attr_type) { MonoArray *res = mono_reflection_get_custom_attrs_by_type (obj, attr_type ? 
mono_class_from_mono_type (attr_type->type) : NULL); if (mono_loader_get_last_error ()) { mono_raise_exception (mono_loader_error_prepare_exception (mono_loader_get_last_error ())); g_assert_not_reached (); /* Not reached */ return NULL; } else { return res; } } static MonoString* ves_icall_Mono_Runtime_GetDisplayName (void) { char *info; MonoString *display_name; info = mono_get_runtime_callbacks ()->get_runtime_build_info (); display_name = mono_string_new (mono_domain_get (), info); g_free (info); return display_name; } static MonoString* ves_icall_System_ComponentModel_Win32Exception_W32ErrorMessage (guint32 code) { MonoString *message; guint32 ret; gunichar2 buf[256]; ret = FormatMessage (FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, NULL, code, 0, buf, 255, NULL); if (ret == 0) { message = mono_string_new (mono_domain_get (), "Error looking up error string"); } else { message = mono_string_new_utf16 (mono_domain_get (), buf, ret); } return message; } const static guchar dbase64 [] = { 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 62, 128, 128, 128, 63, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 128, 128, 128, 0, 128, 128, 128, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 128, 128, 128, 128, 128, 128, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51 }; static MonoArray * base64_to_byte_array (gunichar2 *start, gint ilength, MonoBoolean allowWhitespaceOnly) { gint ignored; gint i; gunichar2 c; gunichar2 last, prev_last, prev2_last; gint olength; MonoArray *result; guchar *res_ptr; gint a [4], b [4]; MonoException *exc; ignored = 0; last = prev_last = 0, prev2_last = 0; for (i = 0; i < ilength; i++) { c = start [i]; if (c >= sizeof (dbase64)) { exc = mono_exception_from_name_msg (mono_get_corlib (), "System", "FormatException", "Invalid character found."); mono_raise_exception (exc); } else if (isspace (c)) { ignored++; } else { prev2_last = prev_last; prev_last = last; last = c; } } olength = ilength - ignored; if (allowWhitespaceOnly && olength == 0) { return mono_array_new (mono_domain_get (), mono_defaults.byte_class, 0); } if ((olength & 3) != 0 || olength <= 0) { exc = mono_exception_from_name_msg (mono_get_corlib (), "System", "FormatException", "Invalid length."); mono_raise_exception (exc); } if (prev2_last == '=') { exc = mono_exception_from_name_msg (mono_get_corlib (), "System", "FormatException", "Invalid format."); mono_raise_exception (exc); } olength = (olength * 3) / 4; if (last == '=') olength--; if (prev_last == '=') olength--; result = mono_array_new (mono_domain_get (), mono_defaults.byte_class, olength); res_ptr = mono_array_addr (result, guchar, 0); for (i = 0; i < ilength; ) { int k; for (k = 0; k < 4 && i < ilength;) { c = start [i++]; if (isspace (c)) continue; a [k] = (guchar) c; if (((b [k] = dbase64 [c]) & 0x80) != 0) { exc = mono_exception_from_name_msg (mono_get_corlib (), "System", "FormatException", "Invalid character found."); mono_raise_exception (exc); } k++; } *res_ptr++ = (b [0] << 2) | (b [1] >> 4); if (a [2] != '=') *res_ptr++ = (b [1] << 4) | (b [2] >> 2); if (a [3] != '=') *res_ptr++ = (b [2] << 6) | b [3]; while (i < ilength && isspace (start [i])) i++; } return result; } static MonoArray * InternalFromBase64String (MonoString *str, MonoBoolean allowWhitespaceOnly) { 
MONO_ARCH_SAVE_REGS; return base64_to_byte_array (mono_string_chars (str), mono_string_length (str), allowWhitespaceOnly); } static MonoArray * InternalFromBase64CharArray (MonoArray *input, gint offset, gint length) { MONO_ARCH_SAVE_REGS; return base64_to_byte_array (mono_array_addr (input, gunichar2, offset), length, FALSE); } #define ICALL_TYPE(id,name,first) #define ICALL(id,name,func) Icall_ ## id, enum { #include "metadata/icall-def.h" Icall_last }; #undef ICALL_TYPE #undef ICALL #define ICALL_TYPE(id,name,first) Icall_type_ ## id, #define ICALL(id,name,func) enum { #include "metadata/icall-def.h" Icall_type_num }; #undef ICALL_TYPE #undef ICALL #define ICALL_TYPE(id,name,firstic) {(Icall_ ## firstic)}, #define ICALL(id,name,func) typedef struct { guint16 first_icall; } IcallTypeDesc; static const IcallTypeDesc icall_type_descs [] = { #include "metadata/icall-def.h" {Icall_last} }; #define icall_desc_num_icalls(desc) ((desc) [1].first_icall - (desc) [0].first_icall) #undef ICALL_TYPE #define ICALL_TYPE(id,name,first) #undef ICALL #ifdef HAVE_ARRAY_ELEM_INIT #define MSGSTRFIELD(line) MSGSTRFIELD1(line) #define MSGSTRFIELD1(line) str##line static const struct msgstrtn_t { #define ICALL(id,name,func) #undef ICALL_TYPE #define ICALL_TYPE(id,name,first) char MSGSTRFIELD(__LINE__) [sizeof (name)]; #include "metadata/icall-def.h" #undef ICALL_TYPE } icall_type_names_str = { #define ICALL_TYPE(id,name,first) (name), #include "metadata/icall-def.h" #undef ICALL_TYPE }; static const guint16 icall_type_names_idx [] = { #define ICALL_TYPE(id,name,first) [Icall_type_ ## id] = offsetof (struct msgstrtn_t, MSGSTRFIELD(__LINE__)), #include "metadata/icall-def.h" #undef ICALL_TYPE }; #define icall_type_name_get(id) ((const char*)&icall_type_names_str + icall_type_names_idx [(id)]) static const struct msgstr_t { #undef ICALL #define ICALL_TYPE(id,name,first) #define ICALL(id,name,func) char MSGSTRFIELD(__LINE__) [sizeof (name)]; #include "metadata/icall-def.h" #undef ICALL } icall_names_str = { #define ICALL(id,name,func) (name), #include "metadata/icall-def.h" #undef ICALL }; static const guint16 icall_names_idx [] = { #define ICALL(id,name,func) [Icall_ ## id] = offsetof (struct msgstr_t, MSGSTRFIELD(__LINE__)), #include "metadata/icall-def.h" #undef ICALL }; #define icall_name_get(id) ((const char*)&icall_names_str + icall_names_idx [(id)]) #else #undef ICALL_TYPE #undef ICALL #define ICALL_TYPE(id,name,first) name, #define ICALL(id,name,func) static const char* const icall_type_names [] = { #include "metadata/icall-def.h" NULL }; #define icall_type_name_get(id) (icall_type_names [(id)]) #undef ICALL_TYPE #undef ICALL #define ICALL_TYPE(id,name,first) #define ICALL(id,name,func) name, static const char* const icall_names [] = { #include "metadata/icall-def.h" NULL }; #define icall_name_get(id) icall_names [(id)] #endif /* !HAVE_ARRAY_ELEM_INIT */ #undef ICALL_TYPE #undef ICALL #define ICALL_TYPE(id,name,first) #define ICALL(id,name,func) func, static const gconstpointer icall_functions [] = { #include "metadata/icall-def.h" NULL }; static GHashTable *icall_hash = NULL; static GHashTable *jit_icall_hash_name = NULL; static GHashTable *jit_icall_hash_addr = NULL; void mono_icall_init (void) { int i = 0; /* check that tables are sorted: disable in release */ if (TRUE) { int j; const char *prev_class = NULL; const char *prev_method; for (i = 0; i < Icall_type_num; ++i) { const IcallTypeDesc *desc; int num_icalls; prev_method = NULL; if (prev_class && strcmp (prev_class, icall_type_name_get (i)) >= 0) 
g_print ("class %s should come before class %s\n", icall_type_name_get (i), prev_class); prev_class = icall_type_name_get (i); desc = &icall_type_descs [i]; num_icalls = icall_desc_num_icalls (desc); /*g_print ("class %s has %d icalls starting at %d\n", prev_class, num_icalls, desc->first_icall);*/ for (j = 0; j < num_icalls; ++j) { const char *methodn = icall_name_get (desc->first_icall + j); if (prev_method && strcmp (prev_method, methodn) >= 0) g_print ("method %s should come before method %s\n", methodn, prev_method); prev_method = methodn; } } } icall_hash = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, NULL); } void mono_icall_cleanup (void) { g_hash_table_destroy (icall_hash); g_hash_table_destroy (jit_icall_hash_name); g_hash_table_destroy (jit_icall_hash_addr); } void mono_add_internal_call (const char *name, gconstpointer method) { mono_loader_lock (); g_hash_table_insert (icall_hash, g_strdup (name), (gpointer) method); mono_loader_unlock (); } #ifdef HAVE_ARRAY_ELEM_INIT static int compare_method_imap (const void *key, const void *elem) { const char* method_name = (const char*)&icall_names_str + (*(guint16*)elem); return strcmp (key, method_name); } static gpointer find_method_icall (const IcallTypeDesc *imap, const char *name) { const guint16 *nameslot = bsearch (name, icall_names_idx + imap->first_icall, icall_desc_num_icalls (imap), sizeof (icall_names_idx [0]), compare_method_imap); if (!nameslot) return NULL; return (gpointer)icall_functions [(nameslot - &icall_names_idx [0])]; } static int compare_class_imap (const void *key, const void *elem) { const char* class_name = (const char*)&icall_type_names_str + (*(guint16*)elem); return strcmp (key, class_name); } static const IcallTypeDesc* find_class_icalls (const char *name) { const guint16 *nameslot = bsearch (name, icall_type_names_idx, Icall_type_num, sizeof (icall_type_names_idx [0]), compare_class_imap); if (!nameslot) return NULL; return &icall_type_descs [nameslot - &icall_type_names_idx [0]]; } #else static int compare_method_imap (const void *key, const void *elem) { const char** method_name = (const char**)elem; return strcmp (key, *method_name); } static gpointer find_method_icall (const IcallTypeDesc *imap, const char *name) { const char **nameslot = bsearch (name, icall_names + imap->first_icall, icall_desc_num_icalls (imap), sizeof (icall_names [0]), compare_method_imap); if (!nameslot) return NULL; return (gpointer)icall_functions [(nameslot - icall_names)]; } static int compare_class_imap (const void *key, const void *elem) { const char** class_name = (const char**)elem; return strcmp (key, *class_name); } static const IcallTypeDesc* find_class_icalls (const char *name) { const char **nameslot = bsearch (name, icall_type_names, Icall_type_num, sizeof (icall_type_names [0]), compare_class_imap); if (!nameslot) return NULL; return &icall_type_descs [nameslot - icall_type_names]; } #endif /* * we should probably export this as an helper (handle nested types). * Returns the number of chars written in buf. 
*/ static int concat_class_name (char *buf, int bufsize, MonoClass *klass) { int nspacelen, cnamelen; nspacelen = strlen (klass->name_space); cnamelen = strlen (klass->name); if (nspacelen + cnamelen + 2 > bufsize) return 0; if (nspacelen) { memcpy (buf, klass->name_space, nspacelen); buf [nspacelen ++] = '.'; } memcpy (buf + nspacelen, klass->name, cnamelen); buf [nspacelen + cnamelen] = 0; return nspacelen + cnamelen; } gpointer mono_lookup_internal_call (MonoMethod *method) { char *sigstart; char *tmpsig; char mname [2048]; int typelen = 0, mlen, siglen; gpointer res; const IcallTypeDesc *imap; g_assert (method != NULL); if (method->is_inflated) method = ((MonoMethodInflated *) method)->declaring; if (method->klass->nested_in) { int pos = concat_class_name (mname, sizeof (mname)-2, method->klass->nested_in); if (!pos) return NULL; mname [pos++] = '/'; mname [pos] = 0; typelen = concat_class_name (mname+pos, sizeof (mname)-pos-1, method->klass); if (!typelen) return NULL; typelen += pos; } else { typelen = concat_class_name (mname, sizeof (mname), method->klass); if (!typelen) return NULL; } imap = find_class_icalls (mname); mname [typelen] = ':'; mname [typelen + 1] = ':'; mlen = strlen (method->name); memcpy (mname + typelen + 2, method->name, mlen); sigstart = mname + typelen + 2 + mlen; *sigstart = 0; tmpsig = mono_signature_get_desc (mono_method_signature (method), TRUE); siglen = strlen (tmpsig); if (typelen + mlen + siglen + 6 > sizeof (mname)) return NULL; sigstart [0] = '('; memcpy (sigstart + 1, tmpsig, siglen); sigstart [siglen + 1] = ')'; sigstart [siglen + 2] = 0; g_free (tmpsig); mono_loader_lock (); res = g_hash_table_lookup (icall_hash, mname); if (res) { mono_loader_unlock (); return res; } /* try without signature */ *sigstart = 0; res = g_hash_table_lookup (icall_hash, mname); if (res) { mono_loader_unlock (); return res; } /* it wasn't found in the static call tables */ if (!imap) { mono_loader_unlock (); return NULL; } res = find_method_icall (imap, sigstart - mlen); if (res) { mono_loader_unlock (); return res; } /* try _with_ signature */ *sigstart = '('; res = find_method_icall (imap, sigstart - mlen); if (res) { mono_loader_unlock (); return res; } g_warning ("cant resolve internal call to \"%s\" (tested without signature also)", mname); g_print ("\nYour mono runtime and class libraries are out of sync.\n"); g_print ("The out of sync library is: %s\n", method->klass->image->name); g_print ("\nWhen you update one from svn you need to update, compile and install\nthe other too.\n"); g_print ("Do not report this as a bug unless you're sure you have updated correctly:\nyou probably have a broken mono install.\n"); g_print ("If you see other errors or faults after this message they are probably related\n"); g_print ("and you need to fix your mono install first.\n"); mono_loader_unlock (); return NULL; } static MonoType* type_from_typename (char *typename) { MonoClass *klass = NULL; /* assignment to shut GCC warning up */ if (!strcmp (typename, "int")) klass = mono_defaults.int_class; else if (!strcmp (typename, "ptr")) klass = mono_defaults.int_class; else if (!strcmp (typename, "void")) klass = mono_defaults.void_class; else if (!strcmp (typename, "int32")) klass = mono_defaults.int32_class; else if (!strcmp (typename, "uint32")) klass = mono_defaults.uint32_class; else if (!strcmp (typename, "int8")) klass = mono_defaults.sbyte_class; else if (!strcmp (typename, "uint8")) klass = mono_defaults.byte_class; else if (!strcmp (typename, "int16")) klass = 
mono_defaults.int16_class; else if (!strcmp (typename, "uint16")) klass = mono_defaults.uint16_class; else if (!strcmp (typename, "long")) klass = mono_defaults.int64_class; else if (!strcmp (typename, "ulong")) klass = mono_defaults.uint64_class; else if (!strcmp (typename, "float")) klass = mono_defaults.single_class; else if (!strcmp (typename, "double")) klass = mono_defaults.double_class; else if (!strcmp (typename, "object")) klass = mono_defaults.object_class; else if (!strcmp (typename, "obj")) klass = mono_defaults.object_class; else if (!strcmp (typename, "string")) klass = mono_defaults.string_class; else if (!strcmp (typename, "bool")) klass = mono_defaults.boolean_class; else if (!strcmp (typename, "boolean")) klass = mono_defaults.boolean_class; else { g_error ("%s", typename); g_assert_not_reached (); } return &klass->byval_arg; } MonoMethodSignature* mono_create_icall_signature (const char *sigstr) { gchar **parts; int i, len; gchar **tmp; MonoMethodSignature *res; mono_loader_lock (); res = g_hash_table_lookup (mono_defaults.corlib->helper_signatures, sigstr); if (res) { mono_loader_unlock (); return res; } parts = g_strsplit (sigstr, " ", 256); tmp = parts; len = 0; while (*tmp) { len ++; tmp ++; } res = mono_metadata_signature_alloc (mono_defaults.corlib, len - 1); res->pinvoke = 1; #ifdef PLATFORM_WIN32 /* * Under windows, the default pinvoke calling convention is STDCALL but * we need CDECL. */ res->call_convention = MONO_CALL_C; #endif res->ret = type_from_typename (parts [0]); for (i = 1; i < len; ++i) { res->params [i - 1] = type_from_typename (parts [i]); } g_strfreev (parts); g_hash_table_insert (mono_defaults.corlib->helper_signatures, (gpointer)sigstr, res); mono_loader_unlock (); return res; } MonoJitICallInfo * mono_find_jit_icall_by_name (const char *name) { MonoJitICallInfo *info; g_assert (jit_icall_hash_name); mono_loader_lock (); info = g_hash_table_lookup (jit_icall_hash_name, name); mono_loader_unlock (); return info; } MonoJitICallInfo * mono_find_jit_icall_by_addr (gconstpointer addr) { MonoJitICallInfo *info; g_assert (jit_icall_hash_addr); mono_loader_lock (); info = g_hash_table_lookup (jit_icall_hash_addr, (gpointer)addr); mono_loader_unlock (); return info; } /* * mono_get_jit_icall_info: * * Return the hashtable mapping JIT icall names to MonoJitICallInfo structures. The * caller should access it while holding the loader lock. */ GHashTable* mono_get_jit_icall_info (void) { return jit_icall_hash_name; } void mono_register_jit_icall_wrapper (MonoJitICallInfo *info, gconstpointer wrapper) { mono_loader_lock (); g_hash_table_insert (jit_icall_hash_addr, (gpointer)wrapper, info); mono_loader_unlock (); } MonoJitICallInfo * mono_register_jit_icall (gconstpointer func, const char *name, MonoMethodSignature *sig, gboolean is_save) { MonoJitICallInfo *info; g_assert (func); g_assert (name); mono_loader_lock (); if (!jit_icall_hash_name) { jit_icall_hash_name = g_hash_table_new_full (g_str_hash, g_str_equal, NULL, g_free); jit_icall_hash_addr = g_hash_table_new (NULL, NULL); } if (g_hash_table_lookup (jit_icall_hash_name, name)) { g_warning ("jit icall already defined \"%s\"\n", name); g_assert_not_reached (); } info = g_new0 (MonoJitICallInfo, 1); info->name = name; info->func = func; info->sig = sig; if (is_save) { info->wrapper = func; } else { info->wrapper = NULL; } g_hash_table_insert (jit_icall_hash_name, (gpointer)info->name, info); g_hash_table_insert (jit_icall_hash_addr, (gpointer)func, info); mono_loader_unlock (); return info; }
./CrossVul/dataset_final_sorted/CWE-362/c/good_3430_0
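/*
 * Illustrative sketch, not part of the file above: mono_lookup_internal_call()
 * builds its hash/bsearch key as "Name.Space.Class::Method(sig)" (nested
 * classes joined with '/'), then retries without the "(sig)" suffix. The
 * helper and the sample names below are hypothetical; only the key layout
 * mirrors the code above.
 */
#include <stdio.h>

static int build_icall_key(char *buf, size_t bufsize, const char *name_space,
                           const char *klass, const char *method,
                           const char *sig)
{
    /* "Namespace.Class::Method(sig)" - the same shape that concat_class_name()
     * plus the "::" and "(...)" appended in mono_lookup_internal_call() produce */
    int n = snprintf(buf, bufsize, "%s.%s::%s(%s)",
                     name_space, klass, method, sig);
    return (n > 0 && (size_t)n < bufsize) ? n : 0;
}

int main(void)
{
    char key[2048];
    if (build_icall_key(key, sizeof(key), "Sample.Icalls", "Widget",
                        "DoWork", "int,string"))
        printf("%s\n", key); /* Sample.Icalls.Widget::DoWork(int,string) */
    return 0;
}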
crossvul-cpp_data_good_5188_0
/* * cros_ec_dev - expose the Chrome OS Embedded Controller to user-space * * Copyright (C) 2014 Google, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/fs.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/uaccess.h> #include "cros_ec_dev.h" /* Device variables */ #define CROS_MAX_DEV 128 static int ec_major; static const struct attribute_group *cros_ec_groups[] = { &cros_ec_attr_group, &cros_ec_lightbar_attr_group, &cros_ec_vbc_attr_group, NULL, }; static struct class cros_class = { .owner = THIS_MODULE, .name = "chromeos", .dev_groups = cros_ec_groups, }; /* Basic communication */ static int ec_get_version(struct cros_ec_dev *ec, char *str, int maxlen) { struct ec_response_get_version *resp; static const char * const current_image_name[] = { "unknown", "read-only", "read-write", "invalid", }; struct cros_ec_command *msg; int ret; msg = kmalloc(sizeof(*msg) + sizeof(*resp), GFP_KERNEL); if (!msg) return -ENOMEM; msg->version = 0; msg->command = EC_CMD_GET_VERSION + ec->cmd_offset; msg->insize = sizeof(*resp); msg->outsize = 0; ret = cros_ec_cmd_xfer(ec->ec_dev, msg); if (ret < 0) goto exit; if (msg->result != EC_RES_SUCCESS) { snprintf(str, maxlen, "%s\nUnknown EC version: EC returned %d\n", CROS_EC_DEV_VERSION, msg->result); ret = -EINVAL; goto exit; } resp = (struct ec_response_get_version *)msg->data; if (resp->current_image >= ARRAY_SIZE(current_image_name)) resp->current_image = 3; /* invalid */ snprintf(str, maxlen, "%s\n%s\n%s\n%s\n", CROS_EC_DEV_VERSION, resp->version_string_ro, resp->version_string_rw, current_image_name[resp->current_image]); ret = 0; exit: kfree(msg); return ret; } /* Device file ops */ static int ec_device_open(struct inode *inode, struct file *filp) { struct cros_ec_dev *ec = container_of(inode->i_cdev, struct cros_ec_dev, cdev); filp->private_data = ec; nonseekable_open(inode, filp); return 0; } static int ec_device_release(struct inode *inode, struct file *filp) { return 0; } static ssize_t ec_device_read(struct file *filp, char __user *buffer, size_t length, loff_t *offset) { struct cros_ec_dev *ec = filp->private_data; char msg[sizeof(struct ec_response_get_version) + sizeof(CROS_EC_DEV_VERSION)]; size_t count; int ret; if (*offset != 0) return 0; ret = ec_get_version(ec, msg, sizeof(msg)); if (ret) return ret; count = min(length, strlen(msg)); if (copy_to_user(buffer, msg, count)) return -EFAULT; *offset = count; return count; } /* Ioctls */ static long ec_device_ioctl_xcmd(struct cros_ec_dev *ec, void __user *arg) { long ret; struct cros_ec_command u_cmd; struct cros_ec_command *s_cmd; if (copy_from_user(&u_cmd, arg, sizeof(u_cmd))) return -EFAULT; if ((u_cmd.outsize > EC_MAX_MSG_BYTES) || (u_cmd.insize > EC_MAX_MSG_BYTES)) return -EINVAL; s_cmd = kmalloc(sizeof(*s_cmd) + max(u_cmd.outsize, u_cmd.insize), GFP_KERNEL); if (!s_cmd) return -ENOMEM; if (copy_from_user(s_cmd, arg, sizeof(*s_cmd) + 
u_cmd.outsize)) { ret = -EFAULT; goto exit; } if (u_cmd.outsize != s_cmd->outsize || u_cmd.insize != s_cmd->insize) { ret = -EINVAL; goto exit; } s_cmd->command += ec->cmd_offset; ret = cros_ec_cmd_xfer(ec->ec_dev, s_cmd); /* Only copy data to userland if data was received. */ if (ret < 0) goto exit; if (copy_to_user(arg, s_cmd, sizeof(*s_cmd) + s_cmd->insize)) ret = -EFAULT; exit: kfree(s_cmd); return ret; } static long ec_device_ioctl_readmem(struct cros_ec_dev *ec, void __user *arg) { struct cros_ec_device *ec_dev = ec->ec_dev; struct cros_ec_readmem s_mem = { }; long num; /* Not every platform supports direct reads */ if (!ec_dev->cmd_readmem) return -ENOTTY; if (copy_from_user(&s_mem, arg, sizeof(s_mem))) return -EFAULT; num = ec_dev->cmd_readmem(ec_dev, s_mem.offset, s_mem.bytes, s_mem.buffer); if (num <= 0) return num; if (copy_to_user((void __user *)arg, &s_mem, sizeof(s_mem))) return -EFAULT; return 0; } static long ec_device_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct cros_ec_dev *ec = filp->private_data; if (_IOC_TYPE(cmd) != CROS_EC_DEV_IOC) return -ENOTTY; switch (cmd) { case CROS_EC_DEV_IOCXCMD: return ec_device_ioctl_xcmd(ec, (void __user *)arg); case CROS_EC_DEV_IOCRDMEM: return ec_device_ioctl_readmem(ec, (void __user *)arg); } return -ENOTTY; } /* Module initialization */ static const struct file_operations fops = { .open = ec_device_open, .release = ec_device_release, .read = ec_device_read, .unlocked_ioctl = ec_device_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = ec_device_ioctl, #endif }; static void __remove(struct device *dev) { struct cros_ec_dev *ec = container_of(dev, struct cros_ec_dev, class_dev); kfree(ec); } static int ec_device_probe(struct platform_device *pdev) { int retval = -ENOMEM; struct device *dev = &pdev->dev; struct cros_ec_platform *ec_platform = dev_get_platdata(dev); dev_t devno = MKDEV(ec_major, pdev->id); struct cros_ec_dev *ec = kzalloc(sizeof(*ec), GFP_KERNEL); if (!ec) return retval; dev_set_drvdata(dev, ec); ec->ec_dev = dev_get_drvdata(dev->parent); ec->dev = dev; ec->cmd_offset = ec_platform->cmd_offset; device_initialize(&ec->class_dev); cdev_init(&ec->cdev, &fops); /* * Add the character device * Link cdev to the class device to be sure device is not used * before unbinding it. */ ec->cdev.kobj.parent = &ec->class_dev.kobj; retval = cdev_add(&ec->cdev, devno, 1); if (retval) { dev_err(dev, ": failed to add character device\n"); goto cdev_add_failed; } /* * Add the class device * Link to the character device for creating the /dev entry * in devtmpfs. 
*/ ec->class_dev.devt = ec->cdev.dev; ec->class_dev.class = &cros_class; ec->class_dev.parent = dev; ec->class_dev.release = __remove; retval = dev_set_name(&ec->class_dev, "%s", ec_platform->ec_name); if (retval) { dev_err(dev, "dev_set_name failed => %d\n", retval); goto set_named_failed; } retval = device_add(&ec->class_dev); if (retval) { dev_err(dev, "device_register failed => %d\n", retval); goto dev_reg_failed; } return 0; dev_reg_failed: set_named_failed: dev_set_drvdata(dev, NULL); cdev_del(&ec->cdev); cdev_add_failed: kfree(ec); return retval; } static int ec_device_remove(struct platform_device *pdev) { struct cros_ec_dev *ec = dev_get_drvdata(&pdev->dev); cdev_del(&ec->cdev); device_unregister(&ec->class_dev); return 0; } static const struct platform_device_id cros_ec_id[] = { { "cros-ec-ctl", 0 }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(platform, cros_ec_id); static struct platform_driver cros_ec_dev_driver = { .driver = { .name = "cros-ec-ctl", }, .probe = ec_device_probe, .remove = ec_device_remove, }; static int __init cros_ec_dev_init(void) { int ret; dev_t dev = 0; ret = class_register(&cros_class); if (ret) { pr_err(CROS_EC_DEV_NAME ": failed to register device class\n"); return ret; } /* Get a range of minor numbers (starting with 0) to work with */ ret = alloc_chrdev_region(&dev, 0, CROS_MAX_DEV, CROS_EC_DEV_NAME); if (ret < 0) { pr_err(CROS_EC_DEV_NAME ": alloc_chrdev_region() failed\n"); goto failed_chrdevreg; } ec_major = MAJOR(dev); /* Register the driver */ ret = platform_driver_register(&cros_ec_dev_driver); if (ret < 0) { pr_warn(CROS_EC_DEV_NAME ": can't register driver: %d\n", ret); goto failed_devreg; } return 0; failed_devreg: unregister_chrdev_region(MKDEV(ec_major, 0), CROS_MAX_DEV); failed_chrdevreg: class_unregister(&cros_class); return ret; } static void __exit cros_ec_dev_exit(void) { platform_driver_unregister(&cros_ec_dev_driver); unregister_chrdev(ec_major, CROS_EC_DEV_NAME); class_unregister(&cros_class); } module_init(cros_ec_dev_init); module_exit(cros_ec_dev_exit); MODULE_AUTHOR("Bill Richardson <wfrichar@chromium.org>"); MODULE_DESCRIPTION("Userspace interface to the Chrome OS Embedded Controller"); MODULE_VERSION("1.0"); MODULE_LICENSE("GPL");
./CrossVul/dataset_final_sorted/CWE-362/c/good_5188_0
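/*
 * Illustrative sketch, not part of the driver above: the double-fetch pattern
 * used by ec_device_ioctl_xcmd(). The size header is copied from user space
 * once to size the allocation, the full message is copied a second time, and
 * the two copies of the sizes are compared so a racing writer cannot change
 * them between the allocation and the second copy (the race this dataset
 * entry is filed under, CWE-362). Everything below is a simplified user-space
 * stand-in: struct msg_hdr, fetch() and the 4096-byte bound are hypothetical.
 */
#include <stdlib.h>
#include <string.h>

struct msg_hdr { unsigned int outsize, insize; };

/* stand-in for copy_from_user(): returns 0 on success */
static int fetch(void *dst, const void *src, size_t len)
{
    memcpy(dst, src, len);
    return 0;
}

static int handle_request(const void *user_arg)
{
    struct msg_hdr first, *full;
    int ret = 0;

    if (fetch(&first, user_arg, sizeof(first)))
        return -1;          /* -EFAULT */
    if (first.outsize > 4096 || first.insize > 4096)
        return -1;          /* -EINVAL, like the EC_MAX_MSG_BYTES bound */

    full = malloc(sizeof(*full) + first.outsize);
    if (!full)
        return -1;          /* -ENOMEM */

    if (fetch(full, user_arg, sizeof(*full) + first.outsize)) {
        ret = -1;           /* -EFAULT */
        goto out;
    }

    /* the second fetch must agree with the first, otherwise a concurrent
     * writer changed the sizes after the allocation was made */
    if (full->outsize != first.outsize || full->insize != first.insize) {
        ret = -1;           /* -EINVAL */
        goto out;
    }

    /* ... process the request using exactly first.outsize bytes ... */
out:
    free(full);
    return ret;
}

int main(void)
{
    struct { struct msg_hdr h; unsigned char payload[16]; } req = { { 16, 0 }, { 0 } };
    return handle_request(&req) ? 1 : 0;
}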
crossvul-cpp_data_bad_5339_0
./CrossVul/dataset_final_sorted/CWE-362/c/bad_5339_0
crossvul-cpp_data_good_1664_4
/* BEGIN_ICS_COPYRIGHT5 **************************************** Copyright (c) 2015, Intel Corporation Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ** END_ICS_COPYRIGHT5 ****************************************/ #include <stdio.h> #include <stdlib.h> #include <sys/types.h> #include <sys/socket.h> #include <sys/un.h> #include <sys/stat.h> #include <signal.h> #include <unistd.h> #include "hsm_com_client_api.h" #include "hsm_config_client_api.h" #include "hsm_config_client_data.h" #include "iba/public/ibyteswap.h" #include "ssapi_internal.h" fm_mgr_config_errno_t fm_mgr_config_mgr_connect ( fm_config_conx_hdl *hdl, fm_mgr_type_t mgr ) { char s_path[256]; char c_path[256]; char *mgr_prefix; p_hsm_com_client_hdl_t *mgr_hdl; memset(s_path,0,sizeof(s_path)); memset(c_path,0,sizeof(c_path)); switch ( mgr ) { case FM_MGR_SM: mgr_prefix = HSM_FM_SCK_SM; mgr_hdl = &hdl->sm_hdl; break; case FM_MGR_PM: mgr_prefix = HSM_FM_SCK_PM; mgr_hdl = &hdl->pm_hdl; break; case FM_MGR_FE: mgr_prefix = HSM_FM_SCK_FE; mgr_hdl = &hdl->fe_hdl; break; default: return FM_CONF_INIT_ERR; } // Fill in the paths for the server and client sockets. sprintf(s_path,"%s%s%d",HSM_FM_SCK_PREFIX,mgr_prefix,hdl->instance); sprintf(c_path,"%s%s%d_C_XXXXXX",HSM_FM_SCK_PREFIX,mgr_prefix,hdl->instance); if ( *mgr_hdl == NULL ) { if ( hcom_client_init(mgr_hdl,s_path,c_path,32768) != HSM_COM_OK ) { return FM_CONF_INIT_ERR; } } if ( hcom_client_connect(*mgr_hdl) == HSM_COM_OK ) { hdl->conx_mask |= mgr; return FM_CONF_OK; } return FM_CONF_CONX_ERR; } // init fm_mgr_config_errno_t fm_mgr_config_init ( OUT p_fm_config_conx_hdlt *p_hdl, IN int instance, OPTIONAL IN char *rem_address, OPTIONAL IN char *community ) { fm_config_conx_hdl *hdl; fm_mgr_config_errno_t res = FM_CONF_OK; if ( (hdl = calloc(1,sizeof(fm_config_conx_hdl))) == NULL ) { res = FM_CONF_NO_MEM; goto cleanup; } hdl->instance = instance; *p_hdl = hdl; // connect to the snmp agent via localhost? 
if(!rem_address || (strcmp(rem_address,"localhost") == 0)) { if ( fm_mgr_config_mgr_connect(hdl, FM_MGR_SM) == FM_CONF_INIT_ERR ) { res = FM_CONF_INIT_ERR; goto cleanup; } if ( fm_mgr_config_mgr_connect(hdl, FM_MGR_PM) == FM_CONF_INIT_ERR ) { res = FM_CONF_INIT_ERR; goto cleanup; } if ( fm_mgr_config_mgr_connect(hdl, FM_MGR_FE) == FM_CONF_INIT_ERR ) { res = FM_CONF_INIT_ERR; goto cleanup; } } cleanup: return res; } // connect fm_mgr_config_errno_t fm_mgr_config_connect ( IN p_fm_config_conx_hdlt p_hdl ) { fm_config_conx_hdl *hdl = p_hdl; fm_mgr_config_errno_t res = FM_CONF_OK; int fail_count = 0; if ( (res = fm_mgr_config_mgr_connect(hdl, FM_MGR_SM)) == FM_CONF_INIT_ERR ) { res = FM_CONF_INIT_ERR; goto cleanup; } if(res != FM_CONF_OK) fail_count++; if ( (res = fm_mgr_config_mgr_connect(hdl, FM_MGR_PM)) == FM_CONF_INIT_ERR ) { res = FM_CONF_INIT_ERR; goto cleanup; } if(res != FM_CONF_OK) fail_count++; if ( (res = fm_mgr_config_mgr_connect(hdl, FM_MGR_FE)) == FM_CONF_INIT_ERR ) { res = FM_CONF_INIT_ERR; goto cleanup; } if(res != FM_CONF_OK) fail_count++; if(fail_count < 4){ res = FM_CONF_OK; } cleanup: return res; } fm_mgr_config_errno_t fm_mgr_general_query ( IN p_hsm_com_client_hdl_t client_hdl, IN fm_mgr_action_t action, IN fm_datatype_t data_type_id, IN int data_len, OUT void *data, OUT fm_msg_ret_code_t *ret_code ) { fm_config_datagram_t *dg; hsm_com_datagram_t com_dg; int len; hsm_com_errno_t com_res; len = data_len + (sizeof(fm_config_datagram_header_t)); if ( (dg = malloc(len)) == NULL ) { return FM_CONF_NO_MEM; } dg->header.action = action; dg->header.data_id = data_type_id; dg->header.data_len = data_len; memcpy(&dg->data[0],data,data_len); com_dg.buf = (char*)dg; com_dg.buf_size = len; com_dg.data_len = len; if ( (com_res = hcom_client_send_data(client_hdl,60,&com_dg,&com_dg)) != HSM_COM_OK ) { free(dg); if(com_res == HSM_COM_NOT_CONNECTED) { return FM_CONF_ERR_DISC; } return FM_CONF_NO_RESP; } if ( ret_code ) *ret_code = dg->header.ret_code; if ( dg->header.ret_code != FM_RET_OK ) { free(dg); return FM_CONF_ERROR; } else { if ( dg->header.data_len != data_len ) { free(dg); if ( ret_code ) *ret_code = FM_RET_BAD_RET_LEN; return FM_CONF_ERR_LEN; } memcpy(data,&dg->data[0],data_len); } free(dg); return FM_CONF_OK; } fm_mgr_config_errno_t fm_mgr_status_query ( IN p_hsm_com_client_hdl_t client_hdl, IN fm_mgr_action_t action, IN fm_datatype_t data_type_id, IN int data_len, OUT void *data, OUT fm_msg_ret_code_t *ret_code ) { fm_config_datagram_t *dg; hsm_com_datagram_t com_dg; int len; hsm_com_errno_t com_res; len = data_len + (sizeof(fm_config_datagram_header_t)); if ( (dg = malloc(len)) == NULL ) { return FM_CONF_NO_MEM; } dg->header.action = action; dg->header.data_id = data_type_id; dg->header.data_len = data_len; memcpy(&dg->data[0],data,data_len); com_dg.buf = (char*)dg; com_dg.buf_size = len; com_dg.data_len = len; if ( (com_res = hcom_client_send_data(client_hdl,10,&com_dg,&com_dg)) != HSM_COM_OK ) { free(dg); if(com_res == HSM_COM_NOT_CONNECTED) { return FM_CONF_ERR_DISC; } return FM_CONF_NO_RESP; } if ( ret_code ) *ret_code = dg->header.ret_code; if ( dg->header.ret_code != FM_RET_OK ) { free(dg); return FM_CONF_ERROR; } else { if ( dg->header.data_len != data_len ) { free(dg); if ( ret_code ) *ret_code = FM_RET_BAD_RET_LEN; return FM_CONF_ERR_LEN; } memcpy(data,&dg->data[0],data_len); } free(dg); return FM_CONF_OK; } p_hsm_com_client_hdl_t get_mgr_hdl ( IN p_fm_config_conx_hdlt hdl, IN fm_mgr_type_t mgr ) { switch ( mgr ) { case FM_MGR_SM: return hdl->sm_hdl; case FM_MGR_PM: 
return hdl->pm_hdl; case FM_MGR_FE: return hdl->fe_hdl; default: return NULL; } return NULL; } /* * simple local instance only queries */ fm_mgr_config_errno_t fm_mgr_simple_query ( IN p_fm_config_conx_hdlt hdl, IN fm_mgr_action_t action, IN fm_datatype_t data_type_id, IN fm_mgr_type_t mgr, IN int data_len, OUT void *data, OUT fm_msg_ret_code_t *ret_code ) { p_hsm_com_client_hdl_t client_hdl; if ( (client_hdl = get_mgr_hdl(hdl,mgr)) != NULL ) { return fm_mgr_general_query(client_hdl,action,data_type_id,data_len,data,ret_code); } return FM_CONF_ERROR; } fm_mgr_config_errno_t fm_mgr_commong_cfg_query ( IN p_fm_config_conx_hdlt hdl, IN fm_mgr_type_t mgr, IN fm_mgr_action_t action, OUT fm_config_common_t *info, OUT fm_msg_ret_code_t *ret_code ) { p_hsm_com_client_hdl_t client_hdl; if ( (client_hdl = get_mgr_hdl(hdl,mgr)) != NULL ) { return fm_mgr_general_query(client_hdl,action,FM_DT_COMMON,sizeof(fm_config_common_t),info,ret_code); } return FM_CONF_ERROR; } fm_mgr_config_errno_t fm_mgr_fe_cfg_query ( IN p_fm_config_conx_hdlt hdl, IN fm_mgr_action_t action, OUT fe_config_t *info, OUT fm_msg_ret_code_t *ret_code ) { p_hsm_com_client_hdl_t client_hdl; if ( (client_hdl = get_mgr_hdl(hdl,FM_MGR_FE)) != NULL ) { return fm_mgr_general_query(client_hdl,action,FM_DT_FE_CFG,sizeof(fe_config_t),info,ret_code); } return FM_CONF_ERROR; } fm_mgr_config_errno_t fm_mgr_pm_cfg_query ( IN p_fm_config_conx_hdlt hdl, IN fm_mgr_action_t action, OUT pm_config_t *info, OUT fm_msg_ret_code_t *ret_code ) { p_hsm_com_client_hdl_t client_hdl; if ( (client_hdl = get_mgr_hdl(hdl,FM_MGR_PM)) != NULL ) { return fm_mgr_general_query(client_hdl,action,FM_DT_PM_CFG,sizeof(pm_config_t),info,ret_code); } return FM_CONF_ERROR; } fm_mgr_config_errno_t fm_mgr_sm_cfg_query ( IN p_fm_config_conx_hdlt hdl, IN fm_mgr_action_t action, OUT sm_config_t *info, OUT fm_msg_ret_code_t *ret_code ) { p_hsm_com_client_hdl_t client_hdl; if ( (client_hdl = get_mgr_hdl(hdl,FM_MGR_SM)) != NULL ) { return fm_mgr_general_query(client_hdl,action,FM_DT_SM_CFG,sizeof(sm_config_t),info,ret_code); } return FM_CONF_ERROR; } fm_mgr_config_errno_t fm_sm_status_query ( IN p_fm_config_conx_hdlt hdl, IN fm_mgr_action_t action, OUT fm_sm_status_t *info, OUT fm_msg_ret_code_t *ret_code ) { p_hsm_com_client_hdl_t client_hdl; if ( (client_hdl = get_mgr_hdl(hdl,FM_MGR_SM)) != NULL ) { return fm_mgr_general_query(client_hdl,action,FM_DT_SM_STATUS,sizeof(*info),info,ret_code); } return FM_CONF_ERROR; } fm_mgr_config_errno_t fm_pm_status_query ( IN p_fm_config_conx_hdlt hdl, IN fm_mgr_action_t action, OUT fm_pm_status_t *info, OUT fm_msg_ret_code_t *ret_code ) { p_hsm_com_client_hdl_t client_hdl; if ( (client_hdl = get_mgr_hdl(hdl,FM_MGR_PM)) != NULL ) { return fm_mgr_general_query(client_hdl,action,FM_DT_PM_STATUS,sizeof(*info),info,ret_code); } return FM_CONF_ERROR; } fm_mgr_config_errno_t fm_fe_status_query ( IN p_fm_config_conx_hdlt hdl, IN fm_mgr_action_t action, OUT fm_fe_status_t *info, OUT fm_msg_ret_code_t *ret_code ) { p_hsm_com_client_hdl_t client_hdl; if ( (client_hdl = get_mgr_hdl(hdl,FM_MGR_FE)) != NULL ) { return fm_mgr_general_query(client_hdl,action,FM_DT_FE_STATUS,sizeof(*info),info,ret_code); } return FM_CONF_ERROR; } const char* fm_mgr_get_error_str ( IN fm_mgr_config_errno_t err ) { switch(err){ case FM_CONF_ERR_LEN: return "Response data legth invalid"; case FM_CONF_ERR_VERSION: return "Client/Server version mismatch"; case FM_CONF_ERR_DISC: return "Not connected"; case FM_CONF_TEST: return "Test message"; case FM_CONF_OK: return "Ok"; case 
FM_CONF_ERROR: return "General error"; case FM_CONF_NO_RESOURCES: return "Out of resources"; case FM_CONF_NO_MEM: return "No memory"; case FM_CONF_PATH_ERR: return "Invalid path"; case FM_CONF_BAD: return "Bad argument"; case FM_CONF_BIND_ERR: return "Could not bind socket"; case FM_CONF_SOCK_ERR: return "Could not create socket"; case FM_CONF_CHMOD_ERR: return "Invalid permissions on socket"; case FM_CONF_CONX_ERR: return "Connection Error"; case FM_CONF_SEND_ERR: return "Send error"; case FM_CONF_INIT_ERR: return "Could not initalize resource"; case FM_CONF_NO_RESP: return "No Response"; case FM_CONF_MAX_ERROR_NUM: default: return "Unknown error"; } return "Unknown error"; } const char* fm_mgr_get_resp_error_str ( IN fm_msg_ret_code_t err ) { switch(err){ case FM_RET_BAD_RET_LEN: return "Return data length invalid"; case FM_RET_OK: return "Ok"; case FM_RET_DT_NOT_SUPPORTED: return "Data type not supported"; case FM_RET_ACT_NOT_SUPPORTED: return "Action not supported"; case FM_RET_INVALID: return "Invalid"; case FM_RET_BAD_LEN: return "Send data length invalid"; case FM_RET_BUSY: return "Busy"; case FM_RET_NOT_FOUND: return "Record not found"; case FM_RET_NO_NEXT: return "No next instance"; case FM_RET_NOT_MASTER: return "SM is not master"; case FM_RET_NOSUCHOBJECT: return "SNMP Err: No such object"; case FM_RET_NOSUCHINSTANCE: return "SNMP Err: No such instance"; case FM_RET_ENDOFMIBVIEW: return "SNMP Err: End of view"; case FM_RET_ERR_NOERROR: return "SNMP Err: No error"; case FM_RET_ERR_TOOBIG: return "SNMP Err: Object too big"; case FM_RET_ERR_NOSUCHNAME: return "SNMP Err: no such name"; case FM_RET_ERR_BADVALUE: return "SNMP Err: Bad value"; case FM_RET_ERR_READONLY: return "SNMP Err: Read only"; case FM_RET_ERR_GENERR: return "SNMP Err: General Error"; case FM_RET_ERR_NOACCESS: return "SNMP Err: No Access"; case FM_RET_ERR_WRONGTYPE: return "SNMP Err: Wrong Type"; case FM_RET_ERR_WRONGLENGTH: return "SNMP Err: Wrong length"; case FM_RET_ERR_WRONGENCODING: return "SNMP Err: Wrong encoding"; case FM_RET_ERR_WRONGVALUE: return "SNMP Err: Wrong Value"; case FM_RET_ERR_NOCREATION: return "SNMP Err: No Creation"; case FM_RET_ERR_INCONSISTENTVALUE: return "SNMP Err: Inconsistent value"; case FM_RET_ERR_RESOURCEUNAVAILABLE: return "SNMP Err: Resource Unavailable"; case FM_RET_ERR_COMMITFAILED: return "SNMP Err: Commit failed"; case FM_RET_ERR_UNDOFAILED: return "SNMP Err: Undo failed"; case FM_RET_ERR_AUTHORIZATIONERROR: return "SNMP Err: Authorization error"; case FM_RET_ERR_NOTWRITABLE: return "SNMP Err: Not Writable"; case FM_RET_TIMEOUT: return "SNMP Err: Timeout"; case FM_RET_UNKNOWN_DT: return "Unknown Datatype"; case FM_RET_END_OF_TABLE: return "End of Table"; case FM_RET_INTERNAL_ERR: return "Internal Error"; case FM_RET_CONX_CLOSED: return "Connection Closed"; } return "Unknown code"; } /* The current error map is copied to the pointer provided */ fm_mgr_config_errno_t fm_mgr_config_get_error_map ( IN p_fm_config_conx_hdlt hdl, OUT fm_error_map_t *error_map ) { if(error_map){ memcpy(error_map,&hdl->error_map,sizeof(fm_error_map_t)); return FM_CONF_OK; } return FM_CONF_ERROR; } fm_mgr_config_errno_t fm_mgr_config_clear_error_map ( IN p_fm_config_conx_hdlt hdl ) { if(hdl->error_map.err_set) memset(&hdl->error_map,0,sizeof(hdl->error_map)); return FM_CONF_OK; } /* The current error map is copied to the pointer provided */ fm_mgr_config_errno_t fm_mgr_config_get_error_map_entry ( IN p_fm_config_conx_hdlt hdl, IN uint64_t mask, OUT fm_mgr_config_errno_t *error_code ) { int i; 
for(i=0;i<64;i++){ if(mask & 0x01){ if((mask >> 1)){ return FM_CONF_ERROR; } *error_code = hdl->error_map.map[i]; return FM_CONF_OK; } mask >>= 1; } return FM_CONF_ERROR; } /* The current error map is copied to the pointer provided */ fm_mgr_config_errno_t fm_mgr_config_set_error_map_entry ( IN p_fm_config_conx_hdlt hdl, IN uint64_t mask, IN fm_mgr_config_errno_t error_code ) { int i; for(i=0;i<64;i++){ if(mask & 0x01){ if((mask >> 1)){ return FM_CONF_ERROR; } hdl->error_map.err_set = 1; hdl->error_map.map[i] = error_code; return FM_CONF_OK; } mask >>= 1; } return FM_CONF_ERROR; }
./CrossVul/dataset_final_sorted/CWE-362/c/good_1664_4
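/*
 * Illustrative sketch, not part of the library above: the request framing used
 * by fm_mgr_general_query() - a small header carrying action, data_id and
 * data_len, allocated in one block together with the payload that follows it,
 * with the echoed data_len checked by the caller against the expected reply
 * length. The types, field widths and numeric codes below are simplified
 * stand-ins, not the real fm_config_datagram_t definitions.
 */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct dg_header { uint32_t action, data_id, data_len; };
struct dg { struct dg_header header; uint8_t data[]; };

static struct dg *frame_request(uint32_t action, uint32_t data_id,
                                const void *payload, uint32_t payload_len)
{
    struct dg *d = malloc(sizeof(*d) + payload_len);
    if (!d)
        return NULL;
    d->header.action = action;
    d->header.data_id = data_id;
    d->header.data_len = payload_len;   /* caller compares this with the reply */
    memcpy(d->data, payload, payload_len);
    return d;
}

int main(void)
{
    uint32_t cfg = 0;
    struct dg *req = frame_request(1 /* hypothetical "get" action */,
                                   2 /* hypothetical data type id */,
                                   &cfg, sizeof(cfg));
    if (!req)
        return 1;
    free(req);
    return 0;
}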
crossvul-cpp_data_bad_3465_0
/* * Memory merging support. * * This code enables dynamic sharing of identical pages found in different * memory areas, even if they are not shared by fork() * * Copyright (C) 2008-2009 Red Hat, Inc. * Authors: * Izik Eidus * Andrea Arcangeli * Chris Wright * Hugh Dickins * * This work is licensed under the terms of the GNU GPL, version 2. */ #include <linux/errno.h> #include <linux/mm.h> #include <linux/fs.h> #include <linux/mman.h> #include <linux/sched.h> #include <linux/rwsem.h> #include <linux/pagemap.h> #include <linux/rmap.h> #include <linux/spinlock.h> #include <linux/jhash.h> #include <linux/delay.h> #include <linux/kthread.h> #include <linux/wait.h> #include <linux/slab.h> #include <linux/rbtree.h> #include <linux/memory.h> #include <linux/mmu_notifier.h> #include <linux/swap.h> #include <linux/ksm.h> #include <linux/hash.h> #include <linux/freezer.h> #include <linux/oom.h> #include <asm/tlbflush.h> #include "internal.h" /* * A few notes about the KSM scanning process, * to make it easier to understand the data structures below: * * In order to reduce excessive scanning, KSM sorts the memory pages by their * contents into a data structure that holds pointers to the pages' locations. * * Since the contents of the pages may change at any moment, KSM cannot just * insert the pages into a normal sorted tree and expect it to find anything. * Therefore KSM uses two data structures - the stable and the unstable tree. * * The stable tree holds pointers to all the merged pages (ksm pages), sorted * by their contents. Because each such page is write-protected, searching on * this tree is fully assured to be working (except when pages are unmapped), * and therefore this tree is called the stable tree. * * In addition to the stable tree, KSM uses a second data structure called the * unstable tree: this tree holds pointers to pages which have been found to * be "unchanged for a period of time". The unstable tree sorts these pages * by their contents, but since they are not write-protected, KSM cannot rely * upon the unstable tree to work correctly - the unstable tree is liable to * be corrupted as its contents are modified, and so it is called unstable. * * KSM solves this problem by several techniques: * * 1) The unstable tree is flushed every time KSM completes scanning all * memory areas, and then the tree is rebuilt again from the beginning. * 2) KSM will only insert into the unstable tree, pages whose hash value * has not changed since the previous scan of all memory areas. * 3) The unstable tree is a RedBlack Tree - so its balancing is based on the * colors of the nodes and not on their contents, assuring that even when * the tree gets "corrupted" it won't get out of balance, so scanning time * remains the same (also, searching and inserting nodes in an rbtree uses * the same algorithm, so we have no overhead when we flush and rebuild). * 4) KSM never flushes the stable tree, which means that even if it were to * take 10 attempts to find a page in the unstable tree, once it is found, * it is secured in the stable tree. (When we scan a new page, we first * compare it against the stable tree, and then against the unstable tree.) 
*/ /** * struct mm_slot - ksm information per mm that is being scanned * @link: link to the mm_slots hash list * @mm_list: link into the mm_slots list, rooted in ksm_mm_head * @rmap_list: head for this mm_slot's singly-linked list of rmap_items * @mm: the mm that this information is valid for */ struct mm_slot { struct hlist_node link; struct list_head mm_list; struct rmap_item *rmap_list; struct mm_struct *mm; }; /** * struct ksm_scan - cursor for scanning * @mm_slot: the current mm_slot we are scanning * @address: the next address inside that to be scanned * @rmap_list: link to the next rmap to be scanned in the rmap_list * @seqnr: count of completed full scans (needed when removing unstable node) * * There is only the one ksm_scan instance of this cursor structure. */ struct ksm_scan { struct mm_slot *mm_slot; unsigned long address; struct rmap_item **rmap_list; unsigned long seqnr; }; /** * struct stable_node - node of the stable rbtree * @node: rb node of this ksm page in the stable tree * @hlist: hlist head of rmap_items using this ksm page * @kpfn: page frame number of this ksm page */ struct stable_node { struct rb_node node; struct hlist_head hlist; unsigned long kpfn; }; /** * struct rmap_item - reverse mapping item for virtual addresses * @rmap_list: next rmap_item in mm_slot's singly-linked rmap_list * @anon_vma: pointer to anon_vma for this mm,address, when in stable tree * @mm: the memory structure this rmap_item is pointing into * @address: the virtual address this rmap_item tracks (+ flags in low bits) * @oldchecksum: previous checksum of the page at that virtual address * @node: rb node of this rmap_item in the unstable tree * @head: pointer to stable_node heading this list in the stable tree * @hlist: link into hlist of rmap_items hanging off that stable_node */ struct rmap_item { struct rmap_item *rmap_list; struct anon_vma *anon_vma; /* when stable */ struct mm_struct *mm; unsigned long address; /* + low bits used for flags below */ unsigned int oldchecksum; /* when unstable */ union { struct rb_node node; /* when node of unstable tree */ struct { /* when listed from stable tree */ struct stable_node *head; struct hlist_node hlist; }; }; }; #define SEQNR_MASK 0x0ff /* low bits of unstable tree seqnr */ #define UNSTABLE_FLAG 0x100 /* is a node of the unstable tree */ #define STABLE_FLAG 0x200 /* is listed from the stable tree */ /* The stable and unstable tree heads */ static struct rb_root root_stable_tree = RB_ROOT; static struct rb_root root_unstable_tree = RB_ROOT; #define MM_SLOTS_HASH_SHIFT 10 #define MM_SLOTS_HASH_HEADS (1 << MM_SLOTS_HASH_SHIFT) static struct hlist_head mm_slots_hash[MM_SLOTS_HASH_HEADS]; static struct mm_slot ksm_mm_head = { .mm_list = LIST_HEAD_INIT(ksm_mm_head.mm_list), }; static struct ksm_scan ksm_scan = { .mm_slot = &ksm_mm_head, }; static struct kmem_cache *rmap_item_cache; static struct kmem_cache *stable_node_cache; static struct kmem_cache *mm_slot_cache; /* The number of nodes in the stable tree */ static unsigned long ksm_pages_shared; /* The number of page slots additionally sharing those nodes */ static unsigned long ksm_pages_sharing; /* The number of nodes in the unstable tree */ static unsigned long ksm_pages_unshared; /* The number of rmap_items in use: to calculate pages_volatile */ static unsigned long ksm_rmap_items; /* Number of pages ksmd should scan in one batch */ static unsigned int ksm_thread_pages_to_scan = 100; /* Milliseconds ksmd should sleep between batches */ static unsigned int ksm_thread_sleep_millisecs = 20; 
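/*
 * The run states below are what the sysfs knob /sys/kernel/mm/ksm/run is set
 * to: 0 stops ksmd, 1 lets it scan and merge, 2 unmerges everything that was
 * merged and then stops scanning. Only areas an application has opted into
 * with madvise(addr, len, MADV_MERGEABLE) are ever scanned.
 */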
#define KSM_RUN_STOP 0 #define KSM_RUN_MERGE 1 #define KSM_RUN_UNMERGE 2 static unsigned int ksm_run = KSM_RUN_STOP; static DECLARE_WAIT_QUEUE_HEAD(ksm_thread_wait); static DEFINE_MUTEX(ksm_thread_mutex); static DEFINE_SPINLOCK(ksm_mmlist_lock); #define KSM_KMEM_CACHE(__struct, __flags) kmem_cache_create("ksm_"#__struct,\ sizeof(struct __struct), __alignof__(struct __struct),\ (__flags), NULL) static int __init ksm_slab_init(void) { rmap_item_cache = KSM_KMEM_CACHE(rmap_item, 0); if (!rmap_item_cache) goto out; stable_node_cache = KSM_KMEM_CACHE(stable_node, 0); if (!stable_node_cache) goto out_free1; mm_slot_cache = KSM_KMEM_CACHE(mm_slot, 0); if (!mm_slot_cache) goto out_free2; return 0; out_free2: kmem_cache_destroy(stable_node_cache); out_free1: kmem_cache_destroy(rmap_item_cache); out: return -ENOMEM; } static void __init ksm_slab_free(void) { kmem_cache_destroy(mm_slot_cache); kmem_cache_destroy(stable_node_cache); kmem_cache_destroy(rmap_item_cache); mm_slot_cache = NULL; } static inline struct rmap_item *alloc_rmap_item(void) { struct rmap_item *rmap_item; rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL); if (rmap_item) ksm_rmap_items++; return rmap_item; } static inline void free_rmap_item(struct rmap_item *rmap_item) { ksm_rmap_items--; rmap_item->mm = NULL; /* debug safety */ kmem_cache_free(rmap_item_cache, rmap_item); } static inline struct stable_node *alloc_stable_node(void) { return kmem_cache_alloc(stable_node_cache, GFP_KERNEL); } static inline void free_stable_node(struct stable_node *stable_node) { kmem_cache_free(stable_node_cache, stable_node); } static inline struct mm_slot *alloc_mm_slot(void) { if (!mm_slot_cache) /* initialization failed */ return NULL; return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL); } static inline void free_mm_slot(struct mm_slot *mm_slot) { kmem_cache_free(mm_slot_cache, mm_slot); } static struct mm_slot *get_mm_slot(struct mm_struct *mm) { struct mm_slot *mm_slot; struct hlist_head *bucket; struct hlist_node *node; bucket = &mm_slots_hash[hash_ptr(mm, MM_SLOTS_HASH_SHIFT)]; hlist_for_each_entry(mm_slot, node, bucket, link) { if (mm == mm_slot->mm) return mm_slot; } return NULL; } static void insert_to_mm_slots_hash(struct mm_struct *mm, struct mm_slot *mm_slot) { struct hlist_head *bucket; bucket = &mm_slots_hash[hash_ptr(mm, MM_SLOTS_HASH_SHIFT)]; mm_slot->mm = mm; hlist_add_head(&mm_slot->link, bucket); } static inline int in_stable_tree(struct rmap_item *rmap_item) { return rmap_item->address & STABLE_FLAG; } /* * ksmd, and unmerge_and_remove_all_rmap_items(), must not touch an mm's * page tables after it has passed through ksm_exit() - which, if necessary, * takes mmap_sem briefly to serialize against them. ksm_exit() does not set * a special flag: they can just back out as soon as mm_users goes to zero. * ksm_test_exit() is used throughout to make this test for exit: in some * places for correctness, in some places just to avoid unnecessary work. */ static inline bool ksm_test_exit(struct mm_struct *mm) { return atomic_read(&mm->mm_users) == 0; } /* * We use break_ksm to break COW on a ksm page: it's a stripped down * * if (get_user_pages(current, mm, addr, 1, 1, 1, &page, NULL) == 1) * put_page(page); * * but taking great care only to touch a ksm page, in a VM_MERGEABLE vma, * in case the application has unmapped and remapped mm,addr meanwhile. * Could a ksm page appear anywhere else? Actually yes, in a VM_PFNMAP * mmap of /dev/mem or /dev/kmem, where we would not want to touch it. 
*/ static int break_ksm(struct vm_area_struct *vma, unsigned long addr) { struct page *page; int ret = 0; do { cond_resched(); page = follow_page(vma, addr, FOLL_GET); if (IS_ERR_OR_NULL(page)) break; if (PageKsm(page)) ret = handle_mm_fault(vma->vm_mm, vma, addr, FAULT_FLAG_WRITE); else ret = VM_FAULT_WRITE; put_page(page); } while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_OOM))); /* * We must loop because handle_mm_fault() may back out if there's * any difficulty e.g. if pte accessed bit gets updated concurrently. * * VM_FAULT_WRITE is what we have been hoping for: it indicates that * COW has been broken, even if the vma does not permit VM_WRITE; * but note that a concurrent fault might break PageKsm for us. * * VM_FAULT_SIGBUS could occur if we race with truncation of the * backing file, which also invalidates anonymous pages: that's * okay, that truncation will have unmapped the PageKsm for us. * * VM_FAULT_OOM: at the time of writing (late July 2009), setting * aside mem_cgroup limits, VM_FAULT_OOM would only be set if the * current task has TIF_MEMDIE set, and will be OOM killed on return * to user; and ksmd, having no mm, would never be chosen for that. * * But if the mm is in a limited mem_cgroup, then the fault may fail * with VM_FAULT_OOM even if the current task is not TIF_MEMDIE; and * even ksmd can fail in this way - though it's usually breaking ksm * just to undo a merge it made a moment before, so unlikely to oom. * * That's a pity: we might therefore have more kernel pages allocated * than we're counting as nodes in the stable tree; but ksm_do_scan * will retry to break_cow on each pass, so should recover the page * in due course. The important thing is to not let VM_MERGEABLE * be cleared while any such pages might remain in the area. */ return (ret & VM_FAULT_OOM) ? -ENOMEM : 0; } static void break_cow(struct rmap_item *rmap_item) { struct mm_struct *mm = rmap_item->mm; unsigned long addr = rmap_item->address; struct vm_area_struct *vma; /* * It is not an accident that whenever we want to break COW * to undo, we also need to drop a reference to the anon_vma. */ put_anon_vma(rmap_item->anon_vma); down_read(&mm->mmap_sem); if (ksm_test_exit(mm)) goto out; vma = find_vma(mm, addr); if (!vma || vma->vm_start > addr) goto out; if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma) goto out; break_ksm(vma, addr); out: up_read(&mm->mmap_sem); } static struct page *page_trans_compound_anon(struct page *page) { if (PageTransCompound(page)) { struct page *head = compound_trans_head(page); /* * head may actually be splitted and freed from under * us but it's ok here. 
*/ if (PageAnon(head)) return head; } return NULL; } static struct page *get_mergeable_page(struct rmap_item *rmap_item) { struct mm_struct *mm = rmap_item->mm; unsigned long addr = rmap_item->address; struct vm_area_struct *vma; struct page *page; down_read(&mm->mmap_sem); if (ksm_test_exit(mm)) goto out; vma = find_vma(mm, addr); if (!vma || vma->vm_start > addr) goto out; if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma) goto out; page = follow_page(vma, addr, FOLL_GET); if (IS_ERR_OR_NULL(page)) goto out; if (PageAnon(page) || page_trans_compound_anon(page)) { flush_anon_page(vma, page, addr); flush_dcache_page(page); } else { put_page(page); out: page = NULL; } up_read(&mm->mmap_sem); return page; } static void remove_node_from_stable_tree(struct stable_node *stable_node) { struct rmap_item *rmap_item; struct hlist_node *hlist; hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) { if (rmap_item->hlist.next) ksm_pages_sharing--; else ksm_pages_shared--; put_anon_vma(rmap_item->anon_vma); rmap_item->address &= PAGE_MASK; cond_resched(); } rb_erase(&stable_node->node, &root_stable_tree); free_stable_node(stable_node); } /* * get_ksm_page: checks if the page indicated by the stable node * is still its ksm page, despite having held no reference to it. * In which case we can trust the content of the page, and it * returns the gotten page; but if the page has now been zapped, * remove the stale node from the stable tree and return NULL. * * You would expect the stable_node to hold a reference to the ksm page. * But if it increments the page's count, swapping out has to wait for * ksmd to come around again before it can free the page, which may take * seconds or even minutes: much too unresponsive. So instead we use a * "keyhole reference": access to the ksm page from the stable node peeps * out through its keyhole to see if that page still holds the right key, * pointing back to this stable node. This relies on freeing a PageAnon * page to reset its page->mapping to NULL, and relies on no other use of * a page to put something that might look like our key in page->mapping. * * include/linux/pagemap.h page_cache_get_speculative() is a good reference, * but this is different - made simpler by ksm_thread_mutex being held, but * interesting for assuming that no other use of the struct page could ever * put our expected_mapping into page->mapping (or a field of the union which * coincides with page->mapping). The RCU calls are not for KSM at all, but * to keep the page_count protocol described with page_cache_get_speculative. * * Note: it is possible that get_ksm_page() will return NULL one moment, * then page the next, if the page is in between page_freeze_refs() and * page_unfreeze_refs(): this shouldn't be a problem anywhere, the page * is on its way to being freed; but it is an anomaly to bear in mind. */ static struct page *get_ksm_page(struct stable_node *stable_node) { struct page *page; void *expected_mapping; page = pfn_to_page(stable_node->kpfn); expected_mapping = (void *)stable_node + (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM); rcu_read_lock(); if (page->mapping != expected_mapping) goto stale; if (!get_page_unless_zero(page)) goto stale; if (page->mapping != expected_mapping) { put_page(page); goto stale; } rcu_read_unlock(); return page; stale: rcu_read_unlock(); remove_node_from_stable_tree(stable_node); return NULL; } /* * Removing rmap_item from stable or unstable tree. * This function will clean the information from the stable/unstable tree. 
*/ static void remove_rmap_item_from_tree(struct rmap_item *rmap_item) { if (rmap_item->address & STABLE_FLAG) { struct stable_node *stable_node; struct page *page; stable_node = rmap_item->head; page = get_ksm_page(stable_node); if (!page) goto out; lock_page(page); hlist_del(&rmap_item->hlist); unlock_page(page); put_page(page); if (stable_node->hlist.first) ksm_pages_sharing--; else ksm_pages_shared--; put_anon_vma(rmap_item->anon_vma); rmap_item->address &= PAGE_MASK; } else if (rmap_item->address & UNSTABLE_FLAG) { unsigned char age; /* * Usually ksmd can and must skip the rb_erase, because * root_unstable_tree was already reset to RB_ROOT. * But be careful when an mm is exiting: do the rb_erase * if this rmap_item was inserted by this scan, rather * than left over from before. */ age = (unsigned char)(ksm_scan.seqnr - rmap_item->address); BUG_ON(age > 1); if (!age) rb_erase(&rmap_item->node, &root_unstable_tree); ksm_pages_unshared--; rmap_item->address &= PAGE_MASK; } out: cond_resched(); /* we're called from many long loops */ } static void remove_trailing_rmap_items(struct mm_slot *mm_slot, struct rmap_item **rmap_list) { while (*rmap_list) { struct rmap_item *rmap_item = *rmap_list; *rmap_list = rmap_item->rmap_list; remove_rmap_item_from_tree(rmap_item); free_rmap_item(rmap_item); } } /* * Though it's very tempting to unmerge in_stable_tree(rmap_item)s rather * than check every pte of a given vma, the locking doesn't quite work for * that - an rmap_item is assigned to the stable tree after inserting ksm * page and upping mmap_sem. Nor does it fit with the way we skip dup'ing * rmap_items from parent to child at fork time (so as not to waste time * if exit comes before the next scan reaches it). * * Similarly, although we'd like to remove rmap_items (so updating counts * and freeing memory) when unmerging an area, it's easier to leave that * to the next pass of ksmd - consider, for example, how ksmd might be * in cmp_and_merge_page on one of the rmap_items we would be removing. 
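 *
 * So unmerge_ksm_pages() below only break_ksm()s every page in the given
 * range, bailing out early if the mm is exiting or a signal is pending;
 * the rmap_items themselves are cleaned up by later passes of ksmd.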
*/ static int unmerge_ksm_pages(struct vm_area_struct *vma, unsigned long start, unsigned long end) { unsigned long addr; int err = 0; for (addr = start; addr < end && !err; addr += PAGE_SIZE) { if (ksm_test_exit(vma->vm_mm)) break; if (signal_pending(current)) err = -ERESTARTSYS; else err = break_ksm(vma, addr); } return err; } #ifdef CONFIG_SYSFS /* * Only called through the sysfs control interface: */ static int unmerge_and_remove_all_rmap_items(void) { struct mm_slot *mm_slot; struct mm_struct *mm; struct vm_area_struct *vma; int err = 0; spin_lock(&ksm_mmlist_lock); ksm_scan.mm_slot = list_entry(ksm_mm_head.mm_list.next, struct mm_slot, mm_list); spin_unlock(&ksm_mmlist_lock); for (mm_slot = ksm_scan.mm_slot; mm_slot != &ksm_mm_head; mm_slot = ksm_scan.mm_slot) { mm = mm_slot->mm; down_read(&mm->mmap_sem); for (vma = mm->mmap; vma; vma = vma->vm_next) { if (ksm_test_exit(mm)) break; if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma) continue; err = unmerge_ksm_pages(vma, vma->vm_start, vma->vm_end); if (err) goto error; } remove_trailing_rmap_items(mm_slot, &mm_slot->rmap_list); spin_lock(&ksm_mmlist_lock); ksm_scan.mm_slot = list_entry(mm_slot->mm_list.next, struct mm_slot, mm_list); if (ksm_test_exit(mm)) { hlist_del(&mm_slot->link); list_del(&mm_slot->mm_list); spin_unlock(&ksm_mmlist_lock); free_mm_slot(mm_slot); clear_bit(MMF_VM_MERGEABLE, &mm->flags); up_read(&mm->mmap_sem); mmdrop(mm); } else { spin_unlock(&ksm_mmlist_lock); up_read(&mm->mmap_sem); } } ksm_scan.seqnr = 0; return 0; error: up_read(&mm->mmap_sem); spin_lock(&ksm_mmlist_lock); ksm_scan.mm_slot = &ksm_mm_head; spin_unlock(&ksm_mmlist_lock); return err; } #endif /* CONFIG_SYSFS */ static u32 calc_checksum(struct page *page) { u32 checksum; void *addr = kmap_atomic(page, KM_USER0); checksum = jhash2(addr, PAGE_SIZE / 4, 17); kunmap_atomic(addr, KM_USER0); return checksum; } static int memcmp_pages(struct page *page1, struct page *page2) { char *addr1, *addr2; int ret; addr1 = kmap_atomic(page1, KM_USER0); addr2 = kmap_atomic(page2, KM_USER1); ret = memcmp(addr1, addr2, PAGE_SIZE); kunmap_atomic(addr2, KM_USER1); kunmap_atomic(addr1, KM_USER0); return ret; } static inline int pages_identical(struct page *page1, struct page *page2) { return !memcmp_pages(page1, page2); } static int write_protect_page(struct vm_area_struct *vma, struct page *page, pte_t *orig_pte) { struct mm_struct *mm = vma->vm_mm; unsigned long addr; pte_t *ptep; spinlock_t *ptl; int swapped; int err = -EFAULT; addr = page_address_in_vma(page, vma); if (addr == -EFAULT) goto out; BUG_ON(PageTransCompound(page)); ptep = page_check_address(page, mm, addr, &ptl, 0); if (!ptep) goto out; if (pte_write(*ptep) || pte_dirty(*ptep)) { pte_t entry; swapped = PageSwapCache(page); flush_cache_page(vma, addr, page_to_pfn(page)); /* * Ok this is tricky, when get_user_pages_fast() run it doesn't * take any lock, therefore the check that we are going to make * with the pagecount against the mapcount is racey and * O_DIRECT can happen right after the check. * So we clear the pte and flush the tlb before the check * this assure us that no O_DIRECT can happen after the check * or in the middle of the check. 
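 *
 * The check below expects page_count == page_mapcount + 1 + swapped:
 * each pte mapping the page holds one reference, the "+ 1" is the
 * reference our caller took via follow_page(FOLL_GET), and "swapped"
 * covers the swap cache's reference. Any surplus means someone else
 * (e.g. get_user_pages() for O_DIRECT) still pins the page, so we
 * restore the pte and fail.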
*/ entry = ptep_clear_flush(vma, addr, ptep); /* * Check that no O_DIRECT or similar I/O is in progress on the * page */ if (page_mapcount(page) + 1 + swapped != page_count(page)) { set_pte_at(mm, addr, ptep, entry); goto out_unlock; } if (pte_dirty(entry)) set_page_dirty(page); entry = pte_mkclean(pte_wrprotect(entry)); set_pte_at_notify(mm, addr, ptep, entry); } *orig_pte = *ptep; err = 0; out_unlock: pte_unmap_unlock(ptep, ptl); out: return err; } /** * replace_page - replace page in vma by new ksm page * @vma: vma that holds the pte pointing to page * @page: the page we are replacing by kpage * @kpage: the ksm page we replace page by * @orig_pte: the original value of the pte * * Returns 0 on success, -EFAULT on failure. */ static int replace_page(struct vm_area_struct *vma, struct page *page, struct page *kpage, pte_t orig_pte) { struct mm_struct *mm = vma->vm_mm; pgd_t *pgd; pud_t *pud; pmd_t *pmd; pte_t *ptep; spinlock_t *ptl; unsigned long addr; int err = -EFAULT; addr = page_address_in_vma(page, vma); if (addr == -EFAULT) goto out; pgd = pgd_offset(mm, addr); if (!pgd_present(*pgd)) goto out; pud = pud_offset(pgd, addr); if (!pud_present(*pud)) goto out; pmd = pmd_offset(pud, addr); BUG_ON(pmd_trans_huge(*pmd)); if (!pmd_present(*pmd)) goto out; ptep = pte_offset_map_lock(mm, pmd, addr, &ptl); if (!pte_same(*ptep, orig_pte)) { pte_unmap_unlock(ptep, ptl); goto out; } get_page(kpage); page_add_anon_rmap(kpage, vma, addr); flush_cache_page(vma, addr, pte_pfn(*ptep)); ptep_clear_flush(vma, addr, ptep); set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot)); page_remove_rmap(page); if (!page_mapped(page)) try_to_free_swap(page); put_page(page); pte_unmap_unlock(ptep, ptl); err = 0; out: return err; } static int page_trans_compound_anon_split(struct page *page) { int ret = 0; struct page *transhuge_head = page_trans_compound_anon(page); if (transhuge_head) { /* Get the reference on the head to split it. */ if (get_page_unless_zero(transhuge_head)) { /* * Recheck we got the reference while the head * was still anonymous. */ if (PageAnon(transhuge_head)) ret = split_huge_page(transhuge_head); else /* * Retry later if split_huge_page run * from under us. */ ret = 1; put_page(transhuge_head); } else /* Retry later if split_huge_page run from under us. */ ret = 1; } return ret; } /* * try_to_merge_one_page - take two pages and merge them into one * @vma: the vma that holds the pte pointing to page * @page: the PageAnon page that we want to replace with kpage * @kpage: the PageKsm page that we want to map instead of page, * or NULL the first time when we want to use page as kpage. * * This function returns 0 if the pages were merged, -EFAULT otherwise. */ static int try_to_merge_one_page(struct vm_area_struct *vma, struct page *page, struct page *kpage) { pte_t orig_pte = __pte(0); int err = -EFAULT; if (page == kpage) /* ksm page forked */ return 0; if (!(vma->vm_flags & VM_MERGEABLE)) goto out; if (PageTransCompound(page) && page_trans_compound_anon_split(page)) goto out; BUG_ON(PageTransCompound(page)); if (!PageAnon(page)) goto out; /* * We need the page lock to read a stable PageSwapCache in * write_protect_page(). We use trylock_page() instead of * lock_page() because we don't want to wait here - we * prefer to continue scanning and merging different pages, * then come back to this page when it is unlocked. */ if (!trylock_page(page)) goto out; /* * If this anonymous page is mapped only here, its pte may need * to be write-protected. 
If it's mapped elsewhere, all of its * ptes are necessarily already write-protected. But in either * case, we need to lock and check page_count is not raised. */ if (write_protect_page(vma, page, &orig_pte) == 0) { if (!kpage) { /* * While we hold page lock, upgrade page from * PageAnon+anon_vma to PageKsm+NULL stable_node: * stable_tree_insert() will update stable_node. */ set_page_stable_node(page, NULL); mark_page_accessed(page); err = 0; } else if (pages_identical(page, kpage)) err = replace_page(vma, page, kpage, orig_pte); } if ((vma->vm_flags & VM_LOCKED) && kpage && !err) { munlock_vma_page(page); if (!PageMlocked(kpage)) { unlock_page(page); lock_page(kpage); mlock_vma_page(kpage); page = kpage; /* for final unlock */ } } unlock_page(page); out: return err; } /* * try_to_merge_with_ksm_page - like try_to_merge_two_pages, * but no new kernel page is allocated: kpage must already be a ksm page. * * This function returns 0 if the pages were merged, -EFAULT otherwise. */ static int try_to_merge_with_ksm_page(struct rmap_item *rmap_item, struct page *page, struct page *kpage) { struct mm_struct *mm = rmap_item->mm; struct vm_area_struct *vma; int err = -EFAULT; down_read(&mm->mmap_sem); if (ksm_test_exit(mm)) goto out; vma = find_vma(mm, rmap_item->address); if (!vma || vma->vm_start > rmap_item->address) goto out; err = try_to_merge_one_page(vma, page, kpage); if (err) goto out; /* Must get reference to anon_vma while still holding mmap_sem */ rmap_item->anon_vma = vma->anon_vma; get_anon_vma(vma->anon_vma); out: up_read(&mm->mmap_sem); return err; } /* * try_to_merge_two_pages - take two identical pages and prepare them * to be merged into one page. * * This function returns the kpage if we successfully merged two identical * pages into one ksm page, NULL otherwise. * * Note that this function upgrades page to ksm page: if one of the pages * is already a ksm page, try_to_merge_with_ksm_page should be used. */ static struct page *try_to_merge_two_pages(struct rmap_item *rmap_item, struct page *page, struct rmap_item *tree_rmap_item, struct page *tree_page) { int err; err = try_to_merge_with_ksm_page(rmap_item, page, NULL); if (!err) { err = try_to_merge_with_ksm_page(tree_rmap_item, tree_page, page); /* * If that fails, we have a ksm page with only one pte * pointing to it: so break it. */ if (err) break_cow(rmap_item); } return err ? NULL : page; } /* * stable_tree_search - search for page inside the stable tree * * This function checks if there is a page inside the stable tree * with identical content to the page that we are scanning right now. * * This function returns the stable tree node of identical content if found, * NULL otherwise. */ static struct page *stable_tree_search(struct page *page) { struct rb_node *node = root_stable_tree.rb_node; struct stable_node *stable_node; stable_node = page_stable_node(page); if (stable_node) { /* ksm page forked */ get_page(page); return page; } while (node) { struct page *tree_page; int ret; cond_resched(); stable_node = rb_entry(node, struct stable_node, node); tree_page = get_ksm_page(stable_node); if (!tree_page) return NULL; ret = memcmp_pages(page, tree_page); if (ret < 0) { put_page(tree_page); node = node->rb_left; } else if (ret > 0) { put_page(tree_page); node = node->rb_right; } else return tree_page; } return NULL; } /* * stable_tree_insert - insert rmap_item pointing to new ksm page * into the stable tree. * * This function returns the stable tree node just allocated on success, * NULL otherwise. 
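 *
 * As in stable_tree_search(), the rbtree walk is ordered purely by
 * memcmp_pages() of the candidate page against each node's ksm page.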
*/ static struct stable_node *stable_tree_insert(struct page *kpage) { struct rb_node **new = &root_stable_tree.rb_node; struct rb_node *parent = NULL; struct stable_node *stable_node; while (*new) { struct page *tree_page; int ret; cond_resched(); stable_node = rb_entry(*new, struct stable_node, node); tree_page = get_ksm_page(stable_node); if (!tree_page) return NULL; ret = memcmp_pages(kpage, tree_page); put_page(tree_page); parent = *new; if (ret < 0) new = &parent->rb_left; else if (ret > 0) new = &parent->rb_right; else { /* * It is not a bug that stable_tree_search() didn't * find this node: because at that time our page was * not yet write-protected, so may have changed since. */ return NULL; } } stable_node = alloc_stable_node(); if (!stable_node) return NULL; rb_link_node(&stable_node->node, parent, new); rb_insert_color(&stable_node->node, &root_stable_tree); INIT_HLIST_HEAD(&stable_node->hlist); stable_node->kpfn = page_to_pfn(kpage); set_page_stable_node(kpage, stable_node); return stable_node; } /* * unstable_tree_search_insert - search for identical page, * else insert rmap_item into the unstable tree. * * This function searches for a page in the unstable tree identical to the * page currently being scanned; and if no identical page is found in the * tree, we insert rmap_item as a new object into the unstable tree. * * This function returns pointer to rmap_item found to be identical * to the currently scanned page, NULL otherwise. * * This function does both searching and inserting, because they share * the same walking algorithm in an rbtree. */ static struct rmap_item *unstable_tree_search_insert(struct rmap_item *rmap_item, struct page *page, struct page **tree_pagep) { struct rb_node **new = &root_unstable_tree.rb_node; struct rb_node *parent = NULL; while (*new) { struct rmap_item *tree_rmap_item; struct page *tree_page; int ret; cond_resched(); tree_rmap_item = rb_entry(*new, struct rmap_item, node); tree_page = get_mergeable_page(tree_rmap_item); if (IS_ERR_OR_NULL(tree_page)) return NULL; /* * Don't substitute a ksm page for a forked page. */ if (page == tree_page) { put_page(tree_page); return NULL; } ret = memcmp_pages(page, tree_page); parent = *new; if (ret < 0) { put_page(tree_page); new = &parent->rb_left; } else if (ret > 0) { put_page(tree_page); new = &parent->rb_right; } else { *tree_pagep = tree_page; return tree_rmap_item; } } rmap_item->address |= UNSTABLE_FLAG; rmap_item->address |= (ksm_scan.seqnr & SEQNR_MASK); rb_link_node(&rmap_item->node, parent, new); rb_insert_color(&rmap_item->node, &root_unstable_tree); ksm_pages_unshared++; return NULL; } /* * stable_tree_append - add another rmap_item to the linked list of * rmap_items hanging off a given node of the stable tree, all sharing * the same ksm page. */ static void stable_tree_append(struct rmap_item *rmap_item, struct stable_node *stable_node) { rmap_item->head = stable_node; rmap_item->address |= STABLE_FLAG; hlist_add_head(&rmap_item->hlist, &stable_node->hlist); if (rmap_item->hlist.next) ksm_pages_sharing++; else ksm_pages_shared++; } /* * cmp_and_merge_page - first see if page can be merged into the stable tree; * if not, compare checksum to previous and if it's the same, see if page can * be inserted into the unstable tree, or merged with a page already there and * both transferred to the stable tree. * * @page: the page that we are searching identical page to. 
* @rmap_item: the reverse mapping into the virtual address of this page */ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item) { struct rmap_item *tree_rmap_item; struct page *tree_page = NULL; struct stable_node *stable_node; struct page *kpage; unsigned int checksum; int err; remove_rmap_item_from_tree(rmap_item); /* We first start with searching the page inside the stable tree */ kpage = stable_tree_search(page); if (kpage) { err = try_to_merge_with_ksm_page(rmap_item, page, kpage); if (!err) { /* * The page was successfully merged: * add its rmap_item to the stable tree. */ lock_page(kpage); stable_tree_append(rmap_item, page_stable_node(kpage)); unlock_page(kpage); } put_page(kpage); return; } /* * If the hash value of the page has changed from the last time * we calculated it, this page is changing frequently: therefore we * don't want to insert it in the unstable tree, and we don't want * to waste our time searching for something identical to it there. */ checksum = calc_checksum(page); if (rmap_item->oldchecksum != checksum) { rmap_item->oldchecksum = checksum; return; } tree_rmap_item = unstable_tree_search_insert(rmap_item, page, &tree_page); if (tree_rmap_item) { kpage = try_to_merge_two_pages(rmap_item, page, tree_rmap_item, tree_page); put_page(tree_page); /* * As soon as we merge this page, we want to remove the * rmap_item of the page we have merged with from the unstable * tree, and insert it instead as new node in the stable tree. */ if (kpage) { remove_rmap_item_from_tree(tree_rmap_item); lock_page(kpage); stable_node = stable_tree_insert(kpage); if (stable_node) { stable_tree_append(tree_rmap_item, stable_node); stable_tree_append(rmap_item, stable_node); } unlock_page(kpage); /* * If we fail to insert the page into the stable tree, * we will have 2 virtual addresses that are pointing * to a ksm page left outside the stable tree, * in which case we need to break_cow on both. */ if (!stable_node) { break_cow(tree_rmap_item); break_cow(rmap_item); } } } } static struct rmap_item *get_next_rmap_item(struct mm_slot *mm_slot, struct rmap_item **rmap_list, unsigned long addr) { struct rmap_item *rmap_item; while (*rmap_list) { rmap_item = *rmap_list; if ((rmap_item->address & PAGE_MASK) == addr) return rmap_item; if (rmap_item->address > addr) break; *rmap_list = rmap_item->rmap_list; remove_rmap_item_from_tree(rmap_item); free_rmap_item(rmap_item); } rmap_item = alloc_rmap_item(); if (rmap_item) { /* It has already been zeroed */ rmap_item->mm = mm_slot->mm; rmap_item->address = addr; rmap_item->rmap_list = *rmap_list; *rmap_list = rmap_item; } return rmap_item; } static struct rmap_item *scan_get_next_rmap_item(struct page **page) { struct mm_struct *mm; struct mm_slot *slot; struct vm_area_struct *vma; struct rmap_item *rmap_item; if (list_empty(&ksm_mm_head.mm_list)) return NULL; slot = ksm_scan.mm_slot; if (slot == &ksm_mm_head) { /* * A number of pages can hang around indefinitely on per-cpu * pagevecs, raised page count preventing write_protect_page * from merging them. Though it doesn't really matter much, * it is puzzling to see some stuck in pages_volatile until * other activity jostles them out, and they also prevented * LTP's KSM test from succeeding deterministically; so drain * them here (here rather than on entry to ksm_do_scan(), * so we don't IPI too often when pages_to_scan is set low). 
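 *
 * This is also the point where the unstable tree is thrown away: it is
 * rebuilt from scratch on every full pass, by resetting root_unstable_tree
 * to RB_ROOT each time the cursor wraps back to the head of the mm list.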
*/ lru_add_drain_all(); root_unstable_tree = RB_ROOT; spin_lock(&ksm_mmlist_lock); slot = list_entry(slot->mm_list.next, struct mm_slot, mm_list); ksm_scan.mm_slot = slot; spin_unlock(&ksm_mmlist_lock); next_mm: ksm_scan.address = 0; ksm_scan.rmap_list = &slot->rmap_list; } mm = slot->mm; down_read(&mm->mmap_sem); if (ksm_test_exit(mm)) vma = NULL; else vma = find_vma(mm, ksm_scan.address); for (; vma; vma = vma->vm_next) { if (!(vma->vm_flags & VM_MERGEABLE)) continue; if (ksm_scan.address < vma->vm_start) ksm_scan.address = vma->vm_start; if (!vma->anon_vma) ksm_scan.address = vma->vm_end; while (ksm_scan.address < vma->vm_end) { if (ksm_test_exit(mm)) break; *page = follow_page(vma, ksm_scan.address, FOLL_GET); if (IS_ERR_OR_NULL(*page)) { ksm_scan.address += PAGE_SIZE; cond_resched(); continue; } if (PageAnon(*page) || page_trans_compound_anon(*page)) { flush_anon_page(vma, *page, ksm_scan.address); flush_dcache_page(*page); rmap_item = get_next_rmap_item(slot, ksm_scan.rmap_list, ksm_scan.address); if (rmap_item) { ksm_scan.rmap_list = &rmap_item->rmap_list; ksm_scan.address += PAGE_SIZE; } else put_page(*page); up_read(&mm->mmap_sem); return rmap_item; } put_page(*page); ksm_scan.address += PAGE_SIZE; cond_resched(); } } if (ksm_test_exit(mm)) { ksm_scan.address = 0; ksm_scan.rmap_list = &slot->rmap_list; } /* * Nuke all the rmap_items that are above this current rmap: * because there were no VM_MERGEABLE vmas with such addresses. */ remove_trailing_rmap_items(slot, ksm_scan.rmap_list); spin_lock(&ksm_mmlist_lock); ksm_scan.mm_slot = list_entry(slot->mm_list.next, struct mm_slot, mm_list); if (ksm_scan.address == 0) { /* * We've completed a full scan of all vmas, holding mmap_sem * throughout, and found no VM_MERGEABLE: so do the same as * __ksm_exit does to remove this mm from all our lists now. * This applies either when cleaning up after __ksm_exit * (but beware: we can reach here even before __ksm_exit), * or when all VM_MERGEABLE areas have been unmapped (and * mmap_sem then protects against race with MADV_MERGEABLE). */ hlist_del(&slot->link); list_del(&slot->mm_list); spin_unlock(&ksm_mmlist_lock); free_mm_slot(slot); clear_bit(MMF_VM_MERGEABLE, &mm->flags); up_read(&mm->mmap_sem); mmdrop(mm); } else { spin_unlock(&ksm_mmlist_lock); up_read(&mm->mmap_sem); } /* Repeat until we've completed scanning the whole list */ slot = ksm_scan.mm_slot; if (slot != &ksm_mm_head) goto next_mm; ksm_scan.seqnr++; return NULL; } /** * ksm_do_scan - the ksm scanner main worker function. * @scan_npages - number of pages we want to scan before we return. 
*/ static void ksm_do_scan(unsigned int scan_npages) { struct rmap_item *rmap_item; struct page *uninitialized_var(page); while (scan_npages-- && likely(!freezing(current))) { cond_resched(); rmap_item = scan_get_next_rmap_item(&page); if (!rmap_item) return; if (!PageKsm(page) || !in_stable_tree(rmap_item)) cmp_and_merge_page(page, rmap_item); put_page(page); } } static int ksmd_should_run(void) { return (ksm_run & KSM_RUN_MERGE) && !list_empty(&ksm_mm_head.mm_list); } static int ksm_scan_thread(void *nothing) { set_freezable(); set_user_nice(current, 5); while (!kthread_should_stop()) { mutex_lock(&ksm_thread_mutex); if (ksmd_should_run()) ksm_do_scan(ksm_thread_pages_to_scan); mutex_unlock(&ksm_thread_mutex); try_to_freeze(); if (ksmd_should_run()) { schedule_timeout_interruptible( msecs_to_jiffies(ksm_thread_sleep_millisecs)); } else { wait_event_freezable(ksm_thread_wait, ksmd_should_run() || kthread_should_stop()); } } return 0; } int ksm_madvise(struct vm_area_struct *vma, unsigned long start, unsigned long end, int advice, unsigned long *vm_flags) { struct mm_struct *mm = vma->vm_mm; int err; switch (advice) { case MADV_MERGEABLE: /* * Be somewhat over-protective for now! */ if (*vm_flags & (VM_MERGEABLE | VM_SHARED | VM_MAYSHARE | VM_PFNMAP | VM_IO | VM_DONTEXPAND | VM_RESERVED | VM_HUGETLB | VM_INSERTPAGE | VM_NONLINEAR | VM_MIXEDMAP | VM_SAO)) return 0; /* just ignore the advice */ if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) { err = __ksm_enter(mm); if (err) return err; } *vm_flags |= VM_MERGEABLE; break; case MADV_UNMERGEABLE: if (!(*vm_flags & VM_MERGEABLE)) return 0; /* just ignore the advice */ if (vma->anon_vma) { err = unmerge_ksm_pages(vma, start, end); if (err) return err; } *vm_flags &= ~VM_MERGEABLE; break; } return 0; } int __ksm_enter(struct mm_struct *mm) { struct mm_slot *mm_slot; int needs_wakeup; mm_slot = alloc_mm_slot(); if (!mm_slot) return -ENOMEM; /* Check ksm_run too? Would need tighter locking */ needs_wakeup = list_empty(&ksm_mm_head.mm_list); spin_lock(&ksm_mmlist_lock); insert_to_mm_slots_hash(mm, mm_slot); /* * Insert just behind the scanning cursor, to let the area settle * down a little; when fork is followed by immediate exec, we don't * want ksmd to waste time setting up and tearing down an rmap_list. */ list_add_tail(&mm_slot->mm_list, &ksm_scan.mm_slot->mm_list); spin_unlock(&ksm_mmlist_lock); set_bit(MMF_VM_MERGEABLE, &mm->flags); atomic_inc(&mm->mm_count); if (needs_wakeup) wake_up_interruptible(&ksm_thread_wait); return 0; } void __ksm_exit(struct mm_struct *mm) { struct mm_slot *mm_slot; int easy_to_free = 0; /* * This process is exiting: if it's straightforward (as is the * case when ksmd was never running), free mm_slot immediately. * But if it's at the cursor or has rmap_items linked to it, use * mmap_sem to synchronize with any break_cows before pagetables * are freed, and leave the mm_slot on the list for ksmd to free. * Beware: ksm may already have noticed it exiting and freed the slot. 
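 *
 * In the not-so-straightforward case, the empty down_write()/up_write()
 * pair on mmap_sem below acts purely as a barrier: it waits for any
 * break_cow()/break_ksm() already holding mmap_sem for read to finish
 * before the page tables are torn down.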
*/ spin_lock(&ksm_mmlist_lock); mm_slot = get_mm_slot(mm); if (mm_slot && ksm_scan.mm_slot != mm_slot) { if (!mm_slot->rmap_list) { hlist_del(&mm_slot->link); list_del(&mm_slot->mm_list); easy_to_free = 1; } else { list_move(&mm_slot->mm_list, &ksm_scan.mm_slot->mm_list); } } spin_unlock(&ksm_mmlist_lock); if (easy_to_free) { free_mm_slot(mm_slot); clear_bit(MMF_VM_MERGEABLE, &mm->flags); mmdrop(mm); } else if (mm_slot) { down_write(&mm->mmap_sem); up_write(&mm->mmap_sem); } } struct page *ksm_does_need_to_copy(struct page *page, struct vm_area_struct *vma, unsigned long address) { struct page *new_page; new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); if (new_page) { copy_user_highpage(new_page, page, address, vma); SetPageDirty(new_page); __SetPageUptodate(new_page); SetPageSwapBacked(new_page); __set_page_locked(new_page); if (page_evictable(new_page, vma)) lru_cache_add_lru(new_page, LRU_ACTIVE_ANON); else add_page_to_unevictable_list(new_page); } return new_page; } int page_referenced_ksm(struct page *page, struct mem_cgroup *memcg, unsigned long *vm_flags) { struct stable_node *stable_node; struct rmap_item *rmap_item; struct hlist_node *hlist; unsigned int mapcount = page_mapcount(page); int referenced = 0; int search_new_forks = 0; VM_BUG_ON(!PageKsm(page)); VM_BUG_ON(!PageLocked(page)); stable_node = page_stable_node(page); if (!stable_node) return 0; again: hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) { struct anon_vma *anon_vma = rmap_item->anon_vma; struct anon_vma_chain *vmac; struct vm_area_struct *vma; anon_vma_lock(anon_vma); list_for_each_entry(vmac, &anon_vma->head, same_anon_vma) { vma = vmac->vma; if (rmap_item->address < vma->vm_start || rmap_item->address >= vma->vm_end) continue; /* * Initially we examine only the vma which covers this * rmap_item; but later, if there is still work to do, * we examine covering vmas in other mms: in case they * were forked from the original since ksmd passed. */ if ((rmap_item->mm == vma->vm_mm) == search_new_forks) continue; if (memcg && !mm_match_cgroup(vma->vm_mm, memcg)) continue; referenced += page_referenced_one(page, vma, rmap_item->address, &mapcount, vm_flags); if (!search_new_forks || !mapcount) break; } anon_vma_unlock(anon_vma); if (!mapcount) goto out; } if (!search_new_forks++) goto again; out: return referenced; } int try_to_unmap_ksm(struct page *page, enum ttu_flags flags) { struct stable_node *stable_node; struct hlist_node *hlist; struct rmap_item *rmap_item; int ret = SWAP_AGAIN; int search_new_forks = 0; VM_BUG_ON(!PageKsm(page)); VM_BUG_ON(!PageLocked(page)); stable_node = page_stable_node(page); if (!stable_node) return SWAP_FAIL; again: hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) { struct anon_vma *anon_vma = rmap_item->anon_vma; struct anon_vma_chain *vmac; struct vm_area_struct *vma; anon_vma_lock(anon_vma); list_for_each_entry(vmac, &anon_vma->head, same_anon_vma) { vma = vmac->vma; if (rmap_item->address < vma->vm_start || rmap_item->address >= vma->vm_end) continue; /* * Initially we examine only the vma which covers this * rmap_item; but later, if there is still work to do, * we examine covering vmas in other mms: in case they * were forked from the original since ksmd passed. 
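 * On the first pass (search_new_forks == 0) only the vma belonging to
 * rmap_item->mm itself is examined; on the second pass only the forked
 * copies in other mms are, hence the inverted equality test below.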
*/ if ((rmap_item->mm == vma->vm_mm) == search_new_forks) continue; ret = try_to_unmap_one(page, vma, rmap_item->address, flags); if (ret != SWAP_AGAIN || !page_mapped(page)) { anon_vma_unlock(anon_vma); goto out; } } anon_vma_unlock(anon_vma); } if (!search_new_forks++) goto again; out: return ret; } #ifdef CONFIG_MIGRATION int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page *, struct vm_area_struct *, unsigned long, void *), void *arg) { struct stable_node *stable_node; struct hlist_node *hlist; struct rmap_item *rmap_item; int ret = SWAP_AGAIN; int search_new_forks = 0; VM_BUG_ON(!PageKsm(page)); VM_BUG_ON(!PageLocked(page)); stable_node = page_stable_node(page); if (!stable_node) return ret; again: hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) { struct anon_vma *anon_vma = rmap_item->anon_vma; struct anon_vma_chain *vmac; struct vm_area_struct *vma; anon_vma_lock(anon_vma); list_for_each_entry(vmac, &anon_vma->head, same_anon_vma) { vma = vmac->vma; if (rmap_item->address < vma->vm_start || rmap_item->address >= vma->vm_end) continue; /* * Initially we examine only the vma which covers this * rmap_item; but later, if there is still work to do, * we examine covering vmas in other mms: in case they * were forked from the original since ksmd passed. */ if ((rmap_item->mm == vma->vm_mm) == search_new_forks) continue; ret = rmap_one(page, vma, rmap_item->address, arg); if (ret != SWAP_AGAIN) { anon_vma_unlock(anon_vma); goto out; } } anon_vma_unlock(anon_vma); } if (!search_new_forks++) goto again; out: return ret; } void ksm_migrate_page(struct page *newpage, struct page *oldpage) { struct stable_node *stable_node; VM_BUG_ON(!PageLocked(oldpage)); VM_BUG_ON(!PageLocked(newpage)); VM_BUG_ON(newpage->mapping != oldpage->mapping); stable_node = page_stable_node(newpage); if (stable_node) { VM_BUG_ON(stable_node->kpfn != page_to_pfn(oldpage)); stable_node->kpfn = page_to_pfn(newpage); } } #endif /* CONFIG_MIGRATION */ #ifdef CONFIG_MEMORY_HOTREMOVE static struct stable_node *ksm_check_stable_tree(unsigned long start_pfn, unsigned long end_pfn) { struct rb_node *node; for (node = rb_first(&root_stable_tree); node; node = rb_next(node)) { struct stable_node *stable_node; stable_node = rb_entry(node, struct stable_node, node); if (stable_node->kpfn >= start_pfn && stable_node->kpfn < end_pfn) return stable_node; } return NULL; } static int ksm_memory_callback(struct notifier_block *self, unsigned long action, void *arg) { struct memory_notify *mn = arg; struct stable_node *stable_node; switch (action) { case MEM_GOING_OFFLINE: /* * Keep it very simple for now: just lock out ksmd and * MADV_UNMERGEABLE while any memory is going offline. * mutex_lock_nested() is necessary because lockdep was alarmed * that here we take ksm_thread_mutex inside notifier chain * mutex, and later take notifier chain mutex inside * ksm_thread_mutex to unlock it. But that's safe because both * are inside mem_hotplug_mutex. */ mutex_lock_nested(&ksm_thread_mutex, SINGLE_DEPTH_NESTING); break; case MEM_OFFLINE: /* * Most of the work is done by page migration; but there might * be a few stable_nodes left over, still pointing to struct * pages which have been offlined: prune those from the tree. 
*/ while ((stable_node = ksm_check_stable_tree(mn->start_pfn, mn->start_pfn + mn->nr_pages)) != NULL) remove_node_from_stable_tree(stable_node); /* fallthrough */ case MEM_CANCEL_OFFLINE: mutex_unlock(&ksm_thread_mutex); break; } return NOTIFY_OK; } #endif /* CONFIG_MEMORY_HOTREMOVE */ #ifdef CONFIG_SYSFS /* * This all compiles without CONFIG_SYSFS, but is a waste of space. */ #define KSM_ATTR_RO(_name) \ static struct kobj_attribute _name##_attr = __ATTR_RO(_name) #define KSM_ATTR(_name) \ static struct kobj_attribute _name##_attr = \ __ATTR(_name, 0644, _name##_show, _name##_store) static ssize_t sleep_millisecs_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sprintf(buf, "%u\n", ksm_thread_sleep_millisecs); } static ssize_t sleep_millisecs_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { unsigned long msecs; int err; err = strict_strtoul(buf, 10, &msecs); if (err || msecs > UINT_MAX) return -EINVAL; ksm_thread_sleep_millisecs = msecs; return count; } KSM_ATTR(sleep_millisecs); static ssize_t pages_to_scan_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sprintf(buf, "%u\n", ksm_thread_pages_to_scan); } static ssize_t pages_to_scan_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { int err; unsigned long nr_pages; err = strict_strtoul(buf, 10, &nr_pages); if (err || nr_pages > UINT_MAX) return -EINVAL; ksm_thread_pages_to_scan = nr_pages; return count; } KSM_ATTR(pages_to_scan); static ssize_t run_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sprintf(buf, "%u\n", ksm_run); } static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { int err; unsigned long flags; err = strict_strtoul(buf, 10, &flags); if (err || flags > UINT_MAX) return -EINVAL; if (flags > KSM_RUN_UNMERGE) return -EINVAL; /* * KSM_RUN_MERGE sets ksmd running, and 0 stops it running. * KSM_RUN_UNMERGE stops it running and unmerges all rmap_items, * breaking COW to free the pages_shared (but leaves mm_slots * on the list for when ksmd may be set running again). */ mutex_lock(&ksm_thread_mutex); if (ksm_run != flags) { ksm_run = flags; if (flags & KSM_RUN_UNMERGE) { int oom_score_adj; oom_score_adj = test_set_oom_score_adj(OOM_SCORE_ADJ_MAX); err = unmerge_and_remove_all_rmap_items(); test_set_oom_score_adj(oom_score_adj); if (err) { ksm_run = KSM_RUN_STOP; count = err; } } } mutex_unlock(&ksm_thread_mutex); if (flags & KSM_RUN_MERGE) wake_up_interruptible(&ksm_thread_wait); return count; } KSM_ATTR(run); static ssize_t pages_shared_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sprintf(buf, "%lu\n", ksm_pages_shared); } KSM_ATTR_RO(pages_shared); static ssize_t pages_sharing_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sprintf(buf, "%lu\n", ksm_pages_sharing); } KSM_ATTR_RO(pages_sharing); static ssize_t pages_unshared_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sprintf(buf, "%lu\n", ksm_pages_unshared); } KSM_ATTR_RO(pages_unshared); static ssize_t pages_volatile_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { long ksm_pages_volatile; ksm_pages_volatile = ksm_rmap_items - ksm_pages_shared - ksm_pages_sharing - ksm_pages_unshared; /* * It was not worth any locking to calculate that statistic, * but it might therefore sometimes be negative: conceal that. 
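 *
 * "Volatile" here means pages ksm holds an rmap_item for but which sit in
 * neither tree - typically pages whose contents were still changing when
 * last scanned - so the figure is rmap_items minus shared, sharing and
 * unshared.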
*/ if (ksm_pages_volatile < 0) ksm_pages_volatile = 0; return sprintf(buf, "%ld\n", ksm_pages_volatile); } KSM_ATTR_RO(pages_volatile); static ssize_t full_scans_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sprintf(buf, "%lu\n", ksm_scan.seqnr); } KSM_ATTR_RO(full_scans); static struct attribute *ksm_attrs[] = { &sleep_millisecs_attr.attr, &pages_to_scan_attr.attr, &run_attr.attr, &pages_shared_attr.attr, &pages_sharing_attr.attr, &pages_unshared_attr.attr, &pages_volatile_attr.attr, &full_scans_attr.attr, NULL, }; static struct attribute_group ksm_attr_group = { .attrs = ksm_attrs, .name = "ksm", }; #endif /* CONFIG_SYSFS */ static int __init ksm_init(void) { struct task_struct *ksm_thread; int err; err = ksm_slab_init(); if (err) goto out; ksm_thread = kthread_run(ksm_scan_thread, NULL, "ksmd"); if (IS_ERR(ksm_thread)) { printk(KERN_ERR "ksm: creating kthread failed\n"); err = PTR_ERR(ksm_thread); goto out_free; } #ifdef CONFIG_SYSFS err = sysfs_create_group(mm_kobj, &ksm_attr_group); if (err) { printk(KERN_ERR "ksm: register sysfs failed\n"); kthread_stop(ksm_thread); goto out_free; } #else ksm_run = KSM_RUN_MERGE; /* no way for user to start it */ #endif /* CONFIG_SYSFS */ #ifdef CONFIG_MEMORY_HOTREMOVE /* * Choose a high priority since the callback takes ksm_thread_mutex: * later callbacks could only be taking locks which nest within that. */ hotplug_memory_notifier(ksm_memory_callback, 100); #endif return 0; out_free: ksm_slab_free(); out: return err; } module_init(ksm_init)
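/*
 * The interface implemented above is easiest to see from userspace. What
 * follows is only an illustrative sketch, not part of this file: error
 * handling is omitted and the sysfs paths are as described in
 * Documentation/vm/ksm.txt. memset() fills the area with identical pages,
 * madvise(MADV_MERGEABLE) ends up in ksm_madvise() above, and writing 1 to
 * /sys/kernel/mm/ksm/run (run_store() above) sets ksmd scanning; the
 * pages_shared and pages_sharing attributes then report the merging.
 *
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/mman.h>
 *
 *	int main(void)
 *	{
 *		size_t len = 64 << 20;
 *		char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *		memset(buf, 0x5a, len);
 *		madvise(buf, len, MADV_MERGEABLE);
 *		pause();
 *		return 0;
 *	}
 */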
/* * Block multiqueue core code * * Copyright (C) 2013-2014 Jens Axboe * Copyright (C) 2013-2014 Christoph Hellwig */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/backing-dev.h> #include <linux/bio.h> #include <linux/blkdev.h> #include <linux/mm.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/workqueue.h> #include <linux/smp.h> #include <linux/llist.h> #include <linux/list_sort.h> #include <linux/cpu.h> #include <linux/cache.h> #include <linux/sched/sysctl.h> #include <linux/delay.h> #include <linux/crash_dump.h> #include <trace/events/block.h> #include <linux/blk-mq.h> #include "blk.h" #include "blk-mq.h" #include "blk-mq-tag.h" static DEFINE_MUTEX(all_q_mutex); static LIST_HEAD(all_q_list); static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx); /* * Check if any of the ctx's have pending work in this hardware queue */ static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx) { unsigned int i; for (i = 0; i < hctx->ctx_map.size; i++) if (hctx->ctx_map.map[i].word) return true; return false; } static inline struct blk_align_bitmap *get_bm(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx) { return &hctx->ctx_map.map[ctx->index_hw / hctx->ctx_map.bits_per_word]; } #define CTX_TO_BIT(hctx, ctx) \ ((ctx)->index_hw & ((hctx)->ctx_map.bits_per_word - 1)) /* * Mark this ctx as having pending work in this hardware queue */ static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx) { struct blk_align_bitmap *bm = get_bm(hctx, ctx); if (!test_bit(CTX_TO_BIT(hctx, ctx), &bm->word)) set_bit(CTX_TO_BIT(hctx, ctx), &bm->word); } static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx) { struct blk_align_bitmap *bm = get_bm(hctx, ctx); clear_bit(CTX_TO_BIT(hctx, ctx), &bm->word); } static int blk_mq_queue_enter(struct request_queue *q, gfp_t gfp) { while (true) { int ret; if (percpu_ref_tryget_live(&q->mq_usage_counter)) return 0; if (!(gfp & __GFP_WAIT)) return -EBUSY; ret = wait_event_interruptible(q->mq_freeze_wq, !atomic_read(&q->mq_freeze_depth) || blk_queue_dying(q)); if (blk_queue_dying(q)) return -ENODEV; if (ret) return ret; } } static void blk_mq_queue_exit(struct request_queue *q) { percpu_ref_put(&q->mq_usage_counter); } static void blk_mq_usage_counter_release(struct percpu_ref *ref) { struct request_queue *q = container_of(ref, struct request_queue, mq_usage_counter); wake_up_all(&q->mq_freeze_wq); } void blk_mq_freeze_queue_start(struct request_queue *q) { int freeze_depth; freeze_depth = atomic_inc_return(&q->mq_freeze_depth); if (freeze_depth == 1) { percpu_ref_kill(&q->mq_usage_counter); blk_mq_run_hw_queues(q, false); } } EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start); static void blk_mq_freeze_queue_wait(struct request_queue *q) { wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->mq_usage_counter)); } /* * Guarantee no request is in use, so we can change any data structure of * the queue afterward. 
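 *
 * Freezing first kills q->mq_usage_counter, so new blk_mq_queue_enter()
 * callers either block or bail out, and then waits for the reference count
 * to drop to zero, i.e. for every request already allocated against the
 * queue to be freed again.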
*/ void blk_mq_freeze_queue(struct request_queue *q) { blk_mq_freeze_queue_start(q); blk_mq_freeze_queue_wait(q); } EXPORT_SYMBOL_GPL(blk_mq_freeze_queue); void blk_mq_unfreeze_queue(struct request_queue *q) { int freeze_depth; freeze_depth = atomic_dec_return(&q->mq_freeze_depth); WARN_ON_ONCE(freeze_depth < 0); if (!freeze_depth) { percpu_ref_reinit(&q->mq_usage_counter); wake_up_all(&q->mq_freeze_wq); } } EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue); void blk_mq_wake_waiters(struct request_queue *q) { struct blk_mq_hw_ctx *hctx; unsigned int i; queue_for_each_hw_ctx(q, hctx, i) if (blk_mq_hw_queue_mapped(hctx)) blk_mq_tag_wakeup_all(hctx->tags, true); /* * If we are called because the queue has now been marked as * dying, we need to ensure that processes currently waiting on * the queue are notified as well. */ wake_up_all(&q->mq_freeze_wq); } bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx) { return blk_mq_has_free_tags(hctx->tags); } EXPORT_SYMBOL(blk_mq_can_queue); static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx, struct request *rq, unsigned int rw_flags) { if (blk_queue_io_stat(q)) rw_flags |= REQ_IO_STAT; INIT_LIST_HEAD(&rq->queuelist); /* csd/requeue_work/fifo_time is initialized before use */ rq->q = q; rq->mq_ctx = ctx; rq->cmd_flags |= rw_flags; /* do not touch atomic flags, it needs atomic ops against the timer */ rq->cpu = -1; INIT_HLIST_NODE(&rq->hash); RB_CLEAR_NODE(&rq->rb_node); rq->rq_disk = NULL; rq->part = NULL; rq->start_time = jiffies; #ifdef CONFIG_BLK_CGROUP rq->rl = NULL; set_start_time_ns(rq); rq->io_start_time_ns = 0; #endif rq->nr_phys_segments = 0; #if defined(CONFIG_BLK_DEV_INTEGRITY) rq->nr_integrity_segments = 0; #endif rq->special = NULL; /* tag was already set */ rq->errors = 0; rq->cmd = rq->__cmd; rq->extra_len = 0; rq->sense_len = 0; rq->resid_len = 0; rq->sense = NULL; INIT_LIST_HEAD(&rq->timeout_list); rq->timeout = 0; rq->end_io = NULL; rq->end_io_data = NULL; rq->next_rq = NULL; ctx->rq_dispatched[rw_is_sync(rw_flags)]++; } static struct request * __blk_mq_alloc_request(struct blk_mq_alloc_data *data, int rw) { struct request *rq; unsigned int tag; tag = blk_mq_get_tag(data); if (tag != BLK_MQ_TAG_FAIL) { rq = data->hctx->tags->rqs[tag]; if (blk_mq_tag_busy(data->hctx)) { rq->cmd_flags = REQ_MQ_INFLIGHT; atomic_inc(&data->hctx->nr_active); } rq->tag = tag; blk_mq_rq_ctx_init(data->q, data->ctx, rq, rw); return rq; } return NULL; } struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp, bool reserved) { struct blk_mq_ctx *ctx; struct blk_mq_hw_ctx *hctx; struct request *rq; struct blk_mq_alloc_data alloc_data; int ret; ret = blk_mq_queue_enter(q, gfp); if (ret) return ERR_PTR(ret); ctx = blk_mq_get_ctx(q); hctx = q->mq_ops->map_queue(q, ctx->cpu); blk_mq_set_alloc_data(&alloc_data, q, gfp & ~__GFP_WAIT, reserved, ctx, hctx); rq = __blk_mq_alloc_request(&alloc_data, rw); if (!rq && (gfp & __GFP_WAIT)) { __blk_mq_run_hw_queue(hctx); blk_mq_put_ctx(ctx); ctx = blk_mq_get_ctx(q); hctx = q->mq_ops->map_queue(q, ctx->cpu); blk_mq_set_alloc_data(&alloc_data, q, gfp, reserved, ctx, hctx); rq = __blk_mq_alloc_request(&alloc_data, rw); ctx = alloc_data.ctx; } blk_mq_put_ctx(ctx); if (!rq) { blk_mq_queue_exit(q); return ERR_PTR(-EWOULDBLOCK); } return rq; } EXPORT_SYMBOL(blk_mq_alloc_request); static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx, struct request *rq) { const int tag = rq->tag; struct request_queue *q = rq->q; if (rq->cmd_flags & REQ_MQ_INFLIGHT) 
atomic_dec(&hctx->nr_active); rq->cmd_flags = 0; clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags); blk_mq_put_tag(hctx, tag, &ctx->last_tag); blk_mq_queue_exit(q); } void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *hctx, struct request *rq) { struct blk_mq_ctx *ctx = rq->mq_ctx; ctx->rq_completed[rq_is_sync(rq)]++; __blk_mq_free_request(hctx, ctx, rq); } EXPORT_SYMBOL_GPL(blk_mq_free_hctx_request); void blk_mq_free_request(struct request *rq) { struct blk_mq_hw_ctx *hctx; struct request_queue *q = rq->q; hctx = q->mq_ops->map_queue(q, rq->mq_ctx->cpu); blk_mq_free_hctx_request(hctx, rq); } EXPORT_SYMBOL_GPL(blk_mq_free_request); inline void __blk_mq_end_request(struct request *rq, int error) { blk_account_io_done(rq); if (rq->end_io) { rq->end_io(rq, error); } else { if (unlikely(blk_bidi_rq(rq))) blk_mq_free_request(rq->next_rq); blk_mq_free_request(rq); } } EXPORT_SYMBOL(__blk_mq_end_request); void blk_mq_end_request(struct request *rq, int error) { if (blk_update_request(rq, error, blk_rq_bytes(rq))) BUG(); __blk_mq_end_request(rq, error); } EXPORT_SYMBOL(blk_mq_end_request); static void __blk_mq_complete_request_remote(void *data) { struct request *rq = data; rq->q->softirq_done_fn(rq); } static void blk_mq_ipi_complete_request(struct request *rq) { struct blk_mq_ctx *ctx = rq->mq_ctx; bool shared = false; int cpu; if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) { rq->q->softirq_done_fn(rq); return; } cpu = get_cpu(); if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags)) shared = cpus_share_cache(cpu, ctx->cpu); if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) { rq->csd.func = __blk_mq_complete_request_remote; rq->csd.info = rq; rq->csd.flags = 0; smp_call_function_single_async(ctx->cpu, &rq->csd); } else { rq->q->softirq_done_fn(rq); } put_cpu(); } void __blk_mq_complete_request(struct request *rq) { struct request_queue *q = rq->q; if (!q->softirq_done_fn) blk_mq_end_request(rq, rq->errors); else blk_mq_ipi_complete_request(rq); } /** * blk_mq_complete_request - end I/O on a request * @rq: the request being processed * * Description: * Ends all I/O on a request. It does not handle partial completions. * The actual completion happens out-of-order, through a IPI handler. **/ void blk_mq_complete_request(struct request *rq) { struct request_queue *q = rq->q; if (unlikely(blk_should_fake_timeout(q))) return; if (!blk_mark_rq_complete(rq)) __blk_mq_complete_request(rq); } EXPORT_SYMBOL(blk_mq_complete_request); int blk_mq_request_started(struct request *rq) { return test_bit(REQ_ATOM_STARTED, &rq->atomic_flags); } EXPORT_SYMBOL_GPL(blk_mq_request_started); void blk_mq_start_request(struct request *rq) { struct request_queue *q = rq->q; trace_block_rq_issue(q, rq); rq->resid_len = blk_rq_bytes(rq); if (unlikely(blk_bidi_rq(rq))) rq->next_rq->resid_len = blk_rq_bytes(rq->next_rq); blk_add_timer(rq); /* * Ensure that ->deadline is visible before set the started * flag and clear the completed flag. */ smp_mb__before_atomic(); /* * Mark us as started and clear complete. Complete might have been * set if requeue raced with timeout, which then marked it as * complete. So be sure to clear complete again when we start * the request, otherwise we'll ignore the completion event. */ if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) set_bit(REQ_ATOM_STARTED, &rq->atomic_flags); if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags)) clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags); if (q->dma_drain_size && blk_rq_bytes(rq)) { /* * Make sure space for the drain appears. 
We know we can do * this because max_hw_segments has been adjusted to be one * fewer than the device can handle. */ rq->nr_phys_segments++; } } EXPORT_SYMBOL(blk_mq_start_request); static void __blk_mq_requeue_request(struct request *rq) { struct request_queue *q = rq->q; trace_block_rq_requeue(q, rq); if (test_and_clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) { if (q->dma_drain_size && blk_rq_bytes(rq)) rq->nr_phys_segments--; } } void blk_mq_requeue_request(struct request *rq) { __blk_mq_requeue_request(rq); BUG_ON(blk_queued_rq(rq)); blk_mq_add_to_requeue_list(rq, true); } EXPORT_SYMBOL(blk_mq_requeue_request); static void blk_mq_requeue_work(struct work_struct *work) { struct request_queue *q = container_of(work, struct request_queue, requeue_work); LIST_HEAD(rq_list); struct request *rq, *next; unsigned long flags; spin_lock_irqsave(&q->requeue_lock, flags); list_splice_init(&q->requeue_list, &rq_list); spin_unlock_irqrestore(&q->requeue_lock, flags); list_for_each_entry_safe(rq, next, &rq_list, queuelist) { if (!(rq->cmd_flags & REQ_SOFTBARRIER)) continue; rq->cmd_flags &= ~REQ_SOFTBARRIER; list_del_init(&rq->queuelist); blk_mq_insert_request(rq, true, false, false); } while (!list_empty(&rq_list)) { rq = list_entry(rq_list.next, struct request, queuelist); list_del_init(&rq->queuelist); blk_mq_insert_request(rq, false, false, false); } /* * Use the start variant of queue running here, so that running * the requeue work will kick stopped queues. */ blk_mq_start_hw_queues(q); } void blk_mq_add_to_requeue_list(struct request *rq, bool at_head) { struct request_queue *q = rq->q; unsigned long flags; /* * We abuse this flag that is otherwise used by the I/O scheduler to * request head insertation from the workqueue. */ BUG_ON(rq->cmd_flags & REQ_SOFTBARRIER); spin_lock_irqsave(&q->requeue_lock, flags); if (at_head) { rq->cmd_flags |= REQ_SOFTBARRIER; list_add(&rq->queuelist, &q->requeue_list); } else { list_add_tail(&rq->queuelist, &q->requeue_list); } spin_unlock_irqrestore(&q->requeue_lock, flags); } EXPORT_SYMBOL(blk_mq_add_to_requeue_list); void blk_mq_cancel_requeue_work(struct request_queue *q) { cancel_work_sync(&q->requeue_work); } EXPORT_SYMBOL_GPL(blk_mq_cancel_requeue_work); void blk_mq_kick_requeue_list(struct request_queue *q) { kblockd_schedule_work(&q->requeue_work); } EXPORT_SYMBOL(blk_mq_kick_requeue_list); void blk_mq_abort_requeue_list(struct request_queue *q) { unsigned long flags; LIST_HEAD(rq_list); spin_lock_irqsave(&q->requeue_lock, flags); list_splice_init(&q->requeue_list, &rq_list); spin_unlock_irqrestore(&q->requeue_lock, flags); while (!list_empty(&rq_list)) { struct request *rq; rq = list_first_entry(&rq_list, struct request, queuelist); list_del_init(&rq->queuelist); rq->errors = -EIO; blk_mq_end_request(rq, rq->errors); } } EXPORT_SYMBOL(blk_mq_abort_requeue_list); static inline bool is_flush_request(struct request *rq, struct blk_flush_queue *fq, unsigned int tag) { return ((rq->cmd_flags & REQ_FLUSH_SEQ) && fq->flush_rq->tag == tag); } struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag) { struct request *rq = tags->rqs[tag]; /* mq_ctx of flush rq is always cloned from the corresponding req */ struct blk_flush_queue *fq = blk_get_flush_queue(rq->q, rq->mq_ctx); if (!is_flush_request(rq, fq, tag)) return rq; return fq->flush_rq; } EXPORT_SYMBOL(blk_mq_tag_to_rq); struct blk_mq_timeout_data { unsigned long next; unsigned int next_set; }; void blk_mq_rq_timed_out(struct request *req, bool reserved) { struct blk_mq_ops *ops = 
req->q->mq_ops; enum blk_eh_timer_return ret = BLK_EH_RESET_TIMER; /* * We know that complete is set at this point. If STARTED isn't set * anymore, then the request isn't active and the "timeout" should * just be ignored. This can happen due to the bitflag ordering. * Timeout first checks if STARTED is set, and if it is, assumes * the request is active. But if we race with completion, then * we both flags will get cleared. So check here again, and ignore * a timeout event with a request that isn't active. */ if (!test_bit(REQ_ATOM_STARTED, &req->atomic_flags)) return; if (ops->timeout) ret = ops->timeout(req, reserved); switch (ret) { case BLK_EH_HANDLED: __blk_mq_complete_request(req); break; case BLK_EH_RESET_TIMER: blk_add_timer(req); blk_clear_rq_complete(req); break; case BLK_EH_NOT_HANDLED: break; default: printk(KERN_ERR "block: bad eh return: %d\n", ret); break; } } static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx, struct request *rq, void *priv, bool reserved) { struct blk_mq_timeout_data *data = priv; if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) { /* * If a request wasn't started before the queue was * marked dying, kill it here or it'll go unnoticed. */ if (unlikely(blk_queue_dying(rq->q))) { rq->errors = -EIO; blk_mq_complete_request(rq); } return; } if (rq->cmd_flags & REQ_NO_TIMEOUT) return; if (time_after_eq(jiffies, rq->deadline)) { if (!blk_mark_rq_complete(rq)) blk_mq_rq_timed_out(rq, reserved); } else if (!data->next_set || time_after(data->next, rq->deadline)) { data->next = rq->deadline; data->next_set = 1; } } static void blk_mq_rq_timer(unsigned long priv) { struct request_queue *q = (struct request_queue *)priv; struct blk_mq_timeout_data data = { .next = 0, .next_set = 0, }; struct blk_mq_hw_ctx *hctx; int i; queue_for_each_hw_ctx(q, hctx, i) { /* * If not software queues are currently mapped to this * hardware queue, there's nothing to check */ if (!blk_mq_hw_queue_mapped(hctx)) continue; blk_mq_tag_busy_iter(hctx, blk_mq_check_expired, &data); } if (data.next_set) { data.next = blk_rq_timeout(round_jiffies_up(data.next)); mod_timer(&q->timeout, data.next); } else { queue_for_each_hw_ctx(q, hctx, i) { /* the hctx may be unmapped, so check it here */ if (blk_mq_hw_queue_mapped(hctx)) blk_mq_tag_idle(hctx); } } } /* * Reverse check our software queue for entries that we could potentially * merge with. Currently includes a hand-wavy stop count of 8, to not spend * too much time checking for merges. 
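 *
 * On success the bio has been folded into a request already sitting on the
 * software queue (back or front merge) and the caller frees the request it
 * had allocated for it.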
*/ static bool blk_mq_attempt_merge(struct request_queue *q, struct blk_mq_ctx *ctx, struct bio *bio) { struct request *rq; int checked = 8; list_for_each_entry_reverse(rq, &ctx->rq_list, queuelist) { int el_ret; if (!checked--) break; if (!blk_rq_merge_ok(rq, bio)) continue; el_ret = blk_try_merge(rq, bio); if (el_ret == ELEVATOR_BACK_MERGE) { if (bio_attempt_back_merge(q, rq, bio)) { ctx->rq_merged++; return true; } break; } else if (el_ret == ELEVATOR_FRONT_MERGE) { if (bio_attempt_front_merge(q, rq, bio)) { ctx->rq_merged++; return true; } break; } } return false; } /* * Process software queues that have been marked busy, splicing them * to the for-dispatch */ static void flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list) { struct blk_mq_ctx *ctx; int i; for (i = 0; i < hctx->ctx_map.size; i++) { struct blk_align_bitmap *bm = &hctx->ctx_map.map[i]; unsigned int off, bit; if (!bm->word) continue; bit = 0; off = i * hctx->ctx_map.bits_per_word; do { bit = find_next_bit(&bm->word, bm->depth, bit); if (bit >= bm->depth) break; ctx = hctx->ctxs[bit + off]; clear_bit(bit, &bm->word); spin_lock(&ctx->lock); list_splice_tail_init(&ctx->rq_list, list); spin_unlock(&ctx->lock); bit++; } while (1); } } /* * Run this hardware queue, pulling any software queues mapped to it in. * Note that this function currently has various problems around ordering * of IO. In particular, we'd like FIFO behaviour on handling existing * items on the hctx->dispatch list. Ignore that for now. */ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx) { struct request_queue *q = hctx->queue; struct request *rq; LIST_HEAD(rq_list); LIST_HEAD(driver_list); struct list_head *dptr; int queued; WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask)); if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state))) return; hctx->run++; /* * Touch any software queue that has pending entries. */ flush_busy_ctxs(hctx, &rq_list); /* * If we have previous entries on our dispatch list, grab them * and stuff them at the front for more fair dispatch. */ if (!list_empty_careful(&hctx->dispatch)) { spin_lock(&hctx->lock); if (!list_empty(&hctx->dispatch)) list_splice_init(&hctx->dispatch, &rq_list); spin_unlock(&hctx->lock); } /* * Start off with dptr being NULL, so we start the first request * immediately, even if we have more pending. */ dptr = NULL; /* * Now process all the entries, sending them to the driver. */ queued = 0; while (!list_empty(&rq_list)) { struct blk_mq_queue_data bd; int ret; rq = list_first_entry(&rq_list, struct request, queuelist); list_del_init(&rq->queuelist); bd.rq = rq; bd.list = dptr; bd.last = list_empty(&rq_list); ret = q->mq_ops->queue_rq(hctx, &bd); switch (ret) { case BLK_MQ_RQ_QUEUE_OK: queued++; continue; case BLK_MQ_RQ_QUEUE_BUSY: list_add(&rq->queuelist, &rq_list); __blk_mq_requeue_request(rq); break; default: pr_err("blk-mq: bad return on queue: %d\n", ret); case BLK_MQ_RQ_QUEUE_ERROR: rq->errors = -EIO; blk_mq_end_request(rq, rq->errors); break; } if (ret == BLK_MQ_RQ_QUEUE_BUSY) break; /* * We've done the first request. If we have more than 1 * left in the list, set dptr to defer issue. */ if (!dptr && rq_list.next != rq_list.prev) dptr = &driver_list; } if (!queued) hctx->dispatched[0]++; else if (queued < (1 << (BLK_MQ_MAX_DISPATCH_ORDER - 1))) hctx->dispatched[ilog2(queued) + 1]++; /* * Any items that need requeuing? Stuff them into hctx->dispatch, * that is where we will continue on next queue run. 
*/ if (!list_empty(&rq_list)) { spin_lock(&hctx->lock); list_splice(&rq_list, &hctx->dispatch); spin_unlock(&hctx->lock); /* * the queue is expected stopped with BLK_MQ_RQ_QUEUE_BUSY, but * it's possible the queue is stopped and restarted again * before this. Queue restart will dispatch requests. And since * requests in rq_list aren't added into hctx->dispatch yet, * the requests in rq_list might get lost. * * blk_mq_run_hw_queue() already checks the STOPPED bit **/ blk_mq_run_hw_queue(hctx, true); } } /* * It'd be great if the workqueue API had a way to pass * in a mask and had some smarts for more clever placement. * For now we just round-robin here, switching for every * BLK_MQ_CPU_WORK_BATCH queued items. */ static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx) { if (hctx->queue->nr_hw_queues == 1) return WORK_CPU_UNBOUND; if (--hctx->next_cpu_batch <= 0) { int cpu = hctx->next_cpu, next_cpu; next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask); if (next_cpu >= nr_cpu_ids) next_cpu = cpumask_first(hctx->cpumask); hctx->next_cpu = next_cpu; hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH; return cpu; } return hctx->next_cpu; } void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async) { if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state) || !blk_mq_hw_queue_mapped(hctx))) return; if (!async) { int cpu = get_cpu(); if (cpumask_test_cpu(cpu, hctx->cpumask)) { __blk_mq_run_hw_queue(hctx); put_cpu(); return; } put_cpu(); } kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work, 0); } void blk_mq_run_hw_queues(struct request_queue *q, bool async) { struct blk_mq_hw_ctx *hctx; int i; queue_for_each_hw_ctx(q, hctx, i) { if ((!blk_mq_hctx_has_pending(hctx) && list_empty_careful(&hctx->dispatch)) || test_bit(BLK_MQ_S_STOPPED, &hctx->state)) continue; blk_mq_run_hw_queue(hctx, async); } } EXPORT_SYMBOL(blk_mq_run_hw_queues); void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx) { cancel_delayed_work(&hctx->run_work); cancel_delayed_work(&hctx->delay_work); set_bit(BLK_MQ_S_STOPPED, &hctx->state); } EXPORT_SYMBOL(blk_mq_stop_hw_queue); void blk_mq_stop_hw_queues(struct request_queue *q) { struct blk_mq_hw_ctx *hctx; int i; queue_for_each_hw_ctx(q, hctx, i) blk_mq_stop_hw_queue(hctx); } EXPORT_SYMBOL(blk_mq_stop_hw_queues); void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx) { clear_bit(BLK_MQ_S_STOPPED, &hctx->state); blk_mq_run_hw_queue(hctx, false); } EXPORT_SYMBOL(blk_mq_start_hw_queue); void blk_mq_start_hw_queues(struct request_queue *q) { struct blk_mq_hw_ctx *hctx; int i; queue_for_each_hw_ctx(q, hctx, i) blk_mq_start_hw_queue(hctx); } EXPORT_SYMBOL(blk_mq_start_hw_queues); void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async) { struct blk_mq_hw_ctx *hctx; int i; queue_for_each_hw_ctx(q, hctx, i) { if (!test_bit(BLK_MQ_S_STOPPED, &hctx->state)) continue; clear_bit(BLK_MQ_S_STOPPED, &hctx->state); blk_mq_run_hw_queue(hctx, async); } } EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues); static void blk_mq_run_work_fn(struct work_struct *work) { struct blk_mq_hw_ctx *hctx; hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work); __blk_mq_run_hw_queue(hctx); } static void blk_mq_delay_work_fn(struct work_struct *work) { struct blk_mq_hw_ctx *hctx; hctx = container_of(work, struct blk_mq_hw_ctx, delay_work.work); if (test_and_clear_bit(BLK_MQ_S_STOPPED, &hctx->state)) __blk_mq_run_hw_queue(hctx); } void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs) { if (unlikely(!blk_mq_hw_queue_mapped(hctx))) return; 
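	/*
	 * Restart a stopped queue after 'msecs': blk_mq_delay_work_fn()
	 * clears BLK_MQ_S_STOPPED and reruns the queue, on a CPU chosen by
	 * blk_mq_hctx_next_cpu().
	 */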
kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->delay_work, msecs_to_jiffies(msecs)); } EXPORT_SYMBOL(blk_mq_delay_queue); static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, bool at_head) { struct blk_mq_ctx *ctx = rq->mq_ctx; trace_block_rq_insert(hctx->queue, rq); if (at_head) list_add(&rq->queuelist, &ctx->rq_list); else list_add_tail(&rq->queuelist, &ctx->rq_list); blk_mq_hctx_mark_pending(hctx, ctx); } void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue, bool async) { struct request_queue *q = rq->q; struct blk_mq_hw_ctx *hctx; struct blk_mq_ctx *ctx = rq->mq_ctx, *current_ctx; current_ctx = blk_mq_get_ctx(q); if (!cpu_online(ctx->cpu)) rq->mq_ctx = ctx = current_ctx; hctx = q->mq_ops->map_queue(q, ctx->cpu); spin_lock(&ctx->lock); __blk_mq_insert_request(hctx, rq, at_head); spin_unlock(&ctx->lock); if (run_queue) blk_mq_run_hw_queue(hctx, async); blk_mq_put_ctx(current_ctx); } static void blk_mq_insert_requests(struct request_queue *q, struct blk_mq_ctx *ctx, struct list_head *list, int depth, bool from_schedule) { struct blk_mq_hw_ctx *hctx; struct blk_mq_ctx *current_ctx; trace_block_unplug(q, depth, !from_schedule); current_ctx = blk_mq_get_ctx(q); if (!cpu_online(ctx->cpu)) ctx = current_ctx; hctx = q->mq_ops->map_queue(q, ctx->cpu); /* * preemption doesn't flush plug list, so it's possible ctx->cpu is * offline now */ spin_lock(&ctx->lock); while (!list_empty(list)) { struct request *rq; rq = list_first_entry(list, struct request, queuelist); list_del_init(&rq->queuelist); rq->mq_ctx = ctx; __blk_mq_insert_request(hctx, rq, false); } spin_unlock(&ctx->lock); blk_mq_run_hw_queue(hctx, from_schedule); blk_mq_put_ctx(current_ctx); } static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b) { struct request *rqa = container_of(a, struct request, queuelist); struct request *rqb = container_of(b, struct request, queuelist); return !(rqa->mq_ctx < rqb->mq_ctx || (rqa->mq_ctx == rqb->mq_ctx && blk_rq_pos(rqa) < blk_rq_pos(rqb))); } void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule) { struct blk_mq_ctx *this_ctx; struct request_queue *this_q; struct request *rq; LIST_HEAD(list); LIST_HEAD(ctx_list); unsigned int depth; list_splice_init(&plug->mq_list, &list); list_sort(NULL, &list, plug_ctx_cmp); this_q = NULL; this_ctx = NULL; depth = 0; while (!list_empty(&list)) { rq = list_entry_rq(list.next); list_del_init(&rq->queuelist); BUG_ON(!rq->q); if (rq->mq_ctx != this_ctx) { if (this_ctx) { blk_mq_insert_requests(this_q, this_ctx, &ctx_list, depth, from_schedule); } this_ctx = rq->mq_ctx; this_q = rq->q; depth = 0; } depth++; list_add_tail(&rq->queuelist, &ctx_list); } /* * If 'this_ctx' is set, we know we have entries to complete * on 'ctx_list'. Do those. 
*/ if (this_ctx) { blk_mq_insert_requests(this_q, this_ctx, &ctx_list, depth, from_schedule); } } static void blk_mq_bio_to_request(struct request *rq, struct bio *bio) { init_request_from_bio(rq, bio); if (blk_do_io_stat(rq)) blk_account_io_start(rq, 1); } static inline bool hctx_allow_merges(struct blk_mq_hw_ctx *hctx) { return (hctx->flags & BLK_MQ_F_SHOULD_MERGE) && !blk_queue_nomerges(hctx->queue); } static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx, struct request *rq, struct bio *bio) { if (!hctx_allow_merges(hctx)) { blk_mq_bio_to_request(rq, bio); spin_lock(&ctx->lock); insert_rq: __blk_mq_insert_request(hctx, rq, false); spin_unlock(&ctx->lock); return false; } else { struct request_queue *q = hctx->queue; spin_lock(&ctx->lock); if (!blk_mq_attempt_merge(q, ctx, bio)) { blk_mq_bio_to_request(rq, bio); goto insert_rq; } spin_unlock(&ctx->lock); __blk_mq_free_request(hctx, ctx, rq); return true; } } struct blk_map_ctx { struct blk_mq_hw_ctx *hctx; struct blk_mq_ctx *ctx; }; static struct request *blk_mq_map_request(struct request_queue *q, struct bio *bio, struct blk_map_ctx *data) { struct blk_mq_hw_ctx *hctx; struct blk_mq_ctx *ctx; struct request *rq; int rw = bio_data_dir(bio); struct blk_mq_alloc_data alloc_data; if (unlikely(blk_mq_queue_enter(q, GFP_KERNEL))) { bio_io_error(bio); return NULL; } ctx = blk_mq_get_ctx(q); hctx = q->mq_ops->map_queue(q, ctx->cpu); if (rw_is_sync(bio->bi_rw)) rw |= REQ_SYNC; trace_block_getrq(q, bio, rw); blk_mq_set_alloc_data(&alloc_data, q, GFP_ATOMIC, false, ctx, hctx); rq = __blk_mq_alloc_request(&alloc_data, rw); if (unlikely(!rq)) { __blk_mq_run_hw_queue(hctx); blk_mq_put_ctx(ctx); trace_block_sleeprq(q, bio, rw); ctx = blk_mq_get_ctx(q); hctx = q->mq_ops->map_queue(q, ctx->cpu); blk_mq_set_alloc_data(&alloc_data, q, __GFP_WAIT|GFP_ATOMIC, false, ctx, hctx); rq = __blk_mq_alloc_request(&alloc_data, rw); ctx = alloc_data.ctx; hctx = alloc_data.hctx; } hctx->queued++; data->hctx = hctx; data->ctx = ctx; return rq; } static int blk_mq_direct_issue_request(struct request *rq) { int ret; struct request_queue *q = rq->q; struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q, rq->mq_ctx->cpu); struct blk_mq_queue_data bd = { .rq = rq, .list = NULL, .last = 1 }; /* * For OK queue, we are done. For error, kill it. Any other * error (busy), just add it to our list as we previously * would have done */ ret = q->mq_ops->queue_rq(hctx, &bd); if (ret == BLK_MQ_RQ_QUEUE_OK) return 0; else { __blk_mq_requeue_request(rq); if (ret == BLK_MQ_RQ_QUEUE_ERROR) { rq->errors = -EIO; blk_mq_end_request(rq, rq->errors); return 0; } return -1; } } /* * Multiple hardware queue variant. This will not use per-process plugs, * but will attempt to bypass the hctx queueing if we can go straight to * hardware for SYNC IO. 
*/ static void blk_mq_make_request(struct request_queue *q, struct bio *bio) { const int is_sync = rw_is_sync(bio->bi_rw); const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA); struct blk_map_ctx data; struct request *rq; unsigned int request_count = 0; struct blk_plug *plug; struct request *same_queue_rq = NULL; blk_queue_bounce(q, &bio); if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) { bio_io_error(bio); return; } blk_queue_split(q, &bio, q->bio_split); if (!is_flush_fua && !blk_queue_nomerges(q) && blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq)) return; rq = blk_mq_map_request(q, bio, &data); if (unlikely(!rq)) return; if (unlikely(is_flush_fua)) { blk_mq_bio_to_request(rq, bio); blk_insert_flush(rq); goto run_queue; } plug = current->plug; /* * If the driver supports defer issued based on 'last', then * queue it up like normal since we can potentially save some * CPU this way. */ if (((plug && !blk_queue_nomerges(q)) || is_sync) && !(data.hctx->flags & BLK_MQ_F_DEFER_ISSUE)) { struct request *old_rq = NULL; blk_mq_bio_to_request(rq, bio); /* * we do limited pluging. If bio can be merged, do merge. * Otherwise the existing request in the plug list will be * issued. So the plug list will have one request at most */ if (plug) { /* * The plug list might get flushed before this. If that * happens, same_queue_rq is invalid and plug list is empty **/ if (same_queue_rq && !list_empty(&plug->mq_list)) { old_rq = same_queue_rq; list_del_init(&old_rq->queuelist); } list_add_tail(&rq->queuelist, &plug->mq_list); } else /* is_sync */ old_rq = rq; blk_mq_put_ctx(data.ctx); if (!old_rq) return; if (!blk_mq_direct_issue_request(old_rq)) return; blk_mq_insert_request(old_rq, false, true, true); return; } if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) { /* * For a SYNC request, send it to the hardware immediately. For * an ASYNC request, just ensure that we run it later on. The * latter allows for merging opportunities and more efficient * dispatching. */ run_queue: blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua); } blk_mq_put_ctx(data.ctx); } /* * Single hardware queue variant. This will attempt to use any per-process * plug for merging and IO deferral. */ static void blk_sq_make_request(struct request_queue *q, struct bio *bio) { const int is_sync = rw_is_sync(bio->bi_rw); const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA); struct blk_plug *plug; unsigned int request_count = 0; struct blk_map_ctx data; struct request *rq; blk_queue_bounce(q, &bio); if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) { bio_io_error(bio); return; } blk_queue_split(q, &bio, q->bio_split); if (!is_flush_fua && !blk_queue_nomerges(q) && blk_attempt_plug_merge(q, bio, &request_count, NULL)) return; rq = blk_mq_map_request(q, bio, &data); if (unlikely(!rq)) return; if (unlikely(is_flush_fua)) { blk_mq_bio_to_request(rq, bio); blk_insert_flush(rq); goto run_queue; } /* * A task plug currently exists. Since this is completely lockless, * utilize that to temporarily store requests until the task is * either done or scheduled away. */ plug = current->plug; if (plug) { blk_mq_bio_to_request(rq, bio); if (list_empty(&plug->mq_list)) trace_block_plug(q); else if (request_count >= BLK_MAX_REQUEST_COUNT) { blk_flush_plug_list(plug, false); trace_block_plug(q); } list_add_tail(&rq->queuelist, &plug->mq_list); blk_mq_put_ctx(data.ctx); return; } if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) { /* * For a SYNC request, send it to the hardware immediately. 
For * an ASYNC request, just ensure that we run it later on. The * latter allows for merging opportunities and more efficient * dispatching. */ run_queue: blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua); } blk_mq_put_ctx(data.ctx); } /* * Default mapping to a software queue, since we use one per CPU. */ struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, const int cpu) { return q->queue_hw_ctx[q->mq_map[cpu]]; } EXPORT_SYMBOL(blk_mq_map_queue); static void blk_mq_free_rq_map(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, unsigned int hctx_idx) { struct page *page; if (tags->rqs && set->ops->exit_request) { int i; for (i = 0; i < tags->nr_tags; i++) { if (!tags->rqs[i]) continue; set->ops->exit_request(set->driver_data, tags->rqs[i], hctx_idx, i); tags->rqs[i] = NULL; } } while (!list_empty(&tags->page_list)) { page = list_first_entry(&tags->page_list, struct page, lru); list_del_init(&page->lru); __free_pages(page, page->private); } kfree(tags->rqs); blk_mq_free_tags(tags); } static size_t order_to_size(unsigned int order) { return (size_t)PAGE_SIZE << order; } static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set, unsigned int hctx_idx) { struct blk_mq_tags *tags; unsigned int i, j, entries_per_page, max_order = 4; size_t rq_size, left; tags = blk_mq_init_tags(set->queue_depth, set->reserved_tags, set->numa_node, BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags)); if (!tags) return NULL; INIT_LIST_HEAD(&tags->page_list); tags->rqs = kzalloc_node(set->queue_depth * sizeof(struct request *), GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY, set->numa_node); if (!tags->rqs) { blk_mq_free_tags(tags); return NULL; } /* * rq_size is the size of the request plus driver payload, rounded * to the cacheline size */ rq_size = round_up(sizeof(struct request) + set->cmd_size, cache_line_size()); left = rq_size * set->queue_depth; for (i = 0; i < set->queue_depth; ) { int this_order = max_order; struct page *page; int to_do; void *p; while (left < order_to_size(this_order - 1) && this_order) this_order--; do { page = alloc_pages_node(set->numa_node, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO, this_order); if (page) break; if (!this_order--) break; if (order_to_size(this_order) < rq_size) break; } while (1); if (!page) goto fail; page->private = this_order; list_add_tail(&page->lru, &tags->page_list); p = page_address(page); entries_per_page = order_to_size(this_order) / rq_size; to_do = min(entries_per_page, set->queue_depth - i); left -= to_do * rq_size; for (j = 0; j < to_do; j++) { tags->rqs[i] = p; if (set->ops->init_request) { if (set->ops->init_request(set->driver_data, tags->rqs[i], hctx_idx, i, set->numa_node)) { tags->rqs[i] = NULL; goto fail; } } p += rq_size; i++; } } return tags; fail: blk_mq_free_rq_map(set, tags, hctx_idx); return NULL; } static void blk_mq_free_bitmap(struct blk_mq_ctxmap *bitmap) { kfree(bitmap->map); } static int blk_mq_alloc_bitmap(struct blk_mq_ctxmap *bitmap, int node) { unsigned int bpw = 8, total, num_maps, i; bitmap->bits_per_word = bpw; num_maps = ALIGN(nr_cpu_ids, bpw) / bpw; bitmap->map = kzalloc_node(num_maps * sizeof(struct blk_align_bitmap), GFP_KERNEL, node); if (!bitmap->map) return -ENOMEM; total = nr_cpu_ids; for (i = 0; i < num_maps; i++) { bitmap->map[i].depth = min(total, bitmap->bits_per_word); total -= bitmap->map[i].depth; } return 0; } static int blk_mq_hctx_cpu_offline(struct blk_mq_hw_ctx *hctx, int cpu) { struct request_queue *q = hctx->queue; struct blk_mq_ctx *ctx; LIST_HEAD(tmp); /* * Move ctx 
entries to new CPU, if this one is going away. */ ctx = __blk_mq_get_ctx(q, cpu); spin_lock(&ctx->lock); if (!list_empty(&ctx->rq_list)) { list_splice_init(&ctx->rq_list, &tmp); blk_mq_hctx_clear_pending(hctx, ctx); } spin_unlock(&ctx->lock); if (list_empty(&tmp)) return NOTIFY_OK; ctx = blk_mq_get_ctx(q); spin_lock(&ctx->lock); while (!list_empty(&tmp)) { struct request *rq; rq = list_first_entry(&tmp, struct request, queuelist); rq->mq_ctx = ctx; list_move_tail(&rq->queuelist, &ctx->rq_list); } hctx = q->mq_ops->map_queue(q, ctx->cpu); blk_mq_hctx_mark_pending(hctx, ctx); spin_unlock(&ctx->lock); blk_mq_run_hw_queue(hctx, true); blk_mq_put_ctx(ctx); return NOTIFY_OK; } static int blk_mq_hctx_notify(void *data, unsigned long action, unsigned int cpu) { struct blk_mq_hw_ctx *hctx = data; if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) return blk_mq_hctx_cpu_offline(hctx, cpu); /* * In case of CPU online, tags may be reallocated * in blk_mq_map_swqueue() after mapping is updated. */ return NOTIFY_OK; } /* hctx->ctxs will be freed in queue's release handler */ static void blk_mq_exit_hctx(struct request_queue *q, struct blk_mq_tag_set *set, struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) { unsigned flush_start_tag = set->queue_depth; blk_mq_tag_idle(hctx); if (set->ops->exit_request) set->ops->exit_request(set->driver_data, hctx->fq->flush_rq, hctx_idx, flush_start_tag + hctx_idx); if (set->ops->exit_hctx) set->ops->exit_hctx(hctx, hctx_idx); blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier); blk_free_flush_queue(hctx->fq); blk_mq_free_bitmap(&hctx->ctx_map); } static void blk_mq_exit_hw_queues(struct request_queue *q, struct blk_mq_tag_set *set, int nr_queue) { struct blk_mq_hw_ctx *hctx; unsigned int i; queue_for_each_hw_ctx(q, hctx, i) { if (i == nr_queue) break; blk_mq_exit_hctx(q, set, hctx, i); } } static void blk_mq_free_hw_queues(struct request_queue *q, struct blk_mq_tag_set *set) { struct blk_mq_hw_ctx *hctx; unsigned int i; queue_for_each_hw_ctx(q, hctx, i) free_cpumask_var(hctx->cpumask); } static int blk_mq_init_hctx(struct request_queue *q, struct blk_mq_tag_set *set, struct blk_mq_hw_ctx *hctx, unsigned hctx_idx) { int node; unsigned flush_start_tag = set->queue_depth; node = hctx->numa_node; if (node == NUMA_NO_NODE) node = hctx->numa_node = set->numa_node; INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn); INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn); spin_lock_init(&hctx->lock); INIT_LIST_HEAD(&hctx->dispatch); hctx->queue = q; hctx->queue_num = hctx_idx; hctx->flags = set->flags; blk_mq_init_cpu_notifier(&hctx->cpu_notifier, blk_mq_hctx_notify, hctx); blk_mq_register_cpu_notifier(&hctx->cpu_notifier); hctx->tags = set->tags[hctx_idx]; /* * Allocate space for all possible cpus to avoid allocation at * runtime */ hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *), GFP_KERNEL, node); if (!hctx->ctxs) goto unregister_cpu_notifier; if (blk_mq_alloc_bitmap(&hctx->ctx_map, node)) goto free_ctxs; hctx->nr_ctx = 0; if (set->ops->init_hctx && set->ops->init_hctx(hctx, set->driver_data, hctx_idx)) goto free_bitmap; hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size); if (!hctx->fq) goto exit_hctx; if (set->ops->init_request && set->ops->init_request(set->driver_data, hctx->fq->flush_rq, hctx_idx, flush_start_tag + hctx_idx, node)) goto free_fq; return 0; free_fq: kfree(hctx->fq); exit_hctx: if (set->ops->exit_hctx) set->ops->exit_hctx(hctx, hctx_idx); free_bitmap: blk_mq_free_bitmap(&hctx->ctx_map); free_ctxs: kfree(hctx->ctxs); 
unregister_cpu_notifier: blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier); return -1; } static int blk_mq_init_hw_queues(struct request_queue *q, struct blk_mq_tag_set *set) { struct blk_mq_hw_ctx *hctx; unsigned int i; /* * Initialize hardware queues */ queue_for_each_hw_ctx(q, hctx, i) { if (blk_mq_init_hctx(q, set, hctx, i)) break; } if (i == q->nr_hw_queues) return 0; /* * Init failed */ blk_mq_exit_hw_queues(q, set, i); return 1; } static void blk_mq_init_cpu_queues(struct request_queue *q, unsigned int nr_hw_queues) { unsigned int i; for_each_possible_cpu(i) { struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i); struct blk_mq_hw_ctx *hctx; memset(__ctx, 0, sizeof(*__ctx)); __ctx->cpu = i; spin_lock_init(&__ctx->lock); INIT_LIST_HEAD(&__ctx->rq_list); __ctx->queue = q; /* If the cpu isn't online, the cpu is mapped to first hctx */ if (!cpu_online(i)) continue; hctx = q->mq_ops->map_queue(q, i); /* * Set local node, IFF we have more than one hw queue. If * not, we remain on the home node of the device */ if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE) hctx->numa_node = cpu_to_node(i); } } static void blk_mq_map_swqueue(struct request_queue *q) { unsigned int i; struct blk_mq_hw_ctx *hctx; struct blk_mq_ctx *ctx; struct blk_mq_tag_set *set = q->tag_set; queue_for_each_hw_ctx(q, hctx, i) { cpumask_clear(hctx->cpumask); hctx->nr_ctx = 0; } /* * Map software to hardware queues */ queue_for_each_ctx(q, ctx, i) { /* If the cpu isn't online, the cpu is mapped to first hctx */ if (!cpu_online(i)) continue; hctx = q->mq_ops->map_queue(q, i); cpumask_set_cpu(i, hctx->cpumask); cpumask_set_cpu(i, hctx->tags->cpumask); ctx->index_hw = hctx->nr_ctx; hctx->ctxs[hctx->nr_ctx++] = ctx; } queue_for_each_hw_ctx(q, hctx, i) { struct blk_mq_ctxmap *map = &hctx->ctx_map; /* * If no software queues are mapped to this hardware queue, * disable it and free the request entries. */ if (!hctx->nr_ctx) { if (set->tags[i]) { blk_mq_free_rq_map(set, set->tags[i], i); set->tags[i] = NULL; } hctx->tags = NULL; continue; } /* unmapped hw queue can be remapped after CPU topo changed */ if (!set->tags[i]) set->tags[i] = blk_mq_init_rq_map(set, i); hctx->tags = set->tags[i]; WARN_ON(!hctx->tags); /* * Set the map size to the number of mapped software queues. * This is more accurate and more efficient than looping * over all possibly mapped software queues. 
*/ map->size = DIV_ROUND_UP(hctx->nr_ctx, map->bits_per_word); /* * Initialize batch roundrobin counts */ hctx->next_cpu = cpumask_first(hctx->cpumask); hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH; } } static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set) { struct blk_mq_hw_ctx *hctx; struct request_queue *q; bool shared; int i; if (set->tag_list.next == set->tag_list.prev) shared = false; else shared = true; list_for_each_entry(q, &set->tag_list, tag_set_list) { blk_mq_freeze_queue(q); queue_for_each_hw_ctx(q, hctx, i) { if (shared) hctx->flags |= BLK_MQ_F_TAG_SHARED; else hctx->flags &= ~BLK_MQ_F_TAG_SHARED; } blk_mq_unfreeze_queue(q); } } static void blk_mq_del_queue_tag_set(struct request_queue *q) { struct blk_mq_tag_set *set = q->tag_set; mutex_lock(&set->tag_list_lock); list_del_init(&q->tag_set_list); blk_mq_update_tag_set_depth(set); mutex_unlock(&set->tag_list_lock); } static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set, struct request_queue *q) { q->tag_set = set; mutex_lock(&set->tag_list_lock); list_add_tail(&q->tag_set_list, &set->tag_list); blk_mq_update_tag_set_depth(set); mutex_unlock(&set->tag_list_lock); } /* * It is the actual release handler for mq, but we do it from * request queue's release handler for avoiding use-after-free * and headache because q->mq_kobj shouldn't have been introduced, * but we can't group ctx/kctx kobj without it. */ void blk_mq_release(struct request_queue *q) { struct blk_mq_hw_ctx *hctx; unsigned int i; /* hctx kobj stays in hctx */ queue_for_each_hw_ctx(q, hctx, i) { if (!hctx) continue; kfree(hctx->ctxs); kfree(hctx); } kfree(q->queue_hw_ctx); /* ctx kobj stays in queue_ctx */ free_percpu(q->queue_ctx); } struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set) { struct request_queue *uninit_q, *q; uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node); if (!uninit_q) return ERR_PTR(-ENOMEM); q = blk_mq_init_allocated_queue(set, uninit_q); if (IS_ERR(q)) blk_cleanup_queue(uninit_q); return q; } EXPORT_SYMBOL(blk_mq_init_queue); struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set, struct request_queue *q) { struct blk_mq_hw_ctx **hctxs; struct blk_mq_ctx __percpu *ctx; unsigned int *map; int i; ctx = alloc_percpu(struct blk_mq_ctx); if (!ctx) return ERR_PTR(-ENOMEM); hctxs = kmalloc_node(set->nr_hw_queues * sizeof(*hctxs), GFP_KERNEL, set->numa_node); if (!hctxs) goto err_percpu; map = blk_mq_make_queue_map(set); if (!map) goto err_map; for (i = 0; i < set->nr_hw_queues; i++) { int node = blk_mq_hw_queue_to_node(map, i); hctxs[i] = kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL, node); if (!hctxs[i]) goto err_hctxs; if (!zalloc_cpumask_var_node(&hctxs[i]->cpumask, GFP_KERNEL, node)) goto err_hctxs; atomic_set(&hctxs[i]->nr_active, 0); hctxs[i]->numa_node = node; hctxs[i]->queue_num = i; } /* * Init percpu_ref in atomic mode so that it's faster to shutdown. * See blk_register_queue() for details. */ if (percpu_ref_init(&q->mq_usage_counter, blk_mq_usage_counter_release, PERCPU_REF_INIT_ATOMIC, GFP_KERNEL)) goto err_hctxs; setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q); blk_queue_rq_timeout(q, set->timeout ? 
set->timeout : 30 * HZ); q->nr_queues = nr_cpu_ids; q->nr_hw_queues = set->nr_hw_queues; q->mq_map = map; q->queue_ctx = ctx; q->queue_hw_ctx = hctxs; q->mq_ops = set->ops; q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT; if (!(set->flags & BLK_MQ_F_SG_MERGE)) q->queue_flags |= 1 << QUEUE_FLAG_NO_SG_MERGE; q->sg_reserved_size = INT_MAX; INIT_WORK(&q->requeue_work, blk_mq_requeue_work); INIT_LIST_HEAD(&q->requeue_list); spin_lock_init(&q->requeue_lock); if (q->nr_hw_queues > 1) blk_queue_make_request(q, blk_mq_make_request); else blk_queue_make_request(q, blk_sq_make_request); /* * Do this after blk_queue_make_request() overrides it... */ q->nr_requests = set->queue_depth; if (set->ops->complete) blk_queue_softirq_done(q, set->ops->complete); blk_mq_init_cpu_queues(q, set->nr_hw_queues); if (blk_mq_init_hw_queues(q, set)) goto err_hctxs; mutex_lock(&all_q_mutex); list_add_tail(&q->all_q_node, &all_q_list); mutex_unlock(&all_q_mutex); blk_mq_add_queue_tag_set(set, q); blk_mq_map_swqueue(q); return q; err_hctxs: kfree(map); for (i = 0; i < set->nr_hw_queues; i++) { if (!hctxs[i]) break; free_cpumask_var(hctxs[i]->cpumask); kfree(hctxs[i]); } err_map: kfree(hctxs); err_percpu: free_percpu(ctx); return ERR_PTR(-ENOMEM); } EXPORT_SYMBOL(blk_mq_init_allocated_queue); void blk_mq_free_queue(struct request_queue *q) { struct blk_mq_tag_set *set = q->tag_set; blk_mq_del_queue_tag_set(q); blk_mq_exit_hw_queues(q, set, set->nr_hw_queues); blk_mq_free_hw_queues(q, set); percpu_ref_exit(&q->mq_usage_counter); kfree(q->mq_map); q->mq_map = NULL; mutex_lock(&all_q_mutex); list_del_init(&q->all_q_node); mutex_unlock(&all_q_mutex); } /* Basically redo blk_mq_init_queue with queue frozen */ static void blk_mq_queue_reinit(struct request_queue *q) { WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth)); blk_mq_sysfs_unregister(q); blk_mq_update_queue_map(q->mq_map, q->nr_hw_queues); /* * redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe * we should change hctx numa_node according to new topology (this * involves free and re-allocate memory, worthy doing?) */ blk_mq_map_swqueue(q); blk_mq_sysfs_register(q); } static int blk_mq_queue_reinit_notify(struct notifier_block *nb, unsigned long action, void *hcpu) { struct request_queue *q; /* * Before new mappings are established, hotadded cpu might already * start handling requests. This doesn't break anything as we map * offline CPUs to first hardware queue. We will re-init the queue * below to get optimal settings. */ if (action != CPU_DEAD && action != CPU_DEAD_FROZEN && action != CPU_ONLINE && action != CPU_ONLINE_FROZEN) return NOTIFY_OK; mutex_lock(&all_q_mutex); /* * We need to freeze and reinit all existing queues. Freezing * involves synchronous wait for an RCU grace period and doing it * one by one may take a long time. Start freezing all queues in * one swoop and then wait for the completions so that freezing can * take place in parallel. 
*/ list_for_each_entry(q, &all_q_list, all_q_node) blk_mq_freeze_queue_start(q); list_for_each_entry(q, &all_q_list, all_q_node) { blk_mq_freeze_queue_wait(q); /* * timeout handler can't touch hw queue during the * reinitialization */ del_timer_sync(&q->timeout); } list_for_each_entry(q, &all_q_list, all_q_node) blk_mq_queue_reinit(q); list_for_each_entry(q, &all_q_list, all_q_node) blk_mq_unfreeze_queue(q); mutex_unlock(&all_q_mutex); return NOTIFY_OK; } static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set) { int i; for (i = 0; i < set->nr_hw_queues; i++) { set->tags[i] = blk_mq_init_rq_map(set, i); if (!set->tags[i]) goto out_unwind; } return 0; out_unwind: while (--i >= 0) blk_mq_free_rq_map(set, set->tags[i], i); return -ENOMEM; } /* * Allocate the request maps associated with this tag_set. Note that this * may reduce the depth asked for, if memory is tight. set->queue_depth * will be updated to reflect the allocated depth. */ static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set) { unsigned int depth; int err; depth = set->queue_depth; do { err = __blk_mq_alloc_rq_maps(set); if (!err) break; set->queue_depth >>= 1; if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) { err = -ENOMEM; break; } } while (set->queue_depth); if (!set->queue_depth || err) { pr_err("blk-mq: failed to allocate request map\n"); return -ENOMEM; } if (depth != set->queue_depth) pr_info("blk-mq: reduced tag depth (%u -> %u)\n", depth, set->queue_depth); return 0; } struct cpumask *blk_mq_tags_cpumask(struct blk_mq_tags *tags) { return tags->cpumask; } EXPORT_SYMBOL_GPL(blk_mq_tags_cpumask); /* * Alloc a tag set to be associated with one or more request queues. * May fail with EINVAL for various error conditions. May adjust the * requested depth down, if it is too large. In that case, the set * value will be stored in set->queue_depth. */ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set) { BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS); if (!set->nr_hw_queues) return -EINVAL; if (!set->queue_depth) return -EINVAL; if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) return -EINVAL; if (!set->ops->queue_rq || !set->ops->map_queue) return -EINVAL; if (set->queue_depth > BLK_MQ_MAX_DEPTH) { pr_info("blk-mq: reduced tag depth to %u\n", BLK_MQ_MAX_DEPTH); set->queue_depth = BLK_MQ_MAX_DEPTH; } /* * If a crashdump is active, then we are potentially in a very * memory constrained environment. Limit us to 1 queue and * 64 tags to prevent using too much memory.
*/ if (is_kdump_kernel()) { set->nr_hw_queues = 1; set->queue_depth = min(64U, set->queue_depth); } set->tags = kmalloc_node(set->nr_hw_queues * sizeof(struct blk_mq_tags *), GFP_KERNEL, set->numa_node); if (!set->tags) return -ENOMEM; if (blk_mq_alloc_rq_maps(set)) goto enomem; mutex_init(&set->tag_list_lock); INIT_LIST_HEAD(&set->tag_list); return 0; enomem: kfree(set->tags); set->tags = NULL; return -ENOMEM; } EXPORT_SYMBOL(blk_mq_alloc_tag_set); void blk_mq_free_tag_set(struct blk_mq_tag_set *set) { int i; for (i = 0; i < set->nr_hw_queues; i++) { if (set->tags[i]) { blk_mq_free_rq_map(set, set->tags[i], i); free_cpumask_var(set->tags[i]->cpumask); } } kfree(set->tags); set->tags = NULL; } EXPORT_SYMBOL(blk_mq_free_tag_set); int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr) { struct blk_mq_tag_set *set = q->tag_set; struct blk_mq_hw_ctx *hctx; int i, ret; if (!set || nr > set->queue_depth) return -EINVAL; ret = 0; queue_for_each_hw_ctx(q, hctx, i) { ret = blk_mq_tag_update_depth(hctx->tags, nr); if (ret) break; } if (!ret) q->nr_requests = nr; return ret; } void blk_mq_disable_hotplug(void) { mutex_lock(&all_q_mutex); } void blk_mq_enable_hotplug(void) { mutex_unlock(&all_q_mutex); } static int __init blk_mq_init(void) { blk_mq_cpu_init(); hotcpu_notifier(blk_mq_queue_reinit_notify, 0); return 0; } subsys_initcall(blk_mq_init);
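/*
 * Illustrative sketch (not part of blk-mq.c): how a block driver would
 * typically consume the tag-set and queue setup API shown above
 * (blk_mq_alloc_tag_set(), blk_mq_init_queue(), blk_mq_free_tag_set(),
 * blk_mq_map_queue()). The my_example_* names are hypothetical
 * placeholders; the block is guarded with #if 0 so it reads as an
 * example only, not as driver code belonging to this file.
 */
#if 0
struct my_example_cmd {
	int status;			/* per-request driver payload (cmd_size) */
};

struct my_example_dev {
	struct blk_mq_tag_set	tag_set;
	struct request_queue	*queue;
};

static int my_example_queue_rq(struct blk_mq_hw_ctx *hctx,
			       const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;
	struct my_example_cmd *cmd = blk_mq_rq_to_pdu(rq);

	blk_mq_start_request(rq);
	/* a real driver would hand the request to hardware here */
	cmd->status = 0;
	blk_mq_end_request(rq, cmd->status);
	return BLK_MQ_RQ_QUEUE_OK;
}

static struct blk_mq_ops my_example_mq_ops = {
	.queue_rq	= my_example_queue_rq,
	.map_queue	= blk_mq_map_queue,	/* default CPU -> hctx mapping */
};

static int my_example_setup_queue(struct my_example_dev *dev)
{
	int ret;

	memset(&dev->tag_set, 0, sizeof(dev->tag_set));
	dev->tag_set.ops		= &my_example_mq_ops;
	dev->tag_set.nr_hw_queues	= 1;
	dev->tag_set.queue_depth	= 64;	/* may be reduced if memory is tight */
	dev->tag_set.numa_node		= NUMA_NO_NODE;
	dev->tag_set.cmd_size		= sizeof(struct my_example_cmd);
	dev->tag_set.flags		= BLK_MQ_F_SHOULD_MERGE;

	ret = blk_mq_alloc_tag_set(&dev->tag_set);
	if (ret)
		return ret;

	dev->queue = blk_mq_init_queue(&dev->tag_set);
	if (IS_ERR(dev->queue)) {
		ret = PTR_ERR(dev->queue);
		blk_mq_free_tag_set(&dev->tag_set);
		return ret;
	}
	return 0;
}
#endif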
./CrossVul/dataset_final_sorted/CWE-362/c/bad_1865_3
crossvul-cpp_data_bad_2034_0
/* * n_tty.c --- implements the N_TTY line discipline. * * This code used to be in tty_io.c, but things are getting hairy * enough that it made sense to split things off. (The N_TTY * processing has changed so much that it's hardly recognizable, * anyway...) * * Note that the open routine for N_TTY is guaranteed never to return * an error. This is because Linux will fall back to setting a line * to N_TTY if it can not switch to any other line discipline. * * Written by Theodore Ts'o, Copyright 1994. * * This file also contains code originally written by Linus Torvalds, * Copyright 1991, 1992, 1993, and by Julian Cowley, Copyright 1994. * * This file may be redistributed under the terms of the GNU General Public * License. * * Reduced memory usage for older ARM systems - Russell King. * * 2000/01/20 Fixed SMP locking on put_tty_queue using bits of * the patch by Andrew J. Kroll <ag784@freenet.buffalo.edu> * who actually finally proved there really was a race. * * 2002/03/18 Implemented n_tty_wakeup to send SIGIO POLL_OUTs to * waiting writing processes-Sapan Bhatia <sapan@corewars.org>. * Also fixed a bug in BLOCKING mode where n_tty_write returns * EAGAIN */ #include <linux/types.h> #include <linux/major.h> #include <linux/errno.h> #include <linux/signal.h> #include <linux/fcntl.h> #include <linux/sched.h> #include <linux/interrupt.h> #include <linux/tty.h> #include <linux/timer.h> #include <linux/ctype.h> #include <linux/mm.h> #include <linux/string.h> #include <linux/slab.h> #include <linux/poll.h> #include <linux/bitops.h> #include <linux/audit.h> #include <linux/file.h> #include <linux/uaccess.h> #include <linux/module.h> #include <linux/ratelimit.h> #include <linux/vmalloc.h> /* number of characters left in xmit buffer before select has we have room */ #define WAKEUP_CHARS 256 /* * This defines the low- and high-watermarks for throttling and * unthrottling the TTY driver. These watermarks are used for * controlling the space in the read buffer. */ #define TTY_THRESHOLD_THROTTLE 128 /* now based on remaining room */ #define TTY_THRESHOLD_UNTHROTTLE 128 /* * Special byte codes used in the echo buffer to represent operations * or special handling of characters. Bytes in the echo buffer that * are not part of such special blocks are treated as normal character * codes. */ #define ECHO_OP_START 0xff #define ECHO_OP_MOVE_BACK_COL 0x80 #define ECHO_OP_SET_CANON_COL 0x81 #define ECHO_OP_ERASE_TAB 0x82 #define ECHO_COMMIT_WATERMARK 256 #define ECHO_BLOCK 256 #define ECHO_DISCARD_WATERMARK N_TTY_BUF_SIZE - (ECHO_BLOCK + 32) #undef N_TTY_TRACE #ifdef N_TTY_TRACE # define n_tty_trace(f, args...) trace_printk(f, ##args) #else # define n_tty_trace(f, args...) 
#endif struct n_tty_data { /* producer-published */ size_t read_head; size_t canon_head; size_t echo_head; size_t echo_commit; size_t echo_mark; DECLARE_BITMAP(char_map, 256); /* private to n_tty_receive_overrun (single-threaded) */ unsigned long overrun_time; int num_overrun; /* non-atomic */ bool no_room; /* must hold exclusive termios_rwsem to reset these */ unsigned char lnext:1, erasing:1, raw:1, real_raw:1, icanon:1; unsigned char push:1; /* shared by producer and consumer */ char read_buf[N_TTY_BUF_SIZE]; DECLARE_BITMAP(read_flags, N_TTY_BUF_SIZE); unsigned char echo_buf[N_TTY_BUF_SIZE]; int minimum_to_wake; /* consumer-published */ size_t read_tail; size_t line_start; /* protected by output lock */ unsigned int column; unsigned int canon_column; size_t echo_tail; struct mutex atomic_read_lock; struct mutex output_lock; }; static inline size_t read_cnt(struct n_tty_data *ldata) { return ldata->read_head - ldata->read_tail; } static inline unsigned char read_buf(struct n_tty_data *ldata, size_t i) { return ldata->read_buf[i & (N_TTY_BUF_SIZE - 1)]; } static inline unsigned char *read_buf_addr(struct n_tty_data *ldata, size_t i) { return &ldata->read_buf[i & (N_TTY_BUF_SIZE - 1)]; } static inline unsigned char echo_buf(struct n_tty_data *ldata, size_t i) { return ldata->echo_buf[i & (N_TTY_BUF_SIZE - 1)]; } static inline unsigned char *echo_buf_addr(struct n_tty_data *ldata, size_t i) { return &ldata->echo_buf[i & (N_TTY_BUF_SIZE - 1)]; } static inline int tty_put_user(struct tty_struct *tty, unsigned char x, unsigned char __user *ptr) { struct n_tty_data *ldata = tty->disc_data; tty_audit_add_data(tty, &x, 1, ldata->icanon); return put_user(x, ptr); } static int receive_room(struct tty_struct *tty) { struct n_tty_data *ldata = tty->disc_data; int left; if (I_PARMRK(tty)) { /* Multiply read_cnt by 3, since each byte might take up to * three times as many spaces when PARMRK is set (depending on * its flags, e.g. parity error). */ left = N_TTY_BUF_SIZE - read_cnt(ldata) * 3 - 1; } else left = N_TTY_BUF_SIZE - read_cnt(ldata) - 1; /* * If we are doing input canonicalization, and there are no * pending newlines, let characters through without limit, so * that erase characters will be handled. Other excess * characters will be beeped. */ if (left <= 0) left = ldata->icanon && ldata->canon_head == ldata->read_tail; return left; } /** * n_tty_set_room - receive space * @tty: terminal * * Re-schedules the flip buffer work if space just became available. * * Caller holds exclusive termios_rwsem * or * n_tty_read()/consumer path: * holds non-exclusive termios_rwsem */ static void n_tty_set_room(struct tty_struct *tty) { struct n_tty_data *ldata = tty->disc_data; /* Did this open up the receive buffer? 
We may need to flip */ if (unlikely(ldata->no_room) && receive_room(tty)) { ldata->no_room = 0; WARN_RATELIMIT(tty->port->itty == NULL, "scheduling with invalid itty\n"); /* see if ldisc has been killed - if so, this means that * even though the ldisc has been halted and ->buf.work * cancelled, ->buf.work is about to be rescheduled */ WARN_RATELIMIT(test_bit(TTY_LDISC_HALTED, &tty->flags), "scheduling buffer work for halted ldisc\n"); queue_work(system_unbound_wq, &tty->port->buf.work); } } static ssize_t chars_in_buffer(struct tty_struct *tty) { struct n_tty_data *ldata = tty->disc_data; ssize_t n = 0; if (!ldata->icanon) n = read_cnt(ldata); else n = ldata->canon_head - ldata->read_tail; return n; } /** * n_tty_write_wakeup - asynchronous I/O notifier * @tty: tty device * * Required for the ptys, serial driver etc. since processes * that attach themselves to the master and rely on ASYNC * IO must be woken up */ static void n_tty_write_wakeup(struct tty_struct *tty) { if (tty->fasync && test_and_clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags)) kill_fasync(&tty->fasync, SIGIO, POLL_OUT); } static void n_tty_check_throttle(struct tty_struct *tty) { if (tty->driver->type == TTY_DRIVER_TYPE_PTY) return; /* * Check the remaining room for the input canonicalization * mode. We don't want to throttle the driver if we're in * canonical mode and don't have a newline yet! */ while (1) { int throttled; tty_set_flow_change(tty, TTY_THROTTLE_SAFE); if (receive_room(tty) >= TTY_THRESHOLD_THROTTLE) break; throttled = tty_throttle_safe(tty); if (!throttled) break; } __tty_set_flow_change(tty, 0); } static void n_tty_check_unthrottle(struct tty_struct *tty) { if (tty->driver->type == TTY_DRIVER_TYPE_PTY && tty->link->ldisc->ops->write_wakeup == n_tty_write_wakeup) { if (chars_in_buffer(tty) > TTY_THRESHOLD_UNTHROTTLE) return; if (!tty->count) return; n_tty_set_room(tty); n_tty_write_wakeup(tty->link); if (waitqueue_active(&tty->link->write_wait)) wake_up_interruptible_poll(&tty->link->write_wait, POLLOUT); return; } /* If there is enough space in the read buffer now, let the * low-level driver know. We use chars_in_buffer() to * check the buffer, as it now knows about canonical mode. * Otherwise, if the driver is throttled and the line is * longer than TTY_THRESHOLD_UNTHROTTLE in canonical mode, * we won't get any more characters. */ while (1) { int unthrottled; tty_set_flow_change(tty, TTY_UNTHROTTLE_SAFE); if (chars_in_buffer(tty) > TTY_THRESHOLD_UNTHROTTLE) break; if (!tty->count) break; n_tty_set_room(tty); unthrottled = tty_unthrottle_safe(tty); if (!unthrottled) break; } __tty_set_flow_change(tty, 0); } /** * put_tty_queue - add character to tty * @c: character * @ldata: n_tty data * * Add a character to the tty read_buf queue. * * n_tty_receive_buf()/producer path: * caller holds non-exclusive termios_rwsem * modifies read_head * * read_head is only considered 'published' if canonical mode is * not active. */ static inline void put_tty_queue(unsigned char c, struct n_tty_data *ldata) { *read_buf_addr(ldata, ldata->read_head++) = c; } /** * reset_buffer_flags - reset buffer state * @tty: terminal to reset * * Reset the read buffer counters and clear the flags. * Called from n_tty_open() and n_tty_flush_buffer(). 
* * Locking: caller holds exclusive termios_rwsem * (or locking is not required) */ static void reset_buffer_flags(struct n_tty_data *ldata) { ldata->read_head = ldata->canon_head = ldata->read_tail = 0; ldata->echo_head = ldata->echo_tail = ldata->echo_commit = 0; ldata->echo_mark = 0; ldata->line_start = 0; ldata->erasing = 0; bitmap_zero(ldata->read_flags, N_TTY_BUF_SIZE); ldata->push = 0; } static void n_tty_packet_mode_flush(struct tty_struct *tty) { unsigned long flags; spin_lock_irqsave(&tty->ctrl_lock, flags); if (tty->link->packet) { tty->ctrl_status |= TIOCPKT_FLUSHREAD; if (waitqueue_active(&tty->link->read_wait)) wake_up_interruptible(&tty->link->read_wait); } spin_unlock_irqrestore(&tty->ctrl_lock, flags); } /** * n_tty_flush_buffer - clean input queue * @tty: terminal device * * Flush the input buffer. Called when the tty layer wants the * buffer flushed (eg at hangup) or when the N_TTY line discipline * internally has to clean the pending queue (for example some signals). * * Holds termios_rwsem to exclude producer/consumer while * buffer indices are reset. * * Locking: ctrl_lock, exclusive termios_rwsem */ static void n_tty_flush_buffer(struct tty_struct *tty) { down_write(&tty->termios_rwsem); reset_buffer_flags(tty->disc_data); n_tty_set_room(tty); if (tty->link) n_tty_packet_mode_flush(tty); up_write(&tty->termios_rwsem); } /** * n_tty_chars_in_buffer - report available bytes * @tty: tty device * * Report the number of characters buffered to be delivered to user * at this instant in time. * * Locking: exclusive termios_rwsem */ static ssize_t n_tty_chars_in_buffer(struct tty_struct *tty) { ssize_t n; WARN_ONCE(1, "%s is deprecated and scheduled for removal.", __func__); down_write(&tty->termios_rwsem); n = chars_in_buffer(tty); up_write(&tty->termios_rwsem); return n; } /** * is_utf8_continuation - utf8 multibyte check * @c: byte to check * * Returns true if the utf8 character 'c' is a multibyte continuation * character. We use this to correctly compute the on screen size * of the character when printing */ static inline int is_utf8_continuation(unsigned char c) { return (c & 0xc0) == 0x80; } /** * is_continuation - multibyte check * @c: byte to check * * Returns true if the utf8 character 'c' is a multibyte continuation * character and the terminal is in unicode mode. */ static inline int is_continuation(unsigned char c, struct tty_struct *tty) { return I_IUTF8(tty) && is_utf8_continuation(c); } /** * do_output_char - output one character * @c: character (or partial unicode symbol) * @tty: terminal device * @space: space available in tty driver write buffer * * This is a helper function that handles one output character * (including special characters like TAB, CR, LF, etc.), * doing OPOST processing and putting the results in the * tty driver's write buffer. * * Note that Linux currently ignores TABDLY, CRDLY, VTDLY, FFDLY * and NLDLY. They simply aren't relevant in the world today. * If you ever need them, add them here. * * Returns the number of bytes of buffer space used or -1 if * no space left. 
* * Locking: should be called under the output_lock to protect * the column state and space left in the buffer */ static int do_output_char(unsigned char c, struct tty_struct *tty, int space) { struct n_tty_data *ldata = tty->disc_data; int spaces; if (!space) return -1; switch (c) { case '\n': if (O_ONLRET(tty)) ldata->column = 0; if (O_ONLCR(tty)) { if (space < 2) return -1; ldata->canon_column = ldata->column = 0; tty->ops->write(tty, "\r\n", 2); return 2; } ldata->canon_column = ldata->column; break; case '\r': if (O_ONOCR(tty) && ldata->column == 0) return 0; if (O_OCRNL(tty)) { c = '\n'; if (O_ONLRET(tty)) ldata->canon_column = ldata->column = 0; break; } ldata->canon_column = ldata->column = 0; break; case '\t': spaces = 8 - (ldata->column & 7); if (O_TABDLY(tty) == XTABS) { if (space < spaces) return -1; ldata->column += spaces; tty->ops->write(tty, " ", spaces); return spaces; } ldata->column += spaces; break; case '\b': if (ldata->column > 0) ldata->column--; break; default: if (!iscntrl(c)) { if (O_OLCUC(tty)) c = toupper(c); if (!is_continuation(c, tty)) ldata->column++; } break; } tty_put_char(tty, c); return 1; } /** * process_output - output post processor * @c: character (or partial unicode symbol) * @tty: terminal device * * Output one character with OPOST processing. * Returns -1 when the output device is full and the character * must be retried. * * Locking: output_lock to protect column state and space left * (also, this is called from n_tty_write under the * tty layer write lock) */ static int process_output(unsigned char c, struct tty_struct *tty) { struct n_tty_data *ldata = tty->disc_data; int space, retval; mutex_lock(&ldata->output_lock); space = tty_write_room(tty); retval = do_output_char(c, tty, space); mutex_unlock(&ldata->output_lock); if (retval < 0) return -1; else return 0; } /** * process_output_block - block post processor * @tty: terminal device * @buf: character buffer * @nr: number of bytes to output * * Output a block of characters with OPOST processing. * Returns the number of characters output. * * This path is used to speed up block console writes, among other * things when processing blocks of output data. It handles only * the simple cases normally found and helps to generate blocks of * symbols for the console driver and thus improve performance. 
* * Locking: output_lock to protect column state and space left * (also, this is called from n_tty_write under the * tty layer write lock) */ static ssize_t process_output_block(struct tty_struct *tty, const unsigned char *buf, unsigned int nr) { struct n_tty_data *ldata = tty->disc_data; int space; int i; const unsigned char *cp; mutex_lock(&ldata->output_lock); space = tty_write_room(tty); if (!space) { mutex_unlock(&ldata->output_lock); return 0; } if (nr > space) nr = space; for (i = 0, cp = buf; i < nr; i++, cp++) { unsigned char c = *cp; switch (c) { case '\n': if (O_ONLRET(tty)) ldata->column = 0; if (O_ONLCR(tty)) goto break_out; ldata->canon_column = ldata->column; break; case '\r': if (O_ONOCR(tty) && ldata->column == 0) goto break_out; if (O_OCRNL(tty)) goto break_out; ldata->canon_column = ldata->column = 0; break; case '\t': goto break_out; case '\b': if (ldata->column > 0) ldata->column--; break; default: if (!iscntrl(c)) { if (O_OLCUC(tty)) goto break_out; if (!is_continuation(c, tty)) ldata->column++; } break; } } break_out: i = tty->ops->write(tty, buf, i); mutex_unlock(&ldata->output_lock); return i; } /** * process_echoes - write pending echo characters * @tty: terminal device * * Write previously buffered echo (and other ldisc-generated) * characters to the tty. * * Characters generated by the ldisc (including echoes) need to * be buffered because the driver's write buffer can fill during * heavy program output. Echoing straight to the driver will * often fail under these conditions, causing lost characters and * resulting mismatches of ldisc state information. * * Since the ldisc state must represent the characters actually sent * to the driver at the time of the write, operations like certain * changes in column state are also saved in the buffer and executed * here. * * A circular fifo buffer is used so that the most recent characters * are prioritized. Also, when control characters are echoed with a * prefixed "^", the pair is treated atomically and thus not separated. * * Locking: callers must hold output_lock */ static size_t __process_echoes(struct tty_struct *tty) { struct n_tty_data *ldata = tty->disc_data; int space, old_space; size_t tail; unsigned char c; old_space = space = tty_write_room(tty); tail = ldata->echo_tail; while (ldata->echo_commit != tail) { c = echo_buf(ldata, tail); if (c == ECHO_OP_START) { unsigned char op; int no_space_left = 0; /* * If the buffer byte is the start of a multi-byte * operation, get the next byte, which is either the * op code or a control character value. */ op = echo_buf(ldata, tail + 1); switch (op) { unsigned int num_chars, num_bs; case ECHO_OP_ERASE_TAB: num_chars = echo_buf(ldata, tail + 2); /* * Determine how many columns to go back * in order to erase the tab. * This depends on the number of columns * used by other characters within the tab * area. If this (modulo 8) count is from * the start of input rather than from a * previous tab, we offset by canon column. * Otherwise, tab spacing is normal. 
*/ if (!(num_chars & 0x80)) num_chars += ldata->canon_column; num_bs = 8 - (num_chars & 7); if (num_bs > space) { no_space_left = 1; break; } space -= num_bs; while (num_bs--) { tty_put_char(tty, '\b'); if (ldata->column > 0) ldata->column--; } tail += 3; break; case ECHO_OP_SET_CANON_COL: ldata->canon_column = ldata->column; tail += 2; break; case ECHO_OP_MOVE_BACK_COL: if (ldata->column > 0) ldata->column--; tail += 2; break; case ECHO_OP_START: /* This is an escaped echo op start code */ if (!space) { no_space_left = 1; break; } tty_put_char(tty, ECHO_OP_START); ldata->column++; space--; tail += 2; break; default: /* * If the op is not a special byte code, * it is a ctrl char tagged to be echoed * as "^X" (where X is the letter * representing the control char). * Note that we must ensure there is * enough space for the whole ctrl pair. * */ if (space < 2) { no_space_left = 1; break; } tty_put_char(tty, '^'); tty_put_char(tty, op ^ 0100); ldata->column += 2; space -= 2; tail += 2; } if (no_space_left) break; } else { if (O_OPOST(tty)) { int retval = do_output_char(c, tty, space); if (retval < 0) break; space -= retval; } else { if (!space) break; tty_put_char(tty, c); space -= 1; } tail += 1; } } /* If the echo buffer is nearly full (so that the possibility exists * of echo overrun before the next commit), then discard enough * data at the tail to prevent a subsequent overrun */ while (ldata->echo_commit - tail >= ECHO_DISCARD_WATERMARK) { if (echo_buf(ldata, tail) == ECHO_OP_START) { if (echo_buf(ldata, tail + 1) == ECHO_OP_ERASE_TAB) tail += 3; else tail += 2; } else tail++; } ldata->echo_tail = tail; return old_space - space; } static void commit_echoes(struct tty_struct *tty) { struct n_tty_data *ldata = tty->disc_data; size_t nr, old, echoed; size_t head; head = ldata->echo_head; ldata->echo_mark = head; old = ldata->echo_commit - ldata->echo_tail; /* Process committed echoes if the accumulated # of bytes * is over the threshold (and try again each time another * block is accumulated) */ nr = head - ldata->echo_tail; if (nr < ECHO_COMMIT_WATERMARK || (nr % ECHO_BLOCK > old % ECHO_BLOCK)) return; mutex_lock(&ldata->output_lock); ldata->echo_commit = head; echoed = __process_echoes(tty); mutex_unlock(&ldata->output_lock); if (echoed && tty->ops->flush_chars) tty->ops->flush_chars(tty); } static void process_echoes(struct tty_struct *tty) { struct n_tty_data *ldata = tty->disc_data; size_t echoed; if (ldata->echo_mark == ldata->echo_tail) return; mutex_lock(&ldata->output_lock); ldata->echo_commit = ldata->echo_mark; echoed = __process_echoes(tty); mutex_unlock(&ldata->output_lock); if (echoed && tty->ops->flush_chars) tty->ops->flush_chars(tty); } /* NB: echo_mark and echo_head should be equivalent here */ static void flush_echoes(struct tty_struct *tty) { struct n_tty_data *ldata = tty->disc_data; if ((!L_ECHO(tty) && !L_ECHONL(tty)) || ldata->echo_commit == ldata->echo_head) return; mutex_lock(&ldata->output_lock); ldata->echo_commit = ldata->echo_head; __process_echoes(tty); mutex_unlock(&ldata->output_lock); } /** * add_echo_byte - add a byte to the echo buffer * @c: unicode byte to echo * @ldata: n_tty data * * Add a character or operation byte to the echo buffer. */ static inline void add_echo_byte(unsigned char c, struct n_tty_data *ldata) { *echo_buf_addr(ldata, ldata->echo_head++) = c; } /** * echo_move_back_col - add operation to move back a column * @ldata: n_tty data * * Add an operation to the echo buffer to move back one column. 
*/ static void echo_move_back_col(struct n_tty_data *ldata) { add_echo_byte(ECHO_OP_START, ldata); add_echo_byte(ECHO_OP_MOVE_BACK_COL, ldata); } /** * echo_set_canon_col - add operation to set the canon column * @ldata: n_tty data * * Add an operation to the echo buffer to set the canon column * to the current column. */ static void echo_set_canon_col(struct n_tty_data *ldata) { add_echo_byte(ECHO_OP_START, ldata); add_echo_byte(ECHO_OP_SET_CANON_COL, ldata); } /** * echo_erase_tab - add operation to erase a tab * @num_chars: number of character columns already used * @after_tab: true if num_chars starts after a previous tab * @ldata: n_tty data * * Add an operation to the echo buffer to erase a tab. * * Called by the eraser function, which knows how many character * columns have been used since either a previous tab or the start * of input. This information will be used later, along with * canon column (if applicable), to go back the correct number * of columns. */ static void echo_erase_tab(unsigned int num_chars, int after_tab, struct n_tty_data *ldata) { add_echo_byte(ECHO_OP_START, ldata); add_echo_byte(ECHO_OP_ERASE_TAB, ldata); /* We only need to know this modulo 8 (tab spacing) */ num_chars &= 7; /* Set the high bit as a flag if num_chars is after a previous tab */ if (after_tab) num_chars |= 0x80; add_echo_byte(num_chars, ldata); } /** * echo_char_raw - echo a character raw * @c: unicode byte to echo * @tty: terminal device * * Echo user input back onto the screen. This must be called only when * L_ECHO(tty) is true. Called from the driver receive_buf path. * * This variant does not treat control characters specially. */ static void echo_char_raw(unsigned char c, struct n_tty_data *ldata) { if (c == ECHO_OP_START) { add_echo_byte(ECHO_OP_START, ldata); add_echo_byte(ECHO_OP_START, ldata); } else { add_echo_byte(c, ldata); } } /** * echo_char - echo a character * @c: unicode byte to echo * @tty: terminal device * * Echo user input back onto the screen. This must be called only when * L_ECHO(tty) is true. Called from the driver receive_buf path. * * This variant tags control characters to be echoed as "^X" * (where X is the letter representing the control char). */ static void echo_char(unsigned char c, struct tty_struct *tty) { struct n_tty_data *ldata = tty->disc_data; if (c == ECHO_OP_START) { add_echo_byte(ECHO_OP_START, ldata); add_echo_byte(ECHO_OP_START, ldata); } else { if (L_ECHOCTL(tty) && iscntrl(c) && c != '\t') add_echo_byte(ECHO_OP_START, ldata); add_echo_byte(c, ldata); } } /** * finish_erasing - complete erase * @ldata: n_tty data */ static inline void finish_erasing(struct n_tty_data *ldata) { if (ldata->erasing) { echo_char_raw('/', ldata); ldata->erasing = 0; } } /** * eraser - handle erase function * @c: character input * @tty: terminal device * * Perform erase and necessary output when an erase character is * present in the stream from the driver layer. Handles the complexities * of UTF-8 multibyte symbols. * * n_tty_receive_buf()/producer path: * caller holds non-exclusive termios_rwsem * modifies read_head * * Modifying the read_head is not considered a publish in this context * because canonical mode is active -- only canon_head publishes */ static void eraser(unsigned char c, struct tty_struct *tty) { struct n_tty_data *ldata = tty->disc_data; enum { ERASE, WERASE, KILL } kill_type; size_t head; size_t cnt; int seen_alnums; if (ldata->read_head == ldata->canon_head) { /* process_output('\a', tty); */ /* what do you think? 
*/ return; } if (c == ERASE_CHAR(tty)) kill_type = ERASE; else if (c == WERASE_CHAR(tty)) kill_type = WERASE; else { if (!L_ECHO(tty)) { ldata->read_head = ldata->canon_head; return; } if (!L_ECHOK(tty) || !L_ECHOKE(tty) || !L_ECHOE(tty)) { ldata->read_head = ldata->canon_head; finish_erasing(ldata); echo_char(KILL_CHAR(tty), tty); /* Add a newline if ECHOK is on and ECHOKE is off. */ if (L_ECHOK(tty)) echo_char_raw('\n', ldata); return; } kill_type = KILL; } seen_alnums = 0; while (ldata->read_head != ldata->canon_head) { head = ldata->read_head; /* erase a single possibly multibyte character */ do { head--; c = read_buf(ldata, head); } while (is_continuation(c, tty) && head != ldata->canon_head); /* do not partially erase */ if (is_continuation(c, tty)) break; if (kill_type == WERASE) { /* Equivalent to BSD's ALTWERASE. */ if (isalnum(c) || c == '_') seen_alnums++; else if (seen_alnums) break; } cnt = ldata->read_head - head; ldata->read_head = head; if (L_ECHO(tty)) { if (L_ECHOPRT(tty)) { if (!ldata->erasing) { echo_char_raw('\\', ldata); ldata->erasing = 1; } /* if cnt > 1, output a multi-byte character */ echo_char(c, tty); while (--cnt > 0) { head++; echo_char_raw(read_buf(ldata, head), ldata); echo_move_back_col(ldata); } } else if (kill_type == ERASE && !L_ECHOE(tty)) { echo_char(ERASE_CHAR(tty), tty); } else if (c == '\t') { unsigned int num_chars = 0; int after_tab = 0; size_t tail = ldata->read_head; /* * Count the columns used for characters * since the start of input or after a * previous tab. * This info is used to go back the correct * number of columns. */ while (tail != ldata->canon_head) { tail--; c = read_buf(ldata, tail); if (c == '\t') { after_tab = 1; break; } else if (iscntrl(c)) { if (L_ECHOCTL(tty)) num_chars += 2; } else if (!is_continuation(c, tty)) { num_chars++; } } echo_erase_tab(num_chars, after_tab, ldata); } else { if (iscntrl(c) && L_ECHOCTL(tty)) { echo_char_raw('\b', ldata); echo_char_raw(' ', ldata); echo_char_raw('\b', ldata); } if (!iscntrl(c) || L_ECHOCTL(tty)) { echo_char_raw('\b', ldata); echo_char_raw(' ', ldata); echo_char_raw('\b', ldata); } } } if (kill_type == ERASE) break; } if (ldata->read_head == ldata->canon_head && L_ECHO(tty)) finish_erasing(ldata); } /** * isig - handle the ISIG optio * @sig: signal * @tty: terminal * * Called when a signal is being sent due to terminal input. * Called from the driver receive_buf path so serialized. * * Locking: ctrl_lock */ static void isig(int sig, struct tty_struct *tty) { struct pid *tty_pgrp = tty_get_pgrp(tty); if (tty_pgrp) { kill_pgrp(tty_pgrp, sig, 1); put_pid(tty_pgrp); } } /** * n_tty_receive_break - handle break * @tty: terminal * * An RS232 break event has been hit in the incoming bitstream. This * can cause a variety of events depending upon the termios settings. 
* * n_tty_receive_buf()/producer path: * caller holds non-exclusive termios_rwsem * publishes read_head via put_tty_queue() * * Note: may get exclusive termios_rwsem if flushing input buffer */ static void n_tty_receive_break(struct tty_struct *tty) { struct n_tty_data *ldata = tty->disc_data; if (I_IGNBRK(tty)) return; if (I_BRKINT(tty)) { isig(SIGINT, tty); if (!L_NOFLSH(tty)) { /* flushing needs exclusive termios_rwsem */ up_read(&tty->termios_rwsem); n_tty_flush_buffer(tty); tty_driver_flush_buffer(tty); down_read(&tty->termios_rwsem); } return; } if (I_PARMRK(tty)) { put_tty_queue('\377', ldata); put_tty_queue('\0', ldata); } put_tty_queue('\0', ldata); if (waitqueue_active(&tty->read_wait)) wake_up_interruptible(&tty->read_wait); } /** * n_tty_receive_overrun - handle overrun reporting * @tty: terminal * * Data arrived faster than we could process it. While the tty * driver has flagged this the bits that were missed are gone * forever. * * Called from the receive_buf path so single threaded. Does not * need locking as num_overrun and overrun_time are function * private. */ static void n_tty_receive_overrun(struct tty_struct *tty) { struct n_tty_data *ldata = tty->disc_data; char buf[64]; ldata->num_overrun++; if (time_after(jiffies, ldata->overrun_time + HZ) || time_after(ldata->overrun_time, jiffies)) { printk(KERN_WARNING "%s: %d input overrun(s)\n", tty_name(tty, buf), ldata->num_overrun); ldata->overrun_time = jiffies; ldata->num_overrun = 0; } } /** * n_tty_receive_parity_error - error notifier * @tty: terminal device * @c: character * * Process a parity error and queue the right data to indicate * the error case if necessary. * * n_tty_receive_buf()/producer path: * caller holds non-exclusive termios_rwsem * publishes read_head via put_tty_queue() */ static void n_tty_receive_parity_error(struct tty_struct *tty, unsigned char c) { struct n_tty_data *ldata = tty->disc_data; if (I_IGNPAR(tty)) return; if (I_PARMRK(tty)) { put_tty_queue('\377', ldata); put_tty_queue('\0', ldata); put_tty_queue(c, ldata); } else if (I_INPCK(tty)) put_tty_queue('\0', ldata); else put_tty_queue(c, ldata); if (waitqueue_active(&tty->read_wait)) wake_up_interruptible(&tty->read_wait); } static void n_tty_receive_signal_char(struct tty_struct *tty, int signal, unsigned char c) { if (!L_NOFLSH(tty)) { /* flushing needs exclusive termios_rwsem */ up_read(&tty->termios_rwsem); n_tty_flush_buffer(tty); tty_driver_flush_buffer(tty); down_read(&tty->termios_rwsem); } if (I_IXON(tty)) start_tty(tty); if (L_ECHO(tty)) { echo_char(c, tty); commit_echoes(tty); } else process_echoes(tty); isig(signal, tty); return; } /** * n_tty_receive_char - perform processing * @tty: terminal device * @c: character * * Process an individual character of input received from the driver. * This is serialized with respect to itself by the rules for the * driver above. 
* * n_tty_receive_buf()/producer path: * caller holds non-exclusive termios_rwsem * publishes canon_head if canonical mode is active * otherwise, publishes read_head via put_tty_queue() * * Returns 1 if LNEXT was received, else returns 0 */ static int n_tty_receive_char_special(struct tty_struct *tty, unsigned char c) { struct n_tty_data *ldata = tty->disc_data; if (I_IXON(tty)) { if (c == START_CHAR(tty)) { start_tty(tty); process_echoes(tty); return 0; } if (c == STOP_CHAR(tty)) { stop_tty(tty); return 0; } } if (L_ISIG(tty)) { if (c == INTR_CHAR(tty)) { n_tty_receive_signal_char(tty, SIGINT, c); return 0; } else if (c == QUIT_CHAR(tty)) { n_tty_receive_signal_char(tty, SIGQUIT, c); return 0; } else if (c == SUSP_CHAR(tty)) { n_tty_receive_signal_char(tty, SIGTSTP, c); return 0; } } if (tty->stopped && !tty->flow_stopped && I_IXON(tty) && I_IXANY(tty)) { start_tty(tty); process_echoes(tty); } if (c == '\r') { if (I_IGNCR(tty)) return 0; if (I_ICRNL(tty)) c = '\n'; } else if (c == '\n' && I_INLCR(tty)) c = '\r'; if (ldata->icanon) { if (c == ERASE_CHAR(tty) || c == KILL_CHAR(tty) || (c == WERASE_CHAR(tty) && L_IEXTEN(tty))) { eraser(c, tty); commit_echoes(tty); return 0; } if (c == LNEXT_CHAR(tty) && L_IEXTEN(tty)) { ldata->lnext = 1; if (L_ECHO(tty)) { finish_erasing(ldata); if (L_ECHOCTL(tty)) { echo_char_raw('^', ldata); echo_char_raw('\b', ldata); commit_echoes(tty); } } return 1; } if (c == REPRINT_CHAR(tty) && L_ECHO(tty) && L_IEXTEN(tty)) { size_t tail = ldata->canon_head; finish_erasing(ldata); echo_char(c, tty); echo_char_raw('\n', ldata); while (tail != ldata->read_head) { echo_char(read_buf(ldata, tail), tty); tail++; } commit_echoes(tty); return 0; } if (c == '\n') { if (L_ECHO(tty) || L_ECHONL(tty)) { echo_char_raw('\n', ldata); commit_echoes(tty); } goto handle_newline; } if (c == EOF_CHAR(tty)) { c = __DISABLED_CHAR; goto handle_newline; } if ((c == EOL_CHAR(tty)) || (c == EOL2_CHAR(tty) && L_IEXTEN(tty))) { /* * XXX are EOL_CHAR and EOL2_CHAR echoed?!? */ if (L_ECHO(tty)) { /* Record the column of first canon char. */ if (ldata->canon_head == ldata->read_head) echo_set_canon_col(ldata); echo_char(c, tty); commit_echoes(tty); } /* * XXX does PARMRK doubling happen for * EOL_CHAR and EOL2_CHAR? */ if (c == (unsigned char) '\377' && I_PARMRK(tty)) put_tty_queue(c, ldata); handle_newline: set_bit(ldata->read_head & (N_TTY_BUF_SIZE - 1), ldata->read_flags); put_tty_queue(c, ldata); ldata->canon_head = ldata->read_head; kill_fasync(&tty->fasync, SIGIO, POLL_IN); if (waitqueue_active(&tty->read_wait)) wake_up_interruptible(&tty->read_wait); return 0; } } if (L_ECHO(tty)) { finish_erasing(ldata); if (c == '\n') echo_char_raw('\n', ldata); else { /* Record the column of first canon char. */ if (ldata->canon_head == ldata->read_head) echo_set_canon_col(ldata); echo_char(c, tty); } commit_echoes(tty); } /* PARMRK doubling check */ if (c == (unsigned char) '\377' && I_PARMRK(tty)) put_tty_queue(c, ldata); put_tty_queue(c, ldata); return 0; } static inline void n_tty_receive_char_inline(struct tty_struct *tty, unsigned char c) { struct n_tty_data *ldata = tty->disc_data; if (tty->stopped && !tty->flow_stopped && I_IXON(tty) && I_IXANY(tty)) { start_tty(tty); process_echoes(tty); } if (L_ECHO(tty)) { finish_erasing(ldata); /* Record the column of first canon char. 
*/ if (ldata->canon_head == ldata->read_head) echo_set_canon_col(ldata); echo_char(c, tty); commit_echoes(tty); } /* PARMRK doubling check */ if (c == (unsigned char) '\377' && I_PARMRK(tty)) put_tty_queue(c, ldata); put_tty_queue(c, ldata); } static void n_tty_receive_char(struct tty_struct *tty, unsigned char c) { n_tty_receive_char_inline(tty, c); } static inline void n_tty_receive_char_fast(struct tty_struct *tty, unsigned char c) { struct n_tty_data *ldata = tty->disc_data; if (tty->stopped && !tty->flow_stopped && I_IXON(tty) && I_IXANY(tty)) { start_tty(tty); process_echoes(tty); } if (L_ECHO(tty)) { finish_erasing(ldata); /* Record the column of first canon char. */ if (ldata->canon_head == ldata->read_head) echo_set_canon_col(ldata); echo_char(c, tty); commit_echoes(tty); } put_tty_queue(c, ldata); } static void n_tty_receive_char_closing(struct tty_struct *tty, unsigned char c) { if (I_ISTRIP(tty)) c &= 0x7f; if (I_IUCLC(tty) && L_IEXTEN(tty)) c = tolower(c); if (I_IXON(tty)) { if (c == STOP_CHAR(tty)) stop_tty(tty); else if (c == START_CHAR(tty) || (tty->stopped && !tty->flow_stopped && I_IXANY(tty) && c != INTR_CHAR(tty) && c != QUIT_CHAR(tty) && c != SUSP_CHAR(tty))) { start_tty(tty); process_echoes(tty); } } } static void n_tty_receive_char_flagged(struct tty_struct *tty, unsigned char c, char flag) { char buf[64]; switch (flag) { case TTY_BREAK: n_tty_receive_break(tty); break; case TTY_PARITY: case TTY_FRAME: n_tty_receive_parity_error(tty, c); break; case TTY_OVERRUN: n_tty_receive_overrun(tty); break; default: printk(KERN_ERR "%s: unknown flag %d\n", tty_name(tty, buf), flag); break; } } static void n_tty_receive_char_lnext(struct tty_struct *tty, unsigned char c, char flag) { struct n_tty_data *ldata = tty->disc_data; ldata->lnext = 0; if (likely(flag == TTY_NORMAL)) { if (I_ISTRIP(tty)) c &= 0x7f; if (I_IUCLC(tty) && L_IEXTEN(tty)) c = tolower(c); n_tty_receive_char(tty, c); } else n_tty_receive_char_flagged(tty, c, flag); } /** * n_tty_receive_buf - data receive * @tty: terminal device * @cp: buffer * @fp: flag buffer * @count: characters * * Called by the terminal driver when a block of characters has * been received. This function must be called from soft contexts * not from interrupt context. 
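 *
 * The sketch below is illustrative only (the helper names are hypothetical);
 * it restates the index arithmetic the receive path relies on: read_head and
 * read_tail are free-running counters, reduced modulo the power-of-two
 * N_TTY_BUF_SIZE only when the buffer is actually addressed.
 */

static inline size_t ring_index_sketch(size_t counter)
{
	return counter & (N_TTY_BUF_SIZE - 1);	/* buffer size is a power of two */
}

static inline size_t ring_used_sketch(size_t head, size_t tail)
{
	return head - tail;	/* unsigned wrap-around yields the byte count */
}

static inline size_t ring_contig_free_sketch(size_t head_idx, size_t used)
{
	/* contiguous bytes writable at the head: limited by the physical end
	 * of the buffer and by the total free space, so unread data at the
	 * tail is never overwritten */
	return N_TTY_BUF_SIZE - max(used, head_idx);
}

/*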
The driver is responsible for making * calls one at a time and in order (or using flush_to_ldisc) * * n_tty_receive_buf()/producer path: * claims non-exclusive termios_rwsem * publishes read_head and canon_head */ static void n_tty_receive_buf_real_raw(struct tty_struct *tty, const unsigned char *cp, char *fp, int count) { struct n_tty_data *ldata = tty->disc_data; size_t n, head; head = ldata->read_head & (N_TTY_BUF_SIZE - 1); n = N_TTY_BUF_SIZE - max(read_cnt(ldata), head); n = min_t(size_t, count, n); memcpy(read_buf_addr(ldata, head), cp, n); ldata->read_head += n; cp += n; count -= n; head = ldata->read_head & (N_TTY_BUF_SIZE - 1); n = N_TTY_BUF_SIZE - max(read_cnt(ldata), head); n = min_t(size_t, count, n); memcpy(read_buf_addr(ldata, head), cp, n); ldata->read_head += n; } static void n_tty_receive_buf_raw(struct tty_struct *tty, const unsigned char *cp, char *fp, int count) { struct n_tty_data *ldata = tty->disc_data; char flag = TTY_NORMAL; while (count--) { if (fp) flag = *fp++; if (likely(flag == TTY_NORMAL)) put_tty_queue(*cp++, ldata); else n_tty_receive_char_flagged(tty, *cp++, flag); } } static void n_tty_receive_buf_closing(struct tty_struct *tty, const unsigned char *cp, char *fp, int count) { char flag = TTY_NORMAL; while (count--) { if (fp) flag = *fp++; if (likely(flag == TTY_NORMAL)) n_tty_receive_char_closing(tty, *cp++); else n_tty_receive_char_flagged(tty, *cp++, flag); } } static void n_tty_receive_buf_standard(struct tty_struct *tty, const unsigned char *cp, char *fp, int count) { struct n_tty_data *ldata = tty->disc_data; char flag = TTY_NORMAL; while (count--) { if (fp) flag = *fp++; if (likely(flag == TTY_NORMAL)) { unsigned char c = *cp++; if (I_ISTRIP(tty)) c &= 0x7f; if (I_IUCLC(tty) && L_IEXTEN(tty)) c = tolower(c); if (L_EXTPROC(tty)) { put_tty_queue(c, ldata); continue; } if (!test_bit(c, ldata->char_map)) n_tty_receive_char_inline(tty, c); else if (n_tty_receive_char_special(tty, c) && count) { if (fp) flag = *fp++; n_tty_receive_char_lnext(tty, *cp++, flag); count--; } } else n_tty_receive_char_flagged(tty, *cp++, flag); } } static void n_tty_receive_buf_fast(struct tty_struct *tty, const unsigned char *cp, char *fp, int count) { struct n_tty_data *ldata = tty->disc_data; char flag = TTY_NORMAL; while (count--) { if (fp) flag = *fp++; if (likely(flag == TTY_NORMAL)) { unsigned char c = *cp++; if (!test_bit(c, ldata->char_map)) n_tty_receive_char_fast(tty, c); else if (n_tty_receive_char_special(tty, c) && count) { if (fp) flag = *fp++; n_tty_receive_char_lnext(tty, *cp++, flag); count--; } } else n_tty_receive_char_flagged(tty, *cp++, flag); } } static void __receive_buf(struct tty_struct *tty, const unsigned char *cp, char *fp, int count) { struct n_tty_data *ldata = tty->disc_data; bool preops = I_ISTRIP(tty) || (I_IUCLC(tty) && L_IEXTEN(tty)); if (ldata->real_raw) n_tty_receive_buf_real_raw(tty, cp, fp, count); else if (ldata->raw || (L_EXTPROC(tty) && !preops)) n_tty_receive_buf_raw(tty, cp, fp, count); else if (tty->closing && !L_EXTPROC(tty)) n_tty_receive_buf_closing(tty, cp, fp, count); else { if (ldata->lnext) { char flag = TTY_NORMAL; if (fp) flag = *fp++; n_tty_receive_char_lnext(tty, *cp++, flag); count--; } if (!preops && !I_PARMRK(tty)) n_tty_receive_buf_fast(tty, cp, fp, count); else n_tty_receive_buf_standard(tty, cp, fp, count); flush_echoes(tty); if (tty->ops->flush_chars) tty->ops->flush_chars(tty); } if ((!ldata->icanon && (read_cnt(ldata) >= ldata->minimum_to_wake)) || L_EXTPROC(tty)) { kill_fasync(&tty->fasync, SIGIO, POLL_IN); if 
(waitqueue_active(&tty->read_wait)) wake_up_interruptible(&tty->read_wait); } } static int n_tty_receive_buf_common(struct tty_struct *tty, const unsigned char *cp, char *fp, int count, int flow) { struct n_tty_data *ldata = tty->disc_data; int room, n, rcvd = 0; down_read(&tty->termios_rwsem); while (1) { room = receive_room(tty); n = min(count, room); if (!n) { if (flow && !room) ldata->no_room = 1; break; } __receive_buf(tty, cp, fp, n); cp += n; if (fp) fp += n; count -= n; rcvd += n; } tty->receive_room = room; n_tty_check_throttle(tty); up_read(&tty->termios_rwsem); return rcvd; } static void n_tty_receive_buf(struct tty_struct *tty, const unsigned char *cp, char *fp, int count) { n_tty_receive_buf_common(tty, cp, fp, count, 0); } static int n_tty_receive_buf2(struct tty_struct *tty, const unsigned char *cp, char *fp, int count) { return n_tty_receive_buf_common(tty, cp, fp, count, 1); } int is_ignored(int sig) { return (sigismember(&current->blocked, sig) || current->sighand->action[sig-1].sa.sa_handler == SIG_IGN); } /** * n_tty_set_termios - termios data changed * @tty: terminal * @old: previous data * * Called by the tty layer when the user changes termios flags so * that the line discipline can plan ahead. This function cannot sleep * and is protected from re-entry by the tty layer. The user is * guaranteed that this function will not be re-entered or in progress * when the ldisc is closed. * * Locking: Caller holds tty->termios_rwsem */ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old) { struct n_tty_data *ldata = tty->disc_data; if (!old || (old->c_lflag ^ tty->termios.c_lflag) & ICANON) { bitmap_zero(ldata->read_flags, N_TTY_BUF_SIZE); ldata->line_start = ldata->read_tail; if (!L_ICANON(tty) || !read_cnt(ldata)) { ldata->canon_head = ldata->read_tail; ldata->push = 0; } else { set_bit((ldata->read_head - 1) & (N_TTY_BUF_SIZE - 1), ldata->read_flags); ldata->canon_head = ldata->read_head; ldata->push = 1; } ldata->erasing = 0; ldata->lnext = 0; } ldata->icanon = (L_ICANON(tty) != 0); if (I_ISTRIP(tty) || I_IUCLC(tty) || I_IGNCR(tty) || I_ICRNL(tty) || I_INLCR(tty) || L_ICANON(tty) || I_IXON(tty) || L_ISIG(tty) || L_ECHO(tty) || I_PARMRK(tty)) { bitmap_zero(ldata->char_map, 256); if (I_IGNCR(tty) || I_ICRNL(tty)) set_bit('\r', ldata->char_map); if (I_INLCR(tty)) set_bit('\n', ldata->char_map); if (L_ICANON(tty)) { set_bit(ERASE_CHAR(tty), ldata->char_map); set_bit(KILL_CHAR(tty), ldata->char_map); set_bit(EOF_CHAR(tty), ldata->char_map); set_bit('\n', ldata->char_map); set_bit(EOL_CHAR(tty), ldata->char_map); if (L_IEXTEN(tty)) { set_bit(WERASE_CHAR(tty), ldata->char_map); set_bit(LNEXT_CHAR(tty), ldata->char_map); set_bit(EOL2_CHAR(tty), ldata->char_map); if (L_ECHO(tty)) set_bit(REPRINT_CHAR(tty), ldata->char_map); } } if (I_IXON(tty)) { set_bit(START_CHAR(tty), ldata->char_map); set_bit(STOP_CHAR(tty), ldata->char_map); } if (L_ISIG(tty)) { set_bit(INTR_CHAR(tty), ldata->char_map); set_bit(QUIT_CHAR(tty), ldata->char_map); set_bit(SUSP_CHAR(tty), ldata->char_map); } clear_bit(__DISABLED_CHAR, ldata->char_map); ldata->raw = 0; ldata->real_raw = 0; } else { ldata->raw = 1; if ((I_IGNBRK(tty) || (!I_BRKINT(tty) && !I_PARMRK(tty))) && (I_IGNPAR(tty) || !I_INPCK(tty)) && (tty->driver->flags & TTY_DRIVER_REAL_RAW)) ldata->real_raw = 1; else ldata->real_raw = 0; } n_tty_set_room(tty); /* * Fix tty hang when I_IXON(tty) is cleared, but the tty * been stopped by STOP_CHAR(tty) before it. 
*/ if (!I_IXON(tty) && old && (old->c_iflag & IXON) && !tty->flow_stopped) { start_tty(tty); process_echoes(tty); } /* The termios change make the tty ready for I/O */ if (waitqueue_active(&tty->write_wait)) wake_up_interruptible(&tty->write_wait); if (waitqueue_active(&tty->read_wait)) wake_up_interruptible(&tty->read_wait); } /** * n_tty_close - close the ldisc for this tty * @tty: device * * Called from the terminal layer when this line discipline is * being shut down, either because of a close or becsuse of a * discipline change. The function will not be called while other * ldisc methods are in progress. */ static void n_tty_close(struct tty_struct *tty) { struct n_tty_data *ldata = tty->disc_data; if (tty->link) n_tty_packet_mode_flush(tty); vfree(ldata); tty->disc_data = NULL; } /** * n_tty_open - open an ldisc * @tty: terminal to open * * Called when this line discipline is being attached to the * terminal device. Can sleep. Called serialized so that no * other events will occur in parallel. No further open will occur * until a close. */ static int n_tty_open(struct tty_struct *tty) { struct n_tty_data *ldata; /* Currently a malloc failure here can panic */ ldata = vmalloc(sizeof(*ldata)); if (!ldata) goto err; ldata->overrun_time = jiffies; mutex_init(&ldata->atomic_read_lock); mutex_init(&ldata->output_lock); tty->disc_data = ldata; reset_buffer_flags(tty->disc_data); ldata->column = 0; ldata->canon_column = 0; ldata->minimum_to_wake = 1; ldata->num_overrun = 0; ldata->no_room = 0; ldata->lnext = 0; tty->closing = 0; /* indicate buffer work may resume */ clear_bit(TTY_LDISC_HALTED, &tty->flags); n_tty_set_termios(tty, NULL); tty_unthrottle(tty); return 0; err: return -ENOMEM; } static inline int input_available_p(struct tty_struct *tty, int poll) { struct n_tty_data *ldata = tty->disc_data; int amt = poll && !TIME_CHAR(tty) && MIN_CHAR(tty) ? MIN_CHAR(tty) : 1; if (ldata->icanon && !L_EXTPROC(tty)) return ldata->canon_head != ldata->read_tail; else return read_cnt(ldata) >= amt; } /** * copy_from_read_buf - copy read data directly * @tty: terminal device * @b: user data * @nr: size of data * * Helper function to speed up n_tty_read. It is only called when * ICANON is off; it copies characters straight from the tty queue to * user space directly. It can be profitably called twice; once to * drain the space from the tail pointer to the (physical) end of the * buffer, and once to drain the space from the (physical) beginning of * the buffer to head pointer. * * Called under the ldata->atomic_read_lock sem * * n_tty_read()/consumer path: * caller holds non-exclusive termios_rwsem * read_tail published */ static int copy_from_read_buf(struct tty_struct *tty, unsigned char __user **b, size_t *nr) { struct n_tty_data *ldata = tty->disc_data; int retval; size_t n; bool is_eof; size_t tail = ldata->read_tail & (N_TTY_BUF_SIZE - 1); retval = 0; n = min(read_cnt(ldata), N_TTY_BUF_SIZE - tail); n = min(*nr, n); if (n) { retval = copy_to_user(*b, read_buf_addr(ldata, tail), n); n -= retval; is_eof = n == 1 && read_buf(ldata, tail) == EOF_CHAR(tty); tty_audit_add_data(tty, read_buf_addr(ldata, tail), n, ldata->icanon); ldata->read_tail += n; /* Turn single EOF into zero-length read */ if (L_EXTPROC(tty) && ldata->icanon && is_eof && !read_cnt(ldata)) n = 0; *b += n; *nr -= n; } return retval; } /** * canon_copy_from_read_buf - copy read data in canonical mode * @tty: terminal device * @b: user data * @nr: size of data * * Helper function for n_tty_read. 
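 *
 * Illustrative sketch (hypothetical helper name): in canonical mode every
 * line-delimiting character queued by the receive path has its position
 * marked in the read_flags bitmap, so locating the end of the next line is
 * a bitmap search from the tail; the full function below additionally
 * handles the wrap-around case when the scan reaches the end of the
 * circular buffer.
 */

static inline size_t canon_find_eol_sketch(const unsigned long *read_flags,
					    size_t tail, size_t size)
{
	/* returns 'size' when no delimiter is marked in [tail, size) */
	return find_next_bit(read_flags, size, tail);
}

/*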
It is only called when ICANON is on; * it copies one line of input up to and including the line-delimiting * character into the user-space buffer. * * NB: When termios is changed from non-canonical to canonical mode and * the read buffer contains data, n_tty_set_termios() simulates an EOF * push (as if C-d were input) _without_ the DISABLED_CHAR in the buffer. * This causes data already processed as input to be immediately available * as input although a newline has not been received. * * Called under the atomic_read_lock mutex * * n_tty_read()/consumer path: * caller holds non-exclusive termios_rwsem * read_tail published */ static int canon_copy_from_read_buf(struct tty_struct *tty, unsigned char __user **b, size_t *nr) { struct n_tty_data *ldata = tty->disc_data; size_t n, size, more, c; size_t eol; size_t tail; int ret, found = 0; bool eof_push = 0; /* N.B. avoid overrun if nr == 0 */ n = min(*nr, read_cnt(ldata)); if (!n) return 0; tail = ldata->read_tail & (N_TTY_BUF_SIZE - 1); size = min_t(size_t, tail + n, N_TTY_BUF_SIZE); n_tty_trace("%s: nr:%zu tail:%zu n:%zu size:%zu\n", __func__, *nr, tail, n, size); eol = find_next_bit(ldata->read_flags, size, tail); more = n - (size - tail); if (eol == N_TTY_BUF_SIZE && more) { /* scan wrapped without finding set bit */ eol = find_next_bit(ldata->read_flags, more, 0); if (eol != more) found = 1; } else if (eol != size) found = 1; size = N_TTY_BUF_SIZE - tail; n = eol - tail; if (n > 4096) n += 4096; n += found; c = n; if (found && !ldata->push && read_buf(ldata, eol) == __DISABLED_CHAR) { n--; eof_push = !n && ldata->read_tail != ldata->line_start; } n_tty_trace("%s: eol:%zu found:%d n:%zu c:%zu size:%zu more:%zu\n", __func__, eol, found, n, c, size, more); if (n > size) { ret = copy_to_user(*b, read_buf_addr(ldata, tail), size); if (ret) return -EFAULT; ret = copy_to_user(*b + size, ldata->read_buf, n - size); } else ret = copy_to_user(*b, read_buf_addr(ldata, tail), n); if (ret) return -EFAULT; *b += n; *nr -= n; if (found) clear_bit(eol, ldata->read_flags); smp_mb__after_clear_bit(); ldata->read_tail += c; if (found) { if (!ldata->push) ldata->line_start = ldata->read_tail; else ldata->push = 0; tty_audit_push(tty); } return eof_push ? -EAGAIN : 0; } extern ssize_t redirected_tty_write(struct file *, const char __user *, size_t, loff_t *); /** * job_control - check job control * @tty: tty * @file: file handle * * Perform job control management checks on this file/tty descriptor * and if appropriate send any needed signals and return a negative * error code if action should be taken. * * Locking: redirected write test is safe * current->signal->tty check is safe * ctrl_lock to safely reference tty->pgrp */ static int job_control(struct tty_struct *tty, struct file *file) { /* Job control check -- must be done at start and after every sleep (POSIX.1 7.1.1.4). */ /* NOTE: not yet done after every sleep pending a thorough check of the logic of this change. 
-- jlc */ /* don't stop on /dev/console */ if (file->f_op->write == redirected_tty_write || current->signal->tty != tty) return 0; spin_lock_irq(&tty->ctrl_lock); if (!tty->pgrp) printk(KERN_ERR "n_tty_read: no tty->pgrp!\n"); else if (task_pgrp(current) != tty->pgrp) { spin_unlock_irq(&tty->ctrl_lock); if (is_ignored(SIGTTIN) || is_current_pgrp_orphaned()) return -EIO; kill_pgrp(task_pgrp(current), SIGTTIN, 1); set_thread_flag(TIF_SIGPENDING); return -ERESTARTSYS; } spin_unlock_irq(&tty->ctrl_lock); return 0; } /** * n_tty_read - read function for tty * @tty: tty device * @file: file object * @buf: userspace buffer pointer * @nr: size of I/O * * Perform reads for the line discipline. We are guaranteed that the * line discipline will not be closed under us but we may get multiple * parallel readers and must handle this ourselves. We may also get * a hangup. Always called in user context, may sleep. * * This code must be sure never to sleep through a hangup. * * n_tty_read()/consumer path: * claims non-exclusive termios_rwsem * publishes read_tail */ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file, unsigned char __user *buf, size_t nr) { struct n_tty_data *ldata = tty->disc_data; unsigned char __user *b = buf; DECLARE_WAITQUEUE(wait, current); int c; int minimum, time; ssize_t retval = 0; long timeout; unsigned long flags; int packet; c = job_control(tty, file); if (c < 0) return c; /* * Internal serialization of reads. */ if (file->f_flags & O_NONBLOCK) { if (!mutex_trylock(&ldata->atomic_read_lock)) return -EAGAIN; } else { if (mutex_lock_interruptible(&ldata->atomic_read_lock)) return -ERESTARTSYS; } down_read(&tty->termios_rwsem); minimum = time = 0; timeout = MAX_SCHEDULE_TIMEOUT; if (!ldata->icanon) { minimum = MIN_CHAR(tty); if (minimum) { time = (HZ / 10) * TIME_CHAR(tty); if (time) ldata->minimum_to_wake = 1; else if (!waitqueue_active(&tty->read_wait) || (ldata->minimum_to_wake > minimum)) ldata->minimum_to_wake = minimum; } else { timeout = (HZ / 10) * TIME_CHAR(tty); ldata->minimum_to_wake = minimum = 1; } } packet = tty->packet; add_wait_queue(&tty->read_wait, &wait); while (nr) { /* First test for status change. */ if (packet && tty->link->ctrl_status) { unsigned char cs; if (b != buf) break; spin_lock_irqsave(&tty->link->ctrl_lock, flags); cs = tty->link->ctrl_status; tty->link->ctrl_status = 0; spin_unlock_irqrestore(&tty->link->ctrl_lock, flags); if (tty_put_user(tty, cs, b++)) { retval = -EFAULT; b--; break; } nr--; break; } /* This statement must be first before checking for input so that any interrupt will set the state back to TASK_RUNNING. */ set_current_state(TASK_INTERRUPTIBLE); if (((minimum - (b - buf)) < ldata->minimum_to_wake) && ((minimum - (b - buf)) >= 1)) ldata->minimum_to_wake = (minimum - (b - buf)); if (!input_available_p(tty, 0)) { if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) { up_read(&tty->termios_rwsem); tty_flush_to_ldisc(tty); down_read(&tty->termios_rwsem); if (!input_available_p(tty, 0)) { retval = -EIO; break; } } else { if (tty_hung_up_p(file)) break; if (!timeout) break; if (file->f_flags & O_NONBLOCK) { retval = -EAGAIN; break; } if (signal_pending(current)) { retval = -ERESTARTSYS; break; } n_tty_set_room(tty); up_read(&tty->termios_rwsem); timeout = schedule_timeout(timeout); down_read(&tty->termios_rwsem); continue; } } __set_current_state(TASK_RUNNING); /* Deal with packet mode. 
*/ if (packet && b == buf) { if (tty_put_user(tty, TIOCPKT_DATA, b++)) { retval = -EFAULT; b--; break; } nr--; } if (ldata->icanon && !L_EXTPROC(tty)) { retval = canon_copy_from_read_buf(tty, &b, &nr); if (retval == -EAGAIN) { retval = 0; continue; } else if (retval) break; } else { int uncopied; /* The copy function takes the read lock and handles locking internally for this case */ uncopied = copy_from_read_buf(tty, &b, &nr); uncopied += copy_from_read_buf(tty, &b, &nr); if (uncopied) { retval = -EFAULT; break; } } n_tty_check_unthrottle(tty); if (b - buf >= minimum) break; if (time) timeout = time; } n_tty_set_room(tty); up_read(&tty->termios_rwsem); remove_wait_queue(&tty->read_wait, &wait); if (!waitqueue_active(&tty->read_wait)) ldata->minimum_to_wake = minimum; mutex_unlock(&ldata->atomic_read_lock); __set_current_state(TASK_RUNNING); if (b - buf) retval = b - buf; return retval; } /** * n_tty_write - write function for tty * @tty: tty device * @file: file object * @buf: userspace buffer pointer * @nr: size of I/O * * Write function of the terminal device. This is serialized with * respect to other write callers but not to termios changes, reads * and other such events. Since the receive code will echo characters, * thus calling driver write methods, the output_lock is used in * the output processing functions called here as well as in the * echo processing function to protect the column state and space * left in the buffer. * * This code must be sure never to sleep through a hangup. * * Locking: output_lock to protect column state and space left * (note that the process_output*() functions take this * lock themselves) */ static ssize_t n_tty_write(struct tty_struct *tty, struct file *file, const unsigned char *buf, size_t nr) { const unsigned char *b = buf; DECLARE_WAITQUEUE(wait, current); int c; ssize_t retval = 0; /* Job control check -- must be done at start (POSIX.1 7.1.1.4). */ if (L_TOSTOP(tty) && file->f_op->write != redirected_tty_write) { retval = tty_check_change(tty); if (retval) return retval; } down_read(&tty->termios_rwsem); /* Write out any echoed characters that are still pending */ process_echoes(tty); add_wait_queue(&tty->write_wait, &wait); while (1) { set_current_state(TASK_INTERRUPTIBLE); if (signal_pending(current)) { retval = -ERESTARTSYS; break; } if (tty_hung_up_p(file) || (tty->link && !tty->link->count)) { retval = -EIO; break; } if (O_OPOST(tty)) { while (nr > 0) { ssize_t num = process_output_block(tty, b, nr); if (num < 0) { if (num == -EAGAIN) break; retval = num; goto break_out; } b += num; nr -= num; if (nr == 0) break; c = *b; if (process_output(c, tty) < 0) break; b++; nr--; } if (tty->ops->flush_chars) tty->ops->flush_chars(tty); } else { while (nr > 0) { c = tty->ops->write(tty, b, nr); if (c < 0) { retval = c; goto break_out; } if (!c) break; b += c; nr -= c; } } if (!nr) break; if (file->f_flags & O_NONBLOCK) { retval = -EAGAIN; break; } up_read(&tty->termios_rwsem); schedule(); down_read(&tty->termios_rwsem); } break_out: __set_current_state(TASK_RUNNING); remove_wait_queue(&tty->write_wait, &wait); if (b - buf != nr && tty->fasync) set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); up_read(&tty->termios_rwsem); return (b - buf) ? b - buf : retval; } /** * n_tty_poll - poll method for N_TTY * @tty: terminal device * @file: file accessing it * @wait: poll table * * Called when the line discipline is asked to poll() for data or * for special events. This code is not serialized with respect to * other events save open/close. 
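 *
 * As a hedged illustration (not taken from this file), a pty master opened
 * in packet mode would typically poll for both data and control status;
 * master_fd and the two handlers below are hypothetical placeholders:
 *
 *	struct pollfd pfd = { .fd = master_fd, .events = POLLIN | POLLPRI };
 *
 *	poll(&pfd, 1, -1);
 *	if (pfd.revents & POLLPRI)
 *		handle_ctrl_status();	// link's ctrl_status (TIOCPKT_*) is pending
 *	if (pfd.revents & POLLIN)
 *		read_input();		// read() will not block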
* * This code must be sure never to sleep through a hangup. * Called without the kernel lock held - fine */ static unsigned int n_tty_poll(struct tty_struct *tty, struct file *file, poll_table *wait) { struct n_tty_data *ldata = tty->disc_data; unsigned int mask = 0; poll_wait(file, &tty->read_wait, wait); poll_wait(file, &tty->write_wait, wait); if (input_available_p(tty, 1)) mask |= POLLIN | POLLRDNORM; if (tty->packet && tty->link->ctrl_status) mask |= POLLPRI | POLLIN | POLLRDNORM; if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) mask |= POLLHUP; if (tty_hung_up_p(file)) mask |= POLLHUP; if (!(mask & (POLLHUP | POLLIN | POLLRDNORM))) { if (MIN_CHAR(tty) && !TIME_CHAR(tty)) ldata->minimum_to_wake = MIN_CHAR(tty); else ldata->minimum_to_wake = 1; } if (tty->ops->write && !tty_is_writelocked(tty) && tty_chars_in_buffer(tty) < WAKEUP_CHARS && tty_write_room(tty) > 0) mask |= POLLOUT | POLLWRNORM; return mask; } static unsigned long inq_canon(struct n_tty_data *ldata) { size_t nr, head, tail; if (ldata->canon_head == ldata->read_tail) return 0; head = ldata->canon_head; tail = ldata->read_tail; nr = head - tail; /* Skip EOF-chars.. */ while (head != tail) { if (test_bit(tail & (N_TTY_BUF_SIZE - 1), ldata->read_flags) && read_buf(ldata, tail) == __DISABLED_CHAR) nr--; tail++; } return nr; } static int n_tty_ioctl(struct tty_struct *tty, struct file *file, unsigned int cmd, unsigned long arg) { struct n_tty_data *ldata = tty->disc_data; int retval; switch (cmd) { case TIOCOUTQ: return put_user(tty_chars_in_buffer(tty), (int __user *) arg); case TIOCINQ: down_write(&tty->termios_rwsem); if (L_ICANON(tty)) retval = inq_canon(ldata); else retval = read_cnt(ldata); up_write(&tty->termios_rwsem); return put_user(retval, (unsigned int __user *) arg); default: return n_tty_ioctl_helper(tty, file, cmd, arg); } } static void n_tty_fasync(struct tty_struct *tty, int on) { struct n_tty_data *ldata = tty->disc_data; if (!waitqueue_active(&tty->read_wait)) { if (on) ldata->minimum_to_wake = 1; else if (!tty->fasync) ldata->minimum_to_wake = N_TTY_BUF_SIZE; } } struct tty_ldisc_ops tty_ldisc_N_TTY = { .magic = TTY_LDISC_MAGIC, .name = "n_tty", .open = n_tty_open, .close = n_tty_close, .flush_buffer = n_tty_flush_buffer, .chars_in_buffer = n_tty_chars_in_buffer, .read = n_tty_read, .write = n_tty_write, .ioctl = n_tty_ioctl, .set_termios = n_tty_set_termios, .poll = n_tty_poll, .receive_buf = n_tty_receive_buf, .write_wakeup = n_tty_write_wakeup, .fasync = n_tty_fasync, .receive_buf2 = n_tty_receive_buf2, }; /** * n_tty_inherit_ops - inherit N_TTY methods * @ops: struct tty_ldisc_ops where to save N_TTY methods * * Enables a 'subclass' line discipline to 'inherit' N_TTY * methods. */ void n_tty_inherit_ops(struct tty_ldisc_ops *ops) { *ops = tty_ldisc_N_TTY; ops->owner = NULL; ops->refcount = ops->flags = 0; } EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
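/*
 * Illustrative, hypothetical userspace sketch: switching a terminal to a
 * raw, non-canonical mode is the userspace view of the ICANON/ECHO and
 * VMIN/VTIME handling implemented by the N_TTY line discipline above.
 */
#include <termios.h>

static int set_raw_sketch(int fd)
{
	struct termios t;

	if (tcgetattr(fd, &t) < 0)
		return -1;
	t.c_lflag &= ~(ICANON | ECHO | ISIG);	/* no line editing, echoing or signal chars */
	t.c_iflag &= ~(IXON | ICRNL);		/* no flow control or CR -> NL mapping */
	t.c_cc[VMIN] = 1;			/* read() returns once at least one byte arrived */
	t.c_cc[VTIME] = 0;			/* ... with no inter-byte timeout */
	return tcsetattr(fd, TCSANOW, &t);
}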
/* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * PACKET - implements raw packet sockets. * * Authors: Ross Biro * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> * Alan Cox, <gw4pts@gw4pts.ampr.org> * * Fixes: * Alan Cox : verify_area() now used correctly * Alan Cox : new skbuff lists, look ma no backlogs! * Alan Cox : tidied skbuff lists. * Alan Cox : Now uses generic datagram routines I * added. Also fixed the peek/read crash * from all old Linux datagram code. * Alan Cox : Uses the improved datagram code. * Alan Cox : Added NULL's for socket options. * Alan Cox : Re-commented the code. * Alan Cox : Use new kernel side addressing * Rob Janssen : Correct MTU usage. * Dave Platt : Counter leaks caused by incorrect * interrupt locking and some slightly * dubious gcc output. Can you read * compiler: it said _VOLATILE_ * Richard Kooijman : Timestamp fixes. * Alan Cox : New buffers. Use sk->mac.raw. * Alan Cox : sendmsg/recvmsg support. * Alan Cox : Protocol setting support * Alexey Kuznetsov : Untied from IPv4 stack. * Cyrus Durgin : Fixed kerneld for kmod. * Michal Ostrowski : Module initialization cleanup. * Ulises Alonso : Frame number limit removal and * packet_set_ring memory leak. * Eric Biederman : Allow for > 8 byte hardware addresses. * The convention is that longer addresses * will simply extend the hardware address * byte arrays at the end of sockaddr_ll * and packet_mreq. * Johann Baudy : Added TX RING. * Chetan Loke : Implemented TPACKET_V3 block abstraction * layer. * Copyright (C) 2011, <lokec@ccs.neu.edu> * * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * */ #include <linux/types.h> #include <linux/mm.h> #include <linux/capability.h> #include <linux/fcntl.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/inet.h> #include <linux/netdevice.h> #include <linux/if_packet.h> #include <linux/wireless.h> #include <linux/kernel.h> #include <linux/kmod.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <net/net_namespace.h> #include <net/ip.h> #include <net/protocol.h> #include <linux/skbuff.h> #include <net/sock.h> #include <linux/errno.h> #include <linux/timer.h> #include <linux/uaccess.h> #include <asm/ioctls.h> #include <asm/page.h> #include <asm/cacheflush.h> #include <asm/io.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/poll.h> #include <linux/module.h> #include <linux/init.h> #include <linux/mutex.h> #include <linux/if_vlan.h> #include <linux/virtio_net.h> #include <linux/errqueue.h> #include <linux/net_tstamp.h> #include <linux/percpu.h> #ifdef CONFIG_INET #include <net/inet_common.h> #endif #include <linux/bpf.h> #include <net/compat.h> #include "internal.h" /* Assumptions: - if device has no dev->hard_header routine, it adds and removes ll header inside itself. In this case ll header is invisible outside of device, but higher levels still should reserve dev->hard_header_len. Some devices are enough clever to reallocate skb, when header will not fit to reserved space (tunnel), another ones are silly (PPP). - packet socket receives packets with pulled ll header, so that SOCK_RAW should push it back. 
On receive: ----------- Incoming, dev->hard_header!=NULL mac_header -> ll header data -> data Outgoing, dev->hard_header!=NULL mac_header -> ll header data -> ll header Incoming, dev->hard_header==NULL mac_header -> UNKNOWN position. It is very likely, that it points to ll header. PPP makes it, that is wrong, because introduce assymetry between rx and tx paths. data -> data Outgoing, dev->hard_header==NULL mac_header -> data. ll header is still not built! data -> data Resume If dev->hard_header==NULL we are unlikely to restore sensible ll header. On transmit: ------------ dev->hard_header != NULL mac_header -> ll header data -> ll header dev->hard_header == NULL (ll header is added by device, we cannot control it) mac_header -> data data -> data We should set nh.raw on output to correct posistion, packet classifier depends on it. */ /* Private packet socket structures. */ /* identical to struct packet_mreq except it has * a longer address field. */ struct packet_mreq_max { int mr_ifindex; unsigned short mr_type; unsigned short mr_alen; unsigned char mr_address[MAX_ADDR_LEN]; }; union tpacket_uhdr { struct tpacket_hdr *h1; struct tpacket2_hdr *h2; struct tpacket3_hdr *h3; void *raw; }; static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, int closing, int tx_ring); #define V3_ALIGNMENT (8) #define BLK_HDR_LEN (ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT)) #define BLK_PLUS_PRIV(sz_of_priv) \ (BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT)) #define BLOCK_STATUS(x) ((x)->hdr.bh1.block_status) #define BLOCK_NUM_PKTS(x) ((x)->hdr.bh1.num_pkts) #define BLOCK_O2FP(x) ((x)->hdr.bh1.offset_to_first_pkt) #define BLOCK_LEN(x) ((x)->hdr.bh1.blk_len) #define BLOCK_SNUM(x) ((x)->hdr.bh1.seq_num) #define BLOCK_O2PRIV(x) ((x)->offset_to_priv) #define BLOCK_PRIV(x) ((void *)((char *)(x) + BLOCK_O2PRIV(x))) struct packet_sock; static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev); static void *packet_previous_frame(struct packet_sock *po, struct packet_ring_buffer *rb, int status); static void packet_increment_head(struct packet_ring_buffer *buff); static int prb_curr_blk_in_use(struct tpacket_block_desc *); static void *prb_dispatch_next_block(struct tpacket_kbdq_core *, struct packet_sock *); static void prb_retire_current_block(struct tpacket_kbdq_core *, struct packet_sock *, unsigned int status); static int prb_queue_frozen(struct tpacket_kbdq_core *); static void prb_open_block(struct tpacket_kbdq_core *, struct tpacket_block_desc *); static void prb_retire_rx_blk_timer_expired(unsigned long); static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *); static void prb_init_blk_timer(struct packet_sock *, struct tpacket_kbdq_core *, void (*func) (unsigned long)); static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *); static void prb_clear_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *); static void prb_fill_vlan_info(struct tpacket_kbdq_core *, struct tpacket3_hdr *); static void packet_flush_mclist(struct sock *sk); static void packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb); struct packet_skb_cb { union { struct sockaddr_pkt pkt; union { /* Trick: alias skb original length with * ll.sll_family and ll.protocol in order * to save room. 
*/ unsigned int origlen; struct sockaddr_ll ll; }; } sa; }; #define vio_le() virtio_legacy_is_little_endian() #define PACKET_SKB_CB(__skb) ((struct packet_skb_cb *)((__skb)->cb)) #define GET_PBDQC_FROM_RB(x) ((struct tpacket_kbdq_core *)(&(x)->prb_bdqc)) #define GET_PBLOCK_DESC(x, bid) \ ((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer)) #define GET_CURR_PBLOCK_DESC_FROM_CORE(x) \ ((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer)) #define GET_NEXT_PRB_BLK_NUM(x) \ (((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \ ((x)->kactive_blk_num+1) : 0) static void __fanout_unlink(struct sock *sk, struct packet_sock *po); static void __fanout_link(struct sock *sk, struct packet_sock *po); static int packet_direct_xmit(struct sk_buff *skb) { struct net_device *dev = skb->dev; struct sk_buff *orig_skb = skb; struct netdev_queue *txq; int ret = NETDEV_TX_BUSY; if (unlikely(!netif_running(dev) || !netif_carrier_ok(dev))) goto drop; skb = validate_xmit_skb_list(skb, dev); if (skb != orig_skb) goto drop; packet_pick_tx_queue(dev, skb); txq = skb_get_tx_queue(dev, skb); local_bh_disable(); HARD_TX_LOCK(dev, txq, smp_processor_id()); if (!netif_xmit_frozen_or_drv_stopped(txq)) ret = netdev_start_xmit(skb, dev, txq, false); HARD_TX_UNLOCK(dev, txq); local_bh_enable(); if (!dev_xmit_complete(ret)) kfree_skb(skb); return ret; drop: atomic_long_inc(&dev->tx_dropped); kfree_skb_list(skb); return NET_XMIT_DROP; } static struct net_device *packet_cached_dev_get(struct packet_sock *po) { struct net_device *dev; rcu_read_lock(); dev = rcu_dereference(po->cached_dev); if (likely(dev)) dev_hold(dev); rcu_read_unlock(); return dev; } static void packet_cached_dev_assign(struct packet_sock *po, struct net_device *dev) { rcu_assign_pointer(po->cached_dev, dev); } static void packet_cached_dev_reset(struct packet_sock *po) { RCU_INIT_POINTER(po->cached_dev, NULL); } static bool packet_use_direct_xmit(const struct packet_sock *po) { return po->xmit == packet_direct_xmit; } static u16 __packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb) { return (u16) raw_smp_processor_id() % dev->real_num_tx_queues; } static void packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb) { const struct net_device_ops *ops = dev->netdev_ops; u16 queue_index; if (ops->ndo_select_queue) { queue_index = ops->ndo_select_queue(dev, skb, NULL, __packet_pick_tx_queue); queue_index = netdev_cap_txqueue(dev, queue_index); } else { queue_index = __packet_pick_tx_queue(dev, skb); } skb_set_queue_mapping(skb, queue_index); } /* register_prot_hook must be invoked with the po->bind_lock held, * or from a context in which asynchronous accesses to the packet * socket is not possible (packet_create()). */ static void register_prot_hook(struct sock *sk) { struct packet_sock *po = pkt_sk(sk); if (!po->running) { if (po->fanout) __fanout_link(sk, po); else dev_add_pack(&po->prot_hook); sock_hold(sk); po->running = 1; } } /* {,__}unregister_prot_hook() must be invoked with the po->bind_lock * held. If the sync parameter is true, we will temporarily drop * the po->bind_lock and do a synchronize_net to make sure no * asynchronous packet processing paths still refer to the elements * of po->prot_hook. If the sync parameter is false, it is the * callers responsibility to take care of this. 
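 *
 * A hedged sketch of the caller pattern implied by the rule above (the
 * surrounding rebinding logic is not shown here and is only assumed):
 *
 *	spin_lock(&po->bind_lock);
 *	unregister_prot_hook(sk, true);	// may drop and retake bind_lock to sync
 *	// ... switch po->prot_hook to the new protocol / device ...
 *	register_prot_hook(sk);
 *	spin_unlock(&po->bind_lock);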
*/ static void __unregister_prot_hook(struct sock *sk, bool sync) { struct packet_sock *po = pkt_sk(sk); po->running = 0; if (po->fanout) __fanout_unlink(sk, po); else __dev_remove_pack(&po->prot_hook); __sock_put(sk); if (sync) { spin_unlock(&po->bind_lock); synchronize_net(); spin_lock(&po->bind_lock); } } static void unregister_prot_hook(struct sock *sk, bool sync) { struct packet_sock *po = pkt_sk(sk); if (po->running) __unregister_prot_hook(sk, sync); } static inline struct page * __pure pgv_to_page(void *addr) { if (is_vmalloc_addr(addr)) return vmalloc_to_page(addr); return virt_to_page(addr); } static void __packet_set_status(struct packet_sock *po, void *frame, int status) { union tpacket_uhdr h; h.raw = frame; switch (po->tp_version) { case TPACKET_V1: h.h1->tp_status = status; flush_dcache_page(pgv_to_page(&h.h1->tp_status)); break; case TPACKET_V2: h.h2->tp_status = status; flush_dcache_page(pgv_to_page(&h.h2->tp_status)); break; case TPACKET_V3: h.h3->tp_status = status; flush_dcache_page(pgv_to_page(&h.h3->tp_status)); break; default: WARN(1, "TPACKET version not supported.\n"); BUG(); } smp_wmb(); } static int __packet_get_status(struct packet_sock *po, void *frame) { union tpacket_uhdr h; smp_rmb(); h.raw = frame; switch (po->tp_version) { case TPACKET_V1: flush_dcache_page(pgv_to_page(&h.h1->tp_status)); return h.h1->tp_status; case TPACKET_V2: flush_dcache_page(pgv_to_page(&h.h2->tp_status)); return h.h2->tp_status; case TPACKET_V3: flush_dcache_page(pgv_to_page(&h.h3->tp_status)); return h.h3->tp_status; default: WARN(1, "TPACKET version not supported.\n"); BUG(); return 0; } } static __u32 tpacket_get_timestamp(struct sk_buff *skb, struct timespec *ts, unsigned int flags) { struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); if (shhwtstamps && (flags & SOF_TIMESTAMPING_RAW_HARDWARE) && ktime_to_timespec_cond(shhwtstamps->hwtstamp, ts)) return TP_STATUS_TS_RAW_HARDWARE; if (ktime_to_timespec_cond(skb->tstamp, ts)) return TP_STATUS_TS_SOFTWARE; return 0; } static __u32 __packet_set_timestamp(struct packet_sock *po, void *frame, struct sk_buff *skb) { union tpacket_uhdr h; struct timespec ts; __u32 ts_status; if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp))) return 0; h.raw = frame; switch (po->tp_version) { case TPACKET_V1: h.h1->tp_sec = ts.tv_sec; h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC; break; case TPACKET_V2: h.h2->tp_sec = ts.tv_sec; h.h2->tp_nsec = ts.tv_nsec; break; case TPACKET_V3: h.h3->tp_sec = ts.tv_sec; h.h3->tp_nsec = ts.tv_nsec; break; default: WARN(1, "TPACKET version not supported.\n"); BUG(); } /* one flush is safe, as both fields always lie on the same cacheline */ flush_dcache_page(pgv_to_page(&h.h1->tp_sec)); smp_wmb(); return ts_status; } static void *packet_lookup_frame(struct packet_sock *po, struct packet_ring_buffer *rb, unsigned int position, int status) { unsigned int pg_vec_pos, frame_offset; union tpacket_uhdr h; pg_vec_pos = position / rb->frames_per_block; frame_offset = position % rb->frames_per_block; h.raw = rb->pg_vec[pg_vec_pos].buffer + (frame_offset * rb->frame_size); if (status != __packet_get_status(po, h.raw)) return NULL; return h.raw; } static void *packet_current_frame(struct packet_sock *po, struct packet_ring_buffer *rb, int status) { return packet_lookup_frame(po, rb, rb->head, status); } static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc) { del_timer_sync(&pkc->retire_blk_timer); } static void prb_shutdown_retire_blk_timer(struct packet_sock *po, struct sk_buff_head *rb_queue) { 
struct tpacket_kbdq_core *pkc; pkc = GET_PBDQC_FROM_RB(&po->rx_ring); spin_lock_bh(&rb_queue->lock); pkc->delete_blk_timer = 1; spin_unlock_bh(&rb_queue->lock); prb_del_retire_blk_timer(pkc); } static void prb_init_blk_timer(struct packet_sock *po, struct tpacket_kbdq_core *pkc, void (*func) (unsigned long)) { init_timer(&pkc->retire_blk_timer); pkc->retire_blk_timer.data = (long)po; pkc->retire_blk_timer.function = func; pkc->retire_blk_timer.expires = jiffies; } static void prb_setup_retire_blk_timer(struct packet_sock *po) { struct tpacket_kbdq_core *pkc; pkc = GET_PBDQC_FROM_RB(&po->rx_ring); prb_init_blk_timer(po, pkc, prb_retire_rx_blk_timer_expired); } static int prb_calc_retire_blk_tmo(struct packet_sock *po, int blk_size_in_bytes) { struct net_device *dev; unsigned int mbits = 0, msec = 0, div = 0, tmo = 0; struct ethtool_link_ksettings ecmd; int err; rtnl_lock(); dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex); if (unlikely(!dev)) { rtnl_unlock(); return DEFAULT_PRB_RETIRE_TOV; } err = __ethtool_get_link_ksettings(dev, &ecmd); rtnl_unlock(); if (!err) { /* * If the link speed is so slow you don't really * need to worry about perf anyways */ if (ecmd.base.speed < SPEED_1000 || ecmd.base.speed == SPEED_UNKNOWN) { return DEFAULT_PRB_RETIRE_TOV; } else { msec = 1; div = ecmd.base.speed / 1000; } } mbits = (blk_size_in_bytes * 8) / (1024 * 1024); if (div) mbits /= div; tmo = mbits * msec; if (div) return tmo+1; return tmo; } static void prb_init_ft_ops(struct tpacket_kbdq_core *p1, union tpacket_req_u *req_u) { p1->feature_req_word = req_u->req3.tp_feature_req_word; } static void init_prb_bdqc(struct packet_sock *po, struct packet_ring_buffer *rb, struct pgv *pg_vec, union tpacket_req_u *req_u) { struct tpacket_kbdq_core *p1 = GET_PBDQC_FROM_RB(rb); struct tpacket_block_desc *pbd; memset(p1, 0x0, sizeof(*p1)); p1->knxt_seq_num = 1; p1->pkbdq = pg_vec; pbd = (struct tpacket_block_desc *)pg_vec[0].buffer; p1->pkblk_start = pg_vec[0].buffer; p1->kblk_size = req_u->req3.tp_block_size; p1->knum_blocks = req_u->req3.tp_block_nr; p1->hdrlen = po->tp_hdrlen; p1->version = po->tp_version; p1->last_kactive_blk_num = 0; po->stats.stats3.tp_freeze_q_cnt = 0; if (req_u->req3.tp_retire_blk_tov) p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov; else p1->retire_blk_tov = prb_calc_retire_blk_tmo(po, req_u->req3.tp_block_size); p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov); p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv; p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv); prb_init_ft_ops(p1, req_u); prb_setup_retire_blk_timer(po); prb_open_block(p1, pbd); } /* Do NOT update the last_blk_num first. * Assumes sk_buff_head lock is held. */ static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc) { mod_timer(&pkc->retire_blk_timer, jiffies + pkc->tov_in_jiffies); pkc->last_kactive_blk_num = pkc->kactive_blk_num; } /* * Timer logic: * 1) We refresh the timer only when we open a block. * By doing this we don't waste cycles refreshing the timer * on packet-by-packet basis. * * With a 1MB block-size, on a 1Gbps line, it will take * i) ~8 ms to fill a block + ii) memcpy etc. * In this cut we are not accounting for the memcpy time. * * So, if the user sets the 'tmo' to 10ms then the timer * will never fire while the block is still getting filled * (which is what we want). However, the user could choose * to close a block early and that's fine. * * But when the timer does fire, we check whether or not to refresh it. 
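 *
 * Worked example of the derived timeout, following prb_calc_retire_blk_tmo()
 * above (the block size and link speed are only illustrative):
 *	tp_block_size = 1 MiB on a 1 Gbit/s link:
 *		mbits = (1048576 * 8) / (1024 * 1024) = 8, div = 1000 / 1000 = 1
 *		tmo = 8 * 1 msec, returned as tmo + 1 = 9 msecs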
* Since the tmo granularity is in msecs, it is not too expensive * to refresh the timer, lets say every '8' msecs. * Either the user can set the 'tmo' or we can derive it based on * a) line-speed and b) block-size. * prb_calc_retire_blk_tmo() calculates the tmo. * */ static void prb_retire_rx_blk_timer_expired(unsigned long data) { struct packet_sock *po = (struct packet_sock *)data; struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(&po->rx_ring); unsigned int frozen; struct tpacket_block_desc *pbd; spin_lock(&po->sk.sk_receive_queue.lock); frozen = prb_queue_frozen(pkc); pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc); if (unlikely(pkc->delete_blk_timer)) goto out; /* We only need to plug the race when the block is partially filled. * tpacket_rcv: * lock(); increment BLOCK_NUM_PKTS; unlock() * copy_bits() is in progress ... * timer fires on other cpu: * we can't retire the current block because copy_bits * is in progress. * */ if (BLOCK_NUM_PKTS(pbd)) { while (atomic_read(&pkc->blk_fill_in_prog)) { /* Waiting for skb_copy_bits to finish... */ cpu_relax(); } } if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) { if (!frozen) { if (!BLOCK_NUM_PKTS(pbd)) { /* An empty block. Just refresh the timer. */ goto refresh_timer; } prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO); if (!prb_dispatch_next_block(pkc, po)) goto refresh_timer; else goto out; } else { /* Case 1. Queue was frozen because user-space was * lagging behind. */ if (prb_curr_blk_in_use(pbd)) { /* * Ok, user-space is still behind. * So just refresh the timer. */ goto refresh_timer; } else { /* Case 2. queue was frozen,user-space caught up, * now the link went idle && the timer fired. * We don't have a block to close.So we open this * block and restart the timer. * opening a block thaws the queue,restarts timer * Thawing/timer-refresh is a side effect. */ prb_open_block(pkc, pbd); goto out; } } } refresh_timer: _prb_refresh_rx_retire_blk_timer(pkc); out: spin_unlock(&po->sk.sk_receive_queue.lock); } static void prb_flush_block(struct tpacket_kbdq_core *pkc1, struct tpacket_block_desc *pbd1, __u32 status) { /* Flush everything minus the block header */ #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1 u8 *start, *end; start = (u8 *)pbd1; /* Skip the block header(we know header WILL fit in 4K) */ start += PAGE_SIZE; end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end); for (; start < end; start += PAGE_SIZE) flush_dcache_page(pgv_to_page(start)); smp_wmb(); #endif /* Now update the block status. */ BLOCK_STATUS(pbd1) = status; /* Flush the block header */ #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1 start = (u8 *)pbd1; flush_dcache_page(pgv_to_page(start)); smp_wmb(); #endif } /* * Side effect: * * 1) flush the block * 2) Increment active_blk_num * * Note:We DONT refresh the timer on purpose. * Because almost always the next block will be opened. */ static void prb_close_block(struct tpacket_kbdq_core *pkc1, struct tpacket_block_desc *pbd1, struct packet_sock *po, unsigned int stat) { __u32 status = TP_STATUS_USER | stat; struct tpacket3_hdr *last_pkt; struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1; struct sock *sk = &po->sk; if (po->stats.stats3.tp_drops) status |= TP_STATUS_LOSING; last_pkt = (struct tpacket3_hdr *)pkc1->prev; last_pkt->tp_next_offset = 0; /* Get the ts of the last pkt */ if (BLOCK_NUM_PKTS(pbd1)) { h1->ts_last_pkt.ts_sec = last_pkt->tp_sec; h1->ts_last_pkt.ts_nsec = last_pkt->tp_nsec; } else { /* Ok, we tmo'd - so get the current time. * * It shouldn't really happen as we don't close empty * blocks. 
See prb_retire_rx_blk_timer_expired(). */ struct timespec ts; getnstimeofday(&ts); h1->ts_last_pkt.ts_sec = ts.tv_sec; h1->ts_last_pkt.ts_nsec = ts.tv_nsec; } smp_wmb(); /* Flush the block */ prb_flush_block(pkc1, pbd1, status); sk->sk_data_ready(sk); pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1); } static void prb_thaw_queue(struct tpacket_kbdq_core *pkc) { pkc->reset_pending_on_curr_blk = 0; } /* * Side effect of opening a block: * * 1) prb_queue is thawed. * 2) retire_blk_timer is refreshed. * */ static void prb_open_block(struct tpacket_kbdq_core *pkc1, struct tpacket_block_desc *pbd1) { struct timespec ts; struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1; smp_rmb(); /* We could have just memset this but we will lose the * flexibility of making the priv area sticky */ BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++; BLOCK_NUM_PKTS(pbd1) = 0; BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv); getnstimeofday(&ts); h1->ts_first_pkt.ts_sec = ts.tv_sec; h1->ts_first_pkt.ts_nsec = ts.tv_nsec; pkc1->pkblk_start = (char *)pbd1; pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv); BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv); BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN; pbd1->version = pkc1->version; pkc1->prev = pkc1->nxt_offset; pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size; prb_thaw_queue(pkc1); _prb_refresh_rx_retire_blk_timer(pkc1); smp_wmb(); } /* * Queue freeze logic: * 1) Assume tp_block_nr = 8 blocks. * 2) At time 't0', user opens Rx ring. * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7 * 4) user-space is either sleeping or processing block '0'. * 5) tpacket_rcv is currently filling block '7', since there is no space left, * it will close block-7,loop around and try to fill block '0'. * call-flow: * __packet_lookup_frame_in_block * prb_retire_current_block() * prb_dispatch_next_block() * |->(BLOCK_STATUS == USER) evaluates to true * 5.1) Since block-0 is currently in-use, we just freeze the queue. * 6) Now there are two cases: * 6.1) Link goes idle right after the queue is frozen. * But remember, the last open_block() refreshed the timer. * When this timer expires,it will refresh itself so that we can * re-open block-0 in near future. * 6.2) Link is busy and keeps on receiving packets. This is a simple * case and __packet_lookup_frame_in_block will check if block-0 * is free and can now be re-used. */ static void prb_freeze_queue(struct tpacket_kbdq_core *pkc, struct packet_sock *po) { pkc->reset_pending_on_curr_blk = 1; po->stats.stats3.tp_freeze_q_cnt++; } #define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT)) /* * If the next block is free then we will dispatch it * and return a good offset. * Else, we will freeze the queue. * So, caller must check the return value. */ static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc, struct packet_sock *po) { struct tpacket_block_desc *pbd; smp_rmb(); /* 1. Get current block num */ pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc); /* 2. If this block is currently in_use then freeze the queue */ if (TP_STATUS_USER & BLOCK_STATUS(pbd)) { prb_freeze_queue(pkc, po); return NULL; } /* * 3. * open this block and return the offset where the first packet * needs to get stored. 
*/ prb_open_block(pkc, pbd); return (void *)pkc->nxt_offset; } static void prb_retire_current_block(struct tpacket_kbdq_core *pkc, struct packet_sock *po, unsigned int status) { struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc); /* retire/close the current block */ if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) { /* * Plug the case where copy_bits() is in progress on * cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't * have space to copy the pkt in the current block and * called prb_retire_current_block() * * We don't need to worry about the TMO case because * the timer-handler already handled this case. */ if (!(status & TP_STATUS_BLK_TMO)) { while (atomic_read(&pkc->blk_fill_in_prog)) { /* Waiting for skb_copy_bits to finish... */ cpu_relax(); } } prb_close_block(pkc, pbd, po, status); return; } } static int prb_curr_blk_in_use(struct tpacket_block_desc *pbd) { return TP_STATUS_USER & BLOCK_STATUS(pbd); } static int prb_queue_frozen(struct tpacket_kbdq_core *pkc) { return pkc->reset_pending_on_curr_blk; } static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb) { struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb); atomic_dec(&pkc->blk_fill_in_prog); } static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc, struct tpacket3_hdr *ppd) { ppd->hv1.tp_rxhash = skb_get_hash(pkc->skb); } static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc, struct tpacket3_hdr *ppd) { ppd->hv1.tp_rxhash = 0; } static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc, struct tpacket3_hdr *ppd) { if (skb_vlan_tag_present(pkc->skb)) { ppd->hv1.tp_vlan_tci = skb_vlan_tag_get(pkc->skb); ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->vlan_proto); ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID; } else { ppd->hv1.tp_vlan_tci = 0; ppd->hv1.tp_vlan_tpid = 0; ppd->tp_status = TP_STATUS_AVAILABLE; } } static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc, struct tpacket3_hdr *ppd) { ppd->hv1.tp_padding = 0; prb_fill_vlan_info(pkc, ppd); if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH) prb_fill_rxhash(pkc, ppd); else prb_clear_rxhash(pkc, ppd); } static void prb_fill_curr_block(char *curr, struct tpacket_kbdq_core *pkc, struct tpacket_block_desc *pbd, unsigned int len) { struct tpacket3_hdr *ppd; ppd = (struct tpacket3_hdr *)curr; ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len); pkc->prev = curr; pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len); BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len); BLOCK_NUM_PKTS(pbd) += 1; atomic_inc(&pkc->blk_fill_in_prog); prb_run_all_ft_ops(pkc, ppd); } /* Assumes caller has the sk->rx_queue.lock */ static void *__packet_lookup_frame_in_block(struct packet_sock *po, struct sk_buff *skb, int status, unsigned int len ) { struct tpacket_kbdq_core *pkc; struct tpacket_block_desc *pbd; char *curr, *end; pkc = GET_PBDQC_FROM_RB(&po->rx_ring); pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc); /* Queue is frozen when user space is lagging behind */ if (prb_queue_frozen(pkc)) { /* * Check if that last block which caused the queue to freeze, * is still in_use by user-space. */ if (prb_curr_blk_in_use(pbd)) { /* Can't record this packet */ return NULL; } else { /* * Ok, the block was released by user-space. * Now let's open that block. * opening a block also thaws the queue. * Thawing is a side effect. 
*/ prb_open_block(pkc, pbd); } } smp_mb(); curr = pkc->nxt_offset; pkc->skb = skb; end = (char *)pbd + pkc->kblk_size; /* first try the current block */ if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) { prb_fill_curr_block(curr, pkc, pbd, len); return (void *)curr; } /* Ok, close the current block */ prb_retire_current_block(pkc, po, 0); /* Now, try to dispatch the next block */ curr = (char *)prb_dispatch_next_block(pkc, po); if (curr) { pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc); prb_fill_curr_block(curr, pkc, pbd, len); return (void *)curr; } /* * No free blocks are available.user_space hasn't caught up yet. * Queue was just frozen and now this packet will get dropped. */ return NULL; } static void *packet_current_rx_frame(struct packet_sock *po, struct sk_buff *skb, int status, unsigned int len) { char *curr = NULL; switch (po->tp_version) { case TPACKET_V1: case TPACKET_V2: curr = packet_lookup_frame(po, &po->rx_ring, po->rx_ring.head, status); return curr; case TPACKET_V3: return __packet_lookup_frame_in_block(po, skb, status, len); default: WARN(1, "TPACKET version not supported\n"); BUG(); return NULL; } } static void *prb_lookup_block(struct packet_sock *po, struct packet_ring_buffer *rb, unsigned int idx, int status) { struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb); struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, idx); if (status != BLOCK_STATUS(pbd)) return NULL; return pbd; } static int prb_previous_blk_num(struct packet_ring_buffer *rb) { unsigned int prev; if (rb->prb_bdqc.kactive_blk_num) prev = rb->prb_bdqc.kactive_blk_num-1; else prev = rb->prb_bdqc.knum_blocks-1; return prev; } /* Assumes caller has held the rx_queue.lock */ static void *__prb_previous_block(struct packet_sock *po, struct packet_ring_buffer *rb, int status) { unsigned int previous = prb_previous_blk_num(rb); return prb_lookup_block(po, rb, previous, status); } static void *packet_previous_rx_frame(struct packet_sock *po, struct packet_ring_buffer *rb, int status) { if (po->tp_version <= TPACKET_V2) return packet_previous_frame(po, rb, status); return __prb_previous_block(po, rb, status); } static void packet_increment_rx_head(struct packet_sock *po, struct packet_ring_buffer *rb) { switch (po->tp_version) { case TPACKET_V1: case TPACKET_V2: return packet_increment_head(rb); case TPACKET_V3: default: WARN(1, "TPACKET version not supported.\n"); BUG(); return; } } static void *packet_previous_frame(struct packet_sock *po, struct packet_ring_buffer *rb, int status) { unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max; return packet_lookup_frame(po, rb, previous, status); } static void packet_increment_head(struct packet_ring_buffer *buff) { buff->head = buff->head != buff->frame_max ? buff->head+1 : 0; } static void packet_inc_pending(struct packet_ring_buffer *rb) { this_cpu_inc(*rb->pending_refcnt); } static void packet_dec_pending(struct packet_ring_buffer *rb) { this_cpu_dec(*rb->pending_refcnt); } static unsigned int packet_read_pending(const struct packet_ring_buffer *rb) { unsigned int refcnt = 0; int cpu; /* We don't use pending refcount in rx_ring. 
*/ if (rb->pending_refcnt == NULL) return 0; for_each_possible_cpu(cpu) refcnt += *per_cpu_ptr(rb->pending_refcnt, cpu); return refcnt; } static int packet_alloc_pending(struct packet_sock *po) { po->rx_ring.pending_refcnt = NULL; po->tx_ring.pending_refcnt = alloc_percpu(unsigned int); if (unlikely(po->tx_ring.pending_refcnt == NULL)) return -ENOBUFS; return 0; } static void packet_free_pending(struct packet_sock *po) { free_percpu(po->tx_ring.pending_refcnt); } #define ROOM_POW_OFF 2 #define ROOM_NONE 0x0 #define ROOM_LOW 0x1 #define ROOM_NORMAL 0x2 static bool __tpacket_has_room(struct packet_sock *po, int pow_off) { int idx, len; len = po->rx_ring.frame_max + 1; idx = po->rx_ring.head; if (pow_off) idx += len >> pow_off; if (idx >= len) idx -= len; return packet_lookup_frame(po, &po->rx_ring, idx, TP_STATUS_KERNEL); } static bool __tpacket_v3_has_room(struct packet_sock *po, int pow_off) { int idx, len; len = po->rx_ring.prb_bdqc.knum_blocks; idx = po->rx_ring.prb_bdqc.kactive_blk_num; if (pow_off) idx += len >> pow_off; if (idx >= len) idx -= len; return prb_lookup_block(po, &po->rx_ring, idx, TP_STATUS_KERNEL); } static int __packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb) { struct sock *sk = &po->sk; int ret = ROOM_NONE; if (po->prot_hook.func != tpacket_rcv) { int avail = sk->sk_rcvbuf - atomic_read(&sk->sk_rmem_alloc) - (skb ? skb->truesize : 0); if (avail > (sk->sk_rcvbuf >> ROOM_POW_OFF)) return ROOM_NORMAL; else if (avail > 0) return ROOM_LOW; else return ROOM_NONE; } if (po->tp_version == TPACKET_V3) { if (__tpacket_v3_has_room(po, ROOM_POW_OFF)) ret = ROOM_NORMAL; else if (__tpacket_v3_has_room(po, 0)) ret = ROOM_LOW; } else { if (__tpacket_has_room(po, ROOM_POW_OFF)) ret = ROOM_NORMAL; else if (__tpacket_has_room(po, 0)) ret = ROOM_LOW; } return ret; } static int packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb) { int ret; bool has_room; spin_lock_bh(&po->sk.sk_receive_queue.lock); ret = __packet_rcv_has_room(po, skb); has_room = ret == ROOM_NORMAL; if (po->pressure == has_room) po->pressure = !has_room; spin_unlock_bh(&po->sk.sk_receive_queue.lock); return ret; } static void packet_sock_destruct(struct sock *sk) { skb_queue_purge(&sk->sk_error_queue); WARN_ON(atomic_read(&sk->sk_rmem_alloc)); WARN_ON(refcount_read(&sk->sk_wmem_alloc)); if (!sock_flag(sk, SOCK_DEAD)) { pr_err("Attempt to release alive packet socket: %p\n", sk); return; } sk_refcnt_debug_dec(sk); } static bool fanout_flow_is_huge(struct packet_sock *po, struct sk_buff *skb) { u32 rxhash; int i, count = 0; rxhash = skb_get_hash(skb); for (i = 0; i < ROLLOVER_HLEN; i++) if (po->rollover->history[i] == rxhash) count++; po->rollover->history[prandom_u32() % ROLLOVER_HLEN] = rxhash; return count > (ROLLOVER_HLEN >> 1); } static unsigned int fanout_demux_hash(struct packet_fanout *f, struct sk_buff *skb, unsigned int num) { return reciprocal_scale(__skb_get_hash_symmetric(skb), num); } static unsigned int fanout_demux_lb(struct packet_fanout *f, struct sk_buff *skb, unsigned int num) { unsigned int val = atomic_inc_return(&f->rr_cur); return val % num; } static unsigned int fanout_demux_cpu(struct packet_fanout *f, struct sk_buff *skb, unsigned int num) { return smp_processor_id() % num; } static unsigned int fanout_demux_rnd(struct packet_fanout *f, struct sk_buff *skb, unsigned int num) { return prandom_u32_max(num); } static unsigned int fanout_demux_rollover(struct packet_fanout *f, struct sk_buff *skb, unsigned int idx, bool try_self, unsigned int num) { struct packet_sock *po, 
*po_next, *po_skip = NULL; unsigned int i, j, room = ROOM_NONE; po = pkt_sk(f->arr[idx]); if (try_self) { room = packet_rcv_has_room(po, skb); if (room == ROOM_NORMAL || (room == ROOM_LOW && !fanout_flow_is_huge(po, skb))) return idx; po_skip = po; } i = j = min_t(int, po->rollover->sock, num - 1); do { po_next = pkt_sk(f->arr[i]); if (po_next != po_skip && !po_next->pressure && packet_rcv_has_room(po_next, skb) == ROOM_NORMAL) { if (i != j) po->rollover->sock = i; atomic_long_inc(&po->rollover->num); if (room == ROOM_LOW) atomic_long_inc(&po->rollover->num_huge); return i; } if (++i == num) i = 0; } while (i != j); atomic_long_inc(&po->rollover->num_failed); return idx; } static unsigned int fanout_demux_qm(struct packet_fanout *f, struct sk_buff *skb, unsigned int num) { return skb_get_queue_mapping(skb) % num; } static unsigned int fanout_demux_bpf(struct packet_fanout *f, struct sk_buff *skb, unsigned int num) { struct bpf_prog *prog; unsigned int ret = 0; rcu_read_lock(); prog = rcu_dereference(f->bpf_prog); if (prog) ret = bpf_prog_run_clear_cb(prog, skb) % num; rcu_read_unlock(); return ret; } static bool fanout_has_flag(struct packet_fanout *f, u16 flag) { return f->flags & (flag >> 8); } static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) { struct packet_fanout *f = pt->af_packet_priv; unsigned int num = READ_ONCE(f->num_members); struct net *net = read_pnet(&f->net); struct packet_sock *po; unsigned int idx; if (!net_eq(dev_net(dev), net) || !num) { kfree_skb(skb); return 0; } if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) { skb = ip_check_defrag(net, skb, IP_DEFRAG_AF_PACKET); if (!skb) return 0; } switch (f->type) { case PACKET_FANOUT_HASH: default: idx = fanout_demux_hash(f, skb, num); break; case PACKET_FANOUT_LB: idx = fanout_demux_lb(f, skb, num); break; case PACKET_FANOUT_CPU: idx = fanout_demux_cpu(f, skb, num); break; case PACKET_FANOUT_RND: idx = fanout_demux_rnd(f, skb, num); break; case PACKET_FANOUT_QM: idx = fanout_demux_qm(f, skb, num); break; case PACKET_FANOUT_ROLLOVER: idx = fanout_demux_rollover(f, skb, 0, false, num); break; case PACKET_FANOUT_CBPF: case PACKET_FANOUT_EBPF: idx = fanout_demux_bpf(f, skb, num); break; } if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER)) idx = fanout_demux_rollover(f, skb, idx, true, num); po = pkt_sk(f->arr[idx]); return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev); } DEFINE_MUTEX(fanout_mutex); EXPORT_SYMBOL_GPL(fanout_mutex); static LIST_HEAD(fanout_list); static u16 fanout_next_id; static void __fanout_link(struct sock *sk, struct packet_sock *po) { struct packet_fanout *f = po->fanout; spin_lock(&f->lock); f->arr[f->num_members] = sk; smp_wmb(); f->num_members++; if (f->num_members == 1) dev_add_pack(&f->prot_hook); spin_unlock(&f->lock); } static void __fanout_unlink(struct sock *sk, struct packet_sock *po) { struct packet_fanout *f = po->fanout; int i; spin_lock(&f->lock); for (i = 0; i < f->num_members; i++) { if (f->arr[i] == sk) break; } BUG_ON(i >= f->num_members); f->arr[i] = f->arr[f->num_members - 1]; f->num_members--; if (f->num_members == 0) __dev_remove_pack(&f->prot_hook); spin_unlock(&f->lock); } static bool match_fanout_group(struct packet_type *ptype, struct sock *sk) { if (sk->sk_family != PF_PACKET) return false; return ptype->af_packet_priv == pkt_sk(sk)->fanout; } static void fanout_init_data(struct packet_fanout *f) { switch (f->type) { case PACKET_FANOUT_LB: atomic_set(&f->rr_cur, 0); break; case 
PACKET_FANOUT_CBPF: case PACKET_FANOUT_EBPF: RCU_INIT_POINTER(f->bpf_prog, NULL); break; } } static void __fanout_set_data_bpf(struct packet_fanout *f, struct bpf_prog *new) { struct bpf_prog *old; spin_lock(&f->lock); old = rcu_dereference_protected(f->bpf_prog, lockdep_is_held(&f->lock)); rcu_assign_pointer(f->bpf_prog, new); spin_unlock(&f->lock); if (old) { synchronize_net(); bpf_prog_destroy(old); } } static int fanout_set_data_cbpf(struct packet_sock *po, char __user *data, unsigned int len) { struct bpf_prog *new; struct sock_fprog fprog; int ret; if (sock_flag(&po->sk, SOCK_FILTER_LOCKED)) return -EPERM; if (len != sizeof(fprog)) return -EINVAL; if (copy_from_user(&fprog, data, len)) return -EFAULT; ret = bpf_prog_create_from_user(&new, &fprog, NULL, false); if (ret) return ret; __fanout_set_data_bpf(po->fanout, new); return 0; } static int fanout_set_data_ebpf(struct packet_sock *po, char __user *data, unsigned int len) { struct bpf_prog *new; u32 fd; if (sock_flag(&po->sk, SOCK_FILTER_LOCKED)) return -EPERM; if (len != sizeof(fd)) return -EINVAL; if (copy_from_user(&fd, data, len)) return -EFAULT; new = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER); if (IS_ERR(new)) return PTR_ERR(new); __fanout_set_data_bpf(po->fanout, new); return 0; } static int fanout_set_data(struct packet_sock *po, char __user *data, unsigned int len) { switch (po->fanout->type) { case PACKET_FANOUT_CBPF: return fanout_set_data_cbpf(po, data, len); case PACKET_FANOUT_EBPF: return fanout_set_data_ebpf(po, data, len); default: return -EINVAL; }; } static void fanout_release_data(struct packet_fanout *f) { switch (f->type) { case PACKET_FANOUT_CBPF: case PACKET_FANOUT_EBPF: __fanout_set_data_bpf(f, NULL); }; } static bool __fanout_id_is_free(struct sock *sk, u16 candidate_id) { struct packet_fanout *f; list_for_each_entry(f, &fanout_list, list) { if (f->id == candidate_id && read_pnet(&f->net) == sock_net(sk)) { return false; } } return true; } static bool fanout_find_new_id(struct sock *sk, u16 *new_id) { u16 id = fanout_next_id; do { if (__fanout_id_is_free(sk, id)) { *new_id = id; fanout_next_id = id + 1; return true; } id++; } while (id != fanout_next_id); return false; } static int fanout_add(struct sock *sk, u16 id, u16 type_flags) { struct packet_rollover *rollover = NULL; struct packet_sock *po = pkt_sk(sk); struct packet_fanout *f, *match; u8 type = type_flags & 0xff; u8 flags = type_flags >> 8; int err; switch (type) { case PACKET_FANOUT_ROLLOVER: if (type_flags & PACKET_FANOUT_FLAG_ROLLOVER) return -EINVAL; case PACKET_FANOUT_HASH: case PACKET_FANOUT_LB: case PACKET_FANOUT_CPU: case PACKET_FANOUT_RND: case PACKET_FANOUT_QM: case PACKET_FANOUT_CBPF: case PACKET_FANOUT_EBPF: break; default: return -EINVAL; } mutex_lock(&fanout_mutex); err = -EALREADY; if (po->fanout) goto out; if (type == PACKET_FANOUT_ROLLOVER || (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)) { err = -ENOMEM; rollover = kzalloc(sizeof(*rollover), GFP_KERNEL); if (!rollover) goto out; atomic_long_set(&rollover->num, 0); atomic_long_set(&rollover->num_huge, 0); atomic_long_set(&rollover->num_failed, 0); po->rollover = rollover; } if (type_flags & PACKET_FANOUT_FLAG_UNIQUEID) { if (id != 0) { err = -EINVAL; goto out; } if (!fanout_find_new_id(sk, &id)) { err = -ENOMEM; goto out; } /* ephemeral flag for the first socket in the group: drop it */ flags &= ~(PACKET_FANOUT_FLAG_UNIQUEID >> 8); } match = NULL; list_for_each_entry(f, &fanout_list, list) { if (f->id == id && read_pnet(&f->net) == sock_net(sk)) { match = f; break; } } err = 
-EINVAL; if (match && match->flags != flags) goto out; if (!match) { err = -ENOMEM; match = kzalloc(sizeof(*match), GFP_KERNEL); if (!match) goto out; write_pnet(&match->net, sock_net(sk)); match->id = id; match->type = type; match->flags = flags; INIT_LIST_HEAD(&match->list); spin_lock_init(&match->lock); refcount_set(&match->sk_ref, 0); fanout_init_data(match); match->prot_hook.type = po->prot_hook.type; match->prot_hook.dev = po->prot_hook.dev; match->prot_hook.func = packet_rcv_fanout; match->prot_hook.af_packet_priv = match; match->prot_hook.id_match = match_fanout_group; list_add(&match->list, &fanout_list); } err = -EINVAL; spin_lock(&po->bind_lock); if (po->running && match->type == type && match->prot_hook.type == po->prot_hook.type && match->prot_hook.dev == po->prot_hook.dev) { err = -ENOSPC; if (refcount_read(&match->sk_ref) < PACKET_FANOUT_MAX) { __dev_remove_pack(&po->prot_hook); po->fanout = match; refcount_set(&match->sk_ref, refcount_read(&match->sk_ref) + 1); __fanout_link(sk, po); err = 0; } } spin_unlock(&po->bind_lock); if (err && !refcount_read(&match->sk_ref)) { list_del(&match->list); kfree(match); } out: if (err && rollover) { kfree(rollover); po->rollover = NULL; } mutex_unlock(&fanout_mutex); return err; } /* If pkt_sk(sk)->fanout->sk_ref is zero, this function removes * pkt_sk(sk)->fanout from fanout_list and returns pkt_sk(sk)->fanout. * It is the responsibility of the caller to call fanout_release_data() and * free the returned packet_fanout (after synchronize_net()) */ static struct packet_fanout *fanout_release(struct sock *sk) { struct packet_sock *po = pkt_sk(sk); struct packet_fanout *f; mutex_lock(&fanout_mutex); f = po->fanout; if (f) { po->fanout = NULL; if (refcount_dec_and_test(&f->sk_ref)) list_del(&f->list); else f = NULL; if (po->rollover) kfree_rcu(po->rollover, rcu); } mutex_unlock(&fanout_mutex); return f; } static bool packet_extra_vlan_len_allowed(const struct net_device *dev, struct sk_buff *skb) { /* Earlier code assumed this would be a VLAN pkt, double-check * this now that we have the actual packet in hand. We can only * do this check on Ethernet devices. */ if (unlikely(dev->type != ARPHRD_ETHER)) return false; skb_reset_mac_header(skb); return likely(eth_hdr(skb)->h_proto == htons(ETH_P_8021Q)); } static const struct proto_ops packet_ops; static const struct proto_ops packet_ops_spkt; static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) { struct sock *sk; struct sockaddr_pkt *spkt; /* * When we registered the protocol we saved the socket in the data * field for just this event. */ sk = pt->af_packet_priv; /* * Yank back the headers [hope the device set this * right or kerboom...] * * Incoming packets have ll header pulled, * push it back. * * For outgoing ones skb->data == skb_mac_header(skb) * so that this procedure is noop. */ if (skb->pkt_type == PACKET_LOOPBACK) goto out; if (!net_eq(dev_net(dev), sock_net(sk))) goto out; skb = skb_share_check(skb, GFP_ATOMIC); if (skb == NULL) goto oom; /* drop any routing info */ skb_dst_drop(skb); /* drop conntrack reference */ nf_reset(skb); spkt = &PACKET_SKB_CB(skb)->sa.pkt; skb_push(skb, skb->data - skb_mac_header(skb)); /* * The SOCK_PACKET socket receives _all_ frames. */ spkt->spkt_family = dev->type; strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device)); spkt->spkt_protocol = skb->protocol; /* * Charge the memory to the socket. This is done specifically * to prevent sockets using all the memory up. 
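 * sock_queue_rcv_skb() below enforces that: once sk_rmem_alloc
 * would exceed sk_rcvbuf the skb is not queued and we simply drop
 * it on the out: path.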
*/ if (sock_queue_rcv_skb(sk, skb) == 0) return 0; out: kfree_skb(skb); oom: return 0; } /* * Output a raw packet to a device layer. This bypasses all the other * protocol layers and you must therefore supply it with a complete frame */ static int packet_sendmsg_spkt(struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; DECLARE_SOCKADDR(struct sockaddr_pkt *, saddr, msg->msg_name); struct sk_buff *skb = NULL; struct net_device *dev; struct sockcm_cookie sockc; __be16 proto = 0; int err; int extra_len = 0; /* * Get and verify the address. */ if (saddr) { if (msg->msg_namelen < sizeof(struct sockaddr)) return -EINVAL; if (msg->msg_namelen == sizeof(struct sockaddr_pkt)) proto = saddr->spkt_protocol; } else return -ENOTCONN; /* SOCK_PACKET must be sent giving an address */ /* * Find the device first to size check it */ saddr->spkt_device[sizeof(saddr->spkt_device) - 1] = 0; retry: rcu_read_lock(); dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device); err = -ENODEV; if (dev == NULL) goto out_unlock; err = -ENETDOWN; if (!(dev->flags & IFF_UP)) goto out_unlock; /* * You may not queue a frame bigger than the mtu. This is the lowest level * raw protocol and you must do your own fragmentation at this level. */ if (unlikely(sock_flag(sk, SOCK_NOFCS))) { if (!netif_supports_nofcs(dev)) { err = -EPROTONOSUPPORT; goto out_unlock; } extra_len = 4; /* We're doing our own CRC */ } err = -EMSGSIZE; if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len) goto out_unlock; if (!skb) { size_t reserved = LL_RESERVED_SPACE(dev); int tlen = dev->needed_tailroom; unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0; rcu_read_unlock(); skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL); if (skb == NULL) return -ENOBUFS; /* FIXME: Save some space for broken drivers that write a hard * header at transmission time by themselves. PPP is the notable * one here. This should really be fixed at the driver level. 
*/ skb_reserve(skb, reserved); skb_reset_network_header(skb); /* Try to align data part correctly */ if (hhlen) { skb->data -= hhlen; skb->tail -= hhlen; if (len < hhlen) skb_reset_network_header(skb); } err = memcpy_from_msg(skb_put(skb, len), msg, len); if (err) goto out_free; goto retry; } if (!dev_validate_header(dev, skb->data, len)) { err = -EINVAL; goto out_unlock; } if (len > (dev->mtu + dev->hard_header_len + extra_len) && !packet_extra_vlan_len_allowed(dev, skb)) { err = -EMSGSIZE; goto out_unlock; } sockc.tsflags = sk->sk_tsflags; if (msg->msg_controllen) { err = sock_cmsg_send(sk, msg, &sockc); if (unlikely(err)) goto out_unlock; } skb->protocol = proto; skb->dev = dev; skb->priority = sk->sk_priority; skb->mark = sk->sk_mark; sock_tx_timestamp(sk, sockc.tsflags, &skb_shinfo(skb)->tx_flags); if (unlikely(extra_len == 4)) skb->no_fcs = 1; skb_probe_transport_header(skb, 0); dev_queue_xmit(skb); rcu_read_unlock(); return len; out_unlock: rcu_read_unlock(); out_free: kfree_skb(skb); return err; } static unsigned int run_filter(struct sk_buff *skb, const struct sock *sk, unsigned int res) { struct sk_filter *filter; rcu_read_lock(); filter = rcu_dereference(sk->sk_filter); if (filter != NULL) res = bpf_prog_run_clear_cb(filter->prog, skb); rcu_read_unlock(); return res; } static int packet_rcv_vnet(struct msghdr *msg, const struct sk_buff *skb, size_t *len) { struct virtio_net_hdr vnet_hdr; if (*len < sizeof(vnet_hdr)) return -EINVAL; *len -= sizeof(vnet_hdr); if (virtio_net_hdr_from_skb(skb, &vnet_hdr, vio_le(), true)) return -EINVAL; return memcpy_to_msg(msg, (void *)&vnet_hdr, sizeof(vnet_hdr)); } /* * This function makes lazy skb cloning in hope that most of packets * are discarded by BPF. * * Note tricky part: we DO mangle shared skb! skb->data, skb->len * and skb->cb are mangled. It works because (and until) packets * falling here are owned by current CPU. Output packets are cloned * by dev_queue_xmit_nit(), input packets are processed by net_bh * sequencially, so that if we return skb to original state on exit, * we will not harm anyone. */ static int packet_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) { struct sock *sk; struct sockaddr_ll *sll; struct packet_sock *po; u8 *skb_head = skb->data; int skb_len = skb->len; unsigned int snaplen, res; bool is_drop_n_account = false; if (skb->pkt_type == PACKET_LOOPBACK) goto drop; sk = pt->af_packet_priv; po = pkt_sk(sk); if (!net_eq(dev_net(dev), sock_net(sk))) goto drop; skb->dev = dev; if (dev->header_ops) { /* The device has an explicit notion of ll header, * exported to higher levels. * * Otherwise, the device hides details of its frame * structure, so that corresponding packet head is * never delivered to user. 
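 *
 * In practice: for SOCK_RAW we push the link-layer header back so
 * the user sees the full frame, while for SOCK_DGRAM it stays
 * stripped; the link-layer address is still made available through
 * the sockaddr_ll filled in by dev_parse_header() below.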
*/ if (sk->sk_type != SOCK_DGRAM) skb_push(skb, skb->data - skb_mac_header(skb)); else if (skb->pkt_type == PACKET_OUTGOING) { /* Special case: outgoing packets have ll header at head */ skb_pull(skb, skb_network_offset(skb)); } } snaplen = skb->len; res = run_filter(skb, sk, snaplen); if (!res) goto drop_n_restore; if (snaplen > res) snaplen = res; if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) goto drop_n_acct; if (skb_shared(skb)) { struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); if (nskb == NULL) goto drop_n_acct; if (skb_head != skb->data) { skb->data = skb_head; skb->len = skb_len; } consume_skb(skb); skb = nskb; } sock_skb_cb_check_size(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8); sll = &PACKET_SKB_CB(skb)->sa.ll; sll->sll_hatype = dev->type; sll->sll_pkttype = skb->pkt_type; if (unlikely(po->origdev)) sll->sll_ifindex = orig_dev->ifindex; else sll->sll_ifindex = dev->ifindex; sll->sll_halen = dev_parse_header(skb, sll->sll_addr); /* sll->sll_family and sll->sll_protocol are set in packet_recvmsg(). * Use their space for storing the original skb length. */ PACKET_SKB_CB(skb)->sa.origlen = skb->len; if (pskb_trim(skb, snaplen)) goto drop_n_acct; skb_set_owner_r(skb, sk); skb->dev = NULL; skb_dst_drop(skb); /* drop conntrack reference */ nf_reset(skb); spin_lock(&sk->sk_receive_queue.lock); po->stats.stats1.tp_packets++; sock_skb_set_dropcount(sk, skb); __skb_queue_tail(&sk->sk_receive_queue, skb); spin_unlock(&sk->sk_receive_queue.lock); sk->sk_data_ready(sk); return 0; drop_n_acct: is_drop_n_account = true; spin_lock(&sk->sk_receive_queue.lock); po->stats.stats1.tp_drops++; atomic_inc(&sk->sk_drops); spin_unlock(&sk->sk_receive_queue.lock); drop_n_restore: if (skb_head != skb->data && skb_shared(skb)) { skb->data = skb_head; skb->len = skb_len; } drop: if (!is_drop_n_account) consume_skb(skb); else kfree_skb(skb); return 0; } static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) { struct sock *sk; struct packet_sock *po; struct sockaddr_ll *sll; union tpacket_uhdr h; u8 *skb_head = skb->data; int skb_len = skb->len; unsigned int snaplen, res; unsigned long status = TP_STATUS_USER; unsigned short macoff, netoff, hdrlen; struct sk_buff *copy_skb = NULL; struct timespec ts; __u32 ts_status; bool is_drop_n_account = false; bool do_vnet = false; /* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT. * We may add members to them until current aligned size without forcing * userspace to call getsockopt(..., PACKET_HDRLEN, ...). 
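 *
 * (For reference only, a user-space query of the header length
 * looks roughly like
 *
 *	int hdrlen = TPACKET_V3;
 *	socklen_t optlen = sizeof(hdrlen);
 *	getsockopt(fd, SOL_PACKET, PACKET_HDRLEN, &hdrlen, &optlen);
 *
 * with the requested version passed in and the header length
 * returned; see the PACKET_HDRLEN case in packet_getsockopt().)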
*/ BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h2)) != 32); BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h3)) != 48); if (skb->pkt_type == PACKET_LOOPBACK) goto drop; sk = pt->af_packet_priv; po = pkt_sk(sk); if (!net_eq(dev_net(dev), sock_net(sk))) goto drop; if (dev->header_ops) { if (sk->sk_type != SOCK_DGRAM) skb_push(skb, skb->data - skb_mac_header(skb)); else if (skb->pkt_type == PACKET_OUTGOING) { /* Special case: outgoing packets have ll header at head */ skb_pull(skb, skb_network_offset(skb)); } } snaplen = skb->len; res = run_filter(skb, sk, snaplen); if (!res) goto drop_n_restore; if (skb->ip_summed == CHECKSUM_PARTIAL) status |= TP_STATUS_CSUMNOTREADY; else if (skb->pkt_type != PACKET_OUTGOING && (skb->ip_summed == CHECKSUM_COMPLETE || skb_csum_unnecessary(skb))) status |= TP_STATUS_CSUM_VALID; if (snaplen > res) snaplen = res; if (sk->sk_type == SOCK_DGRAM) { macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 + po->tp_reserve; } else { unsigned int maclen = skb_network_offset(skb); netoff = TPACKET_ALIGN(po->tp_hdrlen + (maclen < 16 ? 16 : maclen)) + po->tp_reserve; if (po->has_vnet_hdr) { netoff += sizeof(struct virtio_net_hdr); do_vnet = true; } macoff = netoff - maclen; } if (po->tp_version <= TPACKET_V2) { if (macoff + snaplen > po->rx_ring.frame_size) { if (po->copy_thresh && atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) { if (skb_shared(skb)) { copy_skb = skb_clone(skb, GFP_ATOMIC); } else { copy_skb = skb_get(skb); skb_head = skb->data; } if (copy_skb) skb_set_owner_r(copy_skb, sk); } snaplen = po->rx_ring.frame_size - macoff; if ((int)snaplen < 0) { snaplen = 0; do_vnet = false; } } } else if (unlikely(macoff + snaplen > GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) { u32 nval; nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff; pr_err_once("tpacket_rcv: packet too big, clamped from %u to %u. macoff=%u\n", snaplen, nval, macoff); snaplen = nval; if (unlikely((int)snaplen < 0)) { snaplen = 0; macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len; do_vnet = false; } } spin_lock(&sk->sk_receive_queue.lock); h.raw = packet_current_rx_frame(po, skb, TP_STATUS_KERNEL, (macoff+snaplen)); if (!h.raw) goto drop_n_account; if (po->tp_version <= TPACKET_V2) { packet_increment_rx_head(po, &po->rx_ring); /* * LOSING will be reported till you read the stats, * because it's COR - Clear On Read. * Anyways, moving it for V1/V2 only as V3 doesn't need this * at packet level. 
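 *
 * "Reading the stats" means a PACKET_STATISTICS getsockopt(), which
 * copies and then zeroes po->stats under the receive queue lock;
 * see packet_getsockopt().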
*/ if (po->stats.stats1.tp_drops) status |= TP_STATUS_LOSING; } po->stats.stats1.tp_packets++; if (copy_skb) { status |= TP_STATUS_COPY; __skb_queue_tail(&sk->sk_receive_queue, copy_skb); } spin_unlock(&sk->sk_receive_queue.lock); if (do_vnet) { if (virtio_net_hdr_from_skb(skb, h.raw + macoff - sizeof(struct virtio_net_hdr), vio_le(), true)) { spin_lock(&sk->sk_receive_queue.lock); goto drop_n_account; } } skb_copy_bits(skb, 0, h.raw + macoff, snaplen); if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp))) getnstimeofday(&ts); status |= ts_status; switch (po->tp_version) { case TPACKET_V1: h.h1->tp_len = skb->len; h.h1->tp_snaplen = snaplen; h.h1->tp_mac = macoff; h.h1->tp_net = netoff; h.h1->tp_sec = ts.tv_sec; h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC; hdrlen = sizeof(*h.h1); break; case TPACKET_V2: h.h2->tp_len = skb->len; h.h2->tp_snaplen = snaplen; h.h2->tp_mac = macoff; h.h2->tp_net = netoff; h.h2->tp_sec = ts.tv_sec; h.h2->tp_nsec = ts.tv_nsec; if (skb_vlan_tag_present(skb)) { h.h2->tp_vlan_tci = skb_vlan_tag_get(skb); h.h2->tp_vlan_tpid = ntohs(skb->vlan_proto); status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID; } else { h.h2->tp_vlan_tci = 0; h.h2->tp_vlan_tpid = 0; } memset(h.h2->tp_padding, 0, sizeof(h.h2->tp_padding)); hdrlen = sizeof(*h.h2); break; case TPACKET_V3: /* tp_nxt_offset,vlan are already populated above. * So DONT clear those fields here */ h.h3->tp_status |= status; h.h3->tp_len = skb->len; h.h3->tp_snaplen = snaplen; h.h3->tp_mac = macoff; h.h3->tp_net = netoff; h.h3->tp_sec = ts.tv_sec; h.h3->tp_nsec = ts.tv_nsec; memset(h.h3->tp_padding, 0, sizeof(h.h3->tp_padding)); hdrlen = sizeof(*h.h3); break; default: BUG(); } sll = h.raw + TPACKET_ALIGN(hdrlen); sll->sll_halen = dev_parse_header(skb, sll->sll_addr); sll->sll_family = AF_PACKET; sll->sll_hatype = dev->type; sll->sll_protocol = skb->protocol; sll->sll_pkttype = skb->pkt_type; if (unlikely(po->origdev)) sll->sll_ifindex = orig_dev->ifindex; else sll->sll_ifindex = dev->ifindex; smp_mb(); #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1 if (po->tp_version <= TPACKET_V2) { u8 *start, *end; end = (u8 *) PAGE_ALIGN((unsigned long) h.raw + macoff + snaplen); for (start = h.raw; start < end; start += PAGE_SIZE) flush_dcache_page(pgv_to_page(start)); } smp_wmb(); #endif if (po->tp_version <= TPACKET_V2) { __packet_set_status(po, h.raw, status); sk->sk_data_ready(sk); } else { prb_clear_blk_fill_status(&po->rx_ring); } drop_n_restore: if (skb_head != skb->data && skb_shared(skb)) { skb->data = skb_head; skb->len = skb_len; } drop: if (!is_drop_n_account) consume_skb(skb); else kfree_skb(skb); return 0; drop_n_account: is_drop_n_account = true; po->stats.stats1.tp_drops++; spin_unlock(&sk->sk_receive_queue.lock); sk->sk_data_ready(sk); kfree_skb(copy_skb); goto drop_n_restore; } static void tpacket_destruct_skb(struct sk_buff *skb) { struct packet_sock *po = pkt_sk(skb->sk); if (likely(po->tx_ring.pg_vec)) { void *ph; __u32 ts; ph = skb_shinfo(skb)->destructor_arg; packet_dec_pending(&po->tx_ring); ts = __packet_set_timestamp(po, ph, skb); __packet_set_status(po, ph, TP_STATUS_AVAILABLE | ts); } sock_wfree(skb); } static void tpacket_set_protocol(const struct net_device *dev, struct sk_buff *skb) { if (dev->type == ARPHRD_ETHER) { skb_reset_mac_header(skb); skb->protocol = eth_hdr(skb)->h_proto; } } static int __packet_snd_vnet_parse(struct virtio_net_hdr *vnet_hdr, size_t len) { if ((vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && (__virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) + 
__virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2 > __virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len))) vnet_hdr->hdr_len = __cpu_to_virtio16(vio_le(), __virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) + __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2); if (__virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len) > len) return -EINVAL; return 0; } static int packet_snd_vnet_parse(struct msghdr *msg, size_t *len, struct virtio_net_hdr *vnet_hdr) { if (*len < sizeof(*vnet_hdr)) return -EINVAL; *len -= sizeof(*vnet_hdr); if (!copy_from_iter_full(vnet_hdr, sizeof(*vnet_hdr), &msg->msg_iter)) return -EFAULT; return __packet_snd_vnet_parse(vnet_hdr, *len); } static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb, void *frame, struct net_device *dev, void *data, int tp_len, __be16 proto, unsigned char *addr, int hlen, int copylen, const struct sockcm_cookie *sockc) { union tpacket_uhdr ph; int to_write, offset, len, nr_frags, len_max; struct socket *sock = po->sk.sk_socket; struct page *page; int err; ph.raw = frame; skb->protocol = proto; skb->dev = dev; skb->priority = po->sk.sk_priority; skb->mark = po->sk.sk_mark; sock_tx_timestamp(&po->sk, sockc->tsflags, &skb_shinfo(skb)->tx_flags); skb_shinfo(skb)->destructor_arg = ph.raw; skb_reserve(skb, hlen); skb_reset_network_header(skb); to_write = tp_len; if (sock->type == SOCK_DGRAM) { err = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, tp_len); if (unlikely(err < 0)) return -EINVAL; } else if (copylen) { int hdrlen = min_t(int, copylen, tp_len); skb_push(skb, dev->hard_header_len); skb_put(skb, copylen - dev->hard_header_len); err = skb_store_bits(skb, 0, data, hdrlen); if (unlikely(err)) return err; if (!dev_validate_header(dev, skb->data, hdrlen)) return -EINVAL; if (!skb->protocol) tpacket_set_protocol(dev, skb); data += hdrlen; to_write -= hdrlen; } offset = offset_in_page(data); len_max = PAGE_SIZE - offset; len = ((to_write > len_max) ? len_max : to_write); skb->data_len = to_write; skb->len += to_write; skb->truesize += to_write; refcount_add(to_write, &po->sk.sk_wmem_alloc); while (likely(to_write)) { nr_frags = skb_shinfo(skb)->nr_frags; if (unlikely(nr_frags >= MAX_SKB_FRAGS)) { pr_err("Packet exceed the number of skb frags(%lu)\n", MAX_SKB_FRAGS); return -EFAULT; } page = pgv_to_page(data); data += len; flush_dcache_page(page); get_page(page); skb_fill_page_desc(skb, nr_frags, page, offset, len); to_write -= len; offset = 0; len_max = PAGE_SIZE; len = ((to_write > len_max) ? 
len_max : to_write); } skb_probe_transport_header(skb, 0); return tp_len; } static int tpacket_parse_header(struct packet_sock *po, void *frame, int size_max, void **data) { union tpacket_uhdr ph; int tp_len, off; ph.raw = frame; switch (po->tp_version) { case TPACKET_V3: if (ph.h3->tp_next_offset != 0) { pr_warn_once("variable sized slot not supported"); return -EINVAL; } tp_len = ph.h3->tp_len; break; case TPACKET_V2: tp_len = ph.h2->tp_len; break; default: tp_len = ph.h1->tp_len; break; } if (unlikely(tp_len > size_max)) { pr_err("packet size is too long (%d > %d)\n", tp_len, size_max); return -EMSGSIZE; } if (unlikely(po->tp_tx_has_off)) { int off_min, off_max; off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll); off_max = po->tx_ring.frame_size - tp_len; if (po->sk.sk_type == SOCK_DGRAM) { switch (po->tp_version) { case TPACKET_V3: off = ph.h3->tp_net; break; case TPACKET_V2: off = ph.h2->tp_net; break; default: off = ph.h1->tp_net; break; } } else { switch (po->tp_version) { case TPACKET_V3: off = ph.h3->tp_mac; break; case TPACKET_V2: off = ph.h2->tp_mac; break; default: off = ph.h1->tp_mac; break; } } if (unlikely((off < off_min) || (off_max < off))) return -EINVAL; } else { off = po->tp_hdrlen - sizeof(struct sockaddr_ll); } *data = frame + off; return tp_len; } static int tpacket_snd(struct packet_sock *po, struct msghdr *msg) { struct sk_buff *skb; struct net_device *dev; struct virtio_net_hdr *vnet_hdr = NULL; struct sockcm_cookie sockc; __be16 proto; int err, reserve = 0; void *ph; DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name); bool need_wait = !(msg->msg_flags & MSG_DONTWAIT); int tp_len, size_max; unsigned char *addr; void *data; int len_sum = 0; int status = TP_STATUS_AVAILABLE; int hlen, tlen, copylen = 0; mutex_lock(&po->pg_vec_lock); if (likely(saddr == NULL)) { dev = packet_cached_dev_get(po); proto = po->num; addr = NULL; } else { err = -EINVAL; if (msg->msg_namelen < sizeof(struct sockaddr_ll)) goto out; if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr))) goto out; proto = saddr->sll_protocol; addr = saddr->sll_addr; dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex); } err = -ENXIO; if (unlikely(dev == NULL)) goto out; err = -ENETDOWN; if (unlikely(!(dev->flags & IFF_UP))) goto out_put; sockc.tsflags = po->sk.sk_tsflags; if (msg->msg_controllen) { err = sock_cmsg_send(&po->sk, msg, &sockc); if (unlikely(err)) goto out_put; } if (po->sk.sk_socket->type == SOCK_RAW) reserve = dev->hard_header_len; size_max = po->tx_ring.frame_size - (po->tp_hdrlen - sizeof(struct sockaddr_ll)); if ((size_max > dev->mtu + reserve + VLAN_HLEN) && !po->has_vnet_hdr) size_max = dev->mtu + reserve + VLAN_HLEN; do { ph = packet_current_frame(po, &po->tx_ring, TP_STATUS_SEND_REQUEST); if (unlikely(ph == NULL)) { if (need_wait && need_resched()) schedule(); continue; } skb = NULL; tp_len = tpacket_parse_header(po, ph, size_max, &data); if (tp_len < 0) goto tpacket_error; status = TP_STATUS_SEND_REQUEST; hlen = LL_RESERVED_SPACE(dev); tlen = dev->needed_tailroom; if (po->has_vnet_hdr) { vnet_hdr = data; data += sizeof(*vnet_hdr); tp_len -= sizeof(*vnet_hdr); if (tp_len < 0 || __packet_snd_vnet_parse(vnet_hdr, tp_len)) { tp_len = -EINVAL; goto tpacket_error; } copylen = __virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len); } copylen = max_t(int, copylen, dev->hard_header_len); skb = sock_alloc_send_skb(&po->sk, hlen + tlen + sizeof(struct sockaddr_ll) + (copylen - dev->hard_header_len), !need_wait, &err); if (unlikely(skb == NULL)) { /* we 
assume the socket was initially writeable ... */ if (likely(len_sum > 0)) err = len_sum; goto out_status; } tp_len = tpacket_fill_skb(po, skb, ph, dev, data, tp_len, proto, addr, hlen, copylen, &sockc); if (likely(tp_len >= 0) && tp_len > dev->mtu + reserve && !po->has_vnet_hdr && !packet_extra_vlan_len_allowed(dev, skb)) tp_len = -EMSGSIZE; if (unlikely(tp_len < 0)) { tpacket_error: if (po->tp_loss) { __packet_set_status(po, ph, TP_STATUS_AVAILABLE); packet_increment_head(&po->tx_ring); kfree_skb(skb); continue; } else { status = TP_STATUS_WRONG_FORMAT; err = tp_len; goto out_status; } } if (po->has_vnet_hdr && virtio_net_hdr_to_skb(skb, vnet_hdr, vio_le())) { tp_len = -EINVAL; goto tpacket_error; } skb->destructor = tpacket_destruct_skb; __packet_set_status(po, ph, TP_STATUS_SENDING); packet_inc_pending(&po->tx_ring); status = TP_STATUS_SEND_REQUEST; err = po->xmit(skb); if (unlikely(err > 0)) { err = net_xmit_errno(err); if (err && __packet_get_status(po, ph) == TP_STATUS_AVAILABLE) { /* skb was destructed already */ skb = NULL; goto out_status; } /* * skb was dropped but not destructed yet; * let's treat it like congestion or err < 0 */ err = 0; } packet_increment_head(&po->tx_ring); len_sum += tp_len; } while (likely((ph != NULL) || /* Note: packet_read_pending() might be slow if we have * to call it as it's per_cpu variable, but in fast-path * we already short-circuit the loop with the first * condition, and luckily don't have to go that path * anyway. */ (need_wait && packet_read_pending(&po->tx_ring)))); err = len_sum; goto out_put; out_status: __packet_set_status(po, ph, status); kfree_skb(skb); out_put: dev_put(dev); out: mutex_unlock(&po->pg_vec_lock); return err; } static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad, size_t reserve, size_t len, size_t linear, int noblock, int *err) { struct sk_buff *skb; /* Under a page? Don't bother with paged skb. */ if (prepad + len < PAGE_SIZE || !linear) linear = len; skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock, err, 0); if (!skb) return NULL; skb_reserve(skb, reserve); skb_put(skb, linear); skb->data_len = len - linear; skb->len += len - linear; return skb; } static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name); struct sk_buff *skb; struct net_device *dev; __be16 proto; unsigned char *addr; int err, reserve = 0; struct sockcm_cookie sockc; struct virtio_net_hdr vnet_hdr = { 0 }; int offset = 0; struct packet_sock *po = pkt_sk(sk); int hlen, tlen, linear; int extra_len = 0; /* * Get and verify the address. 
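 *
 * When an address is supplied it is a struct sockaddr_ll; a typical
 * caller fills it in roughly as follows (illustration only, names
 * are placeholders):
 *
 *	struct sockaddr_ll sll = {
 *		.sll_family   = AF_PACKET,
 *		.sll_protocol = htons(ETH_P_IP),
 *		.sll_ifindex  = ifindex,
 *		.sll_halen    = ETH_ALEN,
 *	};
 *	memcpy(sll.sll_addr, dest_mac, ETH_ALEN);
 *
 * msg_namelen must be at least sizeof(struct sockaddr_ll) and also
 * cover sll_halen + offsetof(struct sockaddr_ll, sll_addr), as
 * checked below.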
*/ if (likely(saddr == NULL)) { dev = packet_cached_dev_get(po); proto = po->num; addr = NULL; } else { err = -EINVAL; if (msg->msg_namelen < sizeof(struct sockaddr_ll)) goto out; if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr))) goto out; proto = saddr->sll_protocol; addr = saddr->sll_addr; dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex); } err = -ENXIO; if (unlikely(dev == NULL)) goto out_unlock; err = -ENETDOWN; if (unlikely(!(dev->flags & IFF_UP))) goto out_unlock; sockc.tsflags = sk->sk_tsflags; sockc.mark = sk->sk_mark; if (msg->msg_controllen) { err = sock_cmsg_send(sk, msg, &sockc); if (unlikely(err)) goto out_unlock; } if (sock->type == SOCK_RAW) reserve = dev->hard_header_len; if (po->has_vnet_hdr) { err = packet_snd_vnet_parse(msg, &len, &vnet_hdr); if (err) goto out_unlock; } if (unlikely(sock_flag(sk, SOCK_NOFCS))) { if (!netif_supports_nofcs(dev)) { err = -EPROTONOSUPPORT; goto out_unlock; } extra_len = 4; /* We're doing our own CRC */ } err = -EMSGSIZE; if (!vnet_hdr.gso_type && (len > dev->mtu + reserve + VLAN_HLEN + extra_len)) goto out_unlock; err = -ENOBUFS; hlen = LL_RESERVED_SPACE(dev); tlen = dev->needed_tailroom; linear = __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len); linear = max(linear, min_t(int, len, dev->hard_header_len)); skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, linear, msg->msg_flags & MSG_DONTWAIT, &err); if (skb == NULL) goto out_unlock; skb_set_network_header(skb, reserve); err = -EINVAL; if (sock->type == SOCK_DGRAM) { offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len); if (unlikely(offset < 0)) goto out_free; } /* Returns -EFAULT on error */ err = skb_copy_datagram_from_iter(skb, offset, &msg->msg_iter, len); if (err) goto out_free; if (sock->type == SOCK_RAW && !dev_validate_header(dev, skb->data, len)) { err = -EINVAL; goto out_free; } sock_tx_timestamp(sk, sockc.tsflags, &skb_shinfo(skb)->tx_flags); if (!vnet_hdr.gso_type && (len > dev->mtu + reserve + extra_len) && !packet_extra_vlan_len_allowed(dev, skb)) { err = -EMSGSIZE; goto out_free; } skb->protocol = proto; skb->dev = dev; skb->priority = sk->sk_priority; skb->mark = sockc.mark; if (po->has_vnet_hdr) { err = virtio_net_hdr_to_skb(skb, &vnet_hdr, vio_le()); if (err) goto out_free; len += sizeof(vnet_hdr); } skb_probe_transport_header(skb, reserve); if (unlikely(extra_len == 4)) skb->no_fcs = 1; err = po->xmit(skb); if (err > 0 && (err = net_xmit_errno(err)) != 0) goto out_unlock; dev_put(dev); return len; out_free: kfree_skb(skb); out_unlock: if (dev) dev_put(dev); out: return err; } static int packet_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; struct packet_sock *po = pkt_sk(sk); if (po->tx_ring.pg_vec) return tpacket_snd(po, msg); else return packet_snd(sock, msg, len); } /* * Close a PACKET socket. This is fairly simple. We immediately go * to 'closed' state and remove our protocol entry in the device list. 
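 *
 * Teardown order below: unhook the socket from the sklist,
 * unregister the protocol hook under bind_lock, free any mapped
 * rings, then drop the fanout group; the fanout struct itself is
 * only freed after synchronize_net() so concurrent receivers
 * cannot still be using it.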
*/ static int packet_release(struct socket *sock) { struct sock *sk = sock->sk; struct packet_sock *po; struct packet_fanout *f; struct net *net; union tpacket_req_u req_u; if (!sk) return 0; net = sock_net(sk); po = pkt_sk(sk); mutex_lock(&net->packet.sklist_lock); sk_del_node_init_rcu(sk); mutex_unlock(&net->packet.sklist_lock); preempt_disable(); sock_prot_inuse_add(net, sk->sk_prot, -1); preempt_enable(); spin_lock(&po->bind_lock); unregister_prot_hook(sk, false); packet_cached_dev_reset(po); if (po->prot_hook.dev) { dev_put(po->prot_hook.dev); po->prot_hook.dev = NULL; } spin_unlock(&po->bind_lock); packet_flush_mclist(sk); if (po->rx_ring.pg_vec) { memset(&req_u, 0, sizeof(req_u)); packet_set_ring(sk, &req_u, 1, 0); } if (po->tx_ring.pg_vec) { memset(&req_u, 0, sizeof(req_u)); packet_set_ring(sk, &req_u, 1, 1); } f = fanout_release(sk); synchronize_net(); if (f) { fanout_release_data(f); kfree(f); } /* * Now the socket is dead. No more input will appear. */ sock_orphan(sk); sock->sk = NULL; /* Purge queues */ skb_queue_purge(&sk->sk_receive_queue); packet_free_pending(po); sk_refcnt_debug_release(sk); sock_put(sk); return 0; } /* * Attach a packet hook. */ static int packet_do_bind(struct sock *sk, const char *name, int ifindex, __be16 proto) { struct packet_sock *po = pkt_sk(sk); struct net_device *dev_curr; __be16 proto_curr; bool need_rehook; struct net_device *dev = NULL; int ret = 0; bool unlisted = false; if (po->fanout) return -EINVAL; lock_sock(sk); spin_lock(&po->bind_lock); rcu_read_lock(); if (name) { dev = dev_get_by_name_rcu(sock_net(sk), name); if (!dev) { ret = -ENODEV; goto out_unlock; } } else if (ifindex) { dev = dev_get_by_index_rcu(sock_net(sk), ifindex); if (!dev) { ret = -ENODEV; goto out_unlock; } } if (dev) dev_hold(dev); proto_curr = po->prot_hook.type; dev_curr = po->prot_hook.dev; need_rehook = proto_curr != proto || dev_curr != dev; if (need_rehook) { if (po->running) { rcu_read_unlock(); __unregister_prot_hook(sk, true); rcu_read_lock(); dev_curr = po->prot_hook.dev; if (dev) unlisted = !dev_get_by_index_rcu(sock_net(sk), dev->ifindex); } po->num = proto; po->prot_hook.type = proto; if (unlikely(unlisted)) { dev_put(dev); po->prot_hook.dev = NULL; po->ifindex = -1; packet_cached_dev_reset(po); } else { po->prot_hook.dev = dev; po->ifindex = dev ? dev->ifindex : 0; packet_cached_dev_assign(po, dev); } } if (dev_curr) dev_put(dev_curr); if (proto == 0 || !need_rehook) goto out_unlock; if (!unlisted && (!dev || (dev->flags & IFF_UP))) { register_prot_hook(sk); } else { sk->sk_err = ENETDOWN; if (!sock_flag(sk, SOCK_DEAD)) sk->sk_error_report(sk); } out_unlock: rcu_read_unlock(); spin_unlock(&po->bind_lock); release_sock(sk); return ret; } /* * Bind a packet socket to a device */ static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr, int addr_len) { struct sock *sk = sock->sk; char name[sizeof(uaddr->sa_data) + 1]; /* * Check legality */ if (addr_len != sizeof(struct sockaddr)) return -EINVAL; /* uaddr->sa_data comes from the userspace, it's not guaranteed to be * zero-terminated. 
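 * Copying it into a local buffer that we NUL-terminate ourselves
 * keeps the name lookup done later (dev_get_by_name_rcu() via
 * packet_do_bind()) from reading past the supplied bytes.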
*/ memcpy(name, uaddr->sa_data, sizeof(uaddr->sa_data)); name[sizeof(uaddr->sa_data)] = 0; return packet_do_bind(sk, name, 0, pkt_sk(sk)->num); } static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) { struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr; struct sock *sk = sock->sk; /* * Check legality */ if (addr_len < sizeof(struct sockaddr_ll)) return -EINVAL; if (sll->sll_family != AF_PACKET) return -EINVAL; return packet_do_bind(sk, NULL, sll->sll_ifindex, sll->sll_protocol ? : pkt_sk(sk)->num); } static struct proto packet_proto = { .name = "PACKET", .owner = THIS_MODULE, .obj_size = sizeof(struct packet_sock), }; /* * Create a packet of type SOCK_PACKET. */ static int packet_create(struct net *net, struct socket *sock, int protocol, int kern) { struct sock *sk; struct packet_sock *po; __be16 proto = (__force __be16)protocol; /* weird, but documented */ int err; if (!ns_capable(net->user_ns, CAP_NET_RAW)) return -EPERM; if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW && sock->type != SOCK_PACKET) return -ESOCKTNOSUPPORT; sock->state = SS_UNCONNECTED; err = -ENOBUFS; sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto, kern); if (sk == NULL) goto out; sock->ops = &packet_ops; if (sock->type == SOCK_PACKET) sock->ops = &packet_ops_spkt; sock_init_data(sock, sk); po = pkt_sk(sk); sk->sk_family = PF_PACKET; po->num = proto; po->xmit = dev_queue_xmit; err = packet_alloc_pending(po); if (err) goto out2; packet_cached_dev_reset(po); sk->sk_destruct = packet_sock_destruct; sk_refcnt_debug_inc(sk); /* * Attach a protocol block */ spin_lock_init(&po->bind_lock); mutex_init(&po->pg_vec_lock); po->rollover = NULL; po->prot_hook.func = packet_rcv; if (sock->type == SOCK_PACKET) po->prot_hook.func = packet_rcv_spkt; po->prot_hook.af_packet_priv = sk; if (proto) { po->prot_hook.type = proto; register_prot_hook(sk); } mutex_lock(&net->packet.sklist_lock); sk_add_node_rcu(sk, &net->packet.sklist); mutex_unlock(&net->packet.sklist_lock); preempt_disable(); sock_prot_inuse_add(net, &packet_proto, 1); preempt_enable(); return 0; out2: sk_free(sk); out: return err; } /* * Pull a packet from our receive queue and hand it to the user. * If necessary we block. */ static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, int flags) { struct sock *sk = sock->sk; struct sk_buff *skb; int copied, err; int vnet_hdr_len = 0; unsigned int origlen = 0; err = -EINVAL; if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE)) goto out; #if 0 /* What error should we return now? EUNATTACH? */ if (pkt_sk(sk)->ifindex < 0) return -ENODEV; #endif if (flags & MSG_ERRQUEUE) { err = sock_recv_errqueue(sk, msg, len, SOL_PACKET, PACKET_TX_TIMESTAMP); goto out; } /* * Call the generic datagram receiver. This handles all sorts * of horrible races and re-entrancy so we can forget about it * in the protocol layers. * * Now it will return ENETDOWN, if device have just gone down, * but then it will block. */ skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err); /* * An error occurred so return it. Because skb_recv_datagram() * handles the blocking we don't see and worry about blocking * retries. */ if (skb == NULL) goto out; if (pkt_sk(sk)->pressure) packet_rcv_has_room(pkt_sk(sk), NULL); if (pkt_sk(sk)->has_vnet_hdr) { err = packet_rcv_vnet(msg, skb, &len); if (err) goto out_free; vnet_hdr_len = sizeof(struct virtio_net_hdr); } /* You lose any data beyond the buffer you gave. 
If it worries * a user program they can ask the device for its MTU * anyway. */ copied = skb->len; if (copied > len) { copied = len; msg->msg_flags |= MSG_TRUNC; } err = skb_copy_datagram_msg(skb, 0, msg, copied); if (err) goto out_free; if (sock->type != SOCK_PACKET) { struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll; /* Original length was stored in sockaddr_ll fields */ origlen = PACKET_SKB_CB(skb)->sa.origlen; sll->sll_family = AF_PACKET; sll->sll_protocol = skb->protocol; } sock_recv_ts_and_drops(msg, sk, skb); if (msg->msg_name) { /* If the address length field is there to be filled * in, we fill it in now. */ if (sock->type == SOCK_PACKET) { __sockaddr_check_size(sizeof(struct sockaddr_pkt)); msg->msg_namelen = sizeof(struct sockaddr_pkt); } else { struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll; msg->msg_namelen = sll->sll_halen + offsetof(struct sockaddr_ll, sll_addr); } memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa, msg->msg_namelen); } if (pkt_sk(sk)->auxdata) { struct tpacket_auxdata aux; aux.tp_status = TP_STATUS_USER; if (skb->ip_summed == CHECKSUM_PARTIAL) aux.tp_status |= TP_STATUS_CSUMNOTREADY; else if (skb->pkt_type != PACKET_OUTGOING && (skb->ip_summed == CHECKSUM_COMPLETE || skb_csum_unnecessary(skb))) aux.tp_status |= TP_STATUS_CSUM_VALID; aux.tp_len = origlen; aux.tp_snaplen = skb->len; aux.tp_mac = 0; aux.tp_net = skb_network_offset(skb); if (skb_vlan_tag_present(skb)) { aux.tp_vlan_tci = skb_vlan_tag_get(skb); aux.tp_vlan_tpid = ntohs(skb->vlan_proto); aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID; } else { aux.tp_vlan_tci = 0; aux.tp_vlan_tpid = 0; } put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux); } /* * Free or return the buffer as appropriate. Again this * hides all the races and re-entrancy issues from us. */ err = vnet_hdr_len + ((flags&MSG_TRUNC) ? 
skb->len : copied); out_free: skb_free_datagram(sk, skb); out: return err; } static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer) { struct net_device *dev; struct sock *sk = sock->sk; if (peer) return -EOPNOTSUPP; uaddr->sa_family = AF_PACKET; memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data)); rcu_read_lock(); dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex); if (dev) strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data)); rcu_read_unlock(); *uaddr_len = sizeof(*uaddr); return 0; } static int packet_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer) { struct net_device *dev; struct sock *sk = sock->sk; struct packet_sock *po = pkt_sk(sk); DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr); if (peer) return -EOPNOTSUPP; sll->sll_family = AF_PACKET; sll->sll_ifindex = po->ifindex; sll->sll_protocol = po->num; sll->sll_pkttype = 0; rcu_read_lock(); dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex); if (dev) { sll->sll_hatype = dev->type; sll->sll_halen = dev->addr_len; memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len); } else { sll->sll_hatype = 0; /* Bad: we have no ARPHRD_UNSPEC */ sll->sll_halen = 0; } rcu_read_unlock(); *uaddr_len = offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen; return 0; } static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i, int what) { switch (i->type) { case PACKET_MR_MULTICAST: if (i->alen != dev->addr_len) return -EINVAL; if (what > 0) return dev_mc_add(dev, i->addr); else return dev_mc_del(dev, i->addr); break; case PACKET_MR_PROMISC: return dev_set_promiscuity(dev, what); case PACKET_MR_ALLMULTI: return dev_set_allmulti(dev, what); case PACKET_MR_UNICAST: if (i->alen != dev->addr_len) return -EINVAL; if (what > 0) return dev_uc_add(dev, i->addr); else return dev_uc_del(dev, i->addr); break; default: break; } return 0; } static void packet_dev_mclist_delete(struct net_device *dev, struct packet_mclist **mlp) { struct packet_mclist *ml; while ((ml = *mlp) != NULL) { if (ml->ifindex == dev->ifindex) { packet_dev_mc(dev, ml, -1); *mlp = ml->next; kfree(ml); } else mlp = &ml->next; } } static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq) { struct packet_sock *po = pkt_sk(sk); struct packet_mclist *ml, *i; struct net_device *dev; int err; rtnl_lock(); err = -ENODEV; dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex); if (!dev) goto done; err = -EINVAL; if (mreq->mr_alen > dev->addr_len) goto done; err = -ENOBUFS; i = kmalloc(sizeof(*i), GFP_KERNEL); if (i == NULL) goto done; err = 0; for (ml = po->mclist; ml; ml = ml->next) { if (ml->ifindex == mreq->mr_ifindex && ml->type == mreq->mr_type && ml->alen == mreq->mr_alen && memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) { ml->count++; /* Free the new element ... 
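 * because an identical membership already exists in po->mclist and
 * its reference count (ml->count) was bumped just above instead.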
*/ kfree(i); goto done; } } i->type = mreq->mr_type; i->ifindex = mreq->mr_ifindex; i->alen = mreq->mr_alen; memcpy(i->addr, mreq->mr_address, i->alen); memset(i->addr + i->alen, 0, sizeof(i->addr) - i->alen); i->count = 1; i->next = po->mclist; po->mclist = i; err = packet_dev_mc(dev, i, 1); if (err) { po->mclist = i->next; kfree(i); } done: rtnl_unlock(); return err; } static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq) { struct packet_mclist *ml, **mlp; rtnl_lock(); for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) { if (ml->ifindex == mreq->mr_ifindex && ml->type == mreq->mr_type && ml->alen == mreq->mr_alen && memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) { if (--ml->count == 0) { struct net_device *dev; *mlp = ml->next; dev = __dev_get_by_index(sock_net(sk), ml->ifindex); if (dev) packet_dev_mc(dev, ml, -1); kfree(ml); } break; } } rtnl_unlock(); return 0; } static void packet_flush_mclist(struct sock *sk) { struct packet_sock *po = pkt_sk(sk); struct packet_mclist *ml; if (!po->mclist) return; rtnl_lock(); while ((ml = po->mclist) != NULL) { struct net_device *dev; po->mclist = ml->next; dev = __dev_get_by_index(sock_net(sk), ml->ifindex); if (dev != NULL) packet_dev_mc(dev, ml, -1); kfree(ml); } rtnl_unlock(); } static int packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) { struct sock *sk = sock->sk; struct packet_sock *po = pkt_sk(sk); int ret; if (level != SOL_PACKET) return -ENOPROTOOPT; switch (optname) { case PACKET_ADD_MEMBERSHIP: case PACKET_DROP_MEMBERSHIP: { struct packet_mreq_max mreq; int len = optlen; memset(&mreq, 0, sizeof(mreq)); if (len < sizeof(struct packet_mreq)) return -EINVAL; if (len > sizeof(mreq)) len = sizeof(mreq); if (copy_from_user(&mreq, optval, len)) return -EFAULT; if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address))) return -EINVAL; if (optname == PACKET_ADD_MEMBERSHIP) ret = packet_mc_add(sk, &mreq); else ret = packet_mc_drop(sk, &mreq); return ret; } case PACKET_RX_RING: case PACKET_TX_RING: { union tpacket_req_u req_u; int len; switch (po->tp_version) { case TPACKET_V1: case TPACKET_V2: len = sizeof(req_u.req); break; case TPACKET_V3: default: len = sizeof(req_u.req3); break; } if (optlen < len) return -EINVAL; if (copy_from_user(&req_u.req, optval, len)) return -EFAULT; return packet_set_ring(sk, &req_u, 0, optname == PACKET_TX_RING); } case PACKET_COPY_THRESH: { int val; if (optlen != sizeof(val)) return -EINVAL; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; pkt_sk(sk)->copy_thresh = val; return 0; } case PACKET_VERSION: { int val; if (optlen != sizeof(val)) return -EINVAL; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; switch (val) { case TPACKET_V1: case TPACKET_V2: case TPACKET_V3: break; default: return -EINVAL; } lock_sock(sk); if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { ret = -EBUSY; } else { po->tp_version = val; ret = 0; } release_sock(sk); return ret; } case PACKET_RESERVE: { unsigned int val; if (optlen != sizeof(val)) return -EINVAL; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; if (val > INT_MAX) return -EINVAL; lock_sock(sk); if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { ret = -EBUSY; } else { po->tp_reserve = val; ret = 0; } release_sock(sk); return ret; } case PACKET_LOSS: { unsigned int val; if (optlen != sizeof(val)) return -EINVAL; if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) return -EBUSY; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; 
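/* tp_loss selects lossy TX-ring behaviour: a frame that fails
 * tpacket_fill_skb() is handed straight back to user space as
 * TP_STATUS_AVAILABLE instead of aborting the whole send loop;
 * see the tpacket_error path in tpacket_snd().
 */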
po->tp_loss = !!val; return 0; } case PACKET_AUXDATA: { int val; if (optlen < sizeof(val)) return -EINVAL; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; po->auxdata = !!val; return 0; } case PACKET_ORIGDEV: { int val; if (optlen < sizeof(val)) return -EINVAL; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; po->origdev = !!val; return 0; } case PACKET_VNET_HDR: { int val; if (sock->type != SOCK_RAW) return -EINVAL; if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) return -EBUSY; if (optlen < sizeof(val)) return -EINVAL; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; po->has_vnet_hdr = !!val; return 0; } case PACKET_TIMESTAMP: { int val; if (optlen != sizeof(val)) return -EINVAL; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; po->tp_tstamp = val; return 0; } case PACKET_FANOUT: { int val; if (optlen != sizeof(val)) return -EINVAL; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; return fanout_add(sk, val & 0xffff, val >> 16); } case PACKET_FANOUT_DATA: { if (!po->fanout) return -EINVAL; return fanout_set_data(po, optval, optlen); } case PACKET_TX_HAS_OFF: { unsigned int val; if (optlen != sizeof(val)) return -EINVAL; if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) return -EBUSY; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; po->tp_tx_has_off = !!val; return 0; } case PACKET_QDISC_BYPASS: { int val; if (optlen != sizeof(val)) return -EINVAL; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; po->xmit = val ? packet_direct_xmit : dev_queue_xmit; return 0; } default: return -ENOPROTOOPT; } } static int packet_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { int len; int val, lv = sizeof(val); struct sock *sk = sock->sk; struct packet_sock *po = pkt_sk(sk); void *data = &val; union tpacket_stats_u st; struct tpacket_rollover_stats rstats; if (level != SOL_PACKET) return -ENOPROTOOPT; if (get_user(len, optlen)) return -EFAULT; if (len < 0) return -EINVAL; switch (optname) { case PACKET_STATISTICS: spin_lock_bh(&sk->sk_receive_queue.lock); memcpy(&st, &po->stats, sizeof(st)); memset(&po->stats, 0, sizeof(po->stats)); spin_unlock_bh(&sk->sk_receive_queue.lock); if (po->tp_version == TPACKET_V3) { lv = sizeof(struct tpacket_stats_v3); st.stats3.tp_packets += st.stats3.tp_drops; data = &st.stats3; } else { lv = sizeof(struct tpacket_stats); st.stats1.tp_packets += st.stats1.tp_drops; data = &st.stats1; } break; case PACKET_AUXDATA: val = po->auxdata; break; case PACKET_ORIGDEV: val = po->origdev; break; case PACKET_VNET_HDR: val = po->has_vnet_hdr; break; case PACKET_VERSION: val = po->tp_version; break; case PACKET_HDRLEN: if (len > sizeof(int)) len = sizeof(int); if (len < sizeof(int)) return -EINVAL; if (copy_from_user(&val, optval, len)) return -EFAULT; switch (val) { case TPACKET_V1: val = sizeof(struct tpacket_hdr); break; case TPACKET_V2: val = sizeof(struct tpacket2_hdr); break; case TPACKET_V3: val = sizeof(struct tpacket3_hdr); break; default: return -EINVAL; } break; case PACKET_RESERVE: val = po->tp_reserve; break; case PACKET_LOSS: val = po->tp_loss; break; case PACKET_TIMESTAMP: val = po->tp_tstamp; break; case PACKET_FANOUT: val = (po->fanout ? 
((u32)po->fanout->id | ((u32)po->fanout->type << 16) | ((u32)po->fanout->flags << 24)) : 0); break; case PACKET_ROLLOVER_STATS: if (!po->rollover) return -EINVAL; rstats.tp_all = atomic_long_read(&po->rollover->num); rstats.tp_huge = atomic_long_read(&po->rollover->num_huge); rstats.tp_failed = atomic_long_read(&po->rollover->num_failed); data = &rstats; lv = sizeof(rstats); break; case PACKET_TX_HAS_OFF: val = po->tp_tx_has_off; break; case PACKET_QDISC_BYPASS: val = packet_use_direct_xmit(po); break; default: return -ENOPROTOOPT; } if (len > lv) len = lv; if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, data, len)) return -EFAULT; return 0; } #ifdef CONFIG_COMPAT static int compat_packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) { struct packet_sock *po = pkt_sk(sock->sk); if (level != SOL_PACKET) return -ENOPROTOOPT; if (optname == PACKET_FANOUT_DATA && po->fanout && po->fanout->type == PACKET_FANOUT_CBPF) { optval = (char __user *)get_compat_bpf_fprog(optval); if (!optval) return -EFAULT; optlen = sizeof(struct sock_fprog); } return packet_setsockopt(sock, level, optname, optval, optlen); } #endif static int packet_notifier(struct notifier_block *this, unsigned long msg, void *ptr) { struct sock *sk; struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct net *net = dev_net(dev); rcu_read_lock(); sk_for_each_rcu(sk, &net->packet.sklist) { struct packet_sock *po = pkt_sk(sk); switch (msg) { case NETDEV_UNREGISTER: if (po->mclist) packet_dev_mclist_delete(dev, &po->mclist); /* fallthrough */ case NETDEV_DOWN: if (dev->ifindex == po->ifindex) { spin_lock(&po->bind_lock); if (po->running) { __unregister_prot_hook(sk, false); sk->sk_err = ENETDOWN; if (!sock_flag(sk, SOCK_DEAD)) sk->sk_error_report(sk); } if (msg == NETDEV_UNREGISTER) { packet_cached_dev_reset(po); po->ifindex = -1; if (po->prot_hook.dev) dev_put(po->prot_hook.dev); po->prot_hook.dev = NULL; } spin_unlock(&po->bind_lock); } break; case NETDEV_UP: if (dev->ifindex == po->ifindex) { spin_lock(&po->bind_lock); if (po->num) register_prot_hook(sk); spin_unlock(&po->bind_lock); } break; } } rcu_read_unlock(); return NOTIFY_DONE; } static int packet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { struct sock *sk = sock->sk; switch (cmd) { case SIOCOUTQ: { int amount = sk_wmem_alloc_get(sk); return put_user(amount, (int __user *)arg); } case SIOCINQ: { struct sk_buff *skb; int amount = 0; spin_lock_bh(&sk->sk_receive_queue.lock); skb = skb_peek(&sk->sk_receive_queue); if (skb) amount = skb->len; spin_unlock_bh(&sk->sk_receive_queue.lock); return put_user(amount, (int __user *)arg); } case SIOCGSTAMP: return sock_get_timestamp(sk, (struct timeval __user *)arg); case SIOCGSTAMPNS: return sock_get_timestampns(sk, (struct timespec __user *)arg); #ifdef CONFIG_INET case SIOCADDRT: case SIOCDELRT: case SIOCDARP: case SIOCGARP: case SIOCSARP: case SIOCGIFADDR: case SIOCSIFADDR: case SIOCGIFBRDADDR: case SIOCSIFBRDADDR: case SIOCGIFNETMASK: case SIOCSIFNETMASK: case SIOCGIFDSTADDR: case SIOCSIFDSTADDR: case SIOCSIFFLAGS: return inet_dgram_ops.ioctl(sock, cmd, arg); #endif default: return -ENOIOCTLCMD; } return 0; } static unsigned int packet_poll(struct file *file, struct socket *sock, poll_table *wait) { struct sock *sk = sock->sk; struct packet_sock *po = pkt_sk(sk); unsigned int mask = datagram_poll(file, sock, wait); spin_lock_bh(&sk->sk_receive_queue.lock); if (po->rx_ring.pg_vec) { if (!packet_previous_rx_frame(po, &po->rx_ring, 
TP_STATUS_KERNEL)) mask |= POLLIN | POLLRDNORM; } if (po->pressure && __packet_rcv_has_room(po, NULL) == ROOM_NORMAL) po->pressure = 0; spin_unlock_bh(&sk->sk_receive_queue.lock); spin_lock_bh(&sk->sk_write_queue.lock); if (po->tx_ring.pg_vec) { if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE)) mask |= POLLOUT | POLLWRNORM; } spin_unlock_bh(&sk->sk_write_queue.lock); return mask; } /* Dirty? Well, I still did not learn better way to account * for user mmaps. */ static void packet_mm_open(struct vm_area_struct *vma) { struct file *file = vma->vm_file; struct socket *sock = file->private_data; struct sock *sk = sock->sk; if (sk) atomic_inc(&pkt_sk(sk)->mapped); } static void packet_mm_close(struct vm_area_struct *vma) { struct file *file = vma->vm_file; struct socket *sock = file->private_data; struct sock *sk = sock->sk; if (sk) atomic_dec(&pkt_sk(sk)->mapped); } static const struct vm_operations_struct packet_mmap_ops = { .open = packet_mm_open, .close = packet_mm_close, }; static void free_pg_vec(struct pgv *pg_vec, unsigned int order, unsigned int len) { int i; for (i = 0; i < len; i++) { if (likely(pg_vec[i].buffer)) { if (is_vmalloc_addr(pg_vec[i].buffer)) vfree(pg_vec[i].buffer); else free_pages((unsigned long)pg_vec[i].buffer, order); pg_vec[i].buffer = NULL; } } kfree(pg_vec); } static char *alloc_one_pg_vec_page(unsigned long order) { char *buffer; gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP | __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY; buffer = (char *) __get_free_pages(gfp_flags, order); if (buffer) return buffer; /* __get_free_pages failed, fall back to vmalloc */ buffer = vzalloc((1 << order) * PAGE_SIZE); if (buffer) return buffer; /* vmalloc failed, lets dig into swap here */ gfp_flags &= ~__GFP_NORETRY; buffer = (char *) __get_free_pages(gfp_flags, order); if (buffer) return buffer; /* complete and utter failure */ return NULL; } static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order) { unsigned int block_nr = req->tp_block_nr; struct pgv *pg_vec; int i; pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL); if (unlikely(!pg_vec)) goto out; for (i = 0; i < block_nr; i++) { pg_vec[i].buffer = alloc_one_pg_vec_page(order); if (unlikely(!pg_vec[i].buffer)) goto out_free_pgvec; } out: return pg_vec; out_free_pgvec: free_pg_vec(pg_vec, order, block_nr); pg_vec = NULL; goto out; } static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, int closing, int tx_ring) { struct pgv *pg_vec = NULL; struct packet_sock *po = pkt_sk(sk); int was_running, order = 0; struct packet_ring_buffer *rb; struct sk_buff_head *rb_queue; __be16 num; int err = -EINVAL; /* Added to avoid minimal code churn */ struct tpacket_req *req = &req_u->req; lock_sock(sk); rb = tx_ring ? &po->tx_ring : &po->rx_ring; rb_queue = tx_ring ? 
&sk->sk_write_queue : &sk->sk_receive_queue; err = -EBUSY; if (!closing) { if (atomic_read(&po->mapped)) goto out; if (packet_read_pending(rb)) goto out; } if (req->tp_block_nr) { /* Sanity tests and some calculations */ err = -EBUSY; if (unlikely(rb->pg_vec)) goto out; switch (po->tp_version) { case TPACKET_V1: po->tp_hdrlen = TPACKET_HDRLEN; break; case TPACKET_V2: po->tp_hdrlen = TPACKET2_HDRLEN; break; case TPACKET_V3: po->tp_hdrlen = TPACKET3_HDRLEN; break; } err = -EINVAL; if (unlikely((int)req->tp_block_size <= 0)) goto out; if (unlikely(!PAGE_ALIGNED(req->tp_block_size))) goto out; if (po->tp_version >= TPACKET_V3 && req->tp_block_size <= BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv)) goto out; if (unlikely(req->tp_frame_size < po->tp_hdrlen + po->tp_reserve)) goto out; if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1))) goto out; rb->frames_per_block = req->tp_block_size / req->tp_frame_size; if (unlikely(rb->frames_per_block == 0)) goto out; if (unlikely(req->tp_block_size > UINT_MAX / req->tp_block_nr)) goto out; if (unlikely((rb->frames_per_block * req->tp_block_nr) != req->tp_frame_nr)) goto out; err = -ENOMEM; order = get_order(req->tp_block_size); pg_vec = alloc_pg_vec(req, order); if (unlikely(!pg_vec)) goto out; switch (po->tp_version) { case TPACKET_V3: /* Block transmit is not supported yet */ if (!tx_ring) { init_prb_bdqc(po, rb, pg_vec, req_u); } else { struct tpacket_req3 *req3 = &req_u->req3; if (req3->tp_retire_blk_tov || req3->tp_sizeof_priv || req3->tp_feature_req_word) { err = -EINVAL; goto out; } } break; default: break; } } /* Done */ else { err = -EINVAL; if (unlikely(req->tp_frame_nr)) goto out; } /* Detach socket from network */ spin_lock(&po->bind_lock); was_running = po->running; num = po->num; if (was_running) { po->num = 0; __unregister_prot_hook(sk, false); } spin_unlock(&po->bind_lock); synchronize_net(); err = -EBUSY; mutex_lock(&po->pg_vec_lock); if (closing || atomic_read(&po->mapped) == 0) { err = 0; spin_lock_bh(&rb_queue->lock); swap(rb->pg_vec, pg_vec); rb->frame_max = (req->tp_frame_nr - 1); rb->head = 0; rb->frame_size = req->tp_frame_size; spin_unlock_bh(&rb_queue->lock); swap(rb->pg_vec_order, order); swap(rb->pg_vec_len, req->tp_block_nr); rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE; po->prot_hook.func = (po->rx_ring.pg_vec) ? 
tpacket_rcv : packet_rcv; skb_queue_purge(rb_queue); if (atomic_read(&po->mapped)) pr_err("packet_mmap: vma is busy: %d\n", atomic_read(&po->mapped)); } mutex_unlock(&po->pg_vec_lock); spin_lock(&po->bind_lock); if (was_running) { po->num = num; register_prot_hook(sk); } spin_unlock(&po->bind_lock); if (pg_vec && (po->tp_version > TPACKET_V2)) { /* Because we don't support block-based V3 on tx-ring */ if (!tx_ring) prb_shutdown_retire_blk_timer(po, rb_queue); } if (pg_vec) free_pg_vec(pg_vec, order, req->tp_block_nr); out: release_sock(sk); return err; } static int packet_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma) { struct sock *sk = sock->sk; struct packet_sock *po = pkt_sk(sk); unsigned long size, expected_size; struct packet_ring_buffer *rb; unsigned long start; int err = -EINVAL; int i; if (vma->vm_pgoff) return -EINVAL; mutex_lock(&po->pg_vec_lock); expected_size = 0; for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) { if (rb->pg_vec) { expected_size += rb->pg_vec_len * rb->pg_vec_pages * PAGE_SIZE; } } if (expected_size == 0) goto out; size = vma->vm_end - vma->vm_start; if (size != expected_size) goto out; start = vma->vm_start; for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) { if (rb->pg_vec == NULL) continue; for (i = 0; i < rb->pg_vec_len; i++) { struct page *page; void *kaddr = rb->pg_vec[i].buffer; int pg_num; for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) { page = pgv_to_page(kaddr); err = vm_insert_page(vma, start, page); if (unlikely(err)) goto out; start += PAGE_SIZE; kaddr += PAGE_SIZE; } } } atomic_inc(&po->mapped); vma->vm_ops = &packet_mmap_ops; err = 0; out: mutex_unlock(&po->pg_vec_lock); return err; } static const struct proto_ops packet_ops_spkt = { .family = PF_PACKET, .owner = THIS_MODULE, .release = packet_release, .bind = packet_bind_spkt, .connect = sock_no_connect, .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = packet_getname_spkt, .poll = datagram_poll, .ioctl = packet_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, .setsockopt = sock_no_setsockopt, .getsockopt = sock_no_getsockopt, .sendmsg = packet_sendmsg_spkt, .recvmsg = packet_recvmsg, .mmap = sock_no_mmap, .sendpage = sock_no_sendpage, }; static const struct proto_ops packet_ops = { .family = PF_PACKET, .owner = THIS_MODULE, .release = packet_release, .bind = packet_bind, .connect = sock_no_connect, .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = packet_getname, .poll = packet_poll, .ioctl = packet_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, .setsockopt = packet_setsockopt, .getsockopt = packet_getsockopt, #ifdef CONFIG_COMPAT .compat_setsockopt = compat_packet_setsockopt, #endif .sendmsg = packet_sendmsg, .recvmsg = packet_recvmsg, .mmap = packet_mmap, .sendpage = sock_no_sendpage, }; static const struct net_proto_family packet_family_ops = { .family = PF_PACKET, .create = packet_create, .owner = THIS_MODULE, }; static struct notifier_block packet_netdev_notifier = { .notifier_call = packet_notifier, }; #ifdef CONFIG_PROC_FS static void *packet_seq_start(struct seq_file *seq, loff_t *pos) __acquires(RCU) { struct net *net = seq_file_net(seq); rcu_read_lock(); return seq_hlist_start_head_rcu(&net->packet.sklist, *pos); } static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct net *net = seq_file_net(seq); return seq_hlist_next_rcu(v, &net->packet.sklist, pos); } static void packet_seq_stop(struct seq_file *seq, void *v) __releases(RCU) { rcu_read_unlock(); } 
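/* Descriptive note (editor addition): packet_seq_show below emits one line per packet
 * socket in /proc/net/packet, matching the header it prints: socket address, refcount,
 * socket type, protocol number (network byte order), bound ifindex, running flag,
 * receive-queue memory, owning uid and inode. */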
static int packet_seq_show(struct seq_file *seq, void *v) { if (v == SEQ_START_TOKEN) seq_puts(seq, "sk RefCnt Type Proto Iface R Rmem User Inode\n"); else { struct sock *s = sk_entry(v); const struct packet_sock *po = pkt_sk(s); seq_printf(seq, "%pK %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n", s, refcount_read(&s->sk_refcnt), s->sk_type, ntohs(po->num), po->ifindex, po->running, atomic_read(&s->sk_rmem_alloc), from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)), sock_i_ino(s)); } return 0; } static const struct seq_operations packet_seq_ops = { .start = packet_seq_start, .next = packet_seq_next, .stop = packet_seq_stop, .show = packet_seq_show, }; static int packet_seq_open(struct inode *inode, struct file *file) { return seq_open_net(inode, file, &packet_seq_ops, sizeof(struct seq_net_private)); } static const struct file_operations packet_seq_fops = { .owner = THIS_MODULE, .open = packet_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_net, }; #endif static int __net_init packet_net_init(struct net *net) { mutex_init(&net->packet.sklist_lock); INIT_HLIST_HEAD(&net->packet.sklist); if (!proc_create("packet", 0, net->proc_net, &packet_seq_fops)) return -ENOMEM; return 0; } static void __net_exit packet_net_exit(struct net *net) { remove_proc_entry("packet", net->proc_net); } static struct pernet_operations packet_net_ops = { .init = packet_net_init, .exit = packet_net_exit, }; static void __exit packet_exit(void) { unregister_netdevice_notifier(&packet_netdev_notifier); unregister_pernet_subsys(&packet_net_ops); sock_unregister(PF_PACKET); proto_unregister(&packet_proto); } static int __init packet_init(void) { int rc = proto_register(&packet_proto, 0); if (rc != 0) goto out; sock_register(&packet_family_ops); register_pernet_subsys(&packet_net_ops); register_netdevice_notifier(&packet_netdev_notifier); out: return rc; } module_init(packet_init); module_exit(packet_exit); MODULE_LICENSE("GPL"); MODULE_ALIAS_NETPROTO(PF_PACKET);
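/*
 * Editor's usage sketch (not part of the kernel source): a minimal user-space consumer
 * of the PACKET_RX_RING / mmap interface implemented above. The ring geometry is
 * illustrative only, error handling is omitted, and the caller is assumed to have
 * CAP_NET_RAW. The sizes chosen satisfy the checks in packet_set_ring(): the block size
 * is page aligned, the frame size is TPACKET_ALIGNMENT aligned and large enough for the
 * header, and tp_frame_nr equals frames_per_block * tp_block_nr; the mmap length equals
 * tp_block_size * tp_block_nr, as required by packet_mmap().
 *
 *   #include <sys/socket.h>
 *   #include <sys/mman.h>
 *   #include <arpa/inet.h>
 *   #include <linux/if_packet.h>
 *   #include <linux/if_ether.h>
 *
 *   int fd  = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *   int ver = TPACKET_V2;
 *   setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver));
 *
 *   struct tpacket_req req = {
 *           .tp_block_size = 4096,     // page aligned
 *           .tp_block_nr   = 64,
 *           .tp_frame_size = 2048,     // >= tp_hdrlen + tp_reserve, 16-byte aligned
 *           .tp_frame_nr   = 64 * 2,   // frames_per_block (4096/2048) * tp_block_nr
 *   };
 *   setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 *
 *   void *ring = mmap(NULL, (size_t)req.tp_block_size * req.tp_block_nr,
 *                     PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 */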
./CrossVul/dataset_final_sorted/CWE-362/c/good_2871_0
crossvul-cpp_data_bad_840_0
/* * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. */ /*-************************************* * Dependencies ***************************************/ #include <string.h> /* memset */ #include "cpu.h" #include "mem.h" #include "hist.h" /* HIST_countFast_wksp */ #define FSE_STATIC_LINKING_ONLY /* FSE_encodeSymbol */ #include "fse.h" #define HUF_STATIC_LINKING_ONLY #include "huf.h" #include "zstd_compress_internal.h" #include "zstd_fast.h" #include "zstd_double_fast.h" #include "zstd_lazy.h" #include "zstd_opt.h" #include "zstd_ldm.h" /*-************************************* * Helper functions ***************************************/ size_t ZSTD_compressBound(size_t srcSize) { return ZSTD_COMPRESSBOUND(srcSize); } /*-************************************* * Context memory management ***************************************/ struct ZSTD_CDict_s { void* dictBuffer; const void* dictContent; size_t dictContentSize; void* workspace; size_t workspaceSize; ZSTD_matchState_t matchState; ZSTD_compressedBlockState_t cBlockState; ZSTD_customMem customMem; U32 dictID; }; /* typedef'd to ZSTD_CDict within "zstd.h" */ ZSTD_CCtx* ZSTD_createCCtx(void) { return ZSTD_createCCtx_advanced(ZSTD_defaultCMem); } static void ZSTD_initCCtx(ZSTD_CCtx* cctx, ZSTD_customMem memManager) { assert(cctx != NULL); memset(cctx, 0, sizeof(*cctx)); cctx->customMem = memManager; cctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid()); { size_t const err = ZSTD_CCtx_resetParameters(cctx); assert(!ZSTD_isError(err)); (void)err; } } ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem) { ZSTD_STATIC_ASSERT(zcss_init==0); ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN==(0ULL - 1)); if (!customMem.customAlloc ^ !customMem.customFree) return NULL; { ZSTD_CCtx* const cctx = (ZSTD_CCtx*)ZSTD_malloc(sizeof(ZSTD_CCtx), customMem); if (!cctx) return NULL; ZSTD_initCCtx(cctx, customMem); return cctx; } } ZSTD_CCtx* ZSTD_initStaticCCtx(void *workspace, size_t workspaceSize) { ZSTD_CCtx* const cctx = (ZSTD_CCtx*) workspace; if (workspaceSize <= sizeof(ZSTD_CCtx)) return NULL; /* minimum size */ if ((size_t)workspace & 7) return NULL; /* must be 8-aligned */ memset(workspace, 0, workspaceSize); /* may be a bit generous, could memset be smaller ? */ cctx->staticSize = workspaceSize; cctx->workSpace = (void*)(cctx+1); cctx->workSpaceSize = workspaceSize - sizeof(ZSTD_CCtx); /* statically sized space. 
entropyWorkspace never moves (but prev/next block swap places) */ if (cctx->workSpaceSize < HUF_WORKSPACE_SIZE + 2 * sizeof(ZSTD_compressedBlockState_t)) return NULL; assert(((size_t)cctx->workSpace & (sizeof(void*)-1)) == 0); /* ensure correct alignment */ cctx->blockState.prevCBlock = (ZSTD_compressedBlockState_t*)cctx->workSpace; cctx->blockState.nextCBlock = cctx->blockState.prevCBlock + 1; { void* const ptr = cctx->blockState.nextCBlock + 1; cctx->entropyWorkspace = (U32*)ptr; } cctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid()); return cctx; } static void ZSTD_freeCCtxContent(ZSTD_CCtx* cctx) { assert(cctx != NULL); assert(cctx->staticSize == 0); ZSTD_free(cctx->workSpace, cctx->customMem); cctx->workSpace = NULL; ZSTD_freeCDict(cctx->cdictLocal); cctx->cdictLocal = NULL; #ifdef ZSTD_MULTITHREAD ZSTDMT_freeCCtx(cctx->mtctx); cctx->mtctx = NULL; #endif } size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx) { if (cctx==NULL) return 0; /* support free on NULL */ if (cctx->staticSize) return ERROR(memory_allocation); /* not compatible with static CCtx */ ZSTD_freeCCtxContent(cctx); ZSTD_free(cctx, cctx->customMem); return 0; } static size_t ZSTD_sizeof_mtctx(const ZSTD_CCtx* cctx) { #ifdef ZSTD_MULTITHREAD return ZSTDMT_sizeof_CCtx(cctx->mtctx); #else (void) cctx; return 0; #endif } size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx) { if (cctx==NULL) return 0; /* support sizeof on NULL */ return sizeof(*cctx) + cctx->workSpaceSize + ZSTD_sizeof_CDict(cctx->cdictLocal) + ZSTD_sizeof_mtctx(cctx); } size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs) { return ZSTD_sizeof_CCtx(zcs); /* same object */ } /* private API call, for dictBuilder only */ const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx) { return &(ctx->seqStore); } static ZSTD_CCtx_params ZSTD_makeCCtxParamsFromCParams( ZSTD_compressionParameters cParams) { ZSTD_CCtx_params cctxParams; memset(&cctxParams, 0, sizeof(cctxParams)); cctxParams.cParams = cParams; cctxParams.compressionLevel = ZSTD_CLEVEL_DEFAULT; /* should not matter, as all cParams are presumed properly defined */ assert(!ZSTD_checkCParams(cParams)); cctxParams.fParams.contentSizeFlag = 1; return cctxParams; } static ZSTD_CCtx_params* ZSTD_createCCtxParams_advanced( ZSTD_customMem customMem) { ZSTD_CCtx_params* params; if (!customMem.customAlloc ^ !customMem.customFree) return NULL; params = (ZSTD_CCtx_params*)ZSTD_calloc( sizeof(ZSTD_CCtx_params), customMem); if (!params) { return NULL; } params->customMem = customMem; params->compressionLevel = ZSTD_CLEVEL_DEFAULT; params->fParams.contentSizeFlag = 1; return params; } ZSTD_CCtx_params* ZSTD_createCCtxParams(void) { return ZSTD_createCCtxParams_advanced(ZSTD_defaultCMem); } size_t ZSTD_freeCCtxParams(ZSTD_CCtx_params* params) { if (params == NULL) { return 0; } ZSTD_free(params, params->customMem); return 0; } size_t ZSTD_CCtxParams_reset(ZSTD_CCtx_params* params) { return ZSTD_CCtxParams_init(params, ZSTD_CLEVEL_DEFAULT); } size_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compressionLevel) { if (!cctxParams) { return ERROR(GENERIC); } memset(cctxParams, 0, sizeof(*cctxParams)); cctxParams->compressionLevel = compressionLevel; cctxParams->fParams.contentSizeFlag = 1; return 0; } size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params) { if (!cctxParams) { return ERROR(GENERIC); } CHECK_F( ZSTD_checkCParams(params.cParams) ); memset(cctxParams, 0, sizeof(*cctxParams)); cctxParams->cParams = params.cParams; cctxParams->fParams = params.fParams; cctxParams->compressionLevel = 
ZSTD_CLEVEL_DEFAULT; /* should not matter, as all cParams are presumed properly defined */ assert(!ZSTD_checkCParams(params.cParams)); return 0; } /* ZSTD_assignParamsToCCtxParams() : * params is presumed valid at this stage */ static ZSTD_CCtx_params ZSTD_assignParamsToCCtxParams( ZSTD_CCtx_params cctxParams, ZSTD_parameters params) { ZSTD_CCtx_params ret = cctxParams; ret.cParams = params.cParams; ret.fParams = params.fParams; ret.compressionLevel = ZSTD_CLEVEL_DEFAULT; /* should not matter, as all cParams are presumed properly defined */ assert(!ZSTD_checkCParams(params.cParams)); return ret; } #define CLAMPCHECK(val,min,max) { \ if (((val)<(min)) | ((val)>(max))) { \ return ERROR(parameter_outOfBound); \ } } static int ZSTD_isUpdateAuthorized(ZSTD_cParameter param) { switch(param) { case ZSTD_p_compressionLevel: case ZSTD_p_hashLog: case ZSTD_p_chainLog: case ZSTD_p_searchLog: case ZSTD_p_minMatch: case ZSTD_p_targetLength: case ZSTD_p_compressionStrategy: return 1; case ZSTD_p_format: case ZSTD_p_windowLog: case ZSTD_p_contentSizeFlag: case ZSTD_p_checksumFlag: case ZSTD_p_dictIDFlag: case ZSTD_p_forceMaxWindow : case ZSTD_p_nbWorkers: case ZSTD_p_jobSize: case ZSTD_p_overlapSizeLog: case ZSTD_p_enableLongDistanceMatching: case ZSTD_p_ldmHashLog: case ZSTD_p_ldmMinMatch: case ZSTD_p_ldmBucketSizeLog: case ZSTD_p_ldmHashEveryLog: case ZSTD_p_forceAttachDict: default: return 0; } } size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, unsigned value) { DEBUGLOG(4, "ZSTD_CCtx_setParameter (%u, %u)", (U32)param, value); if (cctx->streamStage != zcss_init) { if (ZSTD_isUpdateAuthorized(param)) { cctx->cParamsChanged = 1; } else { return ERROR(stage_wrong); } } switch(param) { case ZSTD_p_format : return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value); case ZSTD_p_compressionLevel: if (cctx->cdict) return ERROR(stage_wrong); return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value); case ZSTD_p_windowLog: case ZSTD_p_hashLog: case ZSTD_p_chainLog: case ZSTD_p_searchLog: case ZSTD_p_minMatch: case ZSTD_p_targetLength: case ZSTD_p_compressionStrategy: if (cctx->cdict) return ERROR(stage_wrong); return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value); case ZSTD_p_contentSizeFlag: case ZSTD_p_checksumFlag: case ZSTD_p_dictIDFlag: return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value); case ZSTD_p_forceMaxWindow : /* Force back-references to remain < windowSize, * even when referencing into Dictionary content. 
* default : 0 when using a CDict, 1 when using a Prefix */ return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value); case ZSTD_p_forceAttachDict: return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value); case ZSTD_p_nbWorkers: if ((value>0) && cctx->staticSize) { return ERROR(parameter_unsupported); /* MT not compatible with static alloc */ } return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value); case ZSTD_p_jobSize: case ZSTD_p_overlapSizeLog: return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value); case ZSTD_p_enableLongDistanceMatching: case ZSTD_p_ldmHashLog: case ZSTD_p_ldmMinMatch: case ZSTD_p_ldmBucketSizeLog: case ZSTD_p_ldmHashEveryLog: if (cctx->cdict) return ERROR(stage_wrong); return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value); default: return ERROR(parameter_unsupported); } } size_t ZSTD_CCtxParam_setParameter( ZSTD_CCtx_params* CCtxParams, ZSTD_cParameter param, unsigned value) { DEBUGLOG(4, "ZSTD_CCtxParam_setParameter (%u, %u)", (U32)param, value); switch(param) { case ZSTD_p_format : if (value > (unsigned)ZSTD_f_zstd1_magicless) return ERROR(parameter_unsupported); CCtxParams->format = (ZSTD_format_e)value; return (size_t)CCtxParams->format; case ZSTD_p_compressionLevel : { int cLevel = (int)value; /* cast expected to restore negative sign */ if (cLevel > ZSTD_maxCLevel()) cLevel = ZSTD_maxCLevel(); if (cLevel) { /* 0 : does not change current level */ CCtxParams->compressionLevel = cLevel; } if (CCtxParams->compressionLevel >= 0) return CCtxParams->compressionLevel; return 0; /* return type (size_t) cannot represent negative values */ } case ZSTD_p_windowLog : if (value>0) /* 0 => use default */ CLAMPCHECK(value, ZSTD_WINDOWLOG_MIN, ZSTD_WINDOWLOG_MAX); CCtxParams->cParams.windowLog = value; return CCtxParams->cParams.windowLog; case ZSTD_p_hashLog : if (value>0) /* 0 => use default */ CLAMPCHECK(value, ZSTD_HASHLOG_MIN, ZSTD_HASHLOG_MAX); CCtxParams->cParams.hashLog = value; return CCtxParams->cParams.hashLog; case ZSTD_p_chainLog : if (value>0) /* 0 => use default */ CLAMPCHECK(value, ZSTD_CHAINLOG_MIN, ZSTD_CHAINLOG_MAX); CCtxParams->cParams.chainLog = value; return CCtxParams->cParams.chainLog; case ZSTD_p_searchLog : if (value>0) /* 0 => use default */ CLAMPCHECK(value, ZSTD_SEARCHLOG_MIN, ZSTD_SEARCHLOG_MAX); CCtxParams->cParams.searchLog = value; return value; case ZSTD_p_minMatch : if (value>0) /* 0 => use default */ CLAMPCHECK(value, ZSTD_SEARCHLENGTH_MIN, ZSTD_SEARCHLENGTH_MAX); CCtxParams->cParams.searchLength = value; return CCtxParams->cParams.searchLength; case ZSTD_p_targetLength : /* all values are valid. 
0 => use default */ CCtxParams->cParams.targetLength = value; return CCtxParams->cParams.targetLength; case ZSTD_p_compressionStrategy : if (value>0) /* 0 => use default */ CLAMPCHECK(value, (unsigned)ZSTD_fast, (unsigned)ZSTD_btultra); CCtxParams->cParams.strategy = (ZSTD_strategy)value; return (size_t)CCtxParams->cParams.strategy; case ZSTD_p_contentSizeFlag : /* Content size written in frame header _when known_ (default:1) */ DEBUGLOG(4, "set content size flag = %u", (value>0)); CCtxParams->fParams.contentSizeFlag = value > 0; return CCtxParams->fParams.contentSizeFlag; case ZSTD_p_checksumFlag : /* A 32-bits content checksum will be calculated and written at end of frame (default:0) */ CCtxParams->fParams.checksumFlag = value > 0; return CCtxParams->fParams.checksumFlag; case ZSTD_p_dictIDFlag : /* When applicable, dictionary's dictID is provided in frame header (default:1) */ DEBUGLOG(4, "set dictIDFlag = %u", (value>0)); CCtxParams->fParams.noDictIDFlag = !value; return !CCtxParams->fParams.noDictIDFlag; case ZSTD_p_forceMaxWindow : CCtxParams->forceWindow = (value > 0); return CCtxParams->forceWindow; case ZSTD_p_forceAttachDict : CCtxParams->attachDictPref = value ? (value > 0 ? ZSTD_dictForceAttach : ZSTD_dictForceCopy) : ZSTD_dictDefaultAttach; return CCtxParams->attachDictPref; case ZSTD_p_nbWorkers : #ifndef ZSTD_MULTITHREAD if (value>0) return ERROR(parameter_unsupported); return 0; #else return ZSTDMT_CCtxParam_setNbWorkers(CCtxParams, value); #endif case ZSTD_p_jobSize : #ifndef ZSTD_MULTITHREAD return ERROR(parameter_unsupported); #else return ZSTDMT_CCtxParam_setMTCtxParameter(CCtxParams, ZSTDMT_p_jobSize, value); #endif case ZSTD_p_overlapSizeLog : #ifndef ZSTD_MULTITHREAD return ERROR(parameter_unsupported); #else return ZSTDMT_CCtxParam_setMTCtxParameter(CCtxParams, ZSTDMT_p_overlapSectionLog, value); #endif case ZSTD_p_enableLongDistanceMatching : CCtxParams->ldmParams.enableLdm = (value>0); return CCtxParams->ldmParams.enableLdm; case ZSTD_p_ldmHashLog : if (value>0) /* 0 ==> auto */ CLAMPCHECK(value, ZSTD_HASHLOG_MIN, ZSTD_HASHLOG_MAX); CCtxParams->ldmParams.hashLog = value; return CCtxParams->ldmParams.hashLog; case ZSTD_p_ldmMinMatch : if (value>0) /* 0 ==> default */ CLAMPCHECK(value, ZSTD_LDM_MINMATCH_MIN, ZSTD_LDM_MINMATCH_MAX); CCtxParams->ldmParams.minMatchLength = value; return CCtxParams->ldmParams.minMatchLength; case ZSTD_p_ldmBucketSizeLog : if (value > ZSTD_LDM_BUCKETSIZELOG_MAX) return ERROR(parameter_outOfBound); CCtxParams->ldmParams.bucketSizeLog = value; return CCtxParams->ldmParams.bucketSizeLog; case ZSTD_p_ldmHashEveryLog : if (value > ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN) return ERROR(parameter_outOfBound); CCtxParams->ldmParams.hashEveryLog = value; return CCtxParams->ldmParams.hashEveryLog; default: return ERROR(parameter_unsupported); } } size_t ZSTD_CCtx_getParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, unsigned* value) { return ZSTD_CCtxParam_getParameter(&cctx->requestedParams, param, value); } size_t ZSTD_CCtxParam_getParameter( ZSTD_CCtx_params* CCtxParams, ZSTD_cParameter param, unsigned* value) { switch(param) { case ZSTD_p_format : *value = CCtxParams->format; break; case ZSTD_p_compressionLevel : *value = CCtxParams->compressionLevel; break; case ZSTD_p_windowLog : *value = CCtxParams->cParams.windowLog; break; case ZSTD_p_hashLog : *value = CCtxParams->cParams.hashLog; break; case ZSTD_p_chainLog : *value = CCtxParams->cParams.chainLog; break; case ZSTD_p_searchLog : *value = CCtxParams->cParams.searchLog; break; case 
ZSTD_p_minMatch : *value = CCtxParams->cParams.searchLength; break; case ZSTD_p_targetLength : *value = CCtxParams->cParams.targetLength; break; case ZSTD_p_compressionStrategy : *value = (unsigned)CCtxParams->cParams.strategy; break; case ZSTD_p_contentSizeFlag : *value = CCtxParams->fParams.contentSizeFlag; break; case ZSTD_p_checksumFlag : *value = CCtxParams->fParams.checksumFlag; break; case ZSTD_p_dictIDFlag : *value = !CCtxParams->fParams.noDictIDFlag; break; case ZSTD_p_forceMaxWindow : *value = CCtxParams->forceWindow; break; case ZSTD_p_forceAttachDict : *value = CCtxParams->attachDictPref; break; case ZSTD_p_nbWorkers : #ifndef ZSTD_MULTITHREAD assert(CCtxParams->nbWorkers == 0); #endif *value = CCtxParams->nbWorkers; break; case ZSTD_p_jobSize : #ifndef ZSTD_MULTITHREAD return ERROR(parameter_unsupported); #else *value = CCtxParams->jobSize; break; #endif case ZSTD_p_overlapSizeLog : #ifndef ZSTD_MULTITHREAD return ERROR(parameter_unsupported); #else *value = CCtxParams->overlapSizeLog; break; #endif case ZSTD_p_enableLongDistanceMatching : *value = CCtxParams->ldmParams.enableLdm; break; case ZSTD_p_ldmHashLog : *value = CCtxParams->ldmParams.hashLog; break; case ZSTD_p_ldmMinMatch : *value = CCtxParams->ldmParams.minMatchLength; break; case ZSTD_p_ldmBucketSizeLog : *value = CCtxParams->ldmParams.bucketSizeLog; break; case ZSTD_p_ldmHashEveryLog : *value = CCtxParams->ldmParams.hashEveryLog; break; default: return ERROR(parameter_unsupported); } return 0; } /** ZSTD_CCtx_setParametersUsingCCtxParams() : * just applies `params` into `cctx` * no action is performed, parameters are merely stored. * If ZSTDMT is enabled, parameters are pushed to cctx->mtctx. * This is possible even if a compression is ongoing. * In which case, new parameters will be applied on the fly, starting with next compression job. 
*/ size_t ZSTD_CCtx_setParametersUsingCCtxParams( ZSTD_CCtx* cctx, const ZSTD_CCtx_params* params) { DEBUGLOG(4, "ZSTD_CCtx_setParametersUsingCCtxParams"); if (cctx->streamStage != zcss_init) return ERROR(stage_wrong); if (cctx->cdict) return ERROR(stage_wrong); cctx->requestedParams = *params; return 0; } ZSTDLIB_API size_t ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx* cctx, unsigned long long pledgedSrcSize) { DEBUGLOG(4, "ZSTD_CCtx_setPledgedSrcSize to %u bytes", (U32)pledgedSrcSize); if (cctx->streamStage != zcss_init) return ERROR(stage_wrong); cctx->pledgedSrcSizePlusOne = pledgedSrcSize+1; return 0; } size_t ZSTD_CCtx_loadDictionary_advanced( ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType) { if (cctx->streamStage != zcss_init) return ERROR(stage_wrong); if (cctx->staticSize) return ERROR(memory_allocation); /* no malloc for static CCtx */ DEBUGLOG(4, "ZSTD_CCtx_loadDictionary_advanced (size: %u)", (U32)dictSize); ZSTD_freeCDict(cctx->cdictLocal); /* in case one already exists */ if (dict==NULL || dictSize==0) { /* no dictionary mode */ cctx->cdictLocal = NULL; cctx->cdict = NULL; } else { ZSTD_compressionParameters const cParams = ZSTD_getCParamsFromCCtxParams(&cctx->requestedParams, cctx->pledgedSrcSizePlusOne-1, dictSize); cctx->cdictLocal = ZSTD_createCDict_advanced( dict, dictSize, dictLoadMethod, dictContentType, cParams, cctx->customMem); cctx->cdict = cctx->cdictLocal; if (cctx->cdictLocal == NULL) return ERROR(memory_allocation); } return 0; } ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary_byReference( ZSTD_CCtx* cctx, const void* dict, size_t dictSize) { return ZSTD_CCtx_loadDictionary_advanced( cctx, dict, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto); } ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize) { return ZSTD_CCtx_loadDictionary_advanced( cctx, dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto); } size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict) { if (cctx->streamStage != zcss_init) return ERROR(stage_wrong); cctx->cdict = cdict; memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict)); /* exclusive */ return 0; } size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize) { return ZSTD_CCtx_refPrefix_advanced(cctx, prefix, prefixSize, ZSTD_dct_rawContent); } size_t ZSTD_CCtx_refPrefix_advanced( ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType) { if (cctx->streamStage != zcss_init) return ERROR(stage_wrong); cctx->cdict = NULL; /* prefix discards any prior cdict */ cctx->prefixDict.dict = prefix; cctx->prefixDict.dictSize = prefixSize; cctx->prefixDict.dictContentType = dictContentType; return 0; } /*! ZSTD_CCtx_reset() : * Also dumps dictionary */ void ZSTD_CCtx_reset(ZSTD_CCtx* cctx) { cctx->streamStage = zcss_init; cctx->pledgedSrcSizePlusOne = 0; } size_t ZSTD_CCtx_resetParameters(ZSTD_CCtx* cctx) { if (cctx->streamStage != zcss_init) return ERROR(stage_wrong); cctx->cdict = NULL; return ZSTD_CCtxParams_reset(&cctx->requestedParams); } /** ZSTD_checkCParams() : control CParam values remain within authorized range. 
@return : 0, or an error code if one value is beyond authorized range */ size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams) { CLAMPCHECK(cParams.windowLog, ZSTD_WINDOWLOG_MIN, ZSTD_WINDOWLOG_MAX); CLAMPCHECK(cParams.chainLog, ZSTD_CHAINLOG_MIN, ZSTD_CHAINLOG_MAX); CLAMPCHECK(cParams.hashLog, ZSTD_HASHLOG_MIN, ZSTD_HASHLOG_MAX); CLAMPCHECK(cParams.searchLog, ZSTD_SEARCHLOG_MIN, ZSTD_SEARCHLOG_MAX); CLAMPCHECK(cParams.searchLength, ZSTD_SEARCHLENGTH_MIN, ZSTD_SEARCHLENGTH_MAX); ZSTD_STATIC_ASSERT(ZSTD_TARGETLENGTH_MIN == 0); if (cParams.targetLength > ZSTD_TARGETLENGTH_MAX) return ERROR(parameter_outOfBound); if ((U32)(cParams.strategy) > (U32)ZSTD_btultra) return ERROR(parameter_unsupported); return 0; } /** ZSTD_clampCParams() : * make CParam values within valid range. * @return : valid CParams */ static ZSTD_compressionParameters ZSTD_clampCParams(ZSTD_compressionParameters cParams) { # define CLAMP(val,min,max) { \ if (val<min) val=min; \ else if (val>max) val=max; \ } CLAMP(cParams.windowLog, ZSTD_WINDOWLOG_MIN, ZSTD_WINDOWLOG_MAX); CLAMP(cParams.chainLog, ZSTD_CHAINLOG_MIN, ZSTD_CHAINLOG_MAX); CLAMP(cParams.hashLog, ZSTD_HASHLOG_MIN, ZSTD_HASHLOG_MAX); CLAMP(cParams.searchLog, ZSTD_SEARCHLOG_MIN, ZSTD_SEARCHLOG_MAX); CLAMP(cParams.searchLength, ZSTD_SEARCHLENGTH_MIN, ZSTD_SEARCHLENGTH_MAX); ZSTD_STATIC_ASSERT(ZSTD_TARGETLENGTH_MIN == 0); if (cParams.targetLength > ZSTD_TARGETLENGTH_MAX) cParams.targetLength = ZSTD_TARGETLENGTH_MAX; CLAMP(cParams.strategy, ZSTD_fast, ZSTD_btultra); return cParams; } /** ZSTD_cycleLog() : * condition for correct operation : hashLog > 1 */ static U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat) { U32 const btScale = ((U32)strat >= (U32)ZSTD_btlazy2); return hashLog - btScale; } /** ZSTD_adjustCParams_internal() : optimize `cPar` for a given input (`srcSize` and `dictSize`). mostly downsizing to reduce memory consumption and initialization latency. Both `srcSize` and `dictSize` are optional (use 0 if unknown). Note : cPar is assumed validated. Use ZSTD_checkCParams() to ensure this condition. */ static ZSTD_compressionParameters ZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize) { static const U64 minSrcSize = 513; /* (1<<9) + 1 */ static const U64 maxWindowResize = 1ULL << (ZSTD_WINDOWLOG_MAX-1); assert(ZSTD_checkCParams(cPar)==0); if (dictSize && (srcSize+1<2) /* srcSize unknown */ ) srcSize = minSrcSize; /* presumed small when there is a dictionary */ else if (srcSize == 0) srcSize = ZSTD_CONTENTSIZE_UNKNOWN; /* 0 == unknown : presumed large */ /* resize windowLog if input is small enough, to use less memory */ if ( (srcSize < maxWindowResize) && (dictSize < maxWindowResize) ) { U32 const tSize = (U32)(srcSize + dictSize); static U32 const hashSizeMin = 1 << ZSTD_HASHLOG_MIN; U32 const srcLog = (tSize < hashSizeMin) ? 
ZSTD_HASHLOG_MIN : ZSTD_highbit32(tSize-1) + 1; if (cPar.windowLog > srcLog) cPar.windowLog = srcLog; } if (cPar.hashLog > cPar.windowLog+1) cPar.hashLog = cPar.windowLog+1; { U32 const cycleLog = ZSTD_cycleLog(cPar.chainLog, cPar.strategy); if (cycleLog > cPar.windowLog) cPar.chainLog -= (cycleLog - cPar.windowLog); } if (cPar.windowLog < ZSTD_WINDOWLOG_ABSOLUTEMIN) cPar.windowLog = ZSTD_WINDOWLOG_ABSOLUTEMIN; /* required for frame header */ return cPar; } ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize) { cPar = ZSTD_clampCParams(cPar); return ZSTD_adjustCParams_internal(cPar, srcSize, dictSize); } ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams( const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize) { ZSTD_compressionParameters cParams = ZSTD_getCParams(CCtxParams->compressionLevel, srcSizeHint, dictSize); if (CCtxParams->ldmParams.enableLdm) cParams.windowLog = ZSTD_LDM_DEFAULT_WINDOW_LOG; if (CCtxParams->cParams.windowLog) cParams.windowLog = CCtxParams->cParams.windowLog; if (CCtxParams->cParams.hashLog) cParams.hashLog = CCtxParams->cParams.hashLog; if (CCtxParams->cParams.chainLog) cParams.chainLog = CCtxParams->cParams.chainLog; if (CCtxParams->cParams.searchLog) cParams.searchLog = CCtxParams->cParams.searchLog; if (CCtxParams->cParams.searchLength) cParams.searchLength = CCtxParams->cParams.searchLength; if (CCtxParams->cParams.targetLength) cParams.targetLength = CCtxParams->cParams.targetLength; if (CCtxParams->cParams.strategy) cParams.strategy = CCtxParams->cParams.strategy; assert(!ZSTD_checkCParams(cParams)); return ZSTD_adjustCParams_internal(cParams, srcSizeHint, dictSize); } static size_t ZSTD_sizeof_matchState(const ZSTD_compressionParameters* const cParams, const U32 forCCtx) { size_t const chainSize = (cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cParams->chainLog); size_t const hSize = ((size_t)1) << cParams->hashLog; U32 const hashLog3 = (forCCtx && cParams->searchLength==3) ? MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0; size_t const h3Size = ((size_t)1) << hashLog3; size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32); size_t const optPotentialSpace = ((MaxML+1) + (MaxLL+1) + (MaxOff+1) + (1<<Litbits)) * sizeof(U32) + (ZSTD_OPT_NUM+1) * (sizeof(ZSTD_match_t)+sizeof(ZSTD_optimal_t)); size_t const optSpace = (forCCtx && ((cParams->strategy == ZSTD_btopt) || (cParams->strategy == ZSTD_btultra))) ? optPotentialSpace : 0; DEBUGLOG(4, "chainSize: %u - hSize: %u - h3Size: %u", (U32)chainSize, (U32)hSize, (U32)h3Size); return tableSpace + optSpace; } size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params) { /* Estimate CCtx size is supported for single-threaded compression only. */ if (params->nbWorkers > 0) { return ERROR(GENERIC); } { ZSTD_compressionParameters const cParams = ZSTD_getCParamsFromCCtxParams(params, 0, 0); size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, (size_t)1 << cParams.windowLog); U32 const divider = (cParams.searchLength==3) ? 
3 : 4; size_t const maxNbSeq = blockSize / divider; size_t const tokenSpace = WILDCOPY_OVERLENGTH + blockSize + 11*maxNbSeq; size_t const entropySpace = HUF_WORKSPACE_SIZE; size_t const blockStateSpace = 2 * sizeof(ZSTD_compressedBlockState_t); size_t const matchStateSize = ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 1); size_t const ldmSpace = ZSTD_ldm_getTableSize(params->ldmParams); size_t const ldmSeqSpace = ZSTD_ldm_getMaxNbSeq(params->ldmParams, blockSize) * sizeof(rawSeq); size_t const neededSpace = entropySpace + blockStateSpace + tokenSpace + matchStateSize + ldmSpace + ldmSeqSpace; DEBUGLOG(5, "sizeof(ZSTD_CCtx) : %u", (U32)sizeof(ZSTD_CCtx)); DEBUGLOG(5, "estimate workSpace : %u", (U32)neededSpace); return sizeof(ZSTD_CCtx) + neededSpace; } } size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams) { ZSTD_CCtx_params const params = ZSTD_makeCCtxParamsFromCParams(cParams); return ZSTD_estimateCCtxSize_usingCCtxParams(&params); } static size_t ZSTD_estimateCCtxSize_internal(int compressionLevel) { ZSTD_compressionParameters const cParams = ZSTD_getCParams(compressionLevel, 0, 0); return ZSTD_estimateCCtxSize_usingCParams(cParams); } size_t ZSTD_estimateCCtxSize(int compressionLevel) { int level; size_t memBudget = 0; for (level=1; level<=compressionLevel; level++) { size_t const newMB = ZSTD_estimateCCtxSize_internal(level); if (newMB > memBudget) memBudget = newMB; } return memBudget; } size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params) { if (params->nbWorkers > 0) { return ERROR(GENERIC); } { size_t const CCtxSize = ZSTD_estimateCCtxSize_usingCCtxParams(params); size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, (size_t)1 << params->cParams.windowLog); size_t const inBuffSize = ((size_t)1 << params->cParams.windowLog) + blockSize; size_t const outBuffSize = ZSTD_compressBound(blockSize) + 1; size_t const streamingSize = inBuffSize + outBuffSize; return CCtxSize + streamingSize; } } size_t ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams) { ZSTD_CCtx_params const params = ZSTD_makeCCtxParamsFromCParams(cParams); return ZSTD_estimateCStreamSize_usingCCtxParams(&params); } static size_t ZSTD_estimateCStreamSize_internal(int compressionLevel) { ZSTD_compressionParameters const cParams = ZSTD_getCParams(compressionLevel, 0, 0); return ZSTD_estimateCStreamSize_usingCParams(cParams); } size_t ZSTD_estimateCStreamSize(int compressionLevel) { int level; size_t memBudget = 0; for (level=1; level<=compressionLevel; level++) { size_t const newMB = ZSTD_estimateCStreamSize_internal(level); if (newMB > memBudget) memBudget = newMB; } return memBudget; } /* ZSTD_getFrameProgression(): * tells how much data has been consumed (input) and produced (output) for current frame. * able to count progression inside worker threads (non-blocking mode). */ ZSTD_frameProgression ZSTD_getFrameProgression(const ZSTD_CCtx* cctx) { #ifdef ZSTD_MULTITHREAD if (cctx->appliedParams.nbWorkers > 0) { return ZSTDMT_getFrameProgression(cctx->mtctx); } #endif { ZSTD_frameProgression fp; size_t const buffered = (cctx->inBuff == NULL) ? 
0 : cctx->inBuffPos - cctx->inToCompress; if (buffered) assert(cctx->inBuffPos >= cctx->inToCompress); assert(buffered <= ZSTD_BLOCKSIZE_MAX); fp.ingested = cctx->consumedSrcSize + buffered; fp.consumed = cctx->consumedSrcSize; fp.produced = cctx->producedCSize; fp.flushed = cctx->producedCSize; /* simplified; some data might still be left within streaming output buffer */ fp.currentJobID = 0; fp.nbActiveWorkers = 0; return fp; } } /*! ZSTD_toFlushNow() * Only useful for multithreading scenarios currently (nbWorkers >= 1). */ size_t ZSTD_toFlushNow(ZSTD_CCtx* cctx) { #ifdef ZSTD_MULTITHREAD if (cctx->appliedParams.nbWorkers > 0) { return ZSTDMT_toFlushNow(cctx->mtctx); } #endif (void)cctx; return 0; /* over-simplification; could also check if context is currently running in streaming mode, and in which case, report how many bytes are left to be flushed within output buffer */ } static U32 ZSTD_equivalentCParams(ZSTD_compressionParameters cParams1, ZSTD_compressionParameters cParams2) { return (cParams1.hashLog == cParams2.hashLog) & (cParams1.chainLog == cParams2.chainLog) & (cParams1.strategy == cParams2.strategy) /* opt parser space */ & ((cParams1.searchLength==3) == (cParams2.searchLength==3)); /* hashlog3 space */ } static void ZSTD_assertEqualCParams(ZSTD_compressionParameters cParams1, ZSTD_compressionParameters cParams2) { (void)cParams1; (void)cParams2; assert(cParams1.windowLog == cParams2.windowLog); assert(cParams1.chainLog == cParams2.chainLog); assert(cParams1.hashLog == cParams2.hashLog); assert(cParams1.searchLog == cParams2.searchLog); assert(cParams1.searchLength == cParams2.searchLength); assert(cParams1.targetLength == cParams2.targetLength); assert(cParams1.strategy == cParams2.strategy); } /** The parameters are equivalent if ldm is not enabled in both sets or * all the parameters are equivalent. */ static U32 ZSTD_equivalentLdmParams(ldmParams_t ldmParams1, ldmParams_t ldmParams2) { return (!ldmParams1.enableLdm && !ldmParams2.enableLdm) || (ldmParams1.enableLdm == ldmParams2.enableLdm && ldmParams1.hashLog == ldmParams2.hashLog && ldmParams1.bucketSizeLog == ldmParams2.bucketSizeLog && ldmParams1.minMatchLength == ldmParams2.minMatchLength && ldmParams1.hashEveryLog == ldmParams2.hashEveryLog); } typedef enum { ZSTDb_not_buffered, ZSTDb_buffered } ZSTD_buffered_policy_e; /* ZSTD_sufficientBuff() : * check internal buffers exist for streaming if buffPol == ZSTDb_buffered . * Note : they are assumed to be correctly sized if ZSTD_equivalentCParams()==1 */ static U32 ZSTD_sufficientBuff(size_t bufferSize1, size_t maxNbSeq1, size_t maxNbLit1, ZSTD_buffered_policy_e buffPol2, ZSTD_compressionParameters cParams2, U64 pledgedSrcSize) { size_t const windowSize2 = MAX(1, (size_t)MIN(((U64)1 << cParams2.windowLog), pledgedSrcSize)); size_t const blockSize2 = MIN(ZSTD_BLOCKSIZE_MAX, windowSize2); size_t const maxNbSeq2 = blockSize2 / ((cParams2.searchLength == 3) ? 3 : 4); size_t const maxNbLit2 = blockSize2; size_t const neededBufferSize2 = (buffPol2==ZSTDb_buffered) ? 
windowSize2 + blockSize2 : 0; DEBUGLOG(4, "ZSTD_sufficientBuff: is neededBufferSize2=%u <= bufferSize1=%u", (U32)neededBufferSize2, (U32)bufferSize1); DEBUGLOG(4, "ZSTD_sufficientBuff: is maxNbSeq2=%u <= maxNbSeq1=%u", (U32)maxNbSeq2, (U32)maxNbSeq1); DEBUGLOG(4, "ZSTD_sufficientBuff: is maxNbLit2=%u <= maxNbLit1=%u", (U32)maxNbLit2, (U32)maxNbLit1); return (maxNbLit2 <= maxNbLit1) & (maxNbSeq2 <= maxNbSeq1) & (neededBufferSize2 <= bufferSize1); } /** Equivalence for resetCCtx purposes */ static U32 ZSTD_equivalentParams(ZSTD_CCtx_params params1, ZSTD_CCtx_params params2, size_t buffSize1, size_t maxNbSeq1, size_t maxNbLit1, ZSTD_buffered_policy_e buffPol2, U64 pledgedSrcSize) { DEBUGLOG(4, "ZSTD_equivalentParams: pledgedSrcSize=%u", (U32)pledgedSrcSize); if (!ZSTD_equivalentCParams(params1.cParams, params2.cParams)) { DEBUGLOG(4, "ZSTD_equivalentCParams() == 0"); return 0; } if (!ZSTD_equivalentLdmParams(params1.ldmParams, params2.ldmParams)) { DEBUGLOG(4, "ZSTD_equivalentLdmParams() == 0"); return 0; } if (!ZSTD_sufficientBuff(buffSize1, maxNbSeq1, maxNbLit1, buffPol2, params2.cParams, pledgedSrcSize)) { DEBUGLOG(4, "ZSTD_sufficientBuff() == 0"); return 0; } return 1; } static void ZSTD_reset_compressedBlockState(ZSTD_compressedBlockState_t* bs) { int i; for (i = 0; i < ZSTD_REP_NUM; ++i) bs->rep[i] = repStartValue[i]; bs->entropy.huf.repeatMode = HUF_repeat_none; bs->entropy.fse.offcode_repeatMode = FSE_repeat_none; bs->entropy.fse.matchlength_repeatMode = FSE_repeat_none; bs->entropy.fse.litlength_repeatMode = FSE_repeat_none; } /*! ZSTD_invalidateMatchState() * Invalidate all the matches in the match finder tables. * Requires nextSrc and base to be set (can be NULL). */ static void ZSTD_invalidateMatchState(ZSTD_matchState_t* ms) { ZSTD_window_clear(&ms->window); ms->nextToUpdate = ms->window.dictLimit + 1; ms->nextToUpdate3 = ms->window.dictLimit + 1; ms->loadedDictEnd = 0; ms->opt.litLengthSum = 0; /* force reset of btopt stats */ ms->dictMatchState = NULL; } /*! 
ZSTD_continueCCtx() : * reuse CCtx without reset (note : requires no dictionary) */ static size_t ZSTD_continueCCtx(ZSTD_CCtx* cctx, ZSTD_CCtx_params params, U64 pledgedSrcSize) { size_t const windowSize = MAX(1, (size_t)MIN(((U64)1 << params.cParams.windowLog), pledgedSrcSize)); size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, windowSize); DEBUGLOG(4, "ZSTD_continueCCtx: re-use context in place"); cctx->blockSize = blockSize; /* previous block size could be different even for same windowLog, due to pledgedSrcSize */ cctx->appliedParams = params; cctx->blockState.matchState.cParams = params.cParams; cctx->pledgedSrcSizePlusOne = pledgedSrcSize+1; cctx->consumedSrcSize = 0; cctx->producedCSize = 0; if (pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN) cctx->appliedParams.fParams.contentSizeFlag = 0; DEBUGLOG(4, "pledged content size : %u ; flag : %u", (U32)pledgedSrcSize, cctx->appliedParams.fParams.contentSizeFlag); cctx->stage = ZSTDcs_init; cctx->dictID = 0; if (params.ldmParams.enableLdm) ZSTD_window_clear(&cctx->ldmState.window); ZSTD_referenceExternalSequences(cctx, NULL, 0); ZSTD_invalidateMatchState(&cctx->blockState.matchState); ZSTD_reset_compressedBlockState(cctx->blockState.prevCBlock); XXH64_reset(&cctx->xxhState, 0); return 0; } typedef enum { ZSTDcrp_continue, ZSTDcrp_noMemset } ZSTD_compResetPolicy_e; static void* ZSTD_reset_matchState(ZSTD_matchState_t* ms, void* ptr, const ZSTD_compressionParameters* cParams, ZSTD_compResetPolicy_e const crp, U32 const forCCtx) { size_t const chainSize = (cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cParams->chainLog); size_t const hSize = ((size_t)1) << cParams->hashLog; U32 const hashLog3 = (forCCtx && cParams->searchLength==3) ? MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0; size_t const h3Size = ((size_t)1) << hashLog3; size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32); assert(((size_t)ptr & 3) == 0); ms->hashLog3 = hashLog3; memset(&ms->window, 0, sizeof(ms->window)); ms->window.dictLimit = 1; /* start from 1, so that 1st position is valid */ ms->window.lowLimit = 1; /* it ensures first and later CCtx usages compress the same */ ms->window.nextSrc = ms->window.base + 1; /* see issue #1241 */ ZSTD_invalidateMatchState(ms); /* opt parser space */ if (forCCtx && ((cParams->strategy == ZSTD_btopt) | (cParams->strategy == ZSTD_btultra))) { DEBUGLOG(4, "reserving optimal parser space"); ms->opt.litFreq = (U32*)ptr; ms->opt.litLengthFreq = ms->opt.litFreq + (1<<Litbits); ms->opt.matchLengthFreq = ms->opt.litLengthFreq + (MaxLL+1); ms->opt.offCodeFreq = ms->opt.matchLengthFreq + (MaxML+1); ptr = ms->opt.offCodeFreq + (MaxOff+1); ms->opt.matchTable = (ZSTD_match_t*)ptr; ptr = ms->opt.matchTable + ZSTD_OPT_NUM+1; ms->opt.priceTable = (ZSTD_optimal_t*)ptr; ptr = ms->opt.priceTable + ZSTD_OPT_NUM+1; } /* table Space */ DEBUGLOG(4, "reset table : %u", crp!=ZSTDcrp_noMemset); assert(((size_t)ptr & 3) == 0); /* ensure ptr is properly aligned */ if (crp!=ZSTDcrp_noMemset) memset(ptr, 0, tableSpace); /* reset tables only */ ms->hashTable = (U32*)(ptr); ms->chainTable = ms->hashTable + hSize; ms->hashTable3 = ms->chainTable + chainSize; ptr = ms->hashTable3 + h3Size; ms->cParams = *cParams; assert(((size_t)ptr & 3) == 0); return ptr; } #define ZSTD_WORKSPACETOOLARGE_FACTOR 3 /* define "workspace is too large" as this number of times larger than needed */ #define ZSTD_WORKSPACETOOLARGE_MAXDURATION 128 /* when workspace is continuously too large * during at least this number of times, * context's memory usage is considered wasteful, 
* because it's sized to handle a worst case scenario which rarely happens. * In which case, resize it down to free some memory */ /*! ZSTD_resetCCtx_internal() : note : `params` are assumed fully validated at this stage */ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc, ZSTD_CCtx_params params, U64 pledgedSrcSize, ZSTD_compResetPolicy_e const crp, ZSTD_buffered_policy_e const zbuff) { DEBUGLOG(4, "ZSTD_resetCCtx_internal: pledgedSrcSize=%u, wlog=%u", (U32)pledgedSrcSize, params.cParams.windowLog); assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams))); if (crp == ZSTDcrp_continue) { if (ZSTD_equivalentParams(zc->appliedParams, params, zc->inBuffSize, zc->seqStore.maxNbSeq, zc->seqStore.maxNbLit, zbuff, pledgedSrcSize)) { DEBUGLOG(4, "ZSTD_equivalentParams()==1 -> continue mode (wLog1=%u, blockSize1=%zu)", zc->appliedParams.cParams.windowLog, zc->blockSize); zc->workSpaceOversizedDuration += (zc->workSpaceOversizedDuration > 0); /* if it was too large, it still is */ if (zc->workSpaceOversizedDuration <= ZSTD_WORKSPACETOOLARGE_MAXDURATION) return ZSTD_continueCCtx(zc, params, pledgedSrcSize); } } DEBUGLOG(4, "ZSTD_equivalentParams()==0 -> reset CCtx"); if (params.ldmParams.enableLdm) { /* Adjust long distance matching parameters */ ZSTD_ldm_adjustParameters(&params.ldmParams, &params.cParams); assert(params.ldmParams.hashLog >= params.ldmParams.bucketSizeLog); assert(params.ldmParams.hashEveryLog < 32); zc->ldmState.hashPower = ZSTD_ldm_getHashPower(params.ldmParams.minMatchLength); } { size_t const windowSize = MAX(1, (size_t)MIN(((U64)1 << params.cParams.windowLog), pledgedSrcSize)); size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, windowSize); U32 const divider = (params.cParams.searchLength==3) ? 3 : 4; size_t const maxNbSeq = blockSize / divider; size_t const tokenSpace = WILDCOPY_OVERLENGTH + blockSize + 11*maxNbSeq; size_t const buffOutSize = (zbuff==ZSTDb_buffered) ? ZSTD_compressBound(blockSize)+1 : 0; size_t const buffInSize = (zbuff==ZSTDb_buffered) ? windowSize + blockSize : 0; size_t const matchStateSize = ZSTD_sizeof_matchState(&params.cParams, /* forCCtx */ 1); size_t const maxNbLdmSeq = ZSTD_ldm_getMaxNbSeq(params.ldmParams, blockSize); void* ptr; /* used to partition workSpace */ /* Check if workSpace is large enough, alloc a new one if needed */ { size_t const entropySpace = HUF_WORKSPACE_SIZE; size_t const blockStateSpace = 2 * sizeof(ZSTD_compressedBlockState_t); size_t const bufferSpace = buffInSize + buffOutSize; size_t const ldmSpace = ZSTD_ldm_getTableSize(params.ldmParams); size_t const ldmSeqSpace = maxNbLdmSeq * sizeof(rawSeq); size_t const neededSpace = entropySpace + blockStateSpace + ldmSpace + ldmSeqSpace + matchStateSize + tokenSpace + bufferSpace; int const workSpaceTooSmall = zc->workSpaceSize < neededSpace; int const workSpaceTooLarge = zc->workSpaceSize > ZSTD_WORKSPACETOOLARGE_FACTOR * neededSpace; int const workSpaceWasteful = workSpaceTooLarge && (zc->workSpaceOversizedDuration > ZSTD_WORKSPACETOOLARGE_MAXDURATION); zc->workSpaceOversizedDuration = workSpaceTooLarge ? 
zc->workSpaceOversizedDuration+1 : 0; DEBUGLOG(4, "Need %zuKB workspace, including %zuKB for match state, and %zuKB for buffers", neededSpace>>10, matchStateSize>>10, bufferSpace>>10); DEBUGLOG(4, "windowSize: %zu - blockSize: %zu", windowSize, blockSize); if (workSpaceTooSmall || workSpaceWasteful) { DEBUGLOG(4, "Need to resize workSpaceSize from %zuKB to %zuKB", zc->workSpaceSize >> 10, neededSpace >> 10); /* static cctx : no resize, error out */ if (zc->staticSize) return ERROR(memory_allocation); zc->workSpaceSize = 0; ZSTD_free(zc->workSpace, zc->customMem); zc->workSpace = ZSTD_malloc(neededSpace, zc->customMem); if (zc->workSpace == NULL) return ERROR(memory_allocation); zc->workSpaceSize = neededSpace; zc->workSpaceOversizedDuration = 0; /* Statically sized space. * entropyWorkspace never moves, * though prev/next block swap places */ assert(((size_t)zc->workSpace & 3) == 0); /* ensure correct alignment */ assert(zc->workSpaceSize >= 2 * sizeof(ZSTD_compressedBlockState_t)); zc->blockState.prevCBlock = (ZSTD_compressedBlockState_t*)zc->workSpace; zc->blockState.nextCBlock = zc->blockState.prevCBlock + 1; ptr = zc->blockState.nextCBlock + 1; zc->entropyWorkspace = (U32*)ptr; } } /* init params */ zc->appliedParams = params; zc->blockState.matchState.cParams = params.cParams; zc->pledgedSrcSizePlusOne = pledgedSrcSize+1; zc->consumedSrcSize = 0; zc->producedCSize = 0; if (pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN) zc->appliedParams.fParams.contentSizeFlag = 0; DEBUGLOG(4, "pledged content size : %u ; flag : %u", (U32)pledgedSrcSize, zc->appliedParams.fParams.contentSizeFlag); zc->blockSize = blockSize; XXH64_reset(&zc->xxhState, 0); zc->stage = ZSTDcs_init; zc->dictID = 0; ZSTD_reset_compressedBlockState(zc->blockState.prevCBlock); ptr = zc->entropyWorkspace + HUF_WORKSPACE_SIZE_U32; /* ldm hash table */ /* initialize bucketOffsets table later for pointer alignment */ if (params.ldmParams.enableLdm) { size_t const ldmHSize = ((size_t)1) << params.ldmParams.hashLog; memset(ptr, 0, ldmHSize * sizeof(ldmEntry_t)); assert(((size_t)ptr & 3) == 0); /* ensure ptr is properly aligned */ zc->ldmState.hashTable = (ldmEntry_t*)ptr; ptr = zc->ldmState.hashTable + ldmHSize; zc->ldmSequences = (rawSeq*)ptr; ptr = zc->ldmSequences + maxNbLdmSeq; zc->maxNbLdmSequences = maxNbLdmSeq; memset(&zc->ldmState.window, 0, sizeof(zc->ldmState.window)); } assert(((size_t)ptr & 3) == 0); /* ensure ptr is properly aligned */ ptr = ZSTD_reset_matchState(&zc->blockState.matchState, ptr, &params.cParams, crp, /* forCCtx */ 1); /* sequences storage */ zc->seqStore.maxNbSeq = maxNbSeq; zc->seqStore.sequencesStart = (seqDef*)ptr; ptr = zc->seqStore.sequencesStart + maxNbSeq; zc->seqStore.llCode = (BYTE*) ptr; zc->seqStore.mlCode = zc->seqStore.llCode + maxNbSeq; zc->seqStore.ofCode = zc->seqStore.mlCode + maxNbSeq; zc->seqStore.litStart = zc->seqStore.ofCode + maxNbSeq; /* ZSTD_wildcopy() is used to copy into the literals buffer, * so we have to oversize the buffer by WILDCOPY_OVERLENGTH bytes. 
*/ zc->seqStore.maxNbLit = blockSize; ptr = zc->seqStore.litStart + blockSize + WILDCOPY_OVERLENGTH; /* ldm bucketOffsets table */ if (params.ldmParams.enableLdm) { size_t const ldmBucketSize = ((size_t)1) << (params.ldmParams.hashLog - params.ldmParams.bucketSizeLog); memset(ptr, 0, ldmBucketSize); zc->ldmState.bucketOffsets = (BYTE*)ptr; ptr = zc->ldmState.bucketOffsets + ldmBucketSize; ZSTD_window_clear(&zc->ldmState.window); } ZSTD_referenceExternalSequences(zc, NULL, 0); /* buffers */ zc->inBuffSize = buffInSize; zc->inBuff = (char*)ptr; zc->outBuffSize = buffOutSize; zc->outBuff = zc->inBuff + buffInSize; return 0; } } /* ZSTD_invalidateRepCodes() : * ensures next compression will not use repcodes from previous block. * Note : only works with regular variant; * do not use with extDict variant ! */ void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx) { int i; for (i=0; i<ZSTD_REP_NUM; i++) cctx->blockState.prevCBlock->rep[i] = 0; assert(!ZSTD_window_hasExtDict(cctx->blockState.matchState.window)); } /* These are the approximate sizes for each strategy past which copying the * dictionary tables into the working context is faster than using them * in-place. */ static const size_t attachDictSizeCutoffs[(unsigned)ZSTD_btultra+1] = { 8 KB, /* unused */ 8 KB, /* ZSTD_fast */ 16 KB, /* ZSTD_dfast */ 32 KB, /* ZSTD_greedy */ 32 KB, /* ZSTD_lazy */ 32 KB, /* ZSTD_lazy2 */ 32 KB, /* ZSTD_btlazy2 */ 32 KB, /* ZSTD_btopt */ 8 KB /* ZSTD_btultra */ }; static int ZSTD_shouldAttachDict(const ZSTD_CDict* cdict, ZSTD_CCtx_params params, U64 pledgedSrcSize) { size_t cutoff = attachDictSizeCutoffs[cdict->matchState.cParams.strategy]; return ( pledgedSrcSize <= cutoff || pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN || params.attachDictPref == ZSTD_dictForceAttach ) && params.attachDictPref != ZSTD_dictForceCopy && !params.forceWindow; /* dictMatchState isn't correctly * handled in _enforceMaxDist */ } static size_t ZSTD_resetCCtx_byAttachingCDict( ZSTD_CCtx* cctx, const ZSTD_CDict* cdict, ZSTD_CCtx_params params, U64 pledgedSrcSize, ZSTD_buffered_policy_e zbuff) { { const ZSTD_compressionParameters *cdict_cParams = &cdict->matchState.cParams; unsigned const windowLog = params.cParams.windowLog; assert(windowLog != 0); /* Resize working context table params for input only, since the dict * has its own tables. */ params.cParams = ZSTD_adjustCParams_internal(*cdict_cParams, pledgedSrcSize, 0); params.cParams.windowLog = windowLog; ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize, ZSTDcrp_continue, zbuff); assert(cctx->appliedParams.cParams.strategy == cdict_cParams->strategy); } { const U32 cdictEnd = (U32)( cdict->matchState.window.nextSrc - cdict->matchState.window.base); const U32 cdictLen = cdictEnd - cdict->matchState.window.dictLimit; if (cdictLen == 0) { /* don't even attach dictionaries with no contents */ DEBUGLOG(4, "skipping attaching empty dictionary"); } else { DEBUGLOG(4, "attaching dictionary into context"); cctx->blockState.matchState.dictMatchState = &cdict->matchState; /* prep working match state so dict matches never have negative indices * when they are translated to the working context's index space. 
*/ if (cctx->blockState.matchState.window.dictLimit < cdictEnd) { cctx->blockState.matchState.window.nextSrc = cctx->blockState.matchState.window.base + cdictEnd; ZSTD_window_clear(&cctx->blockState.matchState.window); } cctx->blockState.matchState.loadedDictEnd = cctx->blockState.matchState.window.dictLimit; } } cctx->dictID = cdict->dictID; /* copy block state */ memcpy(cctx->blockState.prevCBlock, &cdict->cBlockState, sizeof(cdict->cBlockState)); return 0; } static size_t ZSTD_resetCCtx_byCopyingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict, ZSTD_CCtx_params params, U64 pledgedSrcSize, ZSTD_buffered_policy_e zbuff) { const ZSTD_compressionParameters *cdict_cParams = &cdict->matchState.cParams; DEBUGLOG(4, "copying dictionary into context"); { unsigned const windowLog = params.cParams.windowLog; assert(windowLog != 0); /* Copy only compression parameters related to tables. */ params.cParams = *cdict_cParams; params.cParams.windowLog = windowLog; ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize, ZSTDcrp_noMemset, zbuff); assert(cctx->appliedParams.cParams.strategy == cdict_cParams->strategy); assert(cctx->appliedParams.cParams.hashLog == cdict_cParams->hashLog); assert(cctx->appliedParams.cParams.chainLog == cdict_cParams->chainLog); } /* copy tables */ { size_t const chainSize = (cdict_cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cdict_cParams->chainLog); size_t const hSize = (size_t)1 << cdict_cParams->hashLog; size_t const tableSpace = (chainSize + hSize) * sizeof(U32); assert((U32*)cctx->blockState.matchState.chainTable == (U32*)cctx->blockState.matchState.hashTable + hSize); /* chainTable must follow hashTable */ assert((U32*)cctx->blockState.matchState.hashTable3 == (U32*)cctx->blockState.matchState.chainTable + chainSize); assert((U32*)cdict->matchState.chainTable == (U32*)cdict->matchState.hashTable + hSize); /* chainTable must follow hashTable */ assert((U32*)cdict->matchState.hashTable3 == (U32*)cdict->matchState.chainTable + chainSize); memcpy(cctx->blockState.matchState.hashTable, cdict->matchState.hashTable, tableSpace); /* presumes all tables follow each other */ } /* Zero the hashTable3, since the cdict never fills it */ { size_t const h3Size = (size_t)1 << cctx->blockState.matchState.hashLog3; assert(cdict->matchState.hashLog3 == 0); memset(cctx->blockState.matchState.hashTable3, 0, h3Size * sizeof(U32)); } /* copy dictionary offsets */ { ZSTD_matchState_t const* srcMatchState = &cdict->matchState; ZSTD_matchState_t* dstMatchState = &cctx->blockState.matchState; dstMatchState->window = srcMatchState->window; dstMatchState->nextToUpdate = srcMatchState->nextToUpdate; dstMatchState->nextToUpdate3= srcMatchState->nextToUpdate3; dstMatchState->loadedDictEnd= srcMatchState->loadedDictEnd; } cctx->dictID = cdict->dictID; /* copy block state */ memcpy(cctx->blockState.prevCBlock, &cdict->cBlockState, sizeof(cdict->cBlockState)); return 0; } /* We have a choice between copying the dictionary context into the working * context, or referencing the dictionary context from the working context * in-place. We decide here which strategy to use. 
*/ static size_t ZSTD_resetCCtx_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict, ZSTD_CCtx_params params, U64 pledgedSrcSize, ZSTD_buffered_policy_e zbuff) { DEBUGLOG(4, "ZSTD_resetCCtx_usingCDict (pledgedSrcSize=%u)", (U32)pledgedSrcSize); if (ZSTD_shouldAttachDict(cdict, params, pledgedSrcSize)) { return ZSTD_resetCCtx_byAttachingCDict( cctx, cdict, params, pledgedSrcSize, zbuff); } else { return ZSTD_resetCCtx_byCopyingCDict( cctx, cdict, params, pledgedSrcSize, zbuff); } } /*! ZSTD_copyCCtx_internal() : * Duplicate an existing context `srcCCtx` into another one `dstCCtx`. * Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()). * The "context", in this case, refers to the hash and chain tables, * entropy tables, and dictionary references. * `windowLog` value is enforced if != 0, otherwise value is copied from srcCCtx. * @return : 0, or an error code */ static size_t ZSTD_copyCCtx_internal(ZSTD_CCtx* dstCCtx, const ZSTD_CCtx* srcCCtx, ZSTD_frameParameters fParams, U64 pledgedSrcSize, ZSTD_buffered_policy_e zbuff) { DEBUGLOG(5, "ZSTD_copyCCtx_internal"); if (srcCCtx->stage!=ZSTDcs_init) return ERROR(stage_wrong); memcpy(&dstCCtx->customMem, &srcCCtx->customMem, sizeof(ZSTD_customMem)); { ZSTD_CCtx_params params = dstCCtx->requestedParams; /* Copy only compression parameters related to tables. */ params.cParams = srcCCtx->appliedParams.cParams; params.fParams = fParams; ZSTD_resetCCtx_internal(dstCCtx, params, pledgedSrcSize, ZSTDcrp_noMemset, zbuff); assert(dstCCtx->appliedParams.cParams.windowLog == srcCCtx->appliedParams.cParams.windowLog); assert(dstCCtx->appliedParams.cParams.strategy == srcCCtx->appliedParams.cParams.strategy); assert(dstCCtx->appliedParams.cParams.hashLog == srcCCtx->appliedParams.cParams.hashLog); assert(dstCCtx->appliedParams.cParams.chainLog == srcCCtx->appliedParams.cParams.chainLog); assert(dstCCtx->blockState.matchState.hashLog3 == srcCCtx->blockState.matchState.hashLog3); } /* copy tables */ { size_t const chainSize = (srcCCtx->appliedParams.cParams.strategy == ZSTD_fast) ? 0 : ((size_t)1 << srcCCtx->appliedParams.cParams.chainLog); size_t const hSize = (size_t)1 << srcCCtx->appliedParams.cParams.hashLog; size_t const h3Size = (size_t)1 << srcCCtx->blockState.matchState.hashLog3; size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32); assert((U32*)dstCCtx->blockState.matchState.chainTable == (U32*)dstCCtx->blockState.matchState.hashTable + hSize); /* chainTable must follow hashTable */ assert((U32*)dstCCtx->blockState.matchState.hashTable3 == (U32*)dstCCtx->blockState.matchState.chainTable + chainSize); memcpy(dstCCtx->blockState.matchState.hashTable, srcCCtx->blockState.matchState.hashTable, tableSpace); /* presumes all tables follow each other */ } /* copy dictionary offsets */ { const ZSTD_matchState_t* srcMatchState = &srcCCtx->blockState.matchState; ZSTD_matchState_t* dstMatchState = &dstCCtx->blockState.matchState; dstMatchState->window = srcMatchState->window; dstMatchState->nextToUpdate = srcMatchState->nextToUpdate; dstMatchState->nextToUpdate3= srcMatchState->nextToUpdate3; dstMatchState->loadedDictEnd= srcMatchState->loadedDictEnd; } dstCCtx->dictID = srcCCtx->dictID; /* copy block state */ memcpy(dstCCtx->blockState.prevCBlock, srcCCtx->blockState.prevCBlock, sizeof(*srcCCtx->blockState.prevCBlock)); return 0; } /*! ZSTD_copyCCtx() : * Duplicate an existing context `srcCCtx` into another one `dstCCtx`. * Only works during stage ZSTDcs_init (i.e. 
 *  after creation, but before first call to ZSTD_compressContinue()).
 *  pledgedSrcSize==0 means "unknown".
 *  @return : 0, or an error code */
size_t ZSTD_copyCCtx(ZSTD_CCtx* dstCCtx, const ZSTD_CCtx* srcCCtx, unsigned long long pledgedSrcSize)
{
    ZSTD_frameParameters fParams = { 1 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
    ZSTD_buffered_policy_e const zbuff = (ZSTD_buffered_policy_e)(srcCCtx->inBuffSize>0);
    ZSTD_STATIC_ASSERT((U32)ZSTDb_buffered==1);
    if (pledgedSrcSize==0) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN;
    fParams.contentSizeFlag = (pledgedSrcSize != ZSTD_CONTENTSIZE_UNKNOWN);

    return ZSTD_copyCCtx_internal(dstCCtx, srcCCtx,
                                  fParams, pledgedSrcSize,
                                  zbuff);
}


#define ZSTD_ROWSIZE 16
/*! ZSTD_reduceTable() :
 *  reduce table indexes by `reducerValue`, or squash to zero.
 *  PreserveMark preserves "unsorted mark" for btlazy2 strategy.
 *  It must be set to a clear 0/1 value, to remove branch during inlining.
 *  Presume table size is a multiple of ZSTD_ROWSIZE
 *  to help auto-vectorization */
FORCE_INLINE_TEMPLATE void
ZSTD_reduceTable_internal (U32* const table, U32 const size, U32 const reducerValue, int const preserveMark)
{
    int const nbRows = (int)size / ZSTD_ROWSIZE;
    int cellNb = 0;
    int rowNb;
    assert((size & (ZSTD_ROWSIZE-1)) == 0);  /* multiple of ZSTD_ROWSIZE */
    assert(size < (1U<<31));   /* can be cast to int */
    for (rowNb=0 ; rowNb < nbRows ; rowNb++) {
        int column;
        for (column=0; column<ZSTD_ROWSIZE; column++) {
            if (preserveMark) {
                U32 const adder = (table[cellNb] == ZSTD_DUBT_UNSORTED_MARK) ? reducerValue : 0;
                table[cellNb] += adder;
            }
            if (table[cellNb] < reducerValue) table[cellNb] = 0;
            else table[cellNb] -= reducerValue;
            cellNb++;
    }   }
}

static void ZSTD_reduceTable(U32* const table, U32 const size, U32 const reducerValue)
{
    ZSTD_reduceTable_internal(table, size, reducerValue, 0);
}

static void ZSTD_reduceTable_btlazy2(U32* const table, U32 const size, U32 const reducerValue)
{
    ZSTD_reduceTable_internal(table, size, reducerValue, 1);
}

/*!
ZSTD_reduceIndex() : * rescale all indexes to avoid future overflow (indexes are U32) */ static void ZSTD_reduceIndex (ZSTD_CCtx* zc, const U32 reducerValue) { ZSTD_matchState_t* const ms = &zc->blockState.matchState; { U32 const hSize = (U32)1 << zc->appliedParams.cParams.hashLog; ZSTD_reduceTable(ms->hashTable, hSize, reducerValue); } if (zc->appliedParams.cParams.strategy != ZSTD_fast) { U32 const chainSize = (U32)1 << zc->appliedParams.cParams.chainLog; if (zc->appliedParams.cParams.strategy == ZSTD_btlazy2) ZSTD_reduceTable_btlazy2(ms->chainTable, chainSize, reducerValue); else ZSTD_reduceTable(ms->chainTable, chainSize, reducerValue); } if (ms->hashLog3) { U32 const h3Size = (U32)1 << ms->hashLog3; ZSTD_reduceTable(ms->hashTable3, h3Size, reducerValue); } } /*-******************************************************* * Block entropic compression *********************************************************/ /* See doc/zstd_compression_format.md for detailed format description */ static size_t ZSTD_noCompressBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize, U32 lastBlock) { U32 const cBlockHeader24 = lastBlock + (((U32)bt_raw)<<1) + (U32)(srcSize << 3); if (srcSize + ZSTD_blockHeaderSize > dstCapacity) return ERROR(dstSize_tooSmall); MEM_writeLE24(dst, cBlockHeader24); memcpy((BYTE*)dst + ZSTD_blockHeaderSize, src, srcSize); return ZSTD_blockHeaderSize + srcSize; } static size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src, size_t srcSize) { BYTE* const ostart = (BYTE* const)dst; U32 const flSize = 1 + (srcSize>31) + (srcSize>4095); if (srcSize + flSize > dstCapacity) return ERROR(dstSize_tooSmall); switch(flSize) { case 1: /* 2 - 1 - 5 */ ostart[0] = (BYTE)((U32)set_basic + (srcSize<<3)); break; case 2: /* 2 - 2 - 12 */ MEM_writeLE16(ostart, (U16)((U32)set_basic + (1<<2) + (srcSize<<4))); break; case 3: /* 2 - 2 - 20 */ MEM_writeLE32(ostart, (U32)((U32)set_basic + (3<<2) + (srcSize<<4))); break; default: /* not necessary : flSize is {1,2,3} */ assert(0); } memcpy(ostart + flSize, src, srcSize); return srcSize + flSize; } static size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize) { BYTE* const ostart = (BYTE* const)dst; U32 const flSize = 1 + (srcSize>31) + (srcSize>4095); (void)dstCapacity; /* dstCapacity already guaranteed to be >=4, hence large enough */ switch(flSize) { case 1: /* 2 - 1 - 5 */ ostart[0] = (BYTE)((U32)set_rle + (srcSize<<3)); break; case 2: /* 2 - 2 - 12 */ MEM_writeLE16(ostart, (U16)((U32)set_rle + (1<<2) + (srcSize<<4))); break; case 3: /* 2 - 2 - 20 */ MEM_writeLE32(ostart, (U32)((U32)set_rle + (3<<2) + (srcSize<<4))); break; default: /* not necessary : flSize is {1,2,3} */ assert(0); } ostart[flSize] = *(const BYTE*)src; return flSize+1; } /* ZSTD_minGain() : * minimum compression required * to generate a compress block or a compressed literals section. * note : use same formula for both situations */ static size_t ZSTD_minGain(size_t srcSize, ZSTD_strategy strat) { U32 const minlog = (strat==ZSTD_btultra) ? 
7 : 6; return (srcSize >> minlog) + 2; } static size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf, ZSTD_hufCTables_t* nextHuf, ZSTD_strategy strategy, int disableLiteralCompression, void* dst, size_t dstCapacity, const void* src, size_t srcSize, void* workspace, size_t wkspSize, const int bmi2) { size_t const minGain = ZSTD_minGain(srcSize, strategy); size_t const lhSize = 3 + (srcSize >= 1 KB) + (srcSize >= 16 KB); BYTE* const ostart = (BYTE*)dst; U32 singleStream = srcSize < 256; symbolEncodingType_e hType = set_compressed; size_t cLitSize; DEBUGLOG(5,"ZSTD_compressLiterals (disableLiteralCompression=%i)", disableLiteralCompression); /* Prepare nextEntropy assuming reusing the existing table */ memcpy(nextHuf, prevHuf, sizeof(*prevHuf)); if (disableLiteralCompression) return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize); /* small ? don't even attempt compression (speed opt) */ # define COMPRESS_LITERALS_SIZE_MIN 63 { size_t const minLitSize = (prevHuf->repeatMode == HUF_repeat_valid) ? 6 : COMPRESS_LITERALS_SIZE_MIN; if (srcSize <= minLitSize) return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize); } if (dstCapacity < lhSize+1) return ERROR(dstSize_tooSmall); /* not enough space for compression */ { HUF_repeat repeat = prevHuf->repeatMode; int const preferRepeat = strategy < ZSTD_lazy ? srcSize <= 1024 : 0; if (repeat == HUF_repeat_valid && lhSize == 3) singleStream = 1; cLitSize = singleStream ? HUF_compress1X_repeat(ostart+lhSize, dstCapacity-lhSize, src, srcSize, 255, 11, workspace, wkspSize, (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2) : HUF_compress4X_repeat(ostart+lhSize, dstCapacity-lhSize, src, srcSize, 255, 11, workspace, wkspSize, (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2); if (repeat != HUF_repeat_none) { /* reused the existing table */ hType = set_repeat; } } if ((cLitSize==0) | (cLitSize >= srcSize - minGain) | ERR_isError(cLitSize)) { memcpy(nextHuf, prevHuf, sizeof(*prevHuf)); return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize); } if (cLitSize==1) { memcpy(nextHuf, prevHuf, sizeof(*prevHuf)); return ZSTD_compressRleLiteralsBlock(dst, dstCapacity, src, srcSize); } if (hType == set_compressed) { /* using a newly constructed table */ nextHuf->repeatMode = HUF_repeat_check; } /* Build header */ switch(lhSize) { case 3: /* 2 - 2 - 10 - 10 */ { U32 const lhc = hType + ((!singleStream) << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<14); MEM_writeLE24(ostart, lhc); break; } case 4: /* 2 - 2 - 14 - 14 */ { U32 const lhc = hType + (2 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<18); MEM_writeLE32(ostart, lhc); break; } case 5: /* 2 - 2 - 18 - 18 */ { U32 const lhc = hType + (3 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<22); MEM_writeLE32(ostart, lhc); ostart[4] = (BYTE)(cLitSize >> 10); break; } default: /* not possible : lhSize is {3,4,5} */ assert(0); } return lhSize+cLitSize; } void ZSTD_seqToCodes(const seqStore_t* seqStorePtr) { const seqDef* const sequences = seqStorePtr->sequencesStart; BYTE* const llCodeTable = seqStorePtr->llCode; BYTE* const ofCodeTable = seqStorePtr->ofCode; BYTE* const mlCodeTable = seqStorePtr->mlCode; U32 const nbSeq = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart); U32 u; assert(nbSeq <= seqStorePtr->maxNbSeq); for (u=0; u<nbSeq; u++) { U32 const llv = sequences[u].litLength; U32 const mlv = sequences[u].matchLength; llCodeTable[u] = (BYTE)ZSTD_LLcode(llv); ofCodeTable[u] = (BYTE)ZSTD_highbit32(sequences[u].offset); mlCodeTable[u] = (BYTE)ZSTD_MLcode(mlv); } if 
(seqStorePtr->longLengthID==1) llCodeTable[seqStorePtr->longLengthPos] = MaxLL; if (seqStorePtr->longLengthID==2) mlCodeTable[seqStorePtr->longLengthPos] = MaxML; } /** * -log2(x / 256) lookup table for x in [0, 256). * If x == 0: Return 0 * Else: Return floor(-log2(x / 256) * 256) */ static unsigned const kInverseProbabiltyLog256[256] = { 0, 2048, 1792, 1642, 1536, 1453, 1386, 1329, 1280, 1236, 1197, 1162, 1130, 1100, 1073, 1047, 1024, 1001, 980, 960, 941, 923, 906, 889, 874, 859, 844, 830, 817, 804, 791, 779, 768, 756, 745, 734, 724, 714, 704, 694, 685, 676, 667, 658, 650, 642, 633, 626, 618, 610, 603, 595, 588, 581, 574, 567, 561, 554, 548, 542, 535, 529, 523, 517, 512, 506, 500, 495, 489, 484, 478, 473, 468, 463, 458, 453, 448, 443, 438, 434, 429, 424, 420, 415, 411, 407, 402, 398, 394, 390, 386, 382, 377, 373, 370, 366, 362, 358, 354, 350, 347, 343, 339, 336, 332, 329, 325, 322, 318, 315, 311, 308, 305, 302, 298, 295, 292, 289, 286, 282, 279, 276, 273, 270, 267, 264, 261, 258, 256, 253, 250, 247, 244, 241, 239, 236, 233, 230, 228, 225, 222, 220, 217, 215, 212, 209, 207, 204, 202, 199, 197, 194, 192, 190, 187, 185, 182, 180, 178, 175, 173, 171, 168, 166, 164, 162, 159, 157, 155, 153, 151, 149, 146, 144, 142, 140, 138, 136, 134, 132, 130, 128, 126, 123, 121, 119, 117, 115, 114, 112, 110, 108, 106, 104, 102, 100, 98, 96, 94, 93, 91, 89, 87, 85, 83, 82, 80, 78, 76, 74, 73, 71, 69, 67, 66, 64, 62, 61, 59, 57, 55, 54, 52, 50, 49, 47, 46, 44, 42, 41, 39, 37, 36, 34, 33, 31, 30, 28, 26, 25, 23, 22, 20, 19, 17, 16, 14, 13, 11, 10, 8, 7, 5, 4, 2, 1, }; /** * Returns the cost in bits of encoding the distribution described by count * using the entropy bound. */ static size_t ZSTD_entropyCost(unsigned const* count, unsigned const max, size_t const total) { unsigned cost = 0; unsigned s; for (s = 0; s <= max; ++s) { unsigned norm = (unsigned)((256 * count[s]) / total); if (count[s] != 0 && norm == 0) norm = 1; assert(count[s] < total); cost += count[s] * kInverseProbabiltyLog256[norm]; } return cost >> 8; } /** * Returns the cost in bits of encoding the distribution in count using the * table described by norm. The max symbol support by norm is assumed >= max. * norm must be valid for every symbol with non-zero probability in count. */ static size_t ZSTD_crossEntropyCost(short const* norm, unsigned accuracyLog, unsigned const* count, unsigned const max) { unsigned const shift = 8 - accuracyLog; size_t cost = 0; unsigned s; assert(accuracyLog <= 8); for (s = 0; s <= max; ++s) { unsigned const normAcc = norm[s] != -1 ? norm[s] : 1; unsigned const norm256 = normAcc << shift; assert(norm256 > 0); assert(norm256 < 256); cost += count[s] * kInverseProbabiltyLog256[norm256]; } return cost >> 8; } static unsigned ZSTD_getFSEMaxSymbolValue(FSE_CTable const* ctable) { void const* ptr = ctable; U16 const* u16ptr = (U16 const*)ptr; U32 const maxSymbolValue = MEM_read16(u16ptr + 1); return maxSymbolValue; } /** * Returns the cost in bits of encoding the distribution in count using ctable. * Returns an error if ctable cannot represent all the symbols in count. 
*/ static size_t ZSTD_fseBitCost( FSE_CTable const* ctable, unsigned const* count, unsigned const max) { unsigned const kAccuracyLog = 8; size_t cost = 0; unsigned s; FSE_CState_t cstate; FSE_initCState(&cstate, ctable); if (ZSTD_getFSEMaxSymbolValue(ctable) < max) { DEBUGLOG(5, "Repeat FSE_CTable has maxSymbolValue %u < %u", ZSTD_getFSEMaxSymbolValue(ctable), max); return ERROR(GENERIC); } for (s = 0; s <= max; ++s) { unsigned const tableLog = cstate.stateLog; unsigned const badCost = (tableLog + 1) << kAccuracyLog; unsigned const bitCost = FSE_bitCost(cstate.symbolTT, tableLog, s, kAccuracyLog); if (count[s] == 0) continue; if (bitCost >= badCost) { DEBUGLOG(5, "Repeat FSE_CTable has Prob[%u] == 0", s); return ERROR(GENERIC); } cost += count[s] * bitCost; } return cost >> kAccuracyLog; } /** * Returns the cost in bytes of encoding the normalized count header. * Returns an error if any of the helper functions return an error. */ static size_t ZSTD_NCountCost(unsigned const* count, unsigned const max, size_t const nbSeq, unsigned const FSELog) { BYTE wksp[FSE_NCOUNTBOUND]; S16 norm[MaxSeq + 1]; const U32 tableLog = FSE_optimalTableLog(FSELog, nbSeq, max); CHECK_F(FSE_normalizeCount(norm, tableLog, count, nbSeq, max)); return FSE_writeNCount(wksp, sizeof(wksp), norm, max, tableLog); } typedef enum { ZSTD_defaultDisallowed = 0, ZSTD_defaultAllowed = 1 } ZSTD_defaultPolicy_e; MEM_STATIC symbolEncodingType_e ZSTD_selectEncodingType( FSE_repeat* repeatMode, unsigned const* count, unsigned const max, size_t const mostFrequent, size_t nbSeq, unsigned const FSELog, FSE_CTable const* prevCTable, short const* defaultNorm, U32 defaultNormLog, ZSTD_defaultPolicy_e const isDefaultAllowed, ZSTD_strategy const strategy) { ZSTD_STATIC_ASSERT(ZSTD_defaultDisallowed == 0 && ZSTD_defaultAllowed != 0); if (mostFrequent == nbSeq) { *repeatMode = FSE_repeat_none; if (isDefaultAllowed && nbSeq <= 2) { /* Prefer set_basic over set_rle when there are 2 or less symbols, * since RLE uses 1 byte, but set_basic uses 5-6 bits per symbol. * If basic encoding isn't possible, always choose RLE. */ DEBUGLOG(5, "Selected set_basic"); return set_basic; } DEBUGLOG(5, "Selected set_rle"); return set_rle; } if (strategy < ZSTD_lazy) { if (isDefaultAllowed) { size_t const staticFse_nbSeq_max = 1000; size_t const mult = 10 - strategy; size_t const baseLog = 3; size_t const dynamicFse_nbSeq_min = (((size_t)1 << defaultNormLog) * mult) >> baseLog; /* 28-36 for offset, 56-72 for lengths */ assert(defaultNormLog >= 5 && defaultNormLog <= 6); /* xx_DEFAULTNORMLOG */ assert(mult <= 9 && mult >= 7); if ( (*repeatMode == FSE_repeat_valid) && (nbSeq < staticFse_nbSeq_max) ) { DEBUGLOG(5, "Selected set_repeat"); return set_repeat; } if ( (nbSeq < dynamicFse_nbSeq_min) || (mostFrequent < (nbSeq >> (defaultNormLog-1))) ) { DEBUGLOG(5, "Selected set_basic"); /* The format allows default tables to be repeated, but it isn't useful. * When using simple heuristics to select encoding type, we don't want * to confuse these tables with dictionaries. When running more careful * analysis, we don't need to waste time checking both repeating tables * and default tables. */ *repeatMode = FSE_repeat_none; return set_basic; } } } else { size_t const basicCost = isDefaultAllowed ? ZSTD_crossEntropyCost(defaultNorm, defaultNormLog, count, max) : ERROR(GENERIC); size_t const repeatCost = *repeatMode != FSE_repeat_none ? 
ZSTD_fseBitCost(prevCTable, count, max) : ERROR(GENERIC); size_t const NCountCost = ZSTD_NCountCost(count, max, nbSeq, FSELog); size_t const compressedCost = (NCountCost << 3) + ZSTD_entropyCost(count, max, nbSeq); if (isDefaultAllowed) { assert(!ZSTD_isError(basicCost)); assert(!(*repeatMode == FSE_repeat_valid && ZSTD_isError(repeatCost))); } assert(!ZSTD_isError(NCountCost)); assert(compressedCost < ERROR(maxCode)); DEBUGLOG(5, "Estimated bit costs: basic=%u\trepeat=%u\tcompressed=%u", (U32)basicCost, (U32)repeatCost, (U32)compressedCost); if (basicCost <= repeatCost && basicCost <= compressedCost) { DEBUGLOG(5, "Selected set_basic"); assert(isDefaultAllowed); *repeatMode = FSE_repeat_none; return set_basic; } if (repeatCost <= compressedCost) { DEBUGLOG(5, "Selected set_repeat"); assert(!ZSTD_isError(repeatCost)); return set_repeat; } assert(compressedCost < basicCost && compressedCost < repeatCost); } DEBUGLOG(5, "Selected set_compressed"); *repeatMode = FSE_repeat_check; return set_compressed; } MEM_STATIC size_t ZSTD_buildCTable(void* dst, size_t dstCapacity, FSE_CTable* nextCTable, U32 FSELog, symbolEncodingType_e type, U32* count, U32 max, const BYTE* codeTable, size_t nbSeq, const S16* defaultNorm, U32 defaultNormLog, U32 defaultMax, const FSE_CTable* prevCTable, size_t prevCTableSize, void* workspace, size_t workspaceSize) { BYTE* op = (BYTE*)dst; const BYTE* const oend = op + dstCapacity; switch (type) { case set_rle: *op = codeTable[0]; CHECK_F(FSE_buildCTable_rle(nextCTable, (BYTE)max)); return 1; case set_repeat: memcpy(nextCTable, prevCTable, prevCTableSize); return 0; case set_basic: CHECK_F(FSE_buildCTable_wksp(nextCTable, defaultNorm, defaultMax, defaultNormLog, workspace, workspaceSize)); /* note : could be pre-calculated */ return 0; case set_compressed: { S16 norm[MaxSeq + 1]; size_t nbSeq_1 = nbSeq; const U32 tableLog = FSE_optimalTableLog(FSELog, nbSeq, max); if (count[codeTable[nbSeq-1]] > 1) { count[codeTable[nbSeq-1]]--; nbSeq_1--; } assert(nbSeq_1 > 1); CHECK_F(FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max)); { size_t const NCountSize = FSE_writeNCount(op, oend - op, norm, max, tableLog); /* overflow protected */ if (FSE_isError(NCountSize)) return NCountSize; CHECK_F(FSE_buildCTable_wksp(nextCTable, norm, max, tableLog, workspace, workspaceSize)); return NCountSize; } } default: return assert(0), ERROR(GENERIC); } } FORCE_INLINE_TEMPLATE size_t ZSTD_encodeSequences_body( void* dst, size_t dstCapacity, FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable, FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable, FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable, seqDef const* sequences, size_t nbSeq, int longOffsets) { BIT_CStream_t blockStream; FSE_CState_t stateMatchLength; FSE_CState_t stateOffsetBits; FSE_CState_t stateLitLength; CHECK_E(BIT_initCStream(&blockStream, dst, dstCapacity), dstSize_tooSmall); /* not enough space remaining */ /* first symbols */ FSE_initCState2(&stateMatchLength, CTable_MatchLength, mlCodeTable[nbSeq-1]); FSE_initCState2(&stateOffsetBits, CTable_OffsetBits, ofCodeTable[nbSeq-1]); FSE_initCState2(&stateLitLength, CTable_LitLength, llCodeTable[nbSeq-1]); BIT_addBits(&blockStream, sequences[nbSeq-1].litLength, LL_bits[llCodeTable[nbSeq-1]]); if (MEM_32bits()) BIT_flushBits(&blockStream); BIT_addBits(&blockStream, sequences[nbSeq-1].matchLength, ML_bits[mlCodeTable[nbSeq-1]]); if (MEM_32bits()) BIT_flushBits(&blockStream); if (longOffsets) { U32 const ofBits = ofCodeTable[nbSeq-1]; int const extraBits = 
ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1); if (extraBits) { BIT_addBits(&blockStream, sequences[nbSeq-1].offset, extraBits); BIT_flushBits(&blockStream); } BIT_addBits(&blockStream, sequences[nbSeq-1].offset >> extraBits, ofBits - extraBits); } else { BIT_addBits(&blockStream, sequences[nbSeq-1].offset, ofCodeTable[nbSeq-1]); } BIT_flushBits(&blockStream); { size_t n; for (n=nbSeq-2 ; n<nbSeq ; n--) { /* intentional underflow */ BYTE const llCode = llCodeTable[n]; BYTE const ofCode = ofCodeTable[n]; BYTE const mlCode = mlCodeTable[n]; U32 const llBits = LL_bits[llCode]; U32 const ofBits = ofCode; U32 const mlBits = ML_bits[mlCode]; DEBUGLOG(6, "encoding: litlen:%2u - matchlen:%2u - offCode:%7u", sequences[n].litLength, sequences[n].matchLength + MINMATCH, sequences[n].offset); /* 32b*/ /* 64b*/ /* (7)*/ /* (7)*/ FSE_encodeSymbol(&blockStream, &stateOffsetBits, ofCode); /* 15 */ /* 15 */ FSE_encodeSymbol(&blockStream, &stateMatchLength, mlCode); /* 24 */ /* 24 */ if (MEM_32bits()) BIT_flushBits(&blockStream); /* (7)*/ FSE_encodeSymbol(&blockStream, &stateLitLength, llCode); /* 16 */ /* 33 */ if (MEM_32bits() || (ofBits+mlBits+llBits >= 64-7-(LLFSELog+MLFSELog+OffFSELog))) BIT_flushBits(&blockStream); /* (7)*/ BIT_addBits(&blockStream, sequences[n].litLength, llBits); if (MEM_32bits() && ((llBits+mlBits)>24)) BIT_flushBits(&blockStream); BIT_addBits(&blockStream, sequences[n].matchLength, mlBits); if (MEM_32bits() || (ofBits+mlBits+llBits > 56)) BIT_flushBits(&blockStream); if (longOffsets) { int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1); if (extraBits) { BIT_addBits(&blockStream, sequences[n].offset, extraBits); BIT_flushBits(&blockStream); /* (7)*/ } BIT_addBits(&blockStream, sequences[n].offset >> extraBits, ofBits - extraBits); /* 31 */ } else { BIT_addBits(&blockStream, sequences[n].offset, ofBits); /* 31 */ } BIT_flushBits(&blockStream); /* (7)*/ } } DEBUGLOG(6, "ZSTD_encodeSequences: flushing ML state with %u bits", stateMatchLength.stateLog); FSE_flushCState(&blockStream, &stateMatchLength); DEBUGLOG(6, "ZSTD_encodeSequences: flushing Off state with %u bits", stateOffsetBits.stateLog); FSE_flushCState(&blockStream, &stateOffsetBits); DEBUGLOG(6, "ZSTD_encodeSequences: flushing LL state with %u bits", stateLitLength.stateLog); FSE_flushCState(&blockStream, &stateLitLength); { size_t const streamSize = BIT_closeCStream(&blockStream); if (streamSize==0) return ERROR(dstSize_tooSmall); /* not enough space */ return streamSize; } } static size_t ZSTD_encodeSequences_default( void* dst, size_t dstCapacity, FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable, FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable, FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable, seqDef const* sequences, size_t nbSeq, int longOffsets) { return ZSTD_encodeSequences_body(dst, dstCapacity, CTable_MatchLength, mlCodeTable, CTable_OffsetBits, ofCodeTable, CTable_LitLength, llCodeTable, sequences, nbSeq, longOffsets); } #if DYNAMIC_BMI2 static TARGET_ATTRIBUTE("bmi2") size_t ZSTD_encodeSequences_bmi2( void* dst, size_t dstCapacity, FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable, FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable, FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable, seqDef const* sequences, size_t nbSeq, int longOffsets) { return ZSTD_encodeSequences_body(dst, dstCapacity, CTable_MatchLength, mlCodeTable, CTable_OffsetBits, ofCodeTable, CTable_LitLength, llCodeTable, sequences, nbSeq, longOffsets); } #endif 
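/*
 * Added illustration (not part of the original source) : ZSTD_encodeSequences(),
 * defined just below, relies on the DYNAMIC_BMI2 dispatch pattern used above :
 * the hot encoding loop is written once as a FORCE_INLINE_TEMPLATE body,
 * instantiated twice (a default build and a TARGET_ATTRIBUTE("bmi2") build),
 * and a thin wrapper picks the variant at runtime from the cached `bmi2` flag,
 * so a single binary still benefits from BMI2 codegen when the CPU supports it.
 * The sketch below applies the same pattern to a hypothetical `exampleKernel`;
 * every `exampleKernel*` name is a placeholder, and the block is guarded by
 * `#if 0` so it is never compiled.
 */
#if 0
FORCE_INLINE_TEMPLATE size_t exampleKernel_body(void* dst, size_t dstCapacity)
{
    /* hot loop would live here; trivial placeholder */
    (void)dst;
    return dstCapacity;
}

static size_t exampleKernel_default(void* dst, size_t dstCapacity)
{
    return exampleKernel_body(dst, dstCapacity);
}

#if DYNAMIC_BMI2
static TARGET_ATTRIBUTE("bmi2") size_t exampleKernel_bmi2(void* dst, size_t dstCapacity)
{
    return exampleKernel_body(dst, dstCapacity);
}
#endif

static size_t exampleKernel(void* dst, size_t dstCapacity, int bmi2)
{
#if DYNAMIC_BMI2
    if (bmi2) return exampleKernel_bmi2(dst, dstCapacity);
#endif
    (void)bmi2;
    return exampleKernel_default(dst, dstCapacity);
}
#endif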
static size_t ZSTD_encodeSequences( void* dst, size_t dstCapacity, FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable, FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable, FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable, seqDef const* sequences, size_t nbSeq, int longOffsets, int bmi2) { #if DYNAMIC_BMI2 if (bmi2) { return ZSTD_encodeSequences_bmi2(dst, dstCapacity, CTable_MatchLength, mlCodeTable, CTable_OffsetBits, ofCodeTable, CTable_LitLength, llCodeTable, sequences, nbSeq, longOffsets); } #endif (void)bmi2; return ZSTD_encodeSequences_default(dst, dstCapacity, CTable_MatchLength, mlCodeTable, CTable_OffsetBits, ofCodeTable, CTable_LitLength, llCodeTable, sequences, nbSeq, longOffsets); } MEM_STATIC size_t ZSTD_compressSequences_internal(seqStore_t* seqStorePtr, ZSTD_entropyCTables_t const* prevEntropy, ZSTD_entropyCTables_t* nextEntropy, ZSTD_CCtx_params const* cctxParams, void* dst, size_t dstCapacity, void* workspace, size_t wkspSize, const int bmi2) { const int longOffsets = cctxParams->cParams.windowLog > STREAM_ACCUMULATOR_MIN; ZSTD_strategy const strategy = cctxParams->cParams.strategy; U32 count[MaxSeq+1]; FSE_CTable* CTable_LitLength = nextEntropy->fse.litlengthCTable; FSE_CTable* CTable_OffsetBits = nextEntropy->fse.offcodeCTable; FSE_CTable* CTable_MatchLength = nextEntropy->fse.matchlengthCTable; U32 LLtype, Offtype, MLtype; /* compressed, raw or rle */ const seqDef* const sequences = seqStorePtr->sequencesStart; const BYTE* const ofCodeTable = seqStorePtr->ofCode; const BYTE* const llCodeTable = seqStorePtr->llCode; const BYTE* const mlCodeTable = seqStorePtr->mlCode; BYTE* const ostart = (BYTE*)dst; BYTE* const oend = ostart + dstCapacity; BYTE* op = ostart; size_t const nbSeq = seqStorePtr->sequences - seqStorePtr->sequencesStart; BYTE* seqHead; BYTE* lastNCount = NULL; ZSTD_STATIC_ASSERT(HUF_WORKSPACE_SIZE >= (1<<MAX(MLFSELog,LLFSELog))); /* Compress literals */ { const BYTE* const literals = seqStorePtr->litStart; size_t const litSize = seqStorePtr->lit - literals; int const disableLiteralCompression = (cctxParams->cParams.strategy == ZSTD_fast) && (cctxParams->cParams.targetLength > 0); size_t const cSize = ZSTD_compressLiterals( &prevEntropy->huf, &nextEntropy->huf, cctxParams->cParams.strategy, disableLiteralCompression, op, dstCapacity, literals, litSize, workspace, wkspSize, bmi2); if (ZSTD_isError(cSize)) return cSize; assert(cSize <= dstCapacity); op += cSize; } /* Sequences Header */ if ((oend-op) < 3 /*max nbSeq Size*/ + 1 /*seqHead*/) return ERROR(dstSize_tooSmall); if (nbSeq < 0x7F) *op++ = (BYTE)nbSeq; else if (nbSeq < LONGNBSEQ) op[0] = (BYTE)((nbSeq>>8) + 0x80), op[1] = (BYTE)nbSeq, op+=2; else op[0]=0xFF, MEM_writeLE16(op+1, (U16)(nbSeq - LONGNBSEQ)), op+=3; if (nbSeq==0) { /* Copy the old tables over as if we repeated them */ memcpy(&nextEntropy->fse, &prevEntropy->fse, sizeof(prevEntropy->fse)); return op - ostart; } /* seqHead : flags for FSE encoding type */ seqHead = op++; /* convert length/distances into codes */ ZSTD_seqToCodes(seqStorePtr); /* build CTable for Literal Lengths */ { U32 max = MaxLL; size_t const mostFrequent = HIST_countFast_wksp(count, &max, llCodeTable, nbSeq, workspace, wkspSize); /* can't fail */ DEBUGLOG(5, "Building LL table"); nextEntropy->fse.litlength_repeatMode = prevEntropy->fse.litlength_repeatMode; LLtype = ZSTD_selectEncodingType(&nextEntropy->fse.litlength_repeatMode, count, max, mostFrequent, nbSeq, LLFSELog, prevEntropy->fse.litlengthCTable, LL_defaultNorm, LL_defaultNormLog, 
ZSTD_defaultAllowed, strategy); assert(set_basic < set_compressed && set_rle < set_compressed); assert(!(LLtype < set_compressed && nextEntropy->fse.litlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */ { size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_LitLength, LLFSELog, (symbolEncodingType_e)LLtype, count, max, llCodeTable, nbSeq, LL_defaultNorm, LL_defaultNormLog, MaxLL, prevEntropy->fse.litlengthCTable, sizeof(prevEntropy->fse.litlengthCTable), workspace, wkspSize); if (ZSTD_isError(countSize)) return countSize; if (LLtype == set_compressed) lastNCount = op; op += countSize; } } /* build CTable for Offsets */ { U32 max = MaxOff; size_t const mostFrequent = HIST_countFast_wksp(count, &max, ofCodeTable, nbSeq, workspace, wkspSize); /* can't fail */ /* We can only use the basic table if max <= DefaultMaxOff, otherwise the offsets are too large */ ZSTD_defaultPolicy_e const defaultPolicy = (max <= DefaultMaxOff) ? ZSTD_defaultAllowed : ZSTD_defaultDisallowed; DEBUGLOG(5, "Building OF table"); nextEntropy->fse.offcode_repeatMode = prevEntropy->fse.offcode_repeatMode; Offtype = ZSTD_selectEncodingType(&nextEntropy->fse.offcode_repeatMode, count, max, mostFrequent, nbSeq, OffFSELog, prevEntropy->fse.offcodeCTable, OF_defaultNorm, OF_defaultNormLog, defaultPolicy, strategy); assert(!(Offtype < set_compressed && nextEntropy->fse.offcode_repeatMode != FSE_repeat_none)); /* We don't copy tables */ { size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_OffsetBits, OffFSELog, (symbolEncodingType_e)Offtype, count, max, ofCodeTable, nbSeq, OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff, prevEntropy->fse.offcodeCTable, sizeof(prevEntropy->fse.offcodeCTable), workspace, wkspSize); if (ZSTD_isError(countSize)) return countSize; if (Offtype == set_compressed) lastNCount = op; op += countSize; } } /* build CTable for MatchLengths */ { U32 max = MaxML; size_t const mostFrequent = HIST_countFast_wksp(count, &max, mlCodeTable, nbSeq, workspace, wkspSize); /* can't fail */ DEBUGLOG(5, "Building ML table"); nextEntropy->fse.matchlength_repeatMode = prevEntropy->fse.matchlength_repeatMode; MLtype = ZSTD_selectEncodingType(&nextEntropy->fse.matchlength_repeatMode, count, max, mostFrequent, nbSeq, MLFSELog, prevEntropy->fse.matchlengthCTable, ML_defaultNorm, ML_defaultNormLog, ZSTD_defaultAllowed, strategy); assert(!(MLtype < set_compressed && nextEntropy->fse.matchlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */ { size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_MatchLength, MLFSELog, (symbolEncodingType_e)MLtype, count, max, mlCodeTable, nbSeq, ML_defaultNorm, ML_defaultNormLog, MaxML, prevEntropy->fse.matchlengthCTable, sizeof(prevEntropy->fse.matchlengthCTable), workspace, wkspSize); if (ZSTD_isError(countSize)) return countSize; if (MLtype == set_compressed) lastNCount = op; op += countSize; } } *seqHead = (BYTE)((LLtype<<6) + (Offtype<<4) + (MLtype<<2)); { size_t const bitstreamSize = ZSTD_encodeSequences( op, oend - op, CTable_MatchLength, mlCodeTable, CTable_OffsetBits, ofCodeTable, CTable_LitLength, llCodeTable, sequences, nbSeq, longOffsets, bmi2); if (ZSTD_isError(bitstreamSize)) return bitstreamSize; op += bitstreamSize; /* zstd versions <= 1.3.4 mistakenly report corruption when * FSE_readNCount() recieves a buffer < 4 bytes. * Fixed by https://github.com/facebook/zstd/pull/1146. * This can happen when the last set_compressed table present is 2 * bytes and the bitstream is only one byte. 
* In this exceedingly rare case, we will simply emit an uncompressed * block, since it isn't worth optimizing. */ if (lastNCount && (op - lastNCount) < 4) { /* NCountSize >= 2 && bitstreamSize > 0 ==> lastCountSize == 3 */ assert(op - lastNCount == 3); DEBUGLOG(5, "Avoiding bug in zstd decoder in versions <= 1.3.4 by " "emitting an uncompressed block."); return 0; } } return op - ostart; } MEM_STATIC size_t ZSTD_compressSequences(seqStore_t* seqStorePtr, const ZSTD_entropyCTables_t* prevEntropy, ZSTD_entropyCTables_t* nextEntropy, const ZSTD_CCtx_params* cctxParams, void* dst, size_t dstCapacity, size_t srcSize, void* workspace, size_t wkspSize, int bmi2) { size_t const cSize = ZSTD_compressSequences_internal( seqStorePtr, prevEntropy, nextEntropy, cctxParams, dst, dstCapacity, workspace, wkspSize, bmi2); if (cSize == 0) return 0; /* When srcSize <= dstCapacity, there is enough space to write a raw uncompressed block. * Since we ran out of space, block must be not compressible, so fall back to raw uncompressed block. */ if ((cSize == ERROR(dstSize_tooSmall)) & (srcSize <= dstCapacity)) return 0; /* block not compressed */ if (ZSTD_isError(cSize)) return cSize; /* Check compressibility */ { size_t const maxCSize = srcSize - ZSTD_minGain(srcSize, cctxParams->cParams.strategy); if (cSize >= maxCSize) return 0; /* block not compressed */ } return cSize; } /* ZSTD_selectBlockCompressor() : * Not static, but internal use only (used by long distance matcher) * assumption : strat is a valid strategy */ ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_dictMode_e dictMode) { static const ZSTD_blockCompressor blockCompressor[3][(unsigned)ZSTD_btultra+1] = { { ZSTD_compressBlock_fast /* default for 0 */, ZSTD_compressBlock_fast, ZSTD_compressBlock_doubleFast, ZSTD_compressBlock_greedy, ZSTD_compressBlock_lazy, ZSTD_compressBlock_lazy2, ZSTD_compressBlock_btlazy2, ZSTD_compressBlock_btopt, ZSTD_compressBlock_btultra }, { ZSTD_compressBlock_fast_extDict /* default for 0 */, ZSTD_compressBlock_fast_extDict, ZSTD_compressBlock_doubleFast_extDict, ZSTD_compressBlock_greedy_extDict, ZSTD_compressBlock_lazy_extDict, ZSTD_compressBlock_lazy2_extDict, ZSTD_compressBlock_btlazy2_extDict, ZSTD_compressBlock_btopt_extDict, ZSTD_compressBlock_btultra_extDict }, { ZSTD_compressBlock_fast_dictMatchState /* default for 0 */, ZSTD_compressBlock_fast_dictMatchState, ZSTD_compressBlock_doubleFast_dictMatchState, ZSTD_compressBlock_greedy_dictMatchState, ZSTD_compressBlock_lazy_dictMatchState, ZSTD_compressBlock_lazy2_dictMatchState, ZSTD_compressBlock_btlazy2_dictMatchState, ZSTD_compressBlock_btopt_dictMatchState, ZSTD_compressBlock_btultra_dictMatchState } }; ZSTD_blockCompressor selectedCompressor; ZSTD_STATIC_ASSERT((unsigned)ZSTD_fast == 1); assert((U32)strat >= (U32)ZSTD_fast); assert((U32)strat <= (U32)ZSTD_btultra); selectedCompressor = blockCompressor[(int)dictMode][(U32)strat]; assert(selectedCompressor != NULL); return selectedCompressor; } static void ZSTD_storeLastLiterals(seqStore_t* seqStorePtr, const BYTE* anchor, size_t lastLLSize) { memcpy(seqStorePtr->lit, anchor, lastLLSize); seqStorePtr->lit += lastLLSize; } void ZSTD_resetSeqStore(seqStore_t* ssPtr) { ssPtr->lit = ssPtr->litStart; ssPtr->sequences = ssPtr->sequencesStart; ssPtr->longLengthID = 0; } static size_t ZSTD_compressBlock_internal(ZSTD_CCtx* zc, void* dst, size_t dstCapacity, const void* src, size_t srcSize) { ZSTD_matchState_t* const ms = &zc->blockState.matchState; size_t cSize; DEBUGLOG(5, 
"ZSTD_compressBlock_internal (dstCapacity=%zu, dictLimit=%u, nextToUpdate=%u)", dstCapacity, ms->window.dictLimit, ms->nextToUpdate); assert(srcSize <= ZSTD_BLOCKSIZE_MAX); /* Assert that we have correctly flushed the ctx params into the ms's copy */ ZSTD_assertEqualCParams(zc->appliedParams.cParams, ms->cParams); if (srcSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1) { ZSTD_ldm_skipSequences(&zc->externSeqStore, srcSize, zc->appliedParams.cParams.searchLength); cSize = 0; goto out; /* don't even attempt compression below a certain srcSize */ } ZSTD_resetSeqStore(&(zc->seqStore)); ms->opt.symbolCosts = &zc->blockState.prevCBlock->entropy; /* required for optimal parser to read stats from dictionary */ /* a gap between an attached dict and the current window is not safe, * they must remain adjacent, and when that stops being the case, the dict * must be unset */ assert(ms->dictMatchState == NULL || ms->loadedDictEnd == ms->window.dictLimit); /* limited update after a very long match */ { const BYTE* const base = ms->window.base; const BYTE* const istart = (const BYTE*)src; const U32 current = (U32)(istart-base); if (sizeof(ptrdiff_t)==8) assert(istart - base < (ptrdiff_t)(U32)(-1)); /* ensure no overflow */ if (current > ms->nextToUpdate + 384) ms->nextToUpdate = current - MIN(192, (U32)(current - ms->nextToUpdate - 384)); } /* select and store sequences */ { ZSTD_dictMode_e const dictMode = ZSTD_matchState_dictMode(ms); size_t lastLLSize; { int i; for (i = 0; i < ZSTD_REP_NUM; ++i) zc->blockState.nextCBlock->rep[i] = zc->blockState.prevCBlock->rep[i]; } if (zc->externSeqStore.pos < zc->externSeqStore.size) { assert(!zc->appliedParams.ldmParams.enableLdm); /* Updates ldmSeqStore.pos */ lastLLSize = ZSTD_ldm_blockCompress(&zc->externSeqStore, ms, &zc->seqStore, zc->blockState.nextCBlock->rep, src, srcSize); assert(zc->externSeqStore.pos <= zc->externSeqStore.size); } else if (zc->appliedParams.ldmParams.enableLdm) { rawSeqStore_t ldmSeqStore = {NULL, 0, 0, 0}; ldmSeqStore.seq = zc->ldmSequences; ldmSeqStore.capacity = zc->maxNbLdmSequences; /* Updates ldmSeqStore.size */ CHECK_F(ZSTD_ldm_generateSequences(&zc->ldmState, &ldmSeqStore, &zc->appliedParams.ldmParams, src, srcSize)); /* Updates ldmSeqStore.pos */ lastLLSize = ZSTD_ldm_blockCompress(&ldmSeqStore, ms, &zc->seqStore, zc->blockState.nextCBlock->rep, src, srcSize); assert(ldmSeqStore.pos == ldmSeqStore.size); } else { /* not long range mode */ ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor(zc->appliedParams.cParams.strategy, dictMode); lastLLSize = blockCompressor(ms, &zc->seqStore, zc->blockState.nextCBlock->rep, src, srcSize); } { const BYTE* const lastLiterals = (const BYTE*)src + srcSize - lastLLSize; ZSTD_storeLastLiterals(&zc->seqStore, lastLiterals, lastLLSize); } } /* encode sequences and literals */ cSize = ZSTD_compressSequences(&zc->seqStore, &zc->blockState.prevCBlock->entropy, &zc->blockState.nextCBlock->entropy, &zc->appliedParams, dst, dstCapacity, srcSize, zc->entropyWorkspace, HUF_WORKSPACE_SIZE /* statically allocated in resetCCtx */, zc->bmi2); out: if (!ZSTD_isError(cSize) && cSize != 0) { /* confirm repcodes and entropy tables when emitting a compressed block */ ZSTD_compressedBlockState_t* const tmp = zc->blockState.prevCBlock; zc->blockState.prevCBlock = zc->blockState.nextCBlock; zc->blockState.nextCBlock = tmp; } /* We check that dictionaries have offset codes available for the first * block. 
After the first block, the offcode table might not have large * enough codes to represent the offsets in the data. */ if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid) zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check; return cSize; } /*! ZSTD_compress_frameChunk() : * Compress a chunk of data into one or multiple blocks. * All blocks will be terminated, all input will be consumed. * Function will issue an error if there is not enough `dstCapacity` to hold the compressed content. * Frame is supposed already started (header already produced) * @return : compressed size, or an error code */ static size_t ZSTD_compress_frameChunk (ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, U32 lastFrameChunk) { size_t blockSize = cctx->blockSize; size_t remaining = srcSize; const BYTE* ip = (const BYTE*)src; BYTE* const ostart = (BYTE*)dst; BYTE* op = ostart; U32 const maxDist = (U32)1 << cctx->appliedParams.cParams.windowLog; assert(cctx->appliedParams.cParams.windowLog <= 31); DEBUGLOG(5, "ZSTD_compress_frameChunk (blockSize=%u)", (U32)blockSize); if (cctx->appliedParams.fParams.checksumFlag && srcSize) XXH64_update(&cctx->xxhState, src, srcSize); while (remaining) { ZSTD_matchState_t* const ms = &cctx->blockState.matchState; U32 const lastBlock = lastFrameChunk & (blockSize >= remaining); if (dstCapacity < ZSTD_blockHeaderSize + MIN_CBLOCK_SIZE) return ERROR(dstSize_tooSmall); /* not enough space to store compressed block */ if (remaining < blockSize) blockSize = remaining; if (ZSTD_window_needOverflowCorrection(ms->window, ip + blockSize)) { U32 const cycleLog = ZSTD_cycleLog(cctx->appliedParams.cParams.chainLog, cctx->appliedParams.cParams.strategy); U32 const correction = ZSTD_window_correctOverflow(&ms->window, cycleLog, maxDist, ip); ZSTD_STATIC_ASSERT(ZSTD_CHAINLOG_MAX <= 30); ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX_32 <= 30); ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX <= 31); ZSTD_reduceIndex(cctx, correction); if (ms->nextToUpdate < correction) ms->nextToUpdate = 0; else ms->nextToUpdate -= correction; ms->loadedDictEnd = 0; ms->dictMatchState = NULL; } ZSTD_window_enforceMaxDist(&ms->window, ip + blockSize, maxDist, &ms->loadedDictEnd, &ms->dictMatchState); if (ms->nextToUpdate < ms->window.lowLimit) ms->nextToUpdate = ms->window.lowLimit; { size_t cSize = ZSTD_compressBlock_internal(cctx, op+ZSTD_blockHeaderSize, dstCapacity-ZSTD_blockHeaderSize, ip, blockSize); if (ZSTD_isError(cSize)) return cSize; if (cSize == 0) { /* block is not compressible */ cSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock); if (ZSTD_isError(cSize)) return cSize; } else { U32 const cBlockHeader24 = lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3); MEM_writeLE24(op, cBlockHeader24); cSize += ZSTD_blockHeaderSize; } ip += blockSize; assert(remaining >= blockSize); remaining -= blockSize; op += cSize; assert(dstCapacity >= cSize); dstCapacity -= cSize; DEBUGLOG(5, "ZSTD_compress_frameChunk: adding a block of size %u", (U32)cSize); } } if (lastFrameChunk && (op>ostart)) cctx->stage = ZSTDcs_ending; return op-ostart; } static size_t ZSTD_writeFrameHeader(void* dst, size_t dstCapacity, ZSTD_CCtx_params params, U64 pledgedSrcSize, U32 dictID) { BYTE* const op = (BYTE*)dst; U32 const dictIDSizeCodeLength = (dictID>0) + (dictID>=256) + (dictID>=65536); /* 0-3 */ U32 const dictIDSizeCode = params.fParams.noDictIDFlag ? 
                            0 : dictIDSizeCodeLength;   /* 0-3 */
    U32   const checksumFlag = params.fParams.checksumFlag>0;
    U32   const windowSize = (U32)1 << params.cParams.windowLog;
    U32   const singleSegment = params.fParams.contentSizeFlag && (windowSize >= pledgedSrcSize);
    BYTE  const windowLogByte = (BYTE)((params.cParams.windowLog - ZSTD_WINDOWLOG_ABSOLUTEMIN) << 3);
    U32   const fcsCode = params.fParams.contentSizeFlag ?
                     (pledgedSrcSize>=256) + (pledgedSrcSize>=65536+256) + (pledgedSrcSize>=0xFFFFFFFFU) : 0;  /* 0-3 */
    BYTE  const frameHeaderDescriptionByte = (BYTE)(dictIDSizeCode + (checksumFlag<<2) + (singleSegment<<5) + (fcsCode<<6) );
    size_t pos=0;

    assert(!(params.fParams.contentSizeFlag && pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN));
    if (dstCapacity < ZSTD_frameHeaderSize_max) return ERROR(dstSize_tooSmall);
    DEBUGLOG(4, "ZSTD_writeFrameHeader : dictIDFlag : %u ; dictID : %u ; dictIDSizeCode : %u",
                !params.fParams.noDictIDFlag, dictID, dictIDSizeCode);

    if (params.format == ZSTD_f_zstd1) {
        MEM_writeLE32(dst, ZSTD_MAGICNUMBER);
        pos = 4;
    }
    op[pos++] = frameHeaderDescriptionByte;
    if (!singleSegment) op[pos++] = windowLogByte;
    switch(dictIDSizeCode)
    {
        default:  assert(0); /* impossible */
        case 0 : break;
        case 1 : op[pos] = (BYTE)(dictID); pos++; break;
        case 2 : MEM_writeLE16(op+pos, (U16)dictID); pos+=2; break;
        case 3 : MEM_writeLE32(op+pos, dictID); pos+=4; break;
    }
    switch(fcsCode)
    {
        default:  assert(0); /* impossible */
        case 0 : if (singleSegment) op[pos++] = (BYTE)(pledgedSrcSize); break;
        case 1 : MEM_writeLE16(op+pos, (U16)(pledgedSrcSize-256)); pos+=2; break;
        case 2 : MEM_writeLE32(op+pos, (U32)(pledgedSrcSize)); pos+=4; break;
        case 3 : MEM_writeLE64(op+pos, (U64)(pledgedSrcSize)); pos+=8; break;
    }
    return pos;
}

/* ZSTD_writeLastEmptyBlock() :
 * output an empty Block with end-of-frame mark to complete a frame
 * @return : size of data written into `dst` (== ZSTD_blockHeaderSize (defined in zstd_internal.h))
 *           or an error code if `dstCapacity` is too small (<ZSTD_blockHeaderSize)
 */
size_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity)
{
    if (dstCapacity < ZSTD_blockHeaderSize) return ERROR(dstSize_tooSmall);
    {   U32 const cBlockHeader24 = 1 /*lastBlock*/ + (((U32)bt_raw)<<1);  /* 0 size */
        MEM_writeLE24(dst, cBlockHeader24);
        return ZSTD_blockHeaderSize;
    }
}

size_t ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSeq)
{
    if (cctx->stage != ZSTDcs_init)
        return ERROR(stage_wrong);
    if (cctx->appliedParams.ldmParams.enableLdm)
        return ERROR(parameter_unsupported);
    cctx->externSeqStore.seq = seq;
    cctx->externSeqStore.size = nbSeq;
    cctx->externSeqStore.capacity = nbSeq;
    cctx->externSeqStore.pos = 0;
    return 0;
}

static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx,
                              void* dst, size_t dstCapacity,
                        const void* src, size_t srcSize,
                               U32 frame, U32 lastFrameChunk)
{
    ZSTD_matchState_t* const ms = &cctx->blockState.matchState;
    size_t fhSize = 0;

    DEBUGLOG(5, "ZSTD_compressContinue_internal, stage: %u, srcSize: %u",
                cctx->stage, (U32)srcSize);
    if (cctx->stage==ZSTDcs_created) return ERROR(stage_wrong);   /* missing init (ZSTD_compressBegin) */

    if (frame && (cctx->stage==ZSTDcs_init)) {
        fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, cctx->appliedParams,
                                       cctx->pledgedSrcSizePlusOne-1, cctx->dictID);
        if (ZSTD_isError(fhSize)) return fhSize;
        dstCapacity -= fhSize;
        dst = (char*)dst + fhSize;
        cctx->stage = ZSTDcs_ongoing;
    }

    if (!srcSize) return fhSize;  /* do not generate an empty block if no input */

    if (!ZSTD_window_update(&ms->window, src, srcSize)) {
        ms->nextToUpdate = ms->window.dictLimit;
    }
    if
(cctx->appliedParams.ldmParams.enableLdm) { ZSTD_window_update(&cctx->ldmState.window, src, srcSize); } if (!frame) { /* overflow check and correction for block mode */ if (ZSTD_window_needOverflowCorrection(ms->window, (const char*)src + srcSize)) { U32 const cycleLog = ZSTD_cycleLog(cctx->appliedParams.cParams.chainLog, cctx->appliedParams.cParams.strategy); U32 const correction = ZSTD_window_correctOverflow(&ms->window, cycleLog, 1 << cctx->appliedParams.cParams.windowLog, src); ZSTD_STATIC_ASSERT(ZSTD_CHAINLOG_MAX <= 30); ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX_32 <= 30); ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX <= 31); ZSTD_reduceIndex(cctx, correction); if (ms->nextToUpdate < correction) ms->nextToUpdate = 0; else ms->nextToUpdate -= correction; ms->loadedDictEnd = 0; ms->dictMatchState = NULL; } } DEBUGLOG(5, "ZSTD_compressContinue_internal (blockSize=%u)", (U32)cctx->blockSize); { size_t const cSize = frame ? ZSTD_compress_frameChunk (cctx, dst, dstCapacity, src, srcSize, lastFrameChunk) : ZSTD_compressBlock_internal (cctx, dst, dstCapacity, src, srcSize); if (ZSTD_isError(cSize)) return cSize; cctx->consumedSrcSize += srcSize; cctx->producedCSize += (cSize + fhSize); assert(!(cctx->appliedParams.fParams.contentSizeFlag && cctx->pledgedSrcSizePlusOne == 0)); if (cctx->pledgedSrcSizePlusOne != 0) { /* control src size */ ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN == (unsigned long long)-1); if (cctx->consumedSrcSize+1 > cctx->pledgedSrcSizePlusOne) { DEBUGLOG(4, "error : pledgedSrcSize = %u, while realSrcSize >= %u", (U32)cctx->pledgedSrcSizePlusOne-1, (U32)cctx->consumedSrcSize); return ERROR(srcSize_wrong); } } return cSize + fhSize; } } size_t ZSTD_compressContinue (ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize) { DEBUGLOG(5, "ZSTD_compressContinue (srcSize=%u)", (U32)srcSize); return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1 /* frame mode */, 0 /* last chunk */); } size_t ZSTD_getBlockSize(const ZSTD_CCtx* cctx) { ZSTD_compressionParameters const cParams = cctx->appliedParams.cParams; assert(!ZSTD_checkCParams(cParams)); return MIN (ZSTD_BLOCKSIZE_MAX, (U32)1 << cParams.windowLog); } size_t ZSTD_compressBlock(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize) { size_t const blockSizeMax = ZSTD_getBlockSize(cctx); if (srcSize > blockSizeMax) return ERROR(srcSize_wrong); return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 0 /* frame mode */, 0 /* last chunk */); } /*! ZSTD_loadDictionaryContent() : * @return : 0, or an error code */ static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms, ZSTD_CCtx_params const* params, const void* src, size_t srcSize, ZSTD_dictTableLoadMethod_e dtlm) { const BYTE* const ip = (const BYTE*) src; const BYTE* const iend = ip + srcSize; ZSTD_window_update(&ms->window, src, srcSize); ms->loadedDictEnd = params->forceWindow ? 
                                        0 : (U32)(iend - ms->window.base);
    /* Assert that the ms params match the params we're being given */
    ZSTD_assertEqualCParams(params->cParams, ms->cParams);

    if (srcSize <= HASH_READ_SIZE) return 0;

    switch(params->cParams.strategy)
    {
    case ZSTD_fast:
        ZSTD_fillHashTable(ms, iend, dtlm);
        break;
    case ZSTD_dfast:
        ZSTD_fillDoubleHashTable(ms, iend, dtlm);
        break;

    case ZSTD_greedy:
    case ZSTD_lazy:
    case ZSTD_lazy2:
        if (srcSize >= HASH_READ_SIZE)
            ZSTD_insertAndFindFirstIndex(ms, iend-HASH_READ_SIZE);
        break;

    case ZSTD_btlazy2:   /* we want the dictionary table fully sorted */
    case ZSTD_btopt:
    case ZSTD_btultra:
        if (srcSize >= HASH_READ_SIZE)
            ZSTD_updateTree(ms, iend-HASH_READ_SIZE, iend);
        break;

    default:
        assert(0);  /* not possible : not a valid strategy id */
    }

    ms->nextToUpdate = (U32)(iend - ms->window.base);
    return 0;
}


/* Dictionaries that assign zero probability to symbols that show up cause problems
 * during FSE encoding. Refuse dictionaries that assign zero probability to symbols
 * that we may encounter during compression.
 * NOTE: This behavior is not standard and could be improved in the future. */
static size_t ZSTD_checkDictNCount(short* normalizedCounter, unsigned dictMaxSymbolValue, unsigned maxSymbolValue) {
    U32 s;
    if (dictMaxSymbolValue < maxSymbolValue) return ERROR(dictionary_corrupted);
    for (s = 0; s <= maxSymbolValue; ++s) {
        if (normalizedCounter[s] == 0) return ERROR(dictionary_corrupted);
    }
    return 0;
}


/* Dictionary format :
 * See :
 * https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format
 */
/*! ZSTD_loadZstdDictionary() :
 * @return : dictID, or an error code
 *  assumptions : magic number supposed already checked
 *                dictSize supposed > 8
 */
static size_t ZSTD_loadZstdDictionary(ZSTD_compressedBlockState_t* bs,
                                      ZSTD_matchState_t* ms,
                                      ZSTD_CCtx_params const* params,
                                      const void* dict, size_t dictSize,
                                      ZSTD_dictTableLoadMethod_e dtlm,
                                      void* workspace)
{
    const BYTE* dictPtr = (const BYTE*)dict;
    const BYTE* const dictEnd = dictPtr + dictSize;
    short offcodeNCount[MaxOff+1];
    unsigned offcodeMaxValue = MaxOff;
    size_t dictID;

    ZSTD_STATIC_ASSERT(HUF_WORKSPACE_SIZE >= (1<<MAX(MLFSELog,LLFSELog)));
    assert(dictSize > 8);
    assert(MEM_readLE32(dictPtr) == ZSTD_MAGIC_DICTIONARY);

    dictPtr += 4;   /* skip magic number */
    dictID = params->fParams.noDictIDFlag ?
0 : MEM_readLE32(dictPtr); dictPtr += 4; { unsigned maxSymbolValue = 255; size_t const hufHeaderSize = HUF_readCTable((HUF_CElt*)bs->entropy.huf.CTable, &maxSymbolValue, dictPtr, dictEnd-dictPtr); if (HUF_isError(hufHeaderSize)) return ERROR(dictionary_corrupted); if (maxSymbolValue < 255) return ERROR(dictionary_corrupted); dictPtr += hufHeaderSize; } { unsigned offcodeLog; size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr); if (FSE_isError(offcodeHeaderSize)) return ERROR(dictionary_corrupted); if (offcodeLog > OffFSELog) return ERROR(dictionary_corrupted); /* Defer checking offcodeMaxValue because we need to know the size of the dictionary content */ /* fill all offset symbols to avoid garbage at end of table */ CHECK_E( FSE_buildCTable_wksp(bs->entropy.fse.offcodeCTable, offcodeNCount, MaxOff, offcodeLog, workspace, HUF_WORKSPACE_SIZE), dictionary_corrupted); dictPtr += offcodeHeaderSize; } { short matchlengthNCount[MaxML+1]; unsigned matchlengthMaxValue = MaxML, matchlengthLog; size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr); if (FSE_isError(matchlengthHeaderSize)) return ERROR(dictionary_corrupted); if (matchlengthLog > MLFSELog) return ERROR(dictionary_corrupted); /* Every match length code must have non-zero probability */ CHECK_F( ZSTD_checkDictNCount(matchlengthNCount, matchlengthMaxValue, MaxML)); CHECK_E( FSE_buildCTable_wksp(bs->entropy.fse.matchlengthCTable, matchlengthNCount, matchlengthMaxValue, matchlengthLog, workspace, HUF_WORKSPACE_SIZE), dictionary_corrupted); dictPtr += matchlengthHeaderSize; } { short litlengthNCount[MaxLL+1]; unsigned litlengthMaxValue = MaxLL, litlengthLog; size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr); if (FSE_isError(litlengthHeaderSize)) return ERROR(dictionary_corrupted); if (litlengthLog > LLFSELog) return ERROR(dictionary_corrupted); /* Every literal length code must have non-zero probability */ CHECK_F( ZSTD_checkDictNCount(litlengthNCount, litlengthMaxValue, MaxLL)); CHECK_E( FSE_buildCTable_wksp(bs->entropy.fse.litlengthCTable, litlengthNCount, litlengthMaxValue, litlengthLog, workspace, HUF_WORKSPACE_SIZE), dictionary_corrupted); dictPtr += litlengthHeaderSize; } if (dictPtr+12 > dictEnd) return ERROR(dictionary_corrupted); bs->rep[0] = MEM_readLE32(dictPtr+0); bs->rep[1] = MEM_readLE32(dictPtr+4); bs->rep[2] = MEM_readLE32(dictPtr+8); dictPtr += 12; { size_t const dictContentSize = (size_t)(dictEnd - dictPtr); U32 offcodeMax = MaxOff; if (dictContentSize <= ((U32)-1) - 128 KB) { U32 const maxOffset = (U32)dictContentSize + 128 KB; /* The maximum offset that must be supported */ offcodeMax = ZSTD_highbit32(maxOffset); /* Calculate minimum offset code required to represent maxOffset */ } /* All offset values <= dictContentSize + 128 KB must be representable */ CHECK_F (ZSTD_checkDictNCount(offcodeNCount, offcodeMaxValue, MIN(offcodeMax, MaxOff))); /* All repCodes must be <= dictContentSize and != 0*/ { U32 u; for (u=0; u<3; u++) { if (bs->rep[u] == 0) return ERROR(dictionary_corrupted); if (bs->rep[u] > dictContentSize) return ERROR(dictionary_corrupted); } } bs->entropy.huf.repeatMode = HUF_repeat_valid; bs->entropy.fse.offcode_repeatMode = FSE_repeat_valid; bs->entropy.fse.matchlength_repeatMode = FSE_repeat_valid; bs->entropy.fse.litlength_repeatMode = FSE_repeat_valid; 
CHECK_F(ZSTD_loadDictionaryContent(ms, params, dictPtr, dictContentSize, dtlm)); return dictID; } } /** ZSTD_compress_insertDictionary() : * @return : dictID, or an error code */ static size_t ZSTD_compress_insertDictionary(ZSTD_compressedBlockState_t* bs, ZSTD_matchState_t* ms, const ZSTD_CCtx_params* params, const void* dict, size_t dictSize, ZSTD_dictContentType_e dictContentType, ZSTD_dictTableLoadMethod_e dtlm, void* workspace) { DEBUGLOG(4, "ZSTD_compress_insertDictionary (dictSize=%u)", (U32)dictSize); if ((dict==NULL) || (dictSize<=8)) return 0; ZSTD_reset_compressedBlockState(bs); /* dict restricted modes */ if (dictContentType == ZSTD_dct_rawContent) return ZSTD_loadDictionaryContent(ms, params, dict, dictSize, dtlm); if (MEM_readLE32(dict) != ZSTD_MAGIC_DICTIONARY) { if (dictContentType == ZSTD_dct_auto) { DEBUGLOG(4, "raw content dictionary detected"); return ZSTD_loadDictionaryContent(ms, params, dict, dictSize, dtlm); } if (dictContentType == ZSTD_dct_fullDict) return ERROR(dictionary_wrong); assert(0); /* impossible */ } /* dict as full zstd dictionary */ return ZSTD_loadZstdDictionary(bs, ms, params, dict, dictSize, dtlm, workspace); } /*! ZSTD_compressBegin_internal() : * @return : 0, or an error code */ static size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_dictContentType_e dictContentType, ZSTD_dictTableLoadMethod_e dtlm, const ZSTD_CDict* cdict, ZSTD_CCtx_params params, U64 pledgedSrcSize, ZSTD_buffered_policy_e zbuff) { DEBUGLOG(4, "ZSTD_compressBegin_internal: wlog=%u", params.cParams.windowLog); /* params are supposed to be fully validated at this point */ assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams))); assert(!((dict) && (cdict))); /* either dict or cdict, not both */ if (cdict && cdict->dictContentSize>0) { return ZSTD_resetCCtx_usingCDict(cctx, cdict, params, pledgedSrcSize, zbuff); } CHECK_F( ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize, ZSTDcrp_continue, zbuff) ); { size_t const dictID = ZSTD_compress_insertDictionary( cctx->blockState.prevCBlock, &cctx->blockState.matchState, &params, dict, dictSize, dictContentType, dtlm, cctx->entropyWorkspace); if (ZSTD_isError(dictID)) return dictID; assert(dictID <= (size_t)(U32)-1); cctx->dictID = (U32)dictID; } return 0; } size_t ZSTD_compressBegin_advanced_internal(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_dictContentType_e dictContentType, ZSTD_dictTableLoadMethod_e dtlm, const ZSTD_CDict* cdict, ZSTD_CCtx_params params, unsigned long long pledgedSrcSize) { DEBUGLOG(4, "ZSTD_compressBegin_advanced_internal: wlog=%u", params.cParams.windowLog); /* compression parameters verification and optimization */ CHECK_F( ZSTD_checkCParams(params.cParams) ); return ZSTD_compressBegin_internal(cctx, dict, dictSize, dictContentType, dtlm, cdict, params, pledgedSrcSize, ZSTDb_not_buffered); } /*! 
ZSTD_compressBegin_advanced() : * @return : 0, or an error code */ size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize) { ZSTD_CCtx_params const cctxParams = ZSTD_assignParamsToCCtxParams(cctx->requestedParams, params); return ZSTD_compressBegin_advanced_internal(cctx, dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL /*cdict*/, cctxParams, pledgedSrcSize); } size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel) { ZSTD_parameters const params = ZSTD_getParams(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize); ZSTD_CCtx_params const cctxParams = ZSTD_assignParamsToCCtxParams(cctx->requestedParams, params); DEBUGLOG(4, "ZSTD_compressBegin_usingDict (dictSize=%u)", (U32)dictSize); return ZSTD_compressBegin_internal(cctx, dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL, cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, ZSTDb_not_buffered); } size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel) { return ZSTD_compressBegin_usingDict(cctx, NULL, 0, compressionLevel); } /*! ZSTD_writeEpilogue() : * Ends a frame. * @return : nb of bytes written into dst (or an error code) */ static size_t ZSTD_writeEpilogue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity) { BYTE* const ostart = (BYTE*)dst; BYTE* op = ostart; size_t fhSize = 0; DEBUGLOG(4, "ZSTD_writeEpilogue"); if (cctx->stage == ZSTDcs_created) return ERROR(stage_wrong); /* init missing */ /* special case : empty frame */ if (cctx->stage == ZSTDcs_init) { fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, cctx->appliedParams, 0, 0); if (ZSTD_isError(fhSize)) return fhSize; dstCapacity -= fhSize; op += fhSize; cctx->stage = ZSTDcs_ongoing; } if (cctx->stage != ZSTDcs_ending) { /* write one last empty block, make it the "last" block */ U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw)<<1) + 0; if (dstCapacity<4) return ERROR(dstSize_tooSmall); MEM_writeLE32(op, cBlockHeader24); op += ZSTD_blockHeaderSize; dstCapacity -= ZSTD_blockHeaderSize; } if (cctx->appliedParams.fParams.checksumFlag) { U32 const checksum = (U32) XXH64_digest(&cctx->xxhState); if (dstCapacity<4) return ERROR(dstSize_tooSmall); DEBUGLOG(4, "ZSTD_writeEpilogue: write checksum : %08X", checksum); MEM_writeLE32(op, checksum); op += 4; } cctx->stage = ZSTDcs_created; /* return to "created but no init" status */ return op-ostart; } size_t ZSTD_compressEnd (ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize) { size_t endResult; size_t const cSize = ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1 /* frame mode */, 1 /* last chunk */); if (ZSTD_isError(cSize)) return cSize; endResult = ZSTD_writeEpilogue(cctx, (char*)dst + cSize, dstCapacity-cSize); if (ZSTD_isError(endResult)) return endResult; assert(!(cctx->appliedParams.fParams.contentSizeFlag && cctx->pledgedSrcSizePlusOne == 0)); if (cctx->pledgedSrcSizePlusOne != 0) { /* control src size */ ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN == (unsigned long long)-1); DEBUGLOG(4, "end of frame : controlling src size"); if (cctx->pledgedSrcSizePlusOne != cctx->consumedSrcSize+1) { DEBUGLOG(4, "error : pledgedSrcSize = %u, while realSrcSize = %u", (U32)cctx->pledgedSrcSizePlusOne-1, (U32)cctx->consumedSrcSize); return ERROR(srcSize_wrong); } } return cSize + endResult; } static size_t ZSTD_compress_internal (ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, const void* dict,size_t dictSize, 
ZSTD_parameters params) { ZSTD_CCtx_params const cctxParams = ZSTD_assignParamsToCCtxParams(cctx->requestedParams, params); DEBUGLOG(4, "ZSTD_compress_internal"); return ZSTD_compress_advanced_internal(cctx, dst, dstCapacity, src, srcSize, dict, dictSize, cctxParams); } size_t ZSTD_compress_advanced (ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, const void* dict,size_t dictSize, ZSTD_parameters params) { DEBUGLOG(4, "ZSTD_compress_advanced"); CHECK_F(ZSTD_checkCParams(params.cParams)); return ZSTD_compress_internal(cctx, dst, dstCapacity, src, srcSize, dict, dictSize, params); } /* Internal */ size_t ZSTD_compress_advanced_internal( ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, const void* dict,size_t dictSize, ZSTD_CCtx_params params) { DEBUGLOG(4, "ZSTD_compress_advanced_internal (srcSize:%u)", (U32)srcSize); CHECK_F( ZSTD_compressBegin_internal(cctx, dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL, params, srcSize, ZSTDb_not_buffered) ); return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize); } size_t ZSTD_compress_usingDict(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, const void* dict, size_t dictSize, int compressionLevel) { ZSTD_parameters const params = ZSTD_getParams(compressionLevel, srcSize + (!srcSize), dict ? dictSize : 0); ZSTD_CCtx_params cctxParams = ZSTD_assignParamsToCCtxParams(cctx->requestedParams, params); assert(params.fParams.contentSizeFlag == 1); return ZSTD_compress_advanced_internal(cctx, dst, dstCapacity, src, srcSize, dict, dictSize, cctxParams); } size_t ZSTD_compressCCtx(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, int compressionLevel) { DEBUGLOG(4, "ZSTD_compressCCtx (srcSize=%u)", (U32)srcSize); assert(cctx != NULL); return ZSTD_compress_usingDict(cctx, dst, dstCapacity, src, srcSize, NULL, 0, compressionLevel); } size_t ZSTD_compress(void* dst, size_t dstCapacity, const void* src, size_t srcSize, int compressionLevel) { size_t result; ZSTD_CCtx ctxBody; ZSTD_initCCtx(&ctxBody, ZSTD_defaultCMem); result = ZSTD_compressCCtx(&ctxBody, dst, dstCapacity, src, srcSize, compressionLevel); ZSTD_freeCCtxContent(&ctxBody); /* can't free ctxBody itself, as it's on stack; free only heap content */ return result; } /* ===== Dictionary API ===== */ /*! ZSTD_estimateCDictSize_advanced() : * Estimate amount of memory that will be needed to create a dictionary with following arguments */ size_t ZSTD_estimateCDictSize_advanced( size_t dictSize, ZSTD_compressionParameters cParams, ZSTD_dictLoadMethod_e dictLoadMethod) { DEBUGLOG(5, "sizeof(ZSTD_CDict) : %u", (U32)sizeof(ZSTD_CDict)); return sizeof(ZSTD_CDict) + HUF_WORKSPACE_SIZE + ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0) + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize); } size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel) { ZSTD_compressionParameters const cParams = ZSTD_getCParams(compressionLevel, 0, dictSize); return ZSTD_estimateCDictSize_advanced(dictSize, cParams, ZSTD_dlm_byCopy); } size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict) { if (cdict==NULL) return 0; /* support sizeof on NULL */ DEBUGLOG(5, "sizeof(*cdict) : %u", (U32)sizeof(*cdict)); return cdict->workspaceSize + (cdict->dictBuffer ? 
cdict->dictContentSize : 0) + sizeof(*cdict); } static size_t ZSTD_initCDict_internal( ZSTD_CDict* cdict, const void* dictBuffer, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType, ZSTD_compressionParameters cParams) { DEBUGLOG(3, "ZSTD_initCDict_internal (dictContentType:%u)", (U32)dictContentType); assert(!ZSTD_checkCParams(cParams)); cdict->matchState.cParams = cParams; if ((dictLoadMethod == ZSTD_dlm_byRef) || (!dictBuffer) || (!dictSize)) { cdict->dictBuffer = NULL; cdict->dictContent = dictBuffer; } else { void* const internalBuffer = ZSTD_malloc(dictSize, cdict->customMem); cdict->dictBuffer = internalBuffer; cdict->dictContent = internalBuffer; if (!internalBuffer) return ERROR(memory_allocation); memcpy(internalBuffer, dictBuffer, dictSize); } cdict->dictContentSize = dictSize; /* Reset the state to no dictionary */ ZSTD_reset_compressedBlockState(&cdict->cBlockState); { void* const end = ZSTD_reset_matchState( &cdict->matchState, (U32*)cdict->workspace + HUF_WORKSPACE_SIZE_U32, &cParams, ZSTDcrp_continue, /* forCCtx */ 0); assert(end == (char*)cdict->workspace + cdict->workspaceSize); (void)end; } /* (Maybe) load the dictionary * Skips loading the dictionary if it is <= 8 bytes. */ { ZSTD_CCtx_params params; memset(&params, 0, sizeof(params)); params.compressionLevel = ZSTD_CLEVEL_DEFAULT; params.fParams.contentSizeFlag = 1; params.cParams = cParams; { size_t const dictID = ZSTD_compress_insertDictionary( &cdict->cBlockState, &cdict->matchState, &params, cdict->dictContent, cdict->dictContentSize, dictContentType, ZSTD_dtlm_full, cdict->workspace); if (ZSTD_isError(dictID)) return dictID; assert(dictID <= (size_t)(U32)-1); cdict->dictID = (U32)dictID; } } return 0; } ZSTD_CDict* ZSTD_createCDict_advanced(const void* dictBuffer, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType, ZSTD_compressionParameters cParams, ZSTD_customMem customMem) { DEBUGLOG(3, "ZSTD_createCDict_advanced, mode %u", (U32)dictContentType); if (!customMem.customAlloc ^ !customMem.customFree) return NULL; { ZSTD_CDict* const cdict = (ZSTD_CDict*)ZSTD_malloc(sizeof(ZSTD_CDict), customMem); size_t const workspaceSize = HUF_WORKSPACE_SIZE + ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0); void* const workspace = ZSTD_malloc(workspaceSize, customMem); if (!cdict || !workspace) { ZSTD_free(cdict, customMem); ZSTD_free(workspace, customMem); return NULL; } cdict->customMem = customMem; cdict->workspace = workspace; cdict->workspaceSize = workspaceSize; if (ZSTD_isError( ZSTD_initCDict_internal(cdict, dictBuffer, dictSize, dictLoadMethod, dictContentType, cParams) )) { ZSTD_freeCDict(cdict); return NULL; } return cdict; } } ZSTD_CDict* ZSTD_createCDict(const void* dict, size_t dictSize, int compressionLevel) { ZSTD_compressionParameters cParams = ZSTD_getCParams(compressionLevel, 0, dictSize); return ZSTD_createCDict_advanced(dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto, cParams, ZSTD_defaultCMem); } ZSTD_CDict* ZSTD_createCDict_byReference(const void* dict, size_t dictSize, int compressionLevel) { ZSTD_compressionParameters cParams = ZSTD_getCParams(compressionLevel, 0, dictSize); return ZSTD_createCDict_advanced(dict, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto, cParams, ZSTD_defaultCMem); } size_t ZSTD_freeCDict(ZSTD_CDict* cdict) { if (cdict==NULL) return 0; /* support free on NULL */ { ZSTD_customMem const cMem = cdict->customMem; ZSTD_free(cdict->workspace, cMem); ZSTD_free(cdict->dictBuffer, cMem); ZSTD_free(cdict, cMem); 
return 0; } } /*! ZSTD_initStaticCDict_advanced() : * Generate a digested dictionary in provided memory area. * workspace: The memory area to emplace the dictionary into. * Provided pointer must 8-bytes aligned. * It must outlive dictionary usage. * workspaceSize: Use ZSTD_estimateCDictSize() * to determine how large workspace must be. * cParams : use ZSTD_getCParams() to transform a compression level * into its relevants cParams. * @return : pointer to ZSTD_CDict*, or NULL if error (size too small) * Note : there is no corresponding "free" function. * Since workspace was allocated externally, it must be freed externally. */ const ZSTD_CDict* ZSTD_initStaticCDict( void* workspace, size_t workspaceSize, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType, ZSTD_compressionParameters cParams) { size_t const matchStateSize = ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0); size_t const neededSize = sizeof(ZSTD_CDict) + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize) + HUF_WORKSPACE_SIZE + matchStateSize; ZSTD_CDict* const cdict = (ZSTD_CDict*) workspace; void* ptr; if ((size_t)workspace & 7) return NULL; /* 8-aligned */ DEBUGLOG(4, "(workspaceSize < neededSize) : (%u < %u) => %u", (U32)workspaceSize, (U32)neededSize, (U32)(workspaceSize < neededSize)); if (workspaceSize < neededSize) return NULL; if (dictLoadMethod == ZSTD_dlm_byCopy) { memcpy(cdict+1, dict, dictSize); dict = cdict+1; ptr = (char*)workspace + sizeof(ZSTD_CDict) + dictSize; } else { ptr = cdict+1; } cdict->workspace = ptr; cdict->workspaceSize = HUF_WORKSPACE_SIZE + matchStateSize; if (ZSTD_isError( ZSTD_initCDict_internal(cdict, dict, dictSize, ZSTD_dlm_byRef, dictContentType, cParams) )) return NULL; return cdict; } ZSTD_compressionParameters ZSTD_getCParamsFromCDict(const ZSTD_CDict* cdict) { assert(cdict != NULL); return cdict->matchState.cParams; } /* ZSTD_compressBegin_usingCDict_advanced() : * cdict must be != NULL */ size_t ZSTD_compressBegin_usingCDict_advanced( ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict, ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize) { DEBUGLOG(4, "ZSTD_compressBegin_usingCDict_advanced"); if (cdict==NULL) return ERROR(dictionary_wrong); { ZSTD_CCtx_params params = cctx->requestedParams; params.cParams = ZSTD_getCParamsFromCDict(cdict); /* Increase window log to fit the entire dictionary and source if the * source size is known. Limit the increase to 19, which is the * window log for compression level 1 with the largest source size. */ if (pledgedSrcSize != ZSTD_CONTENTSIZE_UNKNOWN) { U32 const limitedSrcSize = (U32)MIN(pledgedSrcSize, 1U << 19); U32 const limitedSrcLog = limitedSrcSize > 1 ? 
ZSTD_highbit32(limitedSrcSize - 1) + 1 : 1; params.cParams.windowLog = MAX(params.cParams.windowLog, limitedSrcLog); } params.fParams = fParams; return ZSTD_compressBegin_internal(cctx, NULL, 0, ZSTD_dct_auto, ZSTD_dtlm_fast, cdict, params, pledgedSrcSize, ZSTDb_not_buffered); } } /* ZSTD_compressBegin_usingCDict() : * pledgedSrcSize=0 means "unknown" * if pledgedSrcSize>0, it will enable contentSizeFlag */ size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict) { ZSTD_frameParameters const fParams = { 0 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ }; DEBUGLOG(4, "ZSTD_compressBegin_usingCDict : dictIDFlag == %u", !fParams.noDictIDFlag); return ZSTD_compressBegin_usingCDict_advanced(cctx, cdict, fParams, ZSTD_CONTENTSIZE_UNKNOWN); } size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, const ZSTD_CDict* cdict, ZSTD_frameParameters fParams) { CHECK_F (ZSTD_compressBegin_usingCDict_advanced(cctx, cdict, fParams, srcSize)); /* will check if cdict != NULL */ return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize); } /*! ZSTD_compress_usingCDict() : * Compression using a digested Dictionary. * Faster startup than ZSTD_compress_usingDict(), recommended when same dictionary is used multiple times. * Note that compression parameters are decided at CDict creation time * while frame parameters are hardcoded */ size_t ZSTD_compress_usingCDict(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, const ZSTD_CDict* cdict) { ZSTD_frameParameters const fParams = { 1 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ }; return ZSTD_compress_usingCDict_advanced(cctx, dst, dstCapacity, src, srcSize, cdict, fParams); } /* ****************************************************************** * Streaming ********************************************************************/ ZSTD_CStream* ZSTD_createCStream(void) { DEBUGLOG(3, "ZSTD_createCStream"); return ZSTD_createCStream_advanced(ZSTD_defaultCMem); } ZSTD_CStream* ZSTD_initStaticCStream(void *workspace, size_t workspaceSize) { return ZSTD_initStaticCCtx(workspace, workspaceSize); } ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem) { /* CStream and CCtx are now same object */ return ZSTD_createCCtx_advanced(customMem); } size_t ZSTD_freeCStream(ZSTD_CStream* zcs) { return ZSTD_freeCCtx(zcs); /* same object */ } /*====== Initialization ======*/ size_t ZSTD_CStreamInSize(void) { return ZSTD_BLOCKSIZE_MAX; } size_t ZSTD_CStreamOutSize(void) { return ZSTD_compressBound(ZSTD_BLOCKSIZE_MAX) + ZSTD_blockHeaderSize + 4 /* 32-bits hash */ ; } static size_t ZSTD_resetCStream_internal(ZSTD_CStream* cctx, const void* const dict, size_t const dictSize, ZSTD_dictContentType_e const dictContentType, const ZSTD_CDict* const cdict, ZSTD_CCtx_params params, unsigned long long const pledgedSrcSize) { DEBUGLOG(4, "ZSTD_resetCStream_internal"); /* Finalize the compression parameters */ params.cParams = ZSTD_getCParamsFromCCtxParams(&params, pledgedSrcSize, dictSize); /* params are supposed to be fully validated at this point */ assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams))); assert(!((dict) && (cdict))); /* either dict or cdict, not both */ CHECK_F( ZSTD_compressBegin_internal(cctx, dict, dictSize, dictContentType, ZSTD_dtlm_fast, cdict, params, pledgedSrcSize, ZSTDb_buffered) ); cctx->inToCompress = 0; cctx->inBuffPos = 0; cctx->inBuffTarget = cctx->blockSize + (cctx->blockSize == pledgedSrcSize); /* for small input: avoid automatic flush 
on reaching end of block, since it would require to add a 3-bytes null block to end frame */ cctx->outBuffContentSize = cctx->outBuffFlushedSize = 0; cctx->streamStage = zcss_load; cctx->frameEnded = 0; return 0; /* ready to go */ } /* ZSTD_resetCStream(): * pledgedSrcSize == 0 means "unknown" */ size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize) { ZSTD_CCtx_params params = zcs->requestedParams; DEBUGLOG(4, "ZSTD_resetCStream: pledgedSrcSize = %u", (U32)pledgedSrcSize); if (pledgedSrcSize==0) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN; params.fParams.contentSizeFlag = 1; return ZSTD_resetCStream_internal(zcs, NULL, 0, ZSTD_dct_auto, zcs->cdict, params, pledgedSrcSize); } /*! ZSTD_initCStream_internal() : * Note : for lib/compress only. Used by zstdmt_compress.c. * Assumption 1 : params are valid * Assumption 2 : either dict, or cdict, is defined, not both */ size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs, const void* dict, size_t dictSize, const ZSTD_CDict* cdict, ZSTD_CCtx_params params, unsigned long long pledgedSrcSize) { DEBUGLOG(4, "ZSTD_initCStream_internal"); params.cParams = ZSTD_getCParamsFromCCtxParams(&params, pledgedSrcSize, dictSize); assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams))); assert(!((dict) && (cdict))); /* either dict or cdict, not both */ if (dict && dictSize >= 8) { DEBUGLOG(4, "loading dictionary of size %u", (U32)dictSize); if (zcs->staticSize) { /* static CCtx : never uses malloc */ /* incompatible with internal cdict creation */ return ERROR(memory_allocation); } ZSTD_freeCDict(zcs->cdictLocal); zcs->cdictLocal = ZSTD_createCDict_advanced(dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto, params.cParams, zcs->customMem); zcs->cdict = zcs->cdictLocal; if (zcs->cdictLocal == NULL) return ERROR(memory_allocation); } else { if (cdict) { params.cParams = ZSTD_getCParamsFromCDict(cdict); /* cParams are enforced from cdict; it includes windowLog */ } ZSTD_freeCDict(zcs->cdictLocal); zcs->cdictLocal = NULL; zcs->cdict = cdict; } return ZSTD_resetCStream_internal(zcs, NULL, 0, ZSTD_dct_auto, zcs->cdict, params, pledgedSrcSize); } /* ZSTD_initCStream_usingCDict_advanced() : * same as ZSTD_initCStream_usingCDict(), with control over frame parameters */ size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs, const ZSTD_CDict* cdict, ZSTD_frameParameters fParams, unsigned long long pledgedSrcSize) { DEBUGLOG(4, "ZSTD_initCStream_usingCDict_advanced"); if (!cdict) return ERROR(dictionary_wrong); /* cannot handle NULL cdict (does not know what to do) */ { ZSTD_CCtx_params params = zcs->requestedParams; params.cParams = ZSTD_getCParamsFromCDict(cdict); params.fParams = fParams; return ZSTD_initCStream_internal(zcs, NULL, 0, cdict, params, pledgedSrcSize); } } /* note : cdict must outlive compression session */ size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict) { ZSTD_frameParameters const fParams = { 0 /* contentSizeFlag */, 0 /* checksum */, 0 /* hideDictID */ }; DEBUGLOG(4, "ZSTD_initCStream_usingCDict"); return ZSTD_initCStream_usingCDict_advanced(zcs, cdict, fParams, ZSTD_CONTENTSIZE_UNKNOWN); /* note : will check that cdict != NULL */ } /* ZSTD_initCStream_advanced() : * pledgedSrcSize must be exact. * if srcSize is not known at init time, use value ZSTD_CONTENTSIZE_UNKNOWN. * dict is loaded with default parameters ZSTD_dm_auto and ZSTD_dlm_byCopy. 
*/ size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs, const void* dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize) { DEBUGLOG(4, "ZSTD_initCStream_advanced: pledgedSrcSize=%u, flag=%u", (U32)pledgedSrcSize, params.fParams.contentSizeFlag); CHECK_F( ZSTD_checkCParams(params.cParams) ); if ((pledgedSrcSize==0) && (params.fParams.contentSizeFlag==0)) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN; /* for compatibility with older programs relying on this behavior. Users should now specify ZSTD_CONTENTSIZE_UNKNOWN. This line will be removed in the future. */ zcs->requestedParams = ZSTD_assignParamsToCCtxParams(zcs->requestedParams, params); return ZSTD_initCStream_internal(zcs, dict, dictSize, NULL /*cdict*/, zcs->requestedParams, pledgedSrcSize); } size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel) { ZSTD_CCtxParams_init(&zcs->requestedParams, compressionLevel); return ZSTD_initCStream_internal(zcs, dict, dictSize, NULL, zcs->requestedParams, ZSTD_CONTENTSIZE_UNKNOWN); } size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pss) { U64 const pledgedSrcSize = (pss==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss; /* temporary : 0 interpreted as "unknown" during transition period. Users willing to specify "unknown" **must** use ZSTD_CONTENTSIZE_UNKNOWN. `0` will be interpreted as "empty" in the future */ ZSTD_CCtxParams_init(&zcs->requestedParams, compressionLevel); return ZSTD_initCStream_internal(zcs, NULL, 0, NULL, zcs->requestedParams, pledgedSrcSize); } size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel) { DEBUGLOG(4, "ZSTD_initCStream"); return ZSTD_initCStream_srcSize(zcs, compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN); } /*====== Compression ======*/ MEM_STATIC size_t ZSTD_limitCopy(void* dst, size_t dstCapacity, const void* src, size_t srcSize) { size_t const length = MIN(dstCapacity, srcSize); if (length) memcpy(dst, src, length); return length; } /** ZSTD_compressStream_generic(): * internal function for all *compressStream*() variants and *compress_generic() * non-static, because can be called from zstdmt_compress.c * @return : hint size for next input */ size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input, ZSTD_EndDirective const flushMode) { const char* const istart = (const char*)input->src; const char* const iend = istart + input->size; const char* ip = istart + input->pos; char* const ostart = (char*)output->dst; char* const oend = ostart + output->size; char* op = ostart + output->pos; U32 someMoreWork = 1; /* check expectations */ DEBUGLOG(5, "ZSTD_compressStream_generic, flush=%u", (U32)flushMode); assert(zcs->inBuff != NULL); assert(zcs->inBuffSize > 0); assert(zcs->outBuff != NULL); assert(zcs->outBuffSize > 0); assert(output->pos <= output->size); assert(input->pos <= input->size); while (someMoreWork) { switch(zcs->streamStage) { case zcss_init: /* call ZSTD_initCStream() first ! 
*/ return ERROR(init_missing); case zcss_load: if ( (flushMode == ZSTD_e_end) && ((size_t)(oend-op) >= ZSTD_compressBound(iend-ip)) /* enough dstCapacity */ && (zcs->inBuffPos == 0) ) { /* shortcut to compression pass directly into output buffer */ size_t const cSize = ZSTD_compressEnd(zcs, op, oend-op, ip, iend-ip); DEBUGLOG(4, "ZSTD_compressEnd : %u", (U32)cSize); if (ZSTD_isError(cSize)) return cSize; ip = iend; op += cSize; zcs->frameEnded = 1; ZSTD_CCtx_reset(zcs); someMoreWork = 0; break; } /* complete loading into inBuffer */ { size_t const toLoad = zcs->inBuffTarget - zcs->inBuffPos; size_t const loaded = ZSTD_limitCopy( zcs->inBuff + zcs->inBuffPos, toLoad, ip, iend-ip); zcs->inBuffPos += loaded; ip += loaded; if ( (flushMode == ZSTD_e_continue) && (zcs->inBuffPos < zcs->inBuffTarget) ) { /* not enough input to fill full block : stop here */ someMoreWork = 0; break; } if ( (flushMode == ZSTD_e_flush) && (zcs->inBuffPos == zcs->inToCompress) ) { /* empty */ someMoreWork = 0; break; } } /* compress current block (note : this stage cannot be stopped in the middle) */ DEBUGLOG(5, "stream compression stage (flushMode==%u)", flushMode); { void* cDst; size_t cSize; size_t const iSize = zcs->inBuffPos - zcs->inToCompress; size_t oSize = oend-op; unsigned const lastBlock = (flushMode == ZSTD_e_end) && (ip==iend); if (oSize >= ZSTD_compressBound(iSize)) cDst = op; /* compress into output buffer, to skip flush stage */ else cDst = zcs->outBuff, oSize = zcs->outBuffSize; cSize = lastBlock ? ZSTD_compressEnd(zcs, cDst, oSize, zcs->inBuff + zcs->inToCompress, iSize) : ZSTD_compressContinue(zcs, cDst, oSize, zcs->inBuff + zcs->inToCompress, iSize); if (ZSTD_isError(cSize)) return cSize; zcs->frameEnded = lastBlock; /* prepare next block */ zcs->inBuffTarget = zcs->inBuffPos + zcs->blockSize; if (zcs->inBuffTarget > zcs->inBuffSize) zcs->inBuffPos = 0, zcs->inBuffTarget = zcs->blockSize; DEBUGLOG(5, "inBuffTarget:%u / inBuffSize:%u", (U32)zcs->inBuffTarget, (U32)zcs->inBuffSize); if (!lastBlock) assert(zcs->inBuffTarget <= zcs->inBuffSize); zcs->inToCompress = zcs->inBuffPos; if (cDst == op) { /* no need to flush */ op += cSize; if (zcs->frameEnded) { DEBUGLOG(5, "Frame completed directly in outBuffer"); someMoreWork = 0; ZSTD_CCtx_reset(zcs); } break; } zcs->outBuffContentSize = cSize; zcs->outBuffFlushedSize = 0; zcs->streamStage = zcss_flush; /* pass-through to flush stage */ } /* fall-through */ case zcss_flush: DEBUGLOG(5, "flush stage"); { size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize; size_t const flushed = ZSTD_limitCopy(op, oend-op, zcs->outBuff + zcs->outBuffFlushedSize, toFlush); DEBUGLOG(5, "toFlush: %u into %u ==> flushed: %u", (U32)toFlush, (U32)(oend-op), (U32)flushed); op += flushed; zcs->outBuffFlushedSize += flushed; if (toFlush!=flushed) { /* flush not fully completed, presumably because dst is too small */ assert(op==oend); someMoreWork = 0; break; } zcs->outBuffContentSize = zcs->outBuffFlushedSize = 0; if (zcs->frameEnded) { DEBUGLOG(5, "Frame completed on flush"); someMoreWork = 0; ZSTD_CCtx_reset(zcs); break; } zcs->streamStage = zcss_load; break; } default: /* impossible */ assert(0); } } input->pos = ip - istart; output->pos = op - ostart; if (zcs->frameEnded) return 0; { size_t hintInSize = zcs->inBuffTarget - zcs->inBuffPos; if (hintInSize==0) hintInSize = zcs->blockSize; return hintInSize; } } size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input) { /* check conditions */ if (output->pos > output->size) 
return ERROR(GENERIC); if (input->pos > input->size) return ERROR(GENERIC); return ZSTD_compressStream_generic(zcs, output, input, ZSTD_e_continue); } size_t ZSTD_compress_generic (ZSTD_CCtx* cctx, ZSTD_outBuffer* output, ZSTD_inBuffer* input, ZSTD_EndDirective endOp) { DEBUGLOG(5, "ZSTD_compress_generic, endOp=%u ", (U32)endOp); /* check conditions */ if (output->pos > output->size) return ERROR(GENERIC); if (input->pos > input->size) return ERROR(GENERIC); assert(cctx!=NULL); /* transparent initialization stage */ if (cctx->streamStage == zcss_init) { ZSTD_CCtx_params params = cctx->requestedParams; ZSTD_prefixDict const prefixDict = cctx->prefixDict; memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict)); /* single usage */ assert(prefixDict.dict==NULL || cctx->cdict==NULL); /* only one can be set */ DEBUGLOG(4, "ZSTD_compress_generic : transparent init stage"); if (endOp == ZSTD_e_end) cctx->pledgedSrcSizePlusOne = input->size + 1; /* auto-fix pledgedSrcSize */ params.cParams = ZSTD_getCParamsFromCCtxParams( &cctx->requestedParams, cctx->pledgedSrcSizePlusOne-1, 0 /*dictSize*/); #ifdef ZSTD_MULTITHREAD if ((cctx->pledgedSrcSizePlusOne-1) <= ZSTDMT_JOBSIZE_MIN) { params.nbWorkers = 0; /* do not invoke multi-threading when src size is too small */ } if (params.nbWorkers > 0) { /* mt context creation */ if (cctx->mtctx == NULL) { DEBUGLOG(4, "ZSTD_compress_generic: creating new mtctx for nbWorkers=%u", params.nbWorkers); cctx->mtctx = ZSTDMT_createCCtx_advanced(params.nbWorkers, cctx->customMem); if (cctx->mtctx == NULL) return ERROR(memory_allocation); } /* mt compression */ DEBUGLOG(4, "call ZSTDMT_initCStream_internal as nbWorkers=%u", params.nbWorkers); CHECK_F( ZSTDMT_initCStream_internal( cctx->mtctx, prefixDict.dict, prefixDict.dictSize, ZSTD_dct_rawContent, cctx->cdict, params, cctx->pledgedSrcSizePlusOne-1) ); cctx->streamStage = zcss_load; cctx->appliedParams.nbWorkers = params.nbWorkers; } else #endif { CHECK_F( ZSTD_resetCStream_internal(cctx, prefixDict.dict, prefixDict.dictSize, prefixDict.dictContentType, cctx->cdict, params, cctx->pledgedSrcSizePlusOne-1) ); assert(cctx->streamStage == zcss_load); assert(cctx->appliedParams.nbWorkers == 0); } } /* compression stage */ #ifdef ZSTD_MULTITHREAD if (cctx->appliedParams.nbWorkers > 0) { if (cctx->cParamsChanged) { ZSTDMT_updateCParams_whileCompressing(cctx->mtctx, &cctx->requestedParams); cctx->cParamsChanged = 0; } { size_t const flushMin = ZSTDMT_compressStream_generic(cctx->mtctx, output, input, endOp); if ( ZSTD_isError(flushMin) || (endOp == ZSTD_e_end && flushMin == 0) ) { /* compression completed */ ZSTD_CCtx_reset(cctx); } DEBUGLOG(5, "completed ZSTD_compress_generic delegating to ZSTDMT_compressStream_generic"); return flushMin; } } #endif CHECK_F( ZSTD_compressStream_generic(cctx, output, input, endOp) ); DEBUGLOG(5, "completed ZSTD_compress_generic"); return cctx->outBuffContentSize - cctx->outBuffFlushedSize; /* remaining to flush */ } size_t ZSTD_compress_generic_simpleArgs ( ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, size_t* dstPos, const void* src, size_t srcSize, size_t* srcPos, ZSTD_EndDirective endOp) { ZSTD_outBuffer output = { dst, dstCapacity, *dstPos }; ZSTD_inBuffer input = { src, srcSize, *srcPos }; /* ZSTD_compress_generic() will check validity of dstPos and srcPos */ size_t const cErr = ZSTD_compress_generic(cctx, &output, &input, endOp); *dstPos = output.pos; *srcPos = input.pos; return cErr; } /*====== Finalize ======*/ /*! 
ZSTD_flushStream() : * @return : amount of data remaining to flush */ size_t ZSTD_flushStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output) { ZSTD_inBuffer input = { NULL, 0, 0 }; if (output->pos > output->size) return ERROR(GENERIC); CHECK_F( ZSTD_compressStream_generic(zcs, output, &input, ZSTD_e_flush) ); return zcs->outBuffContentSize - zcs->outBuffFlushedSize; /* remaining to flush */ } size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output) { ZSTD_inBuffer input = { NULL, 0, 0 }; if (output->pos > output->size) return ERROR(GENERIC); CHECK_F( ZSTD_compressStream_generic(zcs, output, &input, ZSTD_e_end) ); { size_t const lastBlockSize = zcs->frameEnded ? 0 : ZSTD_BLOCKHEADERSIZE; size_t const checksumSize = zcs->frameEnded ? 0 : zcs->appliedParams.fParams.checksumFlag * 4; size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize + lastBlockSize + checksumSize; DEBUGLOG(4, "ZSTD_endStream : remaining to flush : %u", (U32)toFlush); return toFlush; } } /*-===== Pre-defined compression levels =====-*/ #define ZSTD_MAX_CLEVEL 22 int ZSTD_maxCLevel(void) { return ZSTD_MAX_CLEVEL; } int ZSTD_minCLevel(void) { return (int)-ZSTD_TARGETLENGTH_MAX; } static const ZSTD_compressionParameters ZSTD_defaultCParameters[4][ZSTD_MAX_CLEVEL+1] = { { /* "default" - guarantees a monotonically increasing memory budget */ /* W, C, H, S, L, TL, strat */ { 19, 12, 13, 1, 6, 1, ZSTD_fast }, /* base for negative levels */ { 19, 13, 14, 1, 7, 0, ZSTD_fast }, /* level 1 */ { 19, 15, 16, 1, 6, 0, ZSTD_fast }, /* level 2 */ { 20, 16, 17, 1, 5, 1, ZSTD_dfast }, /* level 3 */ { 20, 18, 18, 1, 5, 1, ZSTD_dfast }, /* level 4 */ { 20, 18, 18, 2, 5, 2, ZSTD_greedy }, /* level 5 */ { 21, 18, 19, 2, 5, 4, ZSTD_lazy }, /* level 6 */ { 21, 18, 19, 3, 5, 8, ZSTD_lazy2 }, /* level 7 */ { 21, 19, 19, 3, 5, 16, ZSTD_lazy2 }, /* level 8 */ { 21, 19, 20, 4, 5, 16, ZSTD_lazy2 }, /* level 9 */ { 21, 20, 21, 4, 5, 16, ZSTD_lazy2 }, /* level 10 */ { 21, 21, 22, 4, 5, 16, ZSTD_lazy2 }, /* level 11 */ { 22, 20, 22, 5, 5, 16, ZSTD_lazy2 }, /* level 12 */ { 22, 21, 22, 4, 5, 32, ZSTD_btlazy2 }, /* level 13 */ { 22, 21, 22, 5, 5, 32, ZSTD_btlazy2 }, /* level 14 */ { 22, 22, 22, 6, 5, 32, ZSTD_btlazy2 }, /* level 15 */ { 22, 21, 22, 4, 5, 48, ZSTD_btopt }, /* level 16 */ { 23, 22, 22, 4, 4, 64, ZSTD_btopt }, /* level 17 */ { 23, 23, 22, 6, 3,256, ZSTD_btopt }, /* level 18 */ { 23, 24, 22, 7, 3,256, ZSTD_btultra }, /* level 19 */ { 25, 25, 23, 7, 3,256, ZSTD_btultra }, /* level 20 */ { 26, 26, 24, 7, 3,512, ZSTD_btultra }, /* level 21 */ { 27, 27, 25, 9, 3,999, ZSTD_btultra }, /* level 22 */ }, { /* for srcSize <= 256 KB */ /* W, C, H, S, L, T, strat */ { 18, 12, 13, 1, 5, 1, ZSTD_fast }, /* base for negative levels */ { 18, 13, 14, 1, 6, 0, ZSTD_fast }, /* level 1 */ { 18, 14, 14, 1, 5, 1, ZSTD_dfast }, /* level 2 */ { 18, 16, 16, 1, 4, 1, ZSTD_dfast }, /* level 3 */ { 18, 16, 17, 2, 5, 2, ZSTD_greedy }, /* level 4.*/ { 18, 18, 18, 3, 5, 2, ZSTD_greedy }, /* level 5.*/ { 18, 18, 19, 3, 5, 4, ZSTD_lazy }, /* level 6.*/ { 18, 18, 19, 4, 4, 4, ZSTD_lazy }, /* level 7 */ { 18, 18, 19, 4, 4, 8, ZSTD_lazy2 }, /* level 8 */ { 18, 18, 19, 5, 4, 8, ZSTD_lazy2 }, /* level 9 */ { 18, 18, 19, 6, 4, 8, ZSTD_lazy2 }, /* level 10 */ { 18, 18, 19, 5, 4, 16, ZSTD_btlazy2 }, /* level 11.*/ { 18, 19, 19, 6, 4, 16, ZSTD_btlazy2 }, /* level 12.*/ { 18, 19, 19, 8, 4, 16, ZSTD_btlazy2 }, /* level 13 */ { 18, 18, 19, 4, 4, 24, ZSTD_btopt }, /* level 14.*/ { 18, 18, 19, 4, 3, 24, ZSTD_btopt }, /* level 15.*/ { 18, 19, 19, 6, 3, 64, ZSTD_btopt 
}, /* level 16.*/ { 18, 19, 19, 8, 3,128, ZSTD_btopt }, /* level 17.*/ { 18, 19, 19, 10, 3,256, ZSTD_btopt }, /* level 18.*/ { 18, 19, 19, 10, 3,256, ZSTD_btultra }, /* level 19.*/ { 18, 19, 19, 11, 3,512, ZSTD_btultra }, /* level 20.*/ { 18, 19, 19, 12, 3,512, ZSTD_btultra }, /* level 21.*/ { 18, 19, 19, 13, 3,999, ZSTD_btultra }, /* level 22.*/ }, { /* for srcSize <= 128 KB */ /* W, C, H, S, L, T, strat */ { 17, 12, 12, 1, 5, 1, ZSTD_fast }, /* base for negative levels */ { 17, 12, 13, 1, 6, 0, ZSTD_fast }, /* level 1 */ { 17, 13, 15, 1, 5, 0, ZSTD_fast }, /* level 2 */ { 17, 15, 16, 2, 5, 1, ZSTD_dfast }, /* level 3 */ { 17, 17, 17, 2, 4, 1, ZSTD_dfast }, /* level 4 */ { 17, 16, 17, 3, 4, 2, ZSTD_greedy }, /* level 5 */ { 17, 17, 17, 3, 4, 4, ZSTD_lazy }, /* level 6 */ { 17, 17, 17, 3, 4, 8, ZSTD_lazy2 }, /* level 7 */ { 17, 17, 17, 4, 4, 8, ZSTD_lazy2 }, /* level 8 */ { 17, 17, 17, 5, 4, 8, ZSTD_lazy2 }, /* level 9 */ { 17, 17, 17, 6, 4, 8, ZSTD_lazy2 }, /* level 10 */ { 17, 17, 17, 7, 4, 8, ZSTD_lazy2 }, /* level 11 */ { 17, 18, 17, 6, 4, 16, ZSTD_btlazy2 }, /* level 12 */ { 17, 18, 17, 8, 4, 16, ZSTD_btlazy2 }, /* level 13.*/ { 17, 18, 17, 4, 4, 32, ZSTD_btopt }, /* level 14.*/ { 17, 18, 17, 6, 3, 64, ZSTD_btopt }, /* level 15.*/ { 17, 18, 17, 7, 3,128, ZSTD_btopt }, /* level 16.*/ { 17, 18, 17, 7, 3,256, ZSTD_btopt }, /* level 17.*/ { 17, 18, 17, 8, 3,256, ZSTD_btopt }, /* level 18.*/ { 17, 18, 17, 8, 3,256, ZSTD_btultra }, /* level 19.*/ { 17, 18, 17, 9, 3,256, ZSTD_btultra }, /* level 20.*/ { 17, 18, 17, 10, 3,256, ZSTD_btultra }, /* level 21.*/ { 17, 18, 17, 11, 3,512, ZSTD_btultra }, /* level 22.*/ }, { /* for srcSize <= 16 KB */ /* W, C, H, S, L, T, strat */ { 14, 12, 13, 1, 5, 1, ZSTD_fast }, /* base for negative levels */ { 14, 14, 15, 1, 5, 0, ZSTD_fast }, /* level 1 */ { 14, 14, 15, 1, 4, 0, ZSTD_fast }, /* level 2 */ { 14, 14, 14, 2, 4, 1, ZSTD_dfast }, /* level 3.*/ { 14, 14, 14, 4, 4, 2, ZSTD_greedy }, /* level 4.*/ { 14, 14, 14, 3, 4, 4, ZSTD_lazy }, /* level 5.*/ { 14, 14, 14, 4, 4, 8, ZSTD_lazy2 }, /* level 6 */ { 14, 14, 14, 6, 4, 8, ZSTD_lazy2 }, /* level 7 */ { 14, 14, 14, 8, 4, 8, ZSTD_lazy2 }, /* level 8.*/ { 14, 15, 14, 5, 4, 8, ZSTD_btlazy2 }, /* level 9.*/ { 14, 15, 14, 9, 4, 8, ZSTD_btlazy2 }, /* level 10.*/ { 14, 15, 14, 3, 4, 12, ZSTD_btopt }, /* level 11.*/ { 14, 15, 14, 6, 3, 16, ZSTD_btopt }, /* level 12.*/ { 14, 15, 14, 6, 3, 24, ZSTD_btopt }, /* level 13.*/ { 14, 15, 15, 6, 3, 48, ZSTD_btopt }, /* level 14.*/ { 14, 15, 15, 6, 3, 64, ZSTD_btopt }, /* level 15.*/ { 14, 15, 15, 6, 3, 96, ZSTD_btopt }, /* level 16.*/ { 14, 15, 15, 6, 3,128, ZSTD_btopt }, /* level 17.*/ { 14, 15, 15, 8, 3,256, ZSTD_btopt }, /* level 18.*/ { 14, 15, 15, 6, 3,256, ZSTD_btultra }, /* level 19.*/ { 14, 15, 15, 8, 3,256, ZSTD_btultra }, /* level 20.*/ { 14, 15, 15, 9, 3,256, ZSTD_btultra }, /* level 21.*/ { 14, 15, 15, 10, 3,512, ZSTD_btultra }, /* level 22.*/ }, }; /*! ZSTD_getCParams() : * @return ZSTD_compressionParameters structure for a selected compression level, srcSize and dictSize. * Size values are optional, provide 0 if not known or unused */ ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize) { size_t const addedSize = srcSizeHint ? 0 : 500; U64 const rSize = srcSizeHint+dictSize ? 
srcSizeHint+dictSize+addedSize : (U64)-1; U32 const tableID = (rSize <= 256 KB) + (rSize <= 128 KB) + (rSize <= 16 KB); /* intentional underflow for srcSizeHint == 0 */ int row = compressionLevel; DEBUGLOG(5, "ZSTD_getCParams (cLevel=%i)", compressionLevel); if (compressionLevel == 0) row = ZSTD_CLEVEL_DEFAULT; /* 0 == default */ if (compressionLevel < 0) row = 0; /* entry 0 is baseline for fast mode */ if (compressionLevel > ZSTD_MAX_CLEVEL) row = ZSTD_MAX_CLEVEL; { ZSTD_compressionParameters cp = ZSTD_defaultCParameters[tableID][row]; if (compressionLevel < 0) cp.targetLength = (unsigned)(-compressionLevel); /* acceleration factor */ return ZSTD_adjustCParams_internal(cp, srcSizeHint, dictSize); } } /*! ZSTD_getParams() : * same as ZSTD_getCParams(), but @return a `ZSTD_parameters` object (instead of `ZSTD_compressionParameters`). * All fields of `ZSTD_frameParameters` are set to default (0) */ ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize) { ZSTD_parameters params; ZSTD_compressionParameters const cParams = ZSTD_getCParams(compressionLevel, srcSizeHint, dictSize); DEBUGLOG(5, "ZSTD_getParams (cLevel=%i)", compressionLevel); memset(&params, 0, sizeof(params)); params.cParams = cParams; params.fParams.contentSizeFlag = 1; return params; }
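/*
 * Illustrative usage sketch: one plausible way a caller could drive the
 * digested-dictionary entry points defined above (ZSTD_createCDict /
 * ZSTD_compress_usingCDict).  It assumes only the public <zstd.h> API;
 * the function name and compression level are arbitrary, and error
 * handling is reduced to a zero return.  A sketch, not a reference use.
 */
#include <stddef.h>
#include <zstd.h>

static size_t example_compress_with_cdict(void *dst, size_t dstCapacity,
                                           const void *src, size_t srcSize,
                                           const void *dict, size_t dictSize)
{
    size_t cSize = 0;
    /* Digest the dictionary once; the resulting CDict can be reused across many frames. */
    ZSTD_CDict *cdict = ZSTD_createCDict(dict, dictSize, 3 /* compression level */);
    ZSTD_CCtx  *cctx  = ZSTD_createCCtx();
    if (cdict != NULL && cctx != NULL) {
        cSize = ZSTD_compress_usingCDict(cctx, dst, dstCapacity, src, srcSize, cdict);
        if (ZSTD_isError(cSize))
            cSize = 0;   /* caller treats 0 as "compression failed" */
    }
    ZSTD_freeCCtx(cctx);    /* both free functions accept NULL */
    ZSTD_freeCDict(cdict);
    return cSize;
}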
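/*
 * Userspace sketch of the System V shared memory calls (shmget/shmat/
 * shmdt/shmctl) whose kernel side is implemented in the ipc/shm.c code
 * that follows.  A minimal sketch assuming <sys/shm.h>; the function
 * name and segment size are arbitrary and error handling is just early
 * returns.
 */
#include <string.h>
#include <sys/ipc.h>
#include <sys/shm.h>

static int example_shm_roundtrip(void)
{
    /* Create a private 4 KiB segment, attach it, write a few bytes, detach, remove. */
    int shmid = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
    if (shmid < 0)
        return -1;

    char *addr = shmat(shmid, NULL, 0);
    if (addr == (void *)-1) {
        shmctl(shmid, IPC_RMID, NULL);   /* still mark it for destruction */
        return -1;
    }

    memcpy(addr, "hello", 6);
    shmdt(addr);

    /* IPC_RMID marks the segment SHM_DEST; it is freed once shm_nattch drops to 0. */
    return shmctl(shmid, IPC_RMID, NULL);
}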
/* * linux/ipc/shm.c * Copyright (C) 1992, 1993 Krishna Balasubramanian * Many improvements/fixes by Bruno Haible. * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994. * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli. * * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com> * BIGMEM support, Andrea Arcangeli <andrea@suse.de> * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr> * HIGHMEM support, Ingo Molnar <mingo@redhat.com> * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com> * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com> * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com> * * support for audit of ipc object properties and permission changes * Dustin Kirkland <dustin.kirkland@us.ibm.com> * * namespaces support * OpenVZ, SWsoft Inc. * Pavel Emelianov <xemul@openvz.org> * * Better ipc lock (kern_ipc_perm.lock) handling * Davidlohr Bueso <davidlohr.bueso@hp.com>, June 2013. */ #include <linux/slab.h> #include <linux/mm.h> #include <linux/hugetlb.h> #include <linux/shm.h> #include <linux/init.h> #include <linux/file.h> #include <linux/mman.h> #include <linux/shmem_fs.h> #include <linux/security.h> #include <linux/syscalls.h> #include <linux/audit.h> #include <linux/capability.h> #include <linux/ptrace.h> #include <linux/seq_file.h> #include <linux/rwsem.h> #include <linux/nsproxy.h> #include <linux/mount.h> #include <linux/ipc_namespace.h> #include <asm/uaccess.h> #include "util.h" struct shm_file_data { int id; struct ipc_namespace *ns; struct file *file; const struct vm_operations_struct *vm_ops; }; #define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data)) static const struct file_operations shm_file_operations; static const struct vm_operations_struct shm_vm_ops; #define shm_ids(ns) ((ns)->ids[IPC_SHM_IDS]) #define shm_unlock(shp) \ ipc_unlock(&(shp)->shm_perm) static int newseg(struct ipc_namespace *, struct ipc_params *); static void shm_open(struct vm_area_struct *vma); static void shm_close(struct vm_area_struct *vma); static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp); #ifdef CONFIG_PROC_FS static int sysvipc_shm_proc_show(struct seq_file *s, void *it); #endif void shm_init_ns(struct ipc_namespace *ns) { ns->shm_ctlmax = SHMMAX; ns->shm_ctlall = SHMALL; ns->shm_ctlmni = SHMMNI; ns->shm_rmid_forced = 0; ns->shm_tot = 0; ipc_init_ids(&shm_ids(ns)); } /* * Called with shm_ids.rwsem (writer) and the shp structure locked. * Only shm_ids.rwsem remains locked on exit. 
*/ static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp) { struct shmid_kernel *shp; shp = container_of(ipcp, struct shmid_kernel, shm_perm); if (shp->shm_nattch){ shp->shm_perm.mode |= SHM_DEST; /* Do not find it any more */ shp->shm_perm.key = IPC_PRIVATE; shm_unlock(shp); } else shm_destroy(ns, shp); } #ifdef CONFIG_IPC_NS void shm_exit_ns(struct ipc_namespace *ns) { free_ipcs(ns, &shm_ids(ns), do_shm_rmid); idr_destroy(&ns->ids[IPC_SHM_IDS].ipcs_idr); } #endif static int __init ipc_ns_init(void) { shm_init_ns(&init_ipc_ns); return 0; } pure_initcall(ipc_ns_init); void __init shm_init (void) { ipc_init_proc_interface("sysvipc/shm", #if BITS_PER_LONG <= 32 " key shmid perms size cpid lpid nattch uid gid cuid cgid atime dtime ctime rss swap\n", #else " key shmid perms size cpid lpid nattch uid gid cuid cgid atime dtime ctime rss swap\n", #endif IPC_SHM_IDS, sysvipc_shm_proc_show); } static inline struct shmid_kernel *shm_obtain_object(struct ipc_namespace *ns, int id) { struct kern_ipc_perm *ipcp = ipc_obtain_object(&shm_ids(ns), id); if (IS_ERR(ipcp)) return ERR_CAST(ipcp); return container_of(ipcp, struct shmid_kernel, shm_perm); } static inline struct shmid_kernel *shm_obtain_object_check(struct ipc_namespace *ns, int id) { struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&shm_ids(ns), id); if (IS_ERR(ipcp)) return ERR_CAST(ipcp); return container_of(ipcp, struct shmid_kernel, shm_perm); } /* * shm_lock_(check_) routines are called in the paths where the rwsem * is not necessarily held. */ static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id) { struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id); if (IS_ERR(ipcp)) return (struct shmid_kernel *)ipcp; return container_of(ipcp, struct shmid_kernel, shm_perm); } static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp) { rcu_read_lock(); ipc_lock_object(&ipcp->shm_perm); } static void shm_rcu_free(struct rcu_head *head) { struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu); struct shmid_kernel *shp = ipc_rcu_to_struct(p); security_shm_free(shp); ipc_rcu_free(head); } static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s) { ipc_rmid(&shm_ids(ns), &s->shm_perm); } /* This is called by fork, once for every shm attach. */ static void shm_open(struct vm_area_struct *vma) { struct file *file = vma->vm_file; struct shm_file_data *sfd = shm_file_data(file); struct shmid_kernel *shp; shp = shm_lock(sfd->ns, sfd->id); BUG_ON(IS_ERR(shp)); shp->shm_atim = get_seconds(); shp->shm_lprid = task_tgid_vnr(current); shp->shm_nattch++; shm_unlock(shp); } /* * shm_destroy - free the struct shmid_kernel * * @ns: namespace * @shp: struct to free * * It has to be called with shp and shm_ids.rwsem (writer) locked, * but returns with shp unlocked and freed. 
*/ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp) { struct file *shm_file; shm_file = shp->shm_file; shp->shm_file = NULL; ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT; shm_rmid(ns, shp); shm_unlock(shp); if (!is_file_hugepages(shm_file)) shmem_lock(shm_file, 0, shp->mlock_user); else if (shp->mlock_user) user_shm_unlock(file_inode(shm_file)->i_size, shp->mlock_user); fput(shm_file); ipc_rcu_putref(shp, shm_rcu_free); } /* * shm_may_destroy - identifies whether shm segment should be destroyed now * * Returns true if and only if there are no active users of the segment and * one of the following is true: * * 1) shmctl(id, IPC_RMID, NULL) was called for this shp * * 2) sysctl kernel.shm_rmid_forced is set to 1. */ static bool shm_may_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp) { return (shp->shm_nattch == 0) && (ns->shm_rmid_forced || (shp->shm_perm.mode & SHM_DEST)); } /* * remove the attach descriptor vma. * free memory for segment if it is marked destroyed. * The descriptor has already been removed from the current->mm->mmap list * and will later be kfree()d. */ static void shm_close(struct vm_area_struct *vma) { struct file * file = vma->vm_file; struct shm_file_data *sfd = shm_file_data(file); struct shmid_kernel *shp; struct ipc_namespace *ns = sfd->ns; down_write(&shm_ids(ns).rwsem); /* remove from the list of attaches of the shm segment */ shp = shm_lock(ns, sfd->id); BUG_ON(IS_ERR(shp)); shp->shm_lprid = task_tgid_vnr(current); shp->shm_dtim = get_seconds(); shp->shm_nattch--; if (shm_may_destroy(ns, shp)) shm_destroy(ns, shp); else shm_unlock(shp); up_write(&shm_ids(ns).rwsem); } /* Called with ns->shm_ids(ns).rwsem locked */ static int shm_try_destroy_current(int id, void *p, void *data) { struct ipc_namespace *ns = data; struct kern_ipc_perm *ipcp = p; struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm); if (shp->shm_creator != current) return 0; /* * Mark it as orphaned to destroy the segment when * kernel.shm_rmid_forced is changed. * It is noop if the following shm_may_destroy() returns true. */ shp->shm_creator = NULL; /* * Don't even try to destroy it. If shm_rmid_forced=0 and IPC_RMID * is not set, it shouldn't be deleted here. */ if (!ns->shm_rmid_forced) return 0; if (shm_may_destroy(ns, shp)) { shm_lock_by_ptr(shp); shm_destroy(ns, shp); } return 0; } /* Called with ns->shm_ids(ns).rwsem locked */ static int shm_try_destroy_orphaned(int id, void *p, void *data) { struct ipc_namespace *ns = data; struct kern_ipc_perm *ipcp = p; struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm); /* * We want to destroy segments without users and with already * exit'ed originating process. * * As shp->* are changed under rwsem, it's safe to skip shp locking. 
*/ if (shp->shm_creator != NULL) return 0; if (shm_may_destroy(ns, shp)) { shm_lock_by_ptr(shp); shm_destroy(ns, shp); } return 0; } void shm_destroy_orphaned(struct ipc_namespace *ns) { down_write(&shm_ids(ns).rwsem); if (shm_ids(ns).in_use) idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns); up_write(&shm_ids(ns).rwsem); } void exit_shm(struct task_struct *task) { struct ipc_namespace *ns = task->nsproxy->ipc_ns; if (shm_ids(ns).in_use == 0) return; /* Destroy all already created segments, but not mapped yet */ down_write(&shm_ids(ns).rwsem); if (shm_ids(ns).in_use) idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_current, ns); up_write(&shm_ids(ns).rwsem); } static int shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { struct file *file = vma->vm_file; struct shm_file_data *sfd = shm_file_data(file); return sfd->vm_ops->fault(vma, vmf); } #ifdef CONFIG_NUMA static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new) { struct file *file = vma->vm_file; struct shm_file_data *sfd = shm_file_data(file); int err = 0; if (sfd->vm_ops->set_policy) err = sfd->vm_ops->set_policy(vma, new); return err; } static struct mempolicy *shm_get_policy(struct vm_area_struct *vma, unsigned long addr) { struct file *file = vma->vm_file; struct shm_file_data *sfd = shm_file_data(file); struct mempolicy *pol = NULL; if (sfd->vm_ops->get_policy) pol = sfd->vm_ops->get_policy(vma, addr); else if (vma->vm_policy) pol = vma->vm_policy; return pol; } #endif static int shm_mmap(struct file * file, struct vm_area_struct * vma) { struct shm_file_data *sfd = shm_file_data(file); int ret; ret = sfd->file->f_op->mmap(sfd->file, vma); if (ret != 0) return ret; sfd->vm_ops = vma->vm_ops; #ifdef CONFIG_MMU BUG_ON(!sfd->vm_ops->fault); #endif vma->vm_ops = &shm_vm_ops; shm_open(vma); return ret; } static int shm_release(struct inode *ino, struct file *file) { struct shm_file_data *sfd = shm_file_data(file); put_ipc_ns(sfd->ns); shm_file_data(file) = NULL; kfree(sfd); return 0; } static int shm_fsync(struct file *file, loff_t start, loff_t end, int datasync) { struct shm_file_data *sfd = shm_file_data(file); if (!sfd->file->f_op->fsync) return -EINVAL; return sfd->file->f_op->fsync(sfd->file, start, end, datasync); } static long shm_fallocate(struct file *file, int mode, loff_t offset, loff_t len) { struct shm_file_data *sfd = shm_file_data(file); if (!sfd->file->f_op->fallocate) return -EOPNOTSUPP; return sfd->file->f_op->fallocate(file, mode, offset, len); } static unsigned long shm_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { struct shm_file_data *sfd = shm_file_data(file); return sfd->file->f_op->get_unmapped_area(sfd->file, addr, len, pgoff, flags); } static const struct file_operations shm_file_operations = { .mmap = shm_mmap, .fsync = shm_fsync, .release = shm_release, #ifndef CONFIG_MMU .get_unmapped_area = shm_get_unmapped_area, #endif .llseek = noop_llseek, .fallocate = shm_fallocate, }; static const struct file_operations shm_file_operations_huge = { .mmap = shm_mmap, .fsync = shm_fsync, .release = shm_release, .get_unmapped_area = shm_get_unmapped_area, .llseek = noop_llseek, .fallocate = shm_fallocate, }; int is_file_shm_hugepages(struct file *file) { return file->f_op == &shm_file_operations_huge; } static const struct vm_operations_struct shm_vm_ops = { .open = shm_open, /* callback for a new vm-area open */ .close = shm_close, /* callback for when the vm-area is released */ .fault = 
shm_fault, #if defined(CONFIG_NUMA) .set_policy = shm_set_policy, .get_policy = shm_get_policy, #endif }; /** * newseg - Create a new shared memory segment * @ns: namespace * @params: ptr to the structure that contains key, size and shmflg * * Called with shm_ids.rwsem held as a writer. */ static int newseg(struct ipc_namespace *ns, struct ipc_params *params) { key_t key = params->key; int shmflg = params->flg; size_t size = params->u.size; int error; struct shmid_kernel *shp; size_t numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; struct file * file; char name[13]; int id; vm_flags_t acctflag = 0; if (size < SHMMIN || size > ns->shm_ctlmax) return -EINVAL; if (ns->shm_tot + numpages > ns->shm_ctlall) return -ENOSPC; shp = ipc_rcu_alloc(sizeof(*shp)); if (!shp) return -ENOMEM; shp->shm_perm.key = key; shp->shm_perm.mode = (shmflg & S_IRWXUGO); shp->mlock_user = NULL; shp->shm_perm.security = NULL; error = security_shm_alloc(shp); if (error) { ipc_rcu_putref(shp, ipc_rcu_free); return error; } sprintf (name, "SYSV%08x", key); if (shmflg & SHM_HUGETLB) { struct hstate *hs; size_t hugesize; hs = hstate_sizelog((shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK); if (!hs) { error = -EINVAL; goto no_file; } hugesize = ALIGN(size, huge_page_size(hs)); /* hugetlb_file_setup applies strict accounting */ if (shmflg & SHM_NORESERVE) acctflag = VM_NORESERVE; file = hugetlb_file_setup(name, hugesize, acctflag, &shp->mlock_user, HUGETLB_SHMFS_INODE, (shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK); } else { /* * Do not allow no accounting for OVERCOMMIT_NEVER, even * if it's asked for. */ if ((shmflg & SHM_NORESERVE) && sysctl_overcommit_memory != OVERCOMMIT_NEVER) acctflag = VM_NORESERVE; file = shmem_file_setup(name, size, acctflag); } error = PTR_ERR(file); if (IS_ERR(file)) goto no_file; id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni); if (id < 0) { error = id; goto no_id; } shp->shm_cprid = task_tgid_vnr(current); shp->shm_lprid = 0; shp->shm_atim = shp->shm_dtim = 0; shp->shm_ctim = get_seconds(); shp->shm_segsz = size; shp->shm_nattch = 0; shp->shm_file = file; shp->shm_creator = current; /* * shmid gets reported as "inode#" in /proc/pid/maps. * proc-ps tools use this. Changing this will break them. */ file_inode(file)->i_ino = shp->shm_perm.id; ns->shm_tot += numpages; error = shp->shm_perm.id; ipc_unlock_object(&shp->shm_perm); rcu_read_unlock(); return error; no_id: if (is_file_hugepages(file) && shp->mlock_user) user_shm_unlock(size, shp->mlock_user); fput(file); no_file: ipc_rcu_putref(shp, shm_rcu_free); return error; } /* * Called with shm_ids.rwsem and ipcp locked. */ static inline int shm_security(struct kern_ipc_perm *ipcp, int shmflg) { struct shmid_kernel *shp; shp = container_of(ipcp, struct shmid_kernel, shm_perm); return security_shm_associate(shp, shmflg); } /* * Called with shm_ids.rwsem and ipcp locked. 
*/ static inline int shm_more_checks(struct kern_ipc_perm *ipcp, struct ipc_params *params) { struct shmid_kernel *shp; shp = container_of(ipcp, struct shmid_kernel, shm_perm); if (shp->shm_segsz < params->u.size) return -EINVAL; return 0; } SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg) { struct ipc_namespace *ns; struct ipc_ops shm_ops; struct ipc_params shm_params; ns = current->nsproxy->ipc_ns; shm_ops.getnew = newseg; shm_ops.associate = shm_security; shm_ops.more_checks = shm_more_checks; shm_params.key = key; shm_params.flg = shmflg; shm_params.u.size = size; return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params); } static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version) { switch(version) { case IPC_64: return copy_to_user(buf, in, sizeof(*in)); case IPC_OLD: { struct shmid_ds out; memset(&out, 0, sizeof(out)); ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm); out.shm_segsz = in->shm_segsz; out.shm_atime = in->shm_atime; out.shm_dtime = in->shm_dtime; out.shm_ctime = in->shm_ctime; out.shm_cpid = in->shm_cpid; out.shm_lpid = in->shm_lpid; out.shm_nattch = in->shm_nattch; return copy_to_user(buf, &out, sizeof(out)); } default: return -EINVAL; } } static inline unsigned long copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version) { switch(version) { case IPC_64: if (copy_from_user(out, buf, sizeof(*out))) return -EFAULT; return 0; case IPC_OLD: { struct shmid_ds tbuf_old; if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old))) return -EFAULT; out->shm_perm.uid = tbuf_old.shm_perm.uid; out->shm_perm.gid = tbuf_old.shm_perm.gid; out->shm_perm.mode = tbuf_old.shm_perm.mode; return 0; } default: return -EINVAL; } } static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version) { switch(version) { case IPC_64: return copy_to_user(buf, in, sizeof(*in)); case IPC_OLD: { struct shminfo out; if(in->shmmax > INT_MAX) out.shmmax = INT_MAX; else out.shmmax = (int)in->shmmax; out.shmmin = in->shmmin; out.shmmni = in->shmmni; out.shmseg = in->shmseg; out.shmall = in->shmall; return copy_to_user(buf, &out, sizeof(out)); } default: return -EINVAL; } } /* * Calculate and add used RSS and swap pages of a shm. * Called with shm_ids.rwsem held as a reader */ static void shm_add_rss_swap(struct shmid_kernel *shp, unsigned long *rss_add, unsigned long *swp_add) { struct inode *inode; inode = file_inode(shp->shm_file); if (is_file_hugepages(shp->shm_file)) { struct address_space *mapping = inode->i_mapping; struct hstate *h = hstate_file(shp->shm_file); *rss_add += pages_per_huge_page(h) * mapping->nrpages; } else { #ifdef CONFIG_SHMEM struct shmem_inode_info *info = SHMEM_I(inode); spin_lock(&info->lock); *rss_add += inode->i_mapping->nrpages; *swp_add += info->swapped; spin_unlock(&info->lock); #else *rss_add += inode->i_mapping->nrpages; #endif } } /* * Called with shm_ids.rwsem held as a reader */ static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss, unsigned long *swp) { int next_id; int total, in_use; *rss = 0; *swp = 0; in_use = shm_ids(ns).in_use; for (total = 0, next_id = 0; total < in_use; next_id++) { struct kern_ipc_perm *ipc; struct shmid_kernel *shp; ipc = idr_find(&shm_ids(ns).ipcs_idr, next_id); if (ipc == NULL) continue; shp = container_of(ipc, struct shmid_kernel, shm_perm); shm_add_rss_swap(shp, rss, swp); total++; } } /* * This function handles some shmctl commands which require the rwsem * to be held in write mode. 
* NOTE: no locks must be held, the rwsem is taken inside this function. */ static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd, struct shmid_ds __user *buf, int version) { struct kern_ipc_perm *ipcp; struct shmid64_ds shmid64; struct shmid_kernel *shp; int err; if (cmd == IPC_SET) { if (copy_shmid_from_user(&shmid64, buf, version)) return -EFAULT; } down_write(&shm_ids(ns).rwsem); rcu_read_lock(); ipcp = ipcctl_pre_down_nolock(ns, &shm_ids(ns), shmid, cmd, &shmid64.shm_perm, 0); if (IS_ERR(ipcp)) { err = PTR_ERR(ipcp); goto out_unlock1; } shp = container_of(ipcp, struct shmid_kernel, shm_perm); err = security_shm_shmctl(shp, cmd); if (err) goto out_unlock1; switch (cmd) { case IPC_RMID: ipc_lock_object(&shp->shm_perm); /* do_shm_rmid unlocks the ipc object and rcu */ do_shm_rmid(ns, ipcp); goto out_up; case IPC_SET: ipc_lock_object(&shp->shm_perm); err = ipc_update_perm(&shmid64.shm_perm, ipcp); if (err) goto out_unlock0; shp->shm_ctim = get_seconds(); break; default: err = -EINVAL; goto out_unlock1; } out_unlock0: ipc_unlock_object(&shp->shm_perm); out_unlock1: rcu_read_unlock(); out_up: up_write(&shm_ids(ns).rwsem); return err; } static int shmctl_nolock(struct ipc_namespace *ns, int shmid, int cmd, int version, void __user *buf) { int err; struct shmid_kernel *shp; /* preliminary security checks for *_INFO */ if (cmd == IPC_INFO || cmd == SHM_INFO) { err = security_shm_shmctl(NULL, cmd); if (err) return err; } switch (cmd) { case IPC_INFO: { struct shminfo64 shminfo; memset(&shminfo, 0, sizeof(shminfo)); shminfo.shmmni = shminfo.shmseg = ns->shm_ctlmni; shminfo.shmmax = ns->shm_ctlmax; shminfo.shmall = ns->shm_ctlall; shminfo.shmmin = SHMMIN; if(copy_shminfo_to_user (buf, &shminfo, version)) return -EFAULT; down_read(&shm_ids(ns).rwsem); err = ipc_get_maxid(&shm_ids(ns)); up_read(&shm_ids(ns).rwsem); if(err<0) err = 0; goto out; } case SHM_INFO: { struct shm_info shm_info; memset(&shm_info, 0, sizeof(shm_info)); down_read(&shm_ids(ns).rwsem); shm_info.used_ids = shm_ids(ns).in_use; shm_get_stat (ns, &shm_info.shm_rss, &shm_info.shm_swp); shm_info.shm_tot = ns->shm_tot; shm_info.swap_attempts = 0; shm_info.swap_successes = 0; err = ipc_get_maxid(&shm_ids(ns)); up_read(&shm_ids(ns).rwsem); if (copy_to_user(buf, &shm_info, sizeof(shm_info))) { err = -EFAULT; goto out; } err = err < 0 ? 
0 : err; goto out; } case SHM_STAT: case IPC_STAT: { struct shmid64_ds tbuf; int result; rcu_read_lock(); if (cmd == SHM_STAT) { shp = shm_obtain_object(ns, shmid); if (IS_ERR(shp)) { err = PTR_ERR(shp); goto out_unlock; } result = shp->shm_perm.id; } else { shp = shm_obtain_object_check(ns, shmid); if (IS_ERR(shp)) { err = PTR_ERR(shp); goto out_unlock; } result = 0; } err = -EACCES; if (ipcperms(ns, &shp->shm_perm, S_IRUGO)) goto out_unlock; err = security_shm_shmctl(shp, cmd); if (err) goto out_unlock; memset(&tbuf, 0, sizeof(tbuf)); kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm); tbuf.shm_segsz = shp->shm_segsz; tbuf.shm_atime = shp->shm_atim; tbuf.shm_dtime = shp->shm_dtim; tbuf.shm_ctime = shp->shm_ctim; tbuf.shm_cpid = shp->shm_cprid; tbuf.shm_lpid = shp->shm_lprid; tbuf.shm_nattch = shp->shm_nattch; rcu_read_unlock(); if (copy_shmid_to_user(buf, &tbuf, version)) err = -EFAULT; else err = result; goto out; } default: return -EINVAL; } out_unlock: rcu_read_unlock(); out: return err; } SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf) { struct shmid_kernel *shp; int err, version; struct ipc_namespace *ns; if (cmd < 0 || shmid < 0) return -EINVAL; version = ipc_parse_version(&cmd); ns = current->nsproxy->ipc_ns; switch (cmd) { case IPC_INFO: case SHM_INFO: case SHM_STAT: case IPC_STAT: return shmctl_nolock(ns, shmid, cmd, version, buf); case IPC_RMID: case IPC_SET: return shmctl_down(ns, shmid, cmd, buf, version); case SHM_LOCK: case SHM_UNLOCK: { struct file *shm_file; rcu_read_lock(); shp = shm_obtain_object_check(ns, shmid); if (IS_ERR(shp)) { err = PTR_ERR(shp); goto out_unlock1; } audit_ipc_obj(&(shp->shm_perm)); err = security_shm_shmctl(shp, cmd); if (err) goto out_unlock1; ipc_lock_object(&shp->shm_perm); if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) { kuid_t euid = current_euid(); err = -EPERM; if (!uid_eq(euid, shp->shm_perm.uid) && !uid_eq(euid, shp->shm_perm.cuid)) goto out_unlock0; if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK)) goto out_unlock0; } shm_file = shp->shm_file; /* check if shm_destroy() is tearing down shp */ if (shm_file == NULL) { err = -EIDRM; goto out_unlock0; } if (is_file_hugepages(shm_file)) goto out_unlock0; if (cmd == SHM_LOCK) { struct user_struct *user = current_user(); err = shmem_lock(shm_file, 1, user); if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) { shp->shm_perm.mode |= SHM_LOCKED; shp->mlock_user = user; } goto out_unlock0; } /* SHM_UNLOCK */ if (!(shp->shm_perm.mode & SHM_LOCKED)) goto out_unlock0; shmem_lock(shm_file, 0, shp->mlock_user); shp->shm_perm.mode &= ~SHM_LOCKED; shp->mlock_user = NULL; get_file(shm_file); ipc_unlock_object(&shp->shm_perm); rcu_read_unlock(); shmem_unlock_mapping(shm_file->f_mapping); fput(shm_file); return err; } default: return -EINVAL; } out_unlock0: ipc_unlock_object(&shp->shm_perm); out_unlock1: rcu_read_unlock(); return err; } /* * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists. * * NOTE! Despite the name, this is NOT a direct system call entrypoint. The * "raddr" thing points to kernel space, and there has to be a wrapper around * this. 
*/ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr, unsigned long shmlba) { struct shmid_kernel *shp; unsigned long addr; unsigned long size; struct file * file; int err; unsigned long flags; unsigned long prot; int acc_mode; struct ipc_namespace *ns; struct shm_file_data *sfd; struct path path; fmode_t f_mode; unsigned long populate = 0; err = -EINVAL; if (shmid < 0) goto out; else if ((addr = (ulong)shmaddr)) { if (addr & (shmlba - 1)) { if (shmflg & SHM_RND) addr &= ~(shmlba - 1); /* round down */ else #ifndef __ARCH_FORCE_SHMLBA if (addr & ~PAGE_MASK) #endif goto out; } flags = MAP_SHARED | MAP_FIXED; } else { if ((shmflg & SHM_REMAP)) goto out; flags = MAP_SHARED; } if (shmflg & SHM_RDONLY) { prot = PROT_READ; acc_mode = S_IRUGO; f_mode = FMODE_READ; } else { prot = PROT_READ | PROT_WRITE; acc_mode = S_IRUGO | S_IWUGO; f_mode = FMODE_READ | FMODE_WRITE; } if (shmflg & SHM_EXEC) { prot |= PROT_EXEC; acc_mode |= S_IXUGO; } /* * We cannot rely on the fs check since SYSV IPC does have an * additional creator id... */ ns = current->nsproxy->ipc_ns; rcu_read_lock(); shp = shm_obtain_object_check(ns, shmid); if (IS_ERR(shp)) { err = PTR_ERR(shp); goto out_unlock; } err = -EACCES; if (ipcperms(ns, &shp->shm_perm, acc_mode)) goto out_unlock; err = security_shm_shmat(shp, shmaddr, shmflg); if (err) goto out_unlock; ipc_lock_object(&shp->shm_perm); /* check if shm_destroy() is tearing down shp */ if (shp->shm_file == NULL) { ipc_unlock_object(&shp->shm_perm); err = -EIDRM; goto out_unlock; } path = shp->shm_file->f_path; path_get(&path); shp->shm_nattch++; size = i_size_read(path.dentry->d_inode); ipc_unlock_object(&shp->shm_perm); rcu_read_unlock(); err = -ENOMEM; sfd = kzalloc(sizeof(*sfd), GFP_KERNEL); if (!sfd) { path_put(&path); goto out_nattch; } file = alloc_file(&path, f_mode, is_file_hugepages(shp->shm_file) ? &shm_file_operations_huge : &shm_file_operations); err = PTR_ERR(file); if (IS_ERR(file)) { kfree(sfd); path_put(&path); goto out_nattch; } file->private_data = sfd; file->f_mapping = shp->shm_file->f_mapping; sfd->id = shp->shm_perm.id; sfd->ns = get_ipc_ns(ns); sfd->file = shp->shm_file; sfd->vm_ops = NULL; err = security_mmap_file(file, prot, flags); if (err) goto out_fput; down_write(&current->mm->mmap_sem); if (addr && !(shmflg & SHM_REMAP)) { err = -EINVAL; if (find_vma_intersection(current->mm, addr, addr + size)) goto invalid; /* * If shm segment goes below stack, make sure there is some * space left for the stack to grow (at least 4 pages). */ if (addr < current->mm->start_stack && addr > current->mm->start_stack - size - PAGE_SIZE * 5) goto invalid; } addr = do_mmap_pgoff(file, addr, size, prot, flags, 0, &populate); *raddr = addr; err = 0; if (IS_ERR_VALUE(addr)) err = (long)addr; invalid: up_write(&current->mm->mmap_sem); if (populate) mm_populate(addr, populate); out_fput: fput(file); out_nattch: down_write(&shm_ids(ns).rwsem); shp = shm_lock(ns, shmid); BUG_ON(IS_ERR(shp)); shp->shm_nattch--; if (shm_may_destroy(ns, shp)) shm_destroy(ns, shp); else shm_unlock(shp); up_write(&shm_ids(ns).rwsem); return err; out_unlock: rcu_read_unlock(); out: return err; } SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg) { unsigned long ret; long err; err = do_shmat(shmid, shmaddr, shmflg, &ret, SHMLBA); if (err) return err; force_successful_syscall_return(); return (long)ret; } /* * detach and kill segment if marked destroyed. * The work is done in shm_close. 
*/ SYSCALL_DEFINE1(shmdt, char __user *, shmaddr) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma; unsigned long addr = (unsigned long)shmaddr; int retval = -EINVAL; #ifdef CONFIG_MMU loff_t size = 0; struct vm_area_struct *next; #endif if (addr & ~PAGE_MASK) return retval; down_write(&mm->mmap_sem); /* * This function tries to be smart and unmap shm segments that * were modified by partial mlock or munmap calls: * - It first determines the size of the shm segment that should be * unmapped: It searches for a vma that is backed by shm and that * started at address shmaddr. It records it's size and then unmaps * it. * - Then it unmaps all shm vmas that started at shmaddr and that * are within the initially determined size. * Errors from do_munmap are ignored: the function only fails if * it's called with invalid parameters or if it's called to unmap * a part of a vma. Both calls in this function are for full vmas, * the parameters are directly copied from the vma itself and always * valid - therefore do_munmap cannot fail. (famous last words?) */ /* * If it had been mremap()'d, the starting address would not * match the usual checks anyway. So assume all vma's are * above the starting address given. */ vma = find_vma(mm, addr); #ifdef CONFIG_MMU while (vma) { next = vma->vm_next; /* * Check if the starting address would match, i.e. it's * a fragment created by mprotect() and/or munmap(), or it * otherwise it starts at this address with no hassles. */ if ((vma->vm_ops == &shm_vm_ops) && (vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) { size = file_inode(vma->vm_file)->i_size; do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start); /* * We discovered the size of the shm segment, so * break out of here and fall through to the next * loop that uses the size information to stop * searching for matching vma's. */ retval = 0; vma = next; break; } vma = next; } /* * We need look no further than the maximum address a fragment * could possibly have landed at. Also cast things to loff_t to * prevent overflows and make comparisons vs. equal-width types. 
*/ size = PAGE_ALIGN(size); while (vma && (loff_t)(vma->vm_end - addr) <= size) { next = vma->vm_next; /* finding a matching vma now does not alter retval */ if ((vma->vm_ops == &shm_vm_ops) && (vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start); vma = next; } #else /* CONFIG_MMU */ /* under NOMMU conditions, the exact address to be destroyed must be * given */ if (vma && vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) { do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start); retval = 0; } #endif up_write(&mm->mmap_sem); return retval; } #ifdef CONFIG_PROC_FS static int sysvipc_shm_proc_show(struct seq_file *s, void *it) { struct user_namespace *user_ns = seq_user_ns(s); struct shmid_kernel *shp = it; unsigned long rss = 0, swp = 0; shm_add_rss_swap(shp, &rss, &swp); #if BITS_PER_LONG <= 32 #define SIZE_SPEC "%10lu" #else #define SIZE_SPEC "%21lu" #endif return seq_printf(s, "%10d %10d %4o " SIZE_SPEC " %5u %5u " "%5lu %5u %5u %5u %5u %10lu %10lu %10lu " SIZE_SPEC " " SIZE_SPEC "\n", shp->shm_perm.key, shp->shm_perm.id, shp->shm_perm.mode, shp->shm_segsz, shp->shm_cprid, shp->shm_lprid, shp->shm_nattch, from_kuid_munged(user_ns, shp->shm_perm.uid), from_kgid_munged(user_ns, shp->shm_perm.gid), from_kuid_munged(user_ns, shp->shm_perm.cuid), from_kgid_munged(user_ns, shp->shm_perm.cgid), shp->shm_atim, shp->shm_dtim, shp->shm_ctim, rss * PAGE_SIZE, swp * PAGE_SIZE); } #endif
./CrossVul/dataset_final_sorted/CWE-362/c/good_5829_0
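The file above supplies the kernel side of the System V shared-memory interface (shmget, do_shmat/shmat, shmdt, shmctl). As a minimal user-space sketch of how those entry points are exercised, not part of the dataset file itself, the following standalone program creates a private segment, attaches it, writes through the mapping, then detaches and marks the segment for destruction:

/*
 * Minimal user-space sketch exercising the SysV shm syscalls implemented
 * above; error handling is deliberately terse.
 */
#include <stdio.h>
#include <string.h>
#include <sys/ipc.h>
#include <sys/shm.h>

int main(void)
{
        /* IPC_PRIVATE asks for a fresh key; 0600 restricts access to the owner. */
        int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
        if (id < 0) {
                perror("shmget");
                return 1;
        }

        /* shmat() lands in do_shmat(); a NULL address lets the kernel choose one. */
        char *p = shmat(id, NULL, 0);
        if (p == (char *)-1) {
                perror("shmat");
                return 1;
        }

        strcpy(p, "hello from SysV shm");
        printf("%s\n", p);

        shmdt(p);                   /* detach: handled by sys_shmdt() above */
        shmctl(id, IPC_RMID, NULL); /* destroy once the attach count drops to zero */
        return 0;
}

Destruction after IPC_RMID is deferred until shm_nattch reaches zero, which is exactly the accounting that do_shmat() and shm_close() maintain in the code above.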
crossvul-cpp_data_bad_2386_0
/* Key garbage collector * * Copyright (C) 2009-2011 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public Licence * as published by the Free Software Foundation; either version * 2 of the Licence, or (at your option) any later version. */ #include <linux/module.h> #include <linux/slab.h> #include <linux/security.h> #include <keys/keyring-type.h> #include "internal.h" /* * Delay between key revocation/expiry in seconds */ unsigned key_gc_delay = 5 * 60; /* * Reaper for unused keys. */ static void key_garbage_collector(struct work_struct *work); DECLARE_WORK(key_gc_work, key_garbage_collector); /* * Reaper for links from keyrings to dead keys. */ static void key_gc_timer_func(unsigned long); static DEFINE_TIMER(key_gc_timer, key_gc_timer_func, 0, 0); static time_t key_gc_next_run = LONG_MAX; static struct key_type *key_gc_dead_keytype; static unsigned long key_gc_flags; #define KEY_GC_KEY_EXPIRED 0 /* A key expired and needs unlinking */ #define KEY_GC_REAP_KEYTYPE 1 /* A keytype is being unregistered */ #define KEY_GC_REAPING_KEYTYPE 2 /* Cleared when keytype reaped */ /* * Any key whose type gets unregistered will be re-typed to this if it can't be * immediately unlinked. */ struct key_type key_type_dead = { .name = "dead", }; /* * Schedule a garbage collection run. * - time precision isn't particularly important */ void key_schedule_gc(time_t gc_at) { unsigned long expires; time_t now = current_kernel_time().tv_sec; kenter("%ld", gc_at - now); if (gc_at <= now || test_bit(KEY_GC_REAP_KEYTYPE, &key_gc_flags)) { kdebug("IMMEDIATE"); schedule_work(&key_gc_work); } else if (gc_at < key_gc_next_run) { kdebug("DEFERRED"); key_gc_next_run = gc_at; expires = jiffies + (gc_at - now) * HZ; mod_timer(&key_gc_timer, expires); } } /* * Schedule a dead links collection run. */ void key_schedule_gc_links(void) { set_bit(KEY_GC_KEY_EXPIRED, &key_gc_flags); schedule_work(&key_gc_work); } /* * Some key's cleanup time was met after it expired, so we need to get the * reaper to go through a cycle finding expired keys. */ static void key_gc_timer_func(unsigned long data) { kenter(""); key_gc_next_run = LONG_MAX; key_schedule_gc_links(); } /* * Reap keys of dead type. * * We use three flags to make sure we see three complete cycles of the garbage * collector: the first to mark keys of that type as being dead, the second to * collect dead links and the third to clean up the dead keys. We have to be * careful as there may already be a cycle in progress. * * The caller must be holding key_types_sem. 
*/ void key_gc_keytype(struct key_type *ktype) { kenter("%s", ktype->name); key_gc_dead_keytype = ktype; set_bit(KEY_GC_REAPING_KEYTYPE, &key_gc_flags); smp_mb(); set_bit(KEY_GC_REAP_KEYTYPE, &key_gc_flags); kdebug("schedule"); schedule_work(&key_gc_work); kdebug("sleep"); wait_on_bit(&key_gc_flags, KEY_GC_REAPING_KEYTYPE, TASK_UNINTERRUPTIBLE); key_gc_dead_keytype = NULL; kleave(""); } /* * Garbage collect a list of unreferenced, detached keys */ static noinline void key_gc_unused_keys(struct list_head *keys) { while (!list_empty(keys)) { struct key *key = list_entry(keys->next, struct key, graveyard_link); list_del(&key->graveyard_link); kdebug("- %u", key->serial); key_check(key); security_key_free(key); /* deal with the user's key tracking and quota */ if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) { spin_lock(&key->user->lock); key->user->qnkeys--; key->user->qnbytes -= key->quotalen; spin_unlock(&key->user->lock); } atomic_dec(&key->user->nkeys); if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) atomic_dec(&key->user->nikeys); key_user_put(key->user); /* now throw away the key memory */ if (key->type->destroy) key->type->destroy(key); kfree(key->description); #ifdef KEY_DEBUGGING key->magic = KEY_DEBUG_MAGIC_X; #endif kmem_cache_free(key_jar, key); } } /* * Garbage collector for unused keys. * * This is done in process context so that we don't have to disable interrupts * all over the place. key_put() schedules this rather than trying to do the * cleanup itself, which means key_put() doesn't have to sleep. */ static void key_garbage_collector(struct work_struct *work) { static LIST_HEAD(graveyard); static u8 gc_state; /* Internal persistent state */ #define KEY_GC_REAP_AGAIN 0x01 /* - Need another cycle */ #define KEY_GC_REAPING_LINKS 0x02 /* - We need to reap links */ #define KEY_GC_SET_TIMER 0x04 /* - We need to restart the timer */ #define KEY_GC_REAPING_DEAD_1 0x10 /* - We need to mark dead keys */ #define KEY_GC_REAPING_DEAD_2 0x20 /* - We need to reap dead key links */ #define KEY_GC_REAPING_DEAD_3 0x40 /* - We need to reap dead keys */ #define KEY_GC_FOUND_DEAD_KEY 0x80 /* - We found at least one dead key */ struct rb_node *cursor; struct key *key; time_t new_timer, limit; kenter("[%lx,%x]", key_gc_flags, gc_state); limit = current_kernel_time().tv_sec; if (limit > key_gc_delay) limit -= key_gc_delay; else limit = key_gc_delay; /* Work out what we're going to be doing in this pass */ gc_state &= KEY_GC_REAPING_DEAD_1 | KEY_GC_REAPING_DEAD_2; gc_state <<= 1; if (test_and_clear_bit(KEY_GC_KEY_EXPIRED, &key_gc_flags)) gc_state |= KEY_GC_REAPING_LINKS | KEY_GC_SET_TIMER; if (test_and_clear_bit(KEY_GC_REAP_KEYTYPE, &key_gc_flags)) gc_state |= KEY_GC_REAPING_DEAD_1; kdebug("new pass %x", gc_state); new_timer = LONG_MAX; /* As only this function is permitted to remove things from the key * serial tree, if cursor is non-NULL then it will always point to a * valid node in the tree - even if lock got dropped. 
*/ spin_lock(&key_serial_lock); cursor = rb_first(&key_serial_tree); continue_scanning: while (cursor) { key = rb_entry(cursor, struct key, serial_node); cursor = rb_next(cursor); if (atomic_read(&key->usage) == 0) goto found_unreferenced_key; if (unlikely(gc_state & KEY_GC_REAPING_DEAD_1)) { if (key->type == key_gc_dead_keytype) { gc_state |= KEY_GC_FOUND_DEAD_KEY; set_bit(KEY_FLAG_DEAD, &key->flags); key->perm = 0; goto skip_dead_key; } } if (gc_state & KEY_GC_SET_TIMER) { if (key->expiry > limit && key->expiry < new_timer) { kdebug("will expire %x in %ld", key_serial(key), key->expiry - limit); new_timer = key->expiry; } } if (unlikely(gc_state & KEY_GC_REAPING_DEAD_2)) if (key->type == key_gc_dead_keytype) gc_state |= KEY_GC_FOUND_DEAD_KEY; if ((gc_state & KEY_GC_REAPING_LINKS) || unlikely(gc_state & KEY_GC_REAPING_DEAD_2)) { if (key->type == &key_type_keyring) goto found_keyring; } if (unlikely(gc_state & KEY_GC_REAPING_DEAD_3)) if (key->type == key_gc_dead_keytype) goto destroy_dead_key; skip_dead_key: if (spin_is_contended(&key_serial_lock) || need_resched()) goto contended; } contended: spin_unlock(&key_serial_lock); maybe_resched: if (cursor) { cond_resched(); spin_lock(&key_serial_lock); goto continue_scanning; } /* We've completed the pass. Set the timer if we need to and queue a * new cycle if necessary. We keep executing cycles until we find one * where we didn't reap any keys. */ kdebug("pass complete"); if (gc_state & KEY_GC_SET_TIMER && new_timer != (time_t)LONG_MAX) { new_timer += key_gc_delay; key_schedule_gc(new_timer); } if (unlikely(gc_state & KEY_GC_REAPING_DEAD_2) || !list_empty(&graveyard)) { /* Make sure that all pending keyring payload destructions are * fulfilled and that people aren't now looking at dead or * dying keys that they don't have a reference upon or a link * to. */ kdebug("gc sync"); synchronize_rcu(); } if (!list_empty(&graveyard)) { kdebug("gc keys"); key_gc_unused_keys(&graveyard); } if (unlikely(gc_state & (KEY_GC_REAPING_DEAD_1 | KEY_GC_REAPING_DEAD_2))) { if (!(gc_state & KEY_GC_FOUND_DEAD_KEY)) { /* No remaining dead keys: short circuit the remaining * keytype reap cycles. */ kdebug("dead short"); gc_state &= ~(KEY_GC_REAPING_DEAD_1 | KEY_GC_REAPING_DEAD_2); gc_state |= KEY_GC_REAPING_DEAD_3; } else { gc_state |= KEY_GC_REAP_AGAIN; } } if (unlikely(gc_state & KEY_GC_REAPING_DEAD_3)) { kdebug("dead wake"); smp_mb(); clear_bit(KEY_GC_REAPING_KEYTYPE, &key_gc_flags); wake_up_bit(&key_gc_flags, KEY_GC_REAPING_KEYTYPE); } if (gc_state & KEY_GC_REAP_AGAIN) schedule_work(&key_gc_work); kleave(" [end %x]", gc_state); return; /* We found an unreferenced key - once we've removed it from the tree, * we can safely drop the lock. */ found_unreferenced_key: kdebug("unrefd key %d", key->serial); rb_erase(&key->serial_node, &key_serial_tree); spin_unlock(&key_serial_lock); list_add_tail(&key->graveyard_link, &graveyard); gc_state |= KEY_GC_REAP_AGAIN; goto maybe_resched; /* We found a keyring and we need to check the payload for links to * dead or expired keys. We don't flag another reap immediately as we * have to wait for the old payload to be destroyed by RCU before we * can reap the keys to which it refers. */ found_keyring: spin_unlock(&key_serial_lock); keyring_gc(key, limit); goto maybe_resched; /* We found a dead key that is still referenced. Reset its type and * destroy its payload with its semaphore held. 
 */
destroy_dead_key:
        spin_unlock(&key_serial_lock);
        kdebug("destroy key %d", key->serial);
        down_write(&key->sem);
        key->type = &key_type_dead;
        if (key_gc_dead_keytype->destroy)
                key_gc_dead_keytype->destroy(key);
        memset(&key->payload, KEY_DESTROY, sizeof(key->payload));
        up_write(&key->sem);
        goto maybe_resched;
}
./CrossVul/dataset_final_sorted/CWE-362/c/bad_2386_0
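key_garbage_collector() above spreads the reaping of a dead key type over successive passes: at the top of each pass gc_state is masked down to the KEY_GC_REAPING_DEAD_1/_2 bits and shifted left, so marking the keys, reaping keyring links to them, and finally destroying the keys happen on consecutive cycles. The toy program below is an illustration only (ordinary user-space C, not kernel code, and it omits the found-dead-key short circuit); it just shows how that mask-and-shift progression advances the phases:

/*
 * Toy model of the gc_state mask-and-shift used by key_garbage_collector();
 * plain user-space C, not kernel code.
 */
#include <stdio.h>

#define KEY_GC_REAPING_DEAD_1 0x10      /* mark keys of the dead type */
#define KEY_GC_REAPING_DEAD_2 0x20      /* reap keyring links to them */
#define KEY_GC_REAPING_DEAD_3 0x40      /* reap the keys themselves */

int main(void)
{
        unsigned int gc_state = 0;
        int reap_keytype = 1;           /* pretend a keytype was just unregistered */

        for (int pass = 1; pass <= 4; pass++) {
                /* Start-of-pass logic mirrored from the collector. */
                gc_state &= KEY_GC_REAPING_DEAD_1 | KEY_GC_REAPING_DEAD_2;
                gc_state <<= 1;
                if (reap_keytype) {
                        gc_state |= KEY_GC_REAPING_DEAD_1;
                        reap_keytype = 0;
                }

                printf("pass %d: state %#04x ->%s%s%s\n", pass, gc_state,
                       gc_state & KEY_GC_REAPING_DEAD_1 ? " mark-dead" : "",
                       gc_state & KEY_GC_REAPING_DEAD_2 ? " reap-links" : "",
                       gc_state & KEY_GC_REAPING_DEAD_3 ? " reap-keys" : "");
        }
        return 0;
}

Run, the toy prints mark-dead, reap-links, reap-keys on passes 1 to 3 and nothing on pass 4, matching the three-cycle guarantee the comment above key_gc_keytype() describes.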
crossvul-cpp_data_good_3275_3
/* * Copyright (C) 2008 Red Hat, Inc., Eric Paris <eparis@redhat.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; see the file COPYING. If not, write to * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/dcache.h> #include <linux/fs.h> #include <linux/gfp.h> #include <linux/init.h> #include <linux/module.h> #include <linux/mount.h> #include <linux/srcu.h> #include <linux/fsnotify_backend.h> #include "fsnotify.h" /* * Clear all of the marks on an inode when it is being evicted from core */ void __fsnotify_inode_delete(struct inode *inode) { fsnotify_clear_marks_by_inode(inode); } EXPORT_SYMBOL_GPL(__fsnotify_inode_delete); void __fsnotify_vfsmount_delete(struct vfsmount *mnt) { fsnotify_clear_marks_by_mount(mnt); } /** * fsnotify_unmount_inodes - an sb is unmounting. handle any watched inodes. * @sb: superblock being unmounted. * * Called during unmount with no locks held, so needs to be safe against * concurrent modifiers. We temporarily drop sb->s_inode_list_lock and CAN block. */ void fsnotify_unmount_inodes(struct super_block *sb) { struct inode *inode, *iput_inode = NULL; spin_lock(&sb->s_inode_list_lock); list_for_each_entry(inode, &sb->s_inodes, i_sb_list) { /* * We cannot __iget() an inode in state I_FREEING, * I_WILL_FREE, or I_NEW which is fine because by that point * the inode cannot have any associated watches. */ spin_lock(&inode->i_lock); if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) { spin_unlock(&inode->i_lock); continue; } /* * If i_count is zero, the inode cannot have any watches and * doing an __iget/iput with MS_ACTIVE clear would actually * evict all inodes with zero i_count from icache which is * unnecessarily violent and may in fact be illegal to do. */ if (!atomic_read(&inode->i_count)) { spin_unlock(&inode->i_lock); continue; } __iget(inode); spin_unlock(&inode->i_lock); spin_unlock(&sb->s_inode_list_lock); if (iput_inode) iput(iput_inode); /* for each watch, send FS_UNMOUNT and then remove it */ fsnotify(inode, FS_UNMOUNT, inode, FSNOTIFY_EVENT_INODE, NULL, 0); fsnotify_inode_delete(inode); iput_inode = inode; spin_lock(&sb->s_inode_list_lock); } spin_unlock(&sb->s_inode_list_lock); if (iput_inode) iput(iput_inode); } /* * Given an inode, first check if we care what happens to our children. Inotify * and dnotify both tell their parents about events. If we care about any event * on a child we run all of our children and set a dentry flag saying that the * parent cares. Thus when an event happens on a child it can quickly tell if * if there is a need to find a parent and send the event to the parent. */ void __fsnotify_update_child_dentry_flags(struct inode *inode) { struct dentry *alias; int watched; if (!S_ISDIR(inode->i_mode)) return; /* determine if the children should tell inode about their events */ watched = fsnotify_inode_watches_children(inode); spin_lock(&inode->i_lock); /* run all of the dentries associated with this inode. 
Since this is a * directory, there damn well better only be one item on this list */ hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) { struct dentry *child; /* run all of the children of the original inode and fix their * d_flags to indicate parental interest (their parent is the * original inode) */ spin_lock(&alias->d_lock); list_for_each_entry(child, &alias->d_subdirs, d_child) { if (!child->d_inode) continue; spin_lock_nested(&child->d_lock, DENTRY_D_LOCK_NESTED); if (watched) child->d_flags |= DCACHE_FSNOTIFY_PARENT_WATCHED; else child->d_flags &= ~DCACHE_FSNOTIFY_PARENT_WATCHED; spin_unlock(&child->d_lock); } spin_unlock(&alias->d_lock); } spin_unlock(&inode->i_lock); } /* Notify this dentry's parent about a child's events. */ int __fsnotify_parent(const struct path *path, struct dentry *dentry, __u32 mask) { struct dentry *parent; struct inode *p_inode; int ret = 0; if (!dentry) dentry = path->dentry; if (!(dentry->d_flags & DCACHE_FSNOTIFY_PARENT_WATCHED)) return 0; parent = dget_parent(dentry); p_inode = parent->d_inode; if (unlikely(!fsnotify_inode_watches_children(p_inode))) __fsnotify_update_child_dentry_flags(p_inode); else if (p_inode->i_fsnotify_mask & mask) { struct name_snapshot name; /* we are notifying a parent so come up with the new mask which * specifies these are events which came from a child. */ mask |= FS_EVENT_ON_CHILD; take_dentry_name_snapshot(&name, dentry); if (path) ret = fsnotify(p_inode, mask, path, FSNOTIFY_EVENT_PATH, name.name, 0); else ret = fsnotify(p_inode, mask, dentry->d_inode, FSNOTIFY_EVENT_INODE, name.name, 0); release_dentry_name_snapshot(&name); } dput(parent); return ret; } EXPORT_SYMBOL_GPL(__fsnotify_parent); static int send_to_group(struct inode *to_tell, struct fsnotify_mark *inode_mark, struct fsnotify_mark *vfsmount_mark, __u32 mask, const void *data, int data_is, u32 cookie, const unsigned char *file_name, struct fsnotify_iter_info *iter_info) { struct fsnotify_group *group = NULL; __u32 inode_test_mask = 0; __u32 vfsmount_test_mask = 0; if (unlikely(!inode_mark && !vfsmount_mark)) { BUG(); return 0; } /* clear ignored on inode modification */ if (mask & FS_MODIFY) { if (inode_mark && !(inode_mark->flags & FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY)) inode_mark->ignored_mask = 0; if (vfsmount_mark && !(vfsmount_mark->flags & FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY)) vfsmount_mark->ignored_mask = 0; } /* does the inode mark tell us to do something? */ if (inode_mark) { group = inode_mark->group; inode_test_mask = (mask & ~FS_EVENT_ON_CHILD); inode_test_mask &= inode_mark->mask; inode_test_mask &= ~inode_mark->ignored_mask; } /* does the vfsmount_mark tell us to do something? */ if (vfsmount_mark) { vfsmount_test_mask = (mask & ~FS_EVENT_ON_CHILD); group = vfsmount_mark->group; vfsmount_test_mask &= vfsmount_mark->mask; vfsmount_test_mask &= ~vfsmount_mark->ignored_mask; if (inode_mark) vfsmount_test_mask &= ~inode_mark->ignored_mask; } pr_debug("%s: group=%p to_tell=%p mask=%x inode_mark=%p" " inode_test_mask=%x vfsmount_mark=%p vfsmount_test_mask=%x" " data=%p data_is=%d cookie=%d\n", __func__, group, to_tell, mask, inode_mark, inode_test_mask, vfsmount_mark, vfsmount_test_mask, data, data_is, cookie); if (!inode_test_mask && !vfsmount_test_mask) return 0; return group->ops->handle_event(group, to_tell, inode_mark, vfsmount_mark, mask, data, data_is, file_name, cookie, iter_info); } /* * This is the main call to fsnotify. The VFS calls into hook specific functions * in linux/fsnotify.h. Those functions then in turn call here. 
Here will call * out to all of the registered fsnotify_group. Those groups can then use the * notification event in whatever means they feel necessary. */ int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int data_is, const unsigned char *file_name, u32 cookie) { struct hlist_node *inode_node = NULL, *vfsmount_node = NULL; struct fsnotify_mark *inode_mark = NULL, *vfsmount_mark = NULL; struct fsnotify_group *inode_group, *vfsmount_group; struct fsnotify_mark_connector *inode_conn, *vfsmount_conn; struct fsnotify_iter_info iter_info; struct mount *mnt; int ret = 0; /* global tests shouldn't care about events on child only the specific event */ __u32 test_mask = (mask & ~FS_EVENT_ON_CHILD); if (data_is == FSNOTIFY_EVENT_PATH) mnt = real_mount(((const struct path *)data)->mnt); else mnt = NULL; /* * Optimization: srcu_read_lock() has a memory barrier which can * be expensive. It protects walking the *_fsnotify_marks lists. * However, if we do not walk the lists, we do not have to do * SRCU because we have no references to any objects and do not * need SRCU to keep them "alive". */ if (!to_tell->i_fsnotify_marks && (!mnt || !mnt->mnt_fsnotify_marks)) return 0; /* * if this is a modify event we may need to clear the ignored masks * otherwise return if neither the inode nor the vfsmount care about * this type of event. */ if (!(mask & FS_MODIFY) && !(test_mask & to_tell->i_fsnotify_mask) && !(mnt && test_mask & mnt->mnt_fsnotify_mask)) return 0; iter_info.srcu_idx = srcu_read_lock(&fsnotify_mark_srcu); if ((mask & FS_MODIFY) || (test_mask & to_tell->i_fsnotify_mask)) { inode_conn = srcu_dereference(to_tell->i_fsnotify_marks, &fsnotify_mark_srcu); if (inode_conn) inode_node = srcu_dereference(inode_conn->list.first, &fsnotify_mark_srcu); } if (mnt && ((mask & FS_MODIFY) || (test_mask & mnt->mnt_fsnotify_mask))) { inode_conn = srcu_dereference(to_tell->i_fsnotify_marks, &fsnotify_mark_srcu); if (inode_conn) inode_node = srcu_dereference(inode_conn->list.first, &fsnotify_mark_srcu); vfsmount_conn = srcu_dereference(mnt->mnt_fsnotify_marks, &fsnotify_mark_srcu); if (vfsmount_conn) vfsmount_node = srcu_dereference( vfsmount_conn->list.first, &fsnotify_mark_srcu); } /* * We need to merge inode & vfsmount mark lists so that inode mark * ignore masks are properly reflected for mount mark notifications. * That's why this traversal is so complicated... 
*/ while (inode_node || vfsmount_node) { inode_group = NULL; inode_mark = NULL; vfsmount_group = NULL; vfsmount_mark = NULL; if (inode_node) { inode_mark = hlist_entry(srcu_dereference(inode_node, &fsnotify_mark_srcu), struct fsnotify_mark, obj_list); inode_group = inode_mark->group; } if (vfsmount_node) { vfsmount_mark = hlist_entry(srcu_dereference(vfsmount_node, &fsnotify_mark_srcu), struct fsnotify_mark, obj_list); vfsmount_group = vfsmount_mark->group; } if (inode_group && vfsmount_group) { int cmp = fsnotify_compare_groups(inode_group, vfsmount_group); if (cmp > 0) { inode_group = NULL; inode_mark = NULL; } else if (cmp < 0) { vfsmount_group = NULL; vfsmount_mark = NULL; } } iter_info.inode_mark = inode_mark; iter_info.vfsmount_mark = vfsmount_mark; ret = send_to_group(to_tell, inode_mark, vfsmount_mark, mask, data, data_is, cookie, file_name, &iter_info); if (ret && (mask & ALL_FSNOTIFY_PERM_EVENTS)) goto out; if (inode_group) inode_node = srcu_dereference(inode_node->next, &fsnotify_mark_srcu); if (vfsmount_group) vfsmount_node = srcu_dereference(vfsmount_node->next, &fsnotify_mark_srcu); } ret = 0; out: srcu_read_unlock(&fsnotify_mark_srcu, iter_info.srcu_idx); return ret; } EXPORT_SYMBOL_GPL(fsnotify); extern struct kmem_cache *fsnotify_mark_connector_cachep; static __init int fsnotify_init(void) { int ret; BUG_ON(hweight32(ALL_FSNOTIFY_EVENTS) != 23); ret = init_srcu_struct(&fsnotify_mark_srcu); if (ret) panic("initializing fsnotify_mark_srcu"); fsnotify_mark_connector_cachep = KMEM_CACHE(fsnotify_mark_connector, SLAB_PANIC); return 0; } core_initcall(fsnotify_init);
./CrossVul/dataset_final_sorted/CWE-362/c/good_3275_3
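In the fsnotify core above, send_to_group() hands an event to a group only if the event mask, with FS_EVENT_ON_CHILD stripped, still intersects the mark's mask after the mark's ignored_mask is removed. The standalone snippet below illustrates just that filtering; it is ordinary user-space C, not kernel code, and the constant values are illustrative stand-ins rather than taken from a kernel header:

/*
 * Standalone illustration of the mask / ignored_mask filtering performed in
 * send_to_group(); not kernel code, constants are illustrative.
 */
#include <stdio.h>

#define FS_MODIFY               0x00000002
#define FS_OPEN                 0x00000020
#define FS_EVENT_ON_CHILD       0x08000000

struct toy_mark {
        unsigned int mask;              /* events the group listens for */
        unsigned int ignored_mask;      /* events the group suppresses */
};

/* Non-zero iff the event would be handed to the mark's group. */
static unsigned int deliverable(unsigned int event, const struct toy_mark *mark)
{
        unsigned int test_mask = event & ~FS_EVENT_ON_CHILD;

        return test_mask & mark->mask & ~mark->ignored_mask;
}

int main(void)
{
        struct toy_mark mark = {
                .mask         = FS_OPEN | FS_MODIFY,
                .ignored_mask = FS_OPEN,
        };

        printf("FS_OPEN:   %s\n",
               deliverable(FS_OPEN, &mark) ? "delivered" : "dropped");
        printf("FS_MODIFY: %s\n",
               deliverable(FS_MODIFY | FS_EVENT_ON_CHILD, &mark) ? "delivered" : "dropped");
        return 0;
}

The real code additionally clears ignored_mask on FS_MODIFY unless the mark carries FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY, a detail this sketch leaves out.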
crossvul-cpp_data_good_1595_0
/* * linux/fs/exec.c * * Copyright (C) 1991, 1992 Linus Torvalds */ /* * #!-checking implemented by tytso. */ /* * Demand-loading implemented 01.12.91 - no need to read anything but * the header into memory. The inode of the executable is put into * "current->executable", and page faults do the actual loading. Clean. * * Once more I can proudly say that linux stood up to being changed: it * was less than 2 hours work to get demand-loading completely implemented. * * Demand loading changed July 1993 by Eric Youngdale. Use mmap instead, * current->executable is only used by the procfs. This allows a dispatch * table to check for several different types of binary formats. We keep * trying until we recognize the file or we run out of supported binary * formats. */ #include <linux/slab.h> #include <linux/file.h> #include <linux/fdtable.h> #include <linux/mm.h> #include <linux/vmacache.h> #include <linux/stat.h> #include <linux/fcntl.h> #include <linux/swap.h> #include <linux/string.h> #include <linux/init.h> #include <linux/pagemap.h> #include <linux/perf_event.h> #include <linux/highmem.h> #include <linux/spinlock.h> #include <linux/key.h> #include <linux/personality.h> #include <linux/binfmts.h> #include <linux/utsname.h> #include <linux/pid_namespace.h> #include <linux/module.h> #include <linux/namei.h> #include <linux/mount.h> #include <linux/security.h> #include <linux/syscalls.h> #include <linux/tsacct_kern.h> #include <linux/cn_proc.h> #include <linux/audit.h> #include <linux/tracehook.h> #include <linux/kmod.h> #include <linux/fsnotify.h> #include <linux/fs_struct.h> #include <linux/pipe_fs_i.h> #include <linux/oom.h> #include <linux/compat.h> #include <asm/uaccess.h> #include <asm/mmu_context.h> #include <asm/tlb.h> #include <trace/events/task.h> #include "internal.h" #include <trace/events/sched.h> int suid_dumpable = 0; static LIST_HEAD(formats); static DEFINE_RWLOCK(binfmt_lock); void __register_binfmt(struct linux_binfmt * fmt, int insert) { BUG_ON(!fmt); if (WARN_ON(!fmt->load_binary)) return; write_lock(&binfmt_lock); insert ? list_add(&fmt->lh, &formats) : list_add_tail(&fmt->lh, &formats); write_unlock(&binfmt_lock); } EXPORT_SYMBOL(__register_binfmt); void unregister_binfmt(struct linux_binfmt * fmt) { write_lock(&binfmt_lock); list_del(&fmt->lh); write_unlock(&binfmt_lock); } EXPORT_SYMBOL(unregister_binfmt); static inline void put_binfmt(struct linux_binfmt * fmt) { module_put(fmt->module); } #ifdef CONFIG_USELIB /* * Note that a shared library must be both readable and executable due to * security reasons. * * Also note that we take the address to load from from the file itself. 
*/ SYSCALL_DEFINE1(uselib, const char __user *, library) { struct linux_binfmt *fmt; struct file *file; struct filename *tmp = getname(library); int error = PTR_ERR(tmp); static const struct open_flags uselib_flags = { .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC, .acc_mode = MAY_READ | MAY_EXEC | MAY_OPEN, .intent = LOOKUP_OPEN, .lookup_flags = LOOKUP_FOLLOW, }; if (IS_ERR(tmp)) goto out; file = do_filp_open(AT_FDCWD, tmp, &uselib_flags); putname(tmp); error = PTR_ERR(file); if (IS_ERR(file)) goto out; error = -EINVAL; if (!S_ISREG(file_inode(file)->i_mode)) goto exit; error = -EACCES; if (file->f_path.mnt->mnt_flags & MNT_NOEXEC) goto exit; fsnotify_open(file); error = -ENOEXEC; read_lock(&binfmt_lock); list_for_each_entry(fmt, &formats, lh) { if (!fmt->load_shlib) continue; if (!try_module_get(fmt->module)) continue; read_unlock(&binfmt_lock); error = fmt->load_shlib(file); read_lock(&binfmt_lock); put_binfmt(fmt); if (error != -ENOEXEC) break; } read_unlock(&binfmt_lock); exit: fput(file); out: return error; } #endif /* #ifdef CONFIG_USELIB */ #ifdef CONFIG_MMU /* * The nascent bprm->mm is not visible until exec_mmap() but it can * use a lot of memory, account these pages in current->mm temporary * for oom_badness()->get_mm_rss(). Once exec succeeds or fails, we * change the counter back via acct_arg_size(0). */ static void acct_arg_size(struct linux_binprm *bprm, unsigned long pages) { struct mm_struct *mm = current->mm; long diff = (long)(pages - bprm->vma_pages); if (!mm || !diff) return; bprm->vma_pages = pages; add_mm_counter(mm, MM_ANONPAGES, diff); } static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos, int write) { struct page *page; int ret; #ifdef CONFIG_STACK_GROWSUP if (write) { ret = expand_downwards(bprm->vma, pos); if (ret < 0) return NULL; } #endif ret = get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL); if (ret <= 0) return NULL; if (write) { unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start; struct rlimit *rlim; acct_arg_size(bprm, size / PAGE_SIZE); /* * We've historically supported up to 32 pages (ARG_MAX) * of argument strings even with small stacks */ if (size <= ARG_MAX) return page; /* * Limit to 1/4-th the stack size for the argv+env strings. * This ensures that: * - the remaining binfmt code will not run out of stack space, * - the program will have a reasonable amount of stack left * to work from. */ rlim = current->signal->rlim; if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4) { put_page(page); return NULL; } } return page; } static void put_arg_page(struct page *page) { put_page(page); } static void free_arg_page(struct linux_binprm *bprm, int i) { } static void free_arg_pages(struct linux_binprm *bprm) { } static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos, struct page *page) { flush_cache_page(bprm->vma, pos, page_to_pfn(page)); } static int __bprm_mm_init(struct linux_binprm *bprm) { int err; struct vm_area_struct *vma = NULL; struct mm_struct *mm = bprm->mm; bprm->vma = vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); if (!vma) return -ENOMEM; down_write(&mm->mmap_sem); vma->vm_mm = mm; /* * Place the stack at the largest stack address the architecture * supports. Later, we'll move this to an appropriate place. We don't * use STACK_TOP because that can depend on attributes which aren't * configured yet. 
*/ BUILD_BUG_ON(VM_STACK_FLAGS & VM_STACK_INCOMPLETE_SETUP); vma->vm_end = STACK_TOP_MAX; vma->vm_start = vma->vm_end - PAGE_SIZE; vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP; vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); INIT_LIST_HEAD(&vma->anon_vma_chain); err = insert_vm_struct(mm, vma); if (err) goto err; mm->stack_vm = mm->total_vm = 1; arch_bprm_mm_init(mm, vma); up_write(&mm->mmap_sem); bprm->p = vma->vm_end - sizeof(void *); return 0; err: up_write(&mm->mmap_sem); bprm->vma = NULL; kmem_cache_free(vm_area_cachep, vma); return err; } static bool valid_arg_len(struct linux_binprm *bprm, long len) { return len <= MAX_ARG_STRLEN; } #else static inline void acct_arg_size(struct linux_binprm *bprm, unsigned long pages) { } static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos, int write) { struct page *page; page = bprm->page[pos / PAGE_SIZE]; if (!page && write) { page = alloc_page(GFP_HIGHUSER|__GFP_ZERO); if (!page) return NULL; bprm->page[pos / PAGE_SIZE] = page; } return page; } static void put_arg_page(struct page *page) { } static void free_arg_page(struct linux_binprm *bprm, int i) { if (bprm->page[i]) { __free_page(bprm->page[i]); bprm->page[i] = NULL; } } static void free_arg_pages(struct linux_binprm *bprm) { int i; for (i = 0; i < MAX_ARG_PAGES; i++) free_arg_page(bprm, i); } static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos, struct page *page) { } static int __bprm_mm_init(struct linux_binprm *bprm) { bprm->p = PAGE_SIZE * MAX_ARG_PAGES - sizeof(void *); return 0; } static bool valid_arg_len(struct linux_binprm *bprm, long len) { return len <= bprm->p; } #endif /* CONFIG_MMU */ /* * Create a new mm_struct and populate it with a temporary stack * vm_area_struct. We don't have enough context at this point to set the stack * flags, permissions, and offset, so we use temporary values. We'll update * them later in setup_arg_pages(). */ static int bprm_mm_init(struct linux_binprm *bprm) { int err; struct mm_struct *mm = NULL; bprm->mm = mm = mm_alloc(); err = -ENOMEM; if (!mm) goto err; err = __bprm_mm_init(bprm); if (err) goto err; return 0; err: if (mm) { bprm->mm = NULL; mmdrop(mm); } return err; } struct user_arg_ptr { #ifdef CONFIG_COMPAT bool is_compat; #endif union { const char __user *const __user *native; #ifdef CONFIG_COMPAT const compat_uptr_t __user *compat; #endif } ptr; }; static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr) { const char __user *native; #ifdef CONFIG_COMPAT if (unlikely(argv.is_compat)) { compat_uptr_t compat; if (get_user(compat, argv.ptr.compat + nr)) return ERR_PTR(-EFAULT); return compat_ptr(compat); } #endif if (get_user(native, argv.ptr.native + nr)) return ERR_PTR(-EFAULT); return native; } /* * count() counts the number of strings in array ARGV. */ static int count(struct user_arg_ptr argv, int max) { int i = 0; if (argv.ptr.native != NULL) { for (;;) { const char __user *p = get_user_arg_ptr(argv, i); if (!p) break; if (IS_ERR(p)) return -EFAULT; if (i >= max) return -E2BIG; ++i; if (fatal_signal_pending(current)) return -ERESTARTNOHAND; cond_resched(); } } return i; } /* * 'copy_strings()' copies argument/environment strings from the old * processes's memory to the new process's stack. The call to get_user_pages() * ensures the destination page is created and not swapped out. 
*/ static int copy_strings(int argc, struct user_arg_ptr argv, struct linux_binprm *bprm) { struct page *kmapped_page = NULL; char *kaddr = NULL; unsigned long kpos = 0; int ret; while (argc-- > 0) { const char __user *str; int len; unsigned long pos; ret = -EFAULT; str = get_user_arg_ptr(argv, argc); if (IS_ERR(str)) goto out; len = strnlen_user(str, MAX_ARG_STRLEN); if (!len) goto out; ret = -E2BIG; if (!valid_arg_len(bprm, len)) goto out; /* We're going to work our way backwords. */ pos = bprm->p; str += len; bprm->p -= len; while (len > 0) { int offset, bytes_to_copy; if (fatal_signal_pending(current)) { ret = -ERESTARTNOHAND; goto out; } cond_resched(); offset = pos % PAGE_SIZE; if (offset == 0) offset = PAGE_SIZE; bytes_to_copy = offset; if (bytes_to_copy > len) bytes_to_copy = len; offset -= bytes_to_copy; pos -= bytes_to_copy; str -= bytes_to_copy; len -= bytes_to_copy; if (!kmapped_page || kpos != (pos & PAGE_MASK)) { struct page *page; page = get_arg_page(bprm, pos, 1); if (!page) { ret = -E2BIG; goto out; } if (kmapped_page) { flush_kernel_dcache_page(kmapped_page); kunmap(kmapped_page); put_arg_page(kmapped_page); } kmapped_page = page; kaddr = kmap(kmapped_page); kpos = pos & PAGE_MASK; flush_arg_page(bprm, kpos, kmapped_page); } if (copy_from_user(kaddr+offset, str, bytes_to_copy)) { ret = -EFAULT; goto out; } } } ret = 0; out: if (kmapped_page) { flush_kernel_dcache_page(kmapped_page); kunmap(kmapped_page); put_arg_page(kmapped_page); } return ret; } /* * Like copy_strings, but get argv and its values from kernel memory. */ int copy_strings_kernel(int argc, const char *const *__argv, struct linux_binprm *bprm) { int r; mm_segment_t oldfs = get_fs(); struct user_arg_ptr argv = { .ptr.native = (const char __user *const __user *)__argv, }; set_fs(KERNEL_DS); r = copy_strings(argc, argv, bprm); set_fs(oldfs); return r; } EXPORT_SYMBOL(copy_strings_kernel); #ifdef CONFIG_MMU /* * During bprm_mm_init(), we create a temporary stack at STACK_TOP_MAX. Once * the binfmt code determines where the new stack should reside, we shift it to * its final location. The process proceeds as follows: * * 1) Use shift to calculate the new vma endpoints. * 2) Extend vma to cover both the old and new ranges. This ensures the * arguments passed to subsequent functions are consistent. * 3) Move vma's page tables to the new range. * 4) Free up any cleared pgd range. * 5) Shrink the vma to cover only the new range. */ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift) { struct mm_struct *mm = vma->vm_mm; unsigned long old_start = vma->vm_start; unsigned long old_end = vma->vm_end; unsigned long length = old_end - old_start; unsigned long new_start = old_start - shift; unsigned long new_end = old_end - shift; struct mmu_gather tlb; BUG_ON(new_start > new_end); /* * ensure there are no vmas between where we want to go * and where we are */ if (vma != find_vma(mm, new_start)) return -EFAULT; /* * cover the whole range: [new_start, old_end) */ if (vma_adjust(vma, new_start, old_end, vma->vm_pgoff, NULL)) return -ENOMEM; /* * move the page tables downwards, on failure we rely on * process cleanup to remove whatever mess we made. */ if (length != move_page_tables(vma, old_start, vma, new_start, length, false)) return -ENOMEM; lru_add_drain(); tlb_gather_mmu(&tlb, mm, old_start, old_end); if (new_end > old_start) { /* * when the old and new regions overlap clear from new_end. */ free_pgd_range(&tlb, new_end, old_end, new_end, vma->vm_next ? 
vma->vm_next->vm_start : USER_PGTABLES_CEILING); } else { /* * otherwise, clean from old_start; this is done to not touch * the address space in [new_end, old_start) some architectures * have constraints on va-space that make this illegal (IA64) - * for the others its just a little faster. */ free_pgd_range(&tlb, old_start, old_end, new_end, vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING); } tlb_finish_mmu(&tlb, old_start, old_end); /* * Shrink the vma to just the new range. Always succeeds. */ vma_adjust(vma, new_start, new_end, vma->vm_pgoff, NULL); return 0; } /* * Finalizes the stack vm_area_struct. The flags and permissions are updated, * the stack is optionally relocated, and some extra space is added. */ int setup_arg_pages(struct linux_binprm *bprm, unsigned long stack_top, int executable_stack) { unsigned long ret; unsigned long stack_shift; struct mm_struct *mm = current->mm; struct vm_area_struct *vma = bprm->vma; struct vm_area_struct *prev = NULL; unsigned long vm_flags; unsigned long stack_base; unsigned long stack_size; unsigned long stack_expand; unsigned long rlim_stack; #ifdef CONFIG_STACK_GROWSUP /* Limit stack size */ stack_base = rlimit_max(RLIMIT_STACK); if (stack_base > STACK_SIZE_MAX) stack_base = STACK_SIZE_MAX; /* Make sure we didn't let the argument array grow too large. */ if (vma->vm_end - vma->vm_start > stack_base) return -ENOMEM; stack_base = PAGE_ALIGN(stack_top - stack_base); stack_shift = vma->vm_start - stack_base; mm->arg_start = bprm->p - stack_shift; bprm->p = vma->vm_end - stack_shift; #else stack_top = arch_align_stack(stack_top); stack_top = PAGE_ALIGN(stack_top); if (unlikely(stack_top < mmap_min_addr) || unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr)) return -ENOMEM; stack_shift = vma->vm_end - stack_top; bprm->p -= stack_shift; mm->arg_start = bprm->p; #endif if (bprm->loader) bprm->loader -= stack_shift; bprm->exec -= stack_shift; down_write(&mm->mmap_sem); vm_flags = VM_STACK_FLAGS; /* * Adjust stack execute permissions; explicitly enable for * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone * (arch default) otherwise. */ if (unlikely(executable_stack == EXSTACK_ENABLE_X)) vm_flags |= VM_EXEC; else if (executable_stack == EXSTACK_DISABLE_X) vm_flags &= ~VM_EXEC; vm_flags |= mm->def_flags; vm_flags |= VM_STACK_INCOMPLETE_SETUP; ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end, vm_flags); if (ret) goto out_unlock; BUG_ON(prev != vma); /* Move stack pages down in memory. */ if (stack_shift) { ret = shift_arg_pages(vma, stack_shift); if (ret) goto out_unlock; } /* mprotect_fixup is overkill to remove the temporary stack flags */ vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP; stack_expand = 131072UL; /* randomly 32*4k (or 2*64k) pages */ stack_size = vma->vm_end - vma->vm_start; /* * Align this down to a page boundary as expand_stack * will align it up. 
*/ rlim_stack = rlimit(RLIMIT_STACK) & PAGE_MASK; #ifdef CONFIG_STACK_GROWSUP if (stack_size + stack_expand > rlim_stack) stack_base = vma->vm_start + rlim_stack; else stack_base = vma->vm_end + stack_expand; #else if (stack_size + stack_expand > rlim_stack) stack_base = vma->vm_end - rlim_stack; else stack_base = vma->vm_start - stack_expand; #endif current->mm->start_stack = bprm->p; ret = expand_stack(vma, stack_base); if (ret) ret = -EFAULT; out_unlock: up_write(&mm->mmap_sem); return ret; } EXPORT_SYMBOL(setup_arg_pages); #endif /* CONFIG_MMU */ static struct file *do_open_execat(int fd, struct filename *name, int flags) { struct file *file; int err; struct open_flags open_exec_flags = { .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC, .acc_mode = MAY_EXEC | MAY_OPEN, .intent = LOOKUP_OPEN, .lookup_flags = LOOKUP_FOLLOW, }; if ((flags & ~(AT_SYMLINK_NOFOLLOW | AT_EMPTY_PATH)) != 0) return ERR_PTR(-EINVAL); if (flags & AT_SYMLINK_NOFOLLOW) open_exec_flags.lookup_flags &= ~LOOKUP_FOLLOW; if (flags & AT_EMPTY_PATH) open_exec_flags.lookup_flags |= LOOKUP_EMPTY; file = do_filp_open(fd, name, &open_exec_flags); if (IS_ERR(file)) goto out; err = -EACCES; if (!S_ISREG(file_inode(file)->i_mode)) goto exit; if (file->f_path.mnt->mnt_flags & MNT_NOEXEC) goto exit; err = deny_write_access(file); if (err) goto exit; if (name->name[0] != '\0') fsnotify_open(file); out: return file; exit: fput(file); return ERR_PTR(err); } struct file *open_exec(const char *name) { struct filename *filename = getname_kernel(name); struct file *f = ERR_CAST(filename); if (!IS_ERR(filename)) { f = do_open_execat(AT_FDCWD, filename, 0); putname(filename); } return f; } EXPORT_SYMBOL(open_exec); int kernel_read(struct file *file, loff_t offset, char *addr, unsigned long count) { mm_segment_t old_fs; loff_t pos = offset; int result; old_fs = get_fs(); set_fs(get_ds()); /* The cast to a user pointer is valid due to the set_fs() */ result = vfs_read(file, (void __user *)addr, count, &pos); set_fs(old_fs); return result; } EXPORT_SYMBOL(kernel_read); ssize_t read_code(struct file *file, unsigned long addr, loff_t pos, size_t len) { ssize_t res = vfs_read(file, (void __user *)addr, len, &pos); if (res > 0) flush_icache_range(addr, addr + len); return res; } EXPORT_SYMBOL(read_code); static int exec_mmap(struct mm_struct *mm) { struct task_struct *tsk; struct mm_struct *old_mm, *active_mm; /* Notify parent that we're no longer interested in the old VM */ tsk = current; old_mm = current->mm; mm_release(tsk, old_mm); if (old_mm) { sync_mm_rss(old_mm); /* * Make sure that if there is a core dump in progress * for the old mm, we get out and die instead of going * through with the exec. We must hold mmap_sem around * checking core_state and changing tsk->mm. */ down_read(&old_mm->mmap_sem); if (unlikely(old_mm->core_state)) { up_read(&old_mm->mmap_sem); return -EINTR; } } task_lock(tsk); active_mm = tsk->active_mm; tsk->mm = mm; tsk->active_mm = mm; activate_mm(active_mm, mm); tsk->mm->vmacache_seqnum = 0; vmacache_flush(tsk); task_unlock(tsk); if (old_mm) { up_read(&old_mm->mmap_sem); BUG_ON(active_mm != old_mm); setmax_mm_hiwater_rss(&tsk->signal->maxrss, old_mm); mm_update_next_owner(old_mm); mmput(old_mm); return 0; } mmdrop(active_mm); return 0; } /* * This function makes sure the current process has its own signal table, * so that flush_signal_handlers can later reset the handlers without * disturbing other processes. (Other processes might share the signal * table via the CLONE_SIGHAND option to clone().) 
*/ static int de_thread(struct task_struct *tsk) { struct signal_struct *sig = tsk->signal; struct sighand_struct *oldsighand = tsk->sighand; spinlock_t *lock = &oldsighand->siglock; if (thread_group_empty(tsk)) goto no_thread_group; /* * Kill all other threads in the thread group. */ spin_lock_irq(lock); if (signal_group_exit(sig)) { /* * Another group action in progress, just * return so that the signal is processed. */ spin_unlock_irq(lock); return -EAGAIN; } sig->group_exit_task = tsk; sig->notify_count = zap_other_threads(tsk); if (!thread_group_leader(tsk)) sig->notify_count--; while (sig->notify_count) { __set_current_state(TASK_KILLABLE); spin_unlock_irq(lock); schedule(); if (unlikely(__fatal_signal_pending(tsk))) goto killed; spin_lock_irq(lock); } spin_unlock_irq(lock); /* * At this point all other threads have exited, all we have to * do is to wait for the thread group leader to become inactive, * and to assume its PID: */ if (!thread_group_leader(tsk)) { struct task_struct *leader = tsk->group_leader; for (;;) { threadgroup_change_begin(tsk); write_lock_irq(&tasklist_lock); /* * Do this under tasklist_lock to ensure that * exit_notify() can't miss ->group_exit_task */ sig->notify_count = -1; if (likely(leader->exit_state)) break; __set_current_state(TASK_KILLABLE); write_unlock_irq(&tasklist_lock); threadgroup_change_end(tsk); schedule(); if (unlikely(__fatal_signal_pending(tsk))) goto killed; } /* * The only record we have of the real-time age of a * process, regardless of execs it's done, is start_time. * All the past CPU time is accumulated in signal_struct * from sister threads now dead. But in this non-leader * exec, nothing survives from the original leader thread, * whose birth marks the true age of this process now. * When we take on its identity by switching to its PID, we * also take its birthdate (always earlier than our own). */ tsk->start_time = leader->start_time; tsk->real_start_time = leader->real_start_time; BUG_ON(!same_thread_group(leader, tsk)); BUG_ON(has_group_leader_pid(tsk)); /* * An exec() starts a new thread group with the * TGID of the previous thread group. Rehash the * two threads with a switched PID, and release * the former thread group leader: */ /* Become a process group leader with the old leader's pid. * The old leader becomes a thread of the this thread group. * Note: The old leader also uses this pid until release_task * is called. Odd but simple and correct. */ tsk->pid = leader->pid; change_pid(tsk, PIDTYPE_PID, task_pid(leader)); transfer_pid(leader, tsk, PIDTYPE_PGID); transfer_pid(leader, tsk, PIDTYPE_SID); list_replace_rcu(&leader->tasks, &tsk->tasks); list_replace_init(&leader->sibling, &tsk->sibling); tsk->group_leader = tsk; leader->group_leader = tsk; tsk->exit_signal = SIGCHLD; leader->exit_signal = -1; BUG_ON(leader->exit_state != EXIT_ZOMBIE); leader->exit_state = EXIT_DEAD; /* * We are going to release_task()->ptrace_unlink() silently, * the tracer can sleep in do_wait(). EXIT_DEAD guarantees * the tracer wont't block again waiting for this thread. 
*/ if (unlikely(leader->ptrace)) __wake_up_parent(leader, leader->parent); write_unlock_irq(&tasklist_lock); threadgroup_change_end(tsk); release_task(leader); } sig->group_exit_task = NULL; sig->notify_count = 0; no_thread_group: /* we have changed execution domain */ tsk->exit_signal = SIGCHLD; exit_itimers(sig); flush_itimer_signals(); if (atomic_read(&oldsighand->count) != 1) { struct sighand_struct *newsighand; /* * This ->sighand is shared with the CLONE_SIGHAND * but not CLONE_THREAD task, switch to the new one. */ newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL); if (!newsighand) return -ENOMEM; atomic_set(&newsighand->count, 1); memcpy(newsighand->action, oldsighand->action, sizeof(newsighand->action)); write_lock_irq(&tasklist_lock); spin_lock(&oldsighand->siglock); rcu_assign_pointer(tsk->sighand, newsighand); spin_unlock(&oldsighand->siglock); write_unlock_irq(&tasklist_lock); __cleanup_sighand(oldsighand); } BUG_ON(!thread_group_leader(tsk)); return 0; killed: /* protects against exit_notify() and __exit_signal() */ read_lock(&tasklist_lock); sig->group_exit_task = NULL; sig->notify_count = 0; read_unlock(&tasklist_lock); return -EAGAIN; } char *get_task_comm(char *buf, struct task_struct *tsk) { /* buf must be at least sizeof(tsk->comm) in size */ task_lock(tsk); strncpy(buf, tsk->comm, sizeof(tsk->comm)); task_unlock(tsk); return buf; } EXPORT_SYMBOL_GPL(get_task_comm); /* * These functions flushes out all traces of the currently running executable * so that a new one can be started */ void __set_task_comm(struct task_struct *tsk, const char *buf, bool exec) { task_lock(tsk); trace_task_rename(tsk, buf); strlcpy(tsk->comm, buf, sizeof(tsk->comm)); task_unlock(tsk); perf_event_comm(tsk, exec); } int flush_old_exec(struct linux_binprm * bprm) { int retval; /* * Make sure we have a private signal table and that * we are unassociated from the previous thread group. */ retval = de_thread(current); if (retval) goto out; /* * Must be called _before_ exec_mmap() as bprm->mm is * not visibile until then. This also enables the update * to be lockless. */ set_mm_exe_file(bprm->mm, bprm->file); /* * Release all of the old mmap stuff */ acct_arg_size(bprm, 0); retval = exec_mmap(bprm->mm); if (retval) goto out; bprm->mm = NULL; /* We're using it now */ set_fs(USER_DS); current->flags &= ~(PF_RANDOMIZE | PF_FORKNOEXEC | PF_KTHREAD | PF_NOFREEZE | PF_NO_SETAFFINITY); flush_thread(); current->personality &= ~bprm->per_clear; return 0; out: return retval; } EXPORT_SYMBOL(flush_old_exec); void would_dump(struct linux_binprm *bprm, struct file *file) { if (inode_permission(file_inode(file), MAY_READ) < 0) bprm->interp_flags |= BINPRM_FLAGS_ENFORCE_NONDUMP; } EXPORT_SYMBOL(would_dump); void setup_new_exec(struct linux_binprm * bprm) { arch_pick_mmap_layout(current->mm); /* This is the point of no return */ current->sas_ss_sp = current->sas_ss_size = 0; if (uid_eq(current_euid(), current_uid()) && gid_eq(current_egid(), current_gid())) set_dumpable(current->mm, SUID_DUMP_USER); else set_dumpable(current->mm, suid_dumpable); perf_event_exec(); __set_task_comm(current, kbasename(bprm->filename), true); /* Set the new mm task size. 
We have to do that late because it may * depend on TIF_32BIT which is only updated in flush_thread() on * some architectures like powerpc */ current->mm->task_size = TASK_SIZE; /* install the new credentials */ if (!uid_eq(bprm->cred->uid, current_euid()) || !gid_eq(bprm->cred->gid, current_egid())) { current->pdeath_signal = 0; } else { would_dump(bprm, bprm->file); if (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP) set_dumpable(current->mm, suid_dumpable); } /* An exec changes our domain. We are no longer part of the thread group */ current->self_exec_id++; flush_signal_handlers(current, 0); do_close_on_exec(current->files); } EXPORT_SYMBOL(setup_new_exec); /* * Prepare credentials and lock ->cred_guard_mutex. * install_exec_creds() commits the new creds and drops the lock. * Or, if exec fails before, free_bprm() should release ->cred and * and unlock. */ int prepare_bprm_creds(struct linux_binprm *bprm) { if (mutex_lock_interruptible(&current->signal->cred_guard_mutex)) return -ERESTARTNOINTR; bprm->cred = prepare_exec_creds(); if (likely(bprm->cred)) return 0; mutex_unlock(&current->signal->cred_guard_mutex); return -ENOMEM; } static void free_bprm(struct linux_binprm *bprm) { free_arg_pages(bprm); if (bprm->cred) { mutex_unlock(&current->signal->cred_guard_mutex); abort_creds(bprm->cred); } if (bprm->file) { allow_write_access(bprm->file); fput(bprm->file); } /* If a binfmt changed the interp, free it. */ if (bprm->interp != bprm->filename) kfree(bprm->interp); kfree(bprm); } int bprm_change_interp(char *interp, struct linux_binprm *bprm) { /* If a binfmt changed the interp, free it first. */ if (bprm->interp != bprm->filename) kfree(bprm->interp); bprm->interp = kstrdup(interp, GFP_KERNEL); if (!bprm->interp) return -ENOMEM; return 0; } EXPORT_SYMBOL(bprm_change_interp); /* * install the new credentials for this executable */ void install_exec_creds(struct linux_binprm *bprm) { security_bprm_committing_creds(bprm); commit_creds(bprm->cred); bprm->cred = NULL; /* * Disable monitoring for regular users * when executing setuid binaries. Must * wait until new credentials are committed * by commit_creds() above */ if (get_dumpable(current->mm) != SUID_DUMP_USER) perf_event_exit_task(current); /* * cred_guard_mutex must be held at least to this point to prevent * ptrace_attach() from altering our determination of the task's * credentials; any time after this it may be unlocked. */ security_bprm_committed_creds(bprm); mutex_unlock(&current->signal->cred_guard_mutex); } EXPORT_SYMBOL(install_exec_creds); /* * determine how safe it is to execute the proposed program * - the caller must hold ->cred_guard_mutex to protect against * PTRACE_ATTACH or seccomp thread-sync */ static void check_unsafe_exec(struct linux_binprm *bprm) { struct task_struct *p = current, *t; unsigned n_fs; if (p->ptrace) { if (p->ptrace & PT_PTRACE_CAP) bprm->unsafe |= LSM_UNSAFE_PTRACE_CAP; else bprm->unsafe |= LSM_UNSAFE_PTRACE; } /* * This isn't strictly necessary, but it makes it harder for LSMs to * mess up. 
*/ if (task_no_new_privs(current)) bprm->unsafe |= LSM_UNSAFE_NO_NEW_PRIVS; t = p; n_fs = 1; spin_lock(&p->fs->lock); rcu_read_lock(); while_each_thread(p, t) { if (t->fs == p->fs) n_fs++; } rcu_read_unlock(); if (p->fs->users > n_fs) bprm->unsafe |= LSM_UNSAFE_SHARE; else p->fs->in_exec = 1; spin_unlock(&p->fs->lock); } static void bprm_fill_uid(struct linux_binprm *bprm) { struct inode *inode; unsigned int mode; kuid_t uid; kgid_t gid; /* clear any previous set[ug]id data from a previous binary */ bprm->cred->euid = current_euid(); bprm->cred->egid = current_egid(); if (bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID) return; if (task_no_new_privs(current)) return; inode = file_inode(bprm->file); mode = READ_ONCE(inode->i_mode); if (!(mode & (S_ISUID|S_ISGID))) return; /* Be careful if suid/sgid is set */ mutex_lock(&inode->i_mutex); /* reload atomically mode/uid/gid now that lock held */ mode = inode->i_mode; uid = inode->i_uid; gid = inode->i_gid; mutex_unlock(&inode->i_mutex); /* We ignore suid/sgid if there are no mappings for them in the ns */ if (!kuid_has_mapping(bprm->cred->user_ns, uid) || !kgid_has_mapping(bprm->cred->user_ns, gid)) return; if (mode & S_ISUID) { bprm->per_clear |= PER_CLEAR_ON_SETID; bprm->cred->euid = uid; } if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) { bprm->per_clear |= PER_CLEAR_ON_SETID; bprm->cred->egid = gid; } } /* * Fill the binprm structure from the inode. * Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes * * This may be called multiple times for binary chains (scripts for example). */ int prepare_binprm(struct linux_binprm *bprm) { int retval; bprm_fill_uid(bprm); /* fill in binprm security blob */ retval = security_bprm_set_creds(bprm); if (retval) return retval; bprm->cred_prepared = 1; memset(bprm->buf, 0, BINPRM_BUF_SIZE); return kernel_read(bprm->file, 0, bprm->buf, BINPRM_BUF_SIZE); } EXPORT_SYMBOL(prepare_binprm); /* * Arguments are '\0' separated strings found at the location bprm->p * points to; chop off the first by relocating brpm->p to right after * the first '\0' encountered. */ int remove_arg_zero(struct linux_binprm *bprm) { int ret = 0; unsigned long offset; char *kaddr; struct page *page; if (!bprm->argc) return 0; do { offset = bprm->p & ~PAGE_MASK; page = get_arg_page(bprm, bprm->p, 0); if (!page) { ret = -EFAULT; goto out; } kaddr = kmap_atomic(page); for (; offset < PAGE_SIZE && kaddr[offset]; offset++, bprm->p++) ; kunmap_atomic(kaddr); put_arg_page(page); if (offset == PAGE_SIZE) free_arg_page(bprm, (bprm->p >> PAGE_SHIFT) - 1); } while (offset == PAGE_SIZE); bprm->p++; bprm->argc--; ret = 0; out: return ret; } EXPORT_SYMBOL(remove_arg_zero); #define printable(c) (((c)=='\t') || ((c)=='\n') || (0x20<=(c) && (c)<=0x7e)) /* * cycle the list of binary formats handler, until one recognizes the image */ int search_binary_handler(struct linux_binprm *bprm) { bool need_retry = IS_ENABLED(CONFIG_MODULES); struct linux_binfmt *fmt; int retval; /* This allows 4 levels of binfmt rewrites before failing hard. 
*/ if (bprm->recursion_depth > 5) return -ELOOP; retval = security_bprm_check(bprm); if (retval) return retval; retval = -ENOENT; retry: read_lock(&binfmt_lock); list_for_each_entry(fmt, &formats, lh) { if (!try_module_get(fmt->module)) continue; read_unlock(&binfmt_lock); bprm->recursion_depth++; retval = fmt->load_binary(bprm); read_lock(&binfmt_lock); put_binfmt(fmt); bprm->recursion_depth--; if (retval < 0 && !bprm->mm) { /* we got to flush_old_exec() and failed after it */ read_unlock(&binfmt_lock); force_sigsegv(SIGSEGV, current); return retval; } if (retval != -ENOEXEC || !bprm->file) { read_unlock(&binfmt_lock); return retval; } } read_unlock(&binfmt_lock); if (need_retry) { if (printable(bprm->buf[0]) && printable(bprm->buf[1]) && printable(bprm->buf[2]) && printable(bprm->buf[3])) return retval; if (request_module("binfmt-%04x", *(ushort *)(bprm->buf + 2)) < 0) return retval; need_retry = false; goto retry; } return retval; } EXPORT_SYMBOL(search_binary_handler); static int exec_binprm(struct linux_binprm *bprm) { pid_t old_pid, old_vpid; int ret; /* Need to fetch pid before load_binary changes it */ old_pid = current->pid; rcu_read_lock(); old_vpid = task_pid_nr_ns(current, task_active_pid_ns(current->parent)); rcu_read_unlock(); ret = search_binary_handler(bprm); if (ret >= 0) { audit_bprm(bprm); trace_sched_process_exec(current, old_pid, bprm); ptrace_event(PTRACE_EVENT_EXEC, old_vpid); proc_exec_connector(current); } return ret; } /* * sys_execve() executes a new program. */ static int do_execveat_common(int fd, struct filename *filename, struct user_arg_ptr argv, struct user_arg_ptr envp, int flags) { char *pathbuf = NULL; struct linux_binprm *bprm; struct file *file; struct files_struct *displaced; int retval; if (IS_ERR(filename)) return PTR_ERR(filename); /* * We move the actual failure in case of RLIMIT_NPROC excess from * set*uid() to execve() because too many poorly written programs * don't check setuid() return code. Here we additionally recheck * whether NPROC limit is still exceeded. */ if ((current->flags & PF_NPROC_EXCEEDED) && atomic_read(&current_user()->processes) > rlimit(RLIMIT_NPROC)) { retval = -EAGAIN; goto out_ret; } /* We're below the limit (still or again), so we don't want to make * further execve() calls fail. */ current->flags &= ~PF_NPROC_EXCEEDED; retval = unshare_files(&displaced); if (retval) goto out_ret; retval = -ENOMEM; bprm = kzalloc(sizeof(*bprm), GFP_KERNEL); if (!bprm) goto out_files; retval = prepare_bprm_creds(bprm); if (retval) goto out_free; check_unsafe_exec(bprm); current->in_execve = 1; file = do_open_execat(fd, filename, flags); retval = PTR_ERR(file); if (IS_ERR(file)) goto out_unmark; sched_exec(); bprm->file = file; if (fd == AT_FDCWD || filename->name[0] == '/') { bprm->filename = filename->name; } else { if (filename->name[0] == '\0') pathbuf = kasprintf(GFP_TEMPORARY, "/dev/fd/%d", fd); else pathbuf = kasprintf(GFP_TEMPORARY, "/dev/fd/%d/%s", fd, filename->name); if (!pathbuf) { retval = -ENOMEM; goto out_unmark; } /* * Record that a name derived from an O_CLOEXEC fd will be * inaccessible after exec. Relies on having exclusive access to * current->files (due to unshare_files above). 
*/ if (close_on_exec(fd, rcu_dereference_raw(current->files->fdt))) bprm->interp_flags |= BINPRM_FLAGS_PATH_INACCESSIBLE; bprm->filename = pathbuf; } bprm->interp = bprm->filename; retval = bprm_mm_init(bprm); if (retval) goto out_unmark; bprm->argc = count(argv, MAX_ARG_STRINGS); if ((retval = bprm->argc) < 0) goto out; bprm->envc = count(envp, MAX_ARG_STRINGS); if ((retval = bprm->envc) < 0) goto out; retval = prepare_binprm(bprm); if (retval < 0) goto out; retval = copy_strings_kernel(1, &bprm->filename, bprm); if (retval < 0) goto out; bprm->exec = bprm->p; retval = copy_strings(bprm->envc, envp, bprm); if (retval < 0) goto out; retval = copy_strings(bprm->argc, argv, bprm); if (retval < 0) goto out; retval = exec_binprm(bprm); if (retval < 0) goto out; /* execve succeeded */ current->fs->in_exec = 0; current->in_execve = 0; acct_update_integrals(current); task_numa_free(current); free_bprm(bprm); kfree(pathbuf); putname(filename); if (displaced) put_files_struct(displaced); return retval; out: if (bprm->mm) { acct_arg_size(bprm, 0); mmput(bprm->mm); } out_unmark: current->fs->in_exec = 0; current->in_execve = 0; out_free: free_bprm(bprm); kfree(pathbuf); out_files: if (displaced) reset_files_struct(displaced); out_ret: putname(filename); return retval; } int do_execve(struct filename *filename, const char __user *const __user *__argv, const char __user *const __user *__envp) { struct user_arg_ptr argv = { .ptr.native = __argv }; struct user_arg_ptr envp = { .ptr.native = __envp }; return do_execveat_common(AT_FDCWD, filename, argv, envp, 0); } int do_execveat(int fd, struct filename *filename, const char __user *const __user *__argv, const char __user *const __user *__envp, int flags) { struct user_arg_ptr argv = { .ptr.native = __argv }; struct user_arg_ptr envp = { .ptr.native = __envp }; return do_execveat_common(fd, filename, argv, envp, flags); } #ifdef CONFIG_COMPAT static int compat_do_execve(struct filename *filename, const compat_uptr_t __user *__argv, const compat_uptr_t __user *__envp) { struct user_arg_ptr argv = { .is_compat = true, .ptr.compat = __argv, }; struct user_arg_ptr envp = { .is_compat = true, .ptr.compat = __envp, }; return do_execveat_common(AT_FDCWD, filename, argv, envp, 0); } static int compat_do_execveat(int fd, struct filename *filename, const compat_uptr_t __user *__argv, const compat_uptr_t __user *__envp, int flags) { struct user_arg_ptr argv = { .is_compat = true, .ptr.compat = __argv, }; struct user_arg_ptr envp = { .is_compat = true, .ptr.compat = __envp, }; return do_execveat_common(fd, filename, argv, envp, flags); } #endif void set_binfmt(struct linux_binfmt *new) { struct mm_struct *mm = current->mm; if (mm->binfmt) module_put(mm->binfmt->module); mm->binfmt = new; if (new) __module_get(new->module); } EXPORT_SYMBOL(set_binfmt); /* * set_dumpable stores three-value SUID_DUMP_* into mm->flags. */ void set_dumpable(struct mm_struct *mm, int value) { unsigned long old, new; if (WARN_ON((unsigned)value > SUID_DUMP_ROOT)) return; do { old = ACCESS_ONCE(mm->flags); new = (old & ~MMF_DUMPABLE_MASK) | value; } while (cmpxchg(&mm->flags, old, new) != old); } SYSCALL_DEFINE3(execve, const char __user *, filename, const char __user *const __user *, argv, const char __user *const __user *, envp) { return do_execve(getname(filename), argv, envp); } SYSCALL_DEFINE5(execveat, int, fd, const char __user *, filename, const char __user *const __user *, argv, const char __user *const __user *, envp, int, flags) { int lookup_flags = (flags & AT_EMPTY_PATH) ? 
LOOKUP_EMPTY : 0; return do_execveat(fd, getname_flags(filename, lookup_flags, NULL), argv, envp, flags); } #ifdef CONFIG_COMPAT COMPAT_SYSCALL_DEFINE3(execve, const char __user *, filename, const compat_uptr_t __user *, argv, const compat_uptr_t __user *, envp) { return compat_do_execve(getname(filename), argv, envp); } COMPAT_SYSCALL_DEFINE5(execveat, int, fd, const char __user *, filename, const compat_uptr_t __user *, argv, const compat_uptr_t __user *, envp, int, flags) { int lookup_flags = (flags & AT_EMPTY_PATH) ? LOOKUP_EMPTY : 0; return compat_do_execveat(fd, getname_flags(filename, lookup_flags, NULL), argv, envp, flags); } #endif
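/*
 * Illustrative userspace sketch — not part of the kernel file above. It
 * shows how the execveat() path handled by do_execveat_common() can be
 * reached with AT_EMPTY_PATH, i.e. executing the object behind an already
 * opened file descriptor so the kernel synthesizes a "/dev/fd/<N>" name for
 * bprm->filename. It assumes SYS_execveat is present in the installed
 * headers (Linux >= 3.19); glibc has historically not shipped a wrapper,
 * so the raw syscall is used and error handling is kept minimal.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	char *argv[] = { "ls", "-l", NULL };
	char *envp[] = { NULL };
	/*
	 * Deliberately opened without O_CLOEXEC: per the code above, an
	 * O_CLOEXEC fd with an empty pathname would mark the name as
	 * inaccessible after exec (BINPRM_FLAGS_PATH_INACCESSIBLE), which
	 * matters for interpreted (#!) files.
	 */
	int fd = open("/bin/ls", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Empty pathname + AT_EMPTY_PATH: execute the object behind fd. */
	syscall(SYS_execveat, fd, "", argv, envp, AT_EMPTY_PATH);

	/* Only reached if the exec failed. */
	perror("execveat");
	return 1;
}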
./CrossVul/dataset_final_sorted/CWE-362/c/good_1595_0
crossvul-cpp_data_bad_5592_0
/* Manage a process's keyrings * * Copyright (C) 2004-2005, 2008 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/module.h> #include <linux/init.h> #include <linux/sched.h> #include <linux/keyctl.h> #include <linux/fs.h> #include <linux/err.h> #include <linux/mutex.h> #include <linux/security.h> #include <linux/user_namespace.h> #include <asm/uaccess.h> #include "internal.h" /* Session keyring create vs join semaphore */ static DEFINE_MUTEX(key_session_mutex); /* User keyring creation semaphore */ static DEFINE_MUTEX(key_user_keyring_mutex); /* The root user's tracking struct */ struct key_user root_key_user = { .usage = ATOMIC_INIT(3), .cons_lock = __MUTEX_INITIALIZER(root_key_user.cons_lock), .lock = __SPIN_LOCK_UNLOCKED(root_key_user.lock), .nkeys = ATOMIC_INIT(2), .nikeys = ATOMIC_INIT(2), .uid = GLOBAL_ROOT_UID, }; /* * Install the user and user session keyrings for the current process's UID. */ int install_user_keyrings(void) { struct user_struct *user; const struct cred *cred; struct key *uid_keyring, *session_keyring; key_perm_t user_keyring_perm; char buf[20]; int ret; uid_t uid; user_keyring_perm = (KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_ALL; cred = current_cred(); user = cred->user; uid = from_kuid(cred->user_ns, user->uid); kenter("%p{%u}", user, uid); if (user->uid_keyring) { kleave(" = 0 [exist]"); return 0; } mutex_lock(&key_user_keyring_mutex); ret = 0; if (!user->uid_keyring) { /* get the UID-specific keyring * - there may be one in existence already as it may have been * pinned by a session, but the user_struct pointing to it * may have been destroyed by setuid */ sprintf(buf, "_uid.%u", uid); uid_keyring = find_keyring_by_name(buf, true); if (IS_ERR(uid_keyring)) { uid_keyring = keyring_alloc(buf, user->uid, INVALID_GID, cred, user_keyring_perm, KEY_ALLOC_IN_QUOTA, NULL); if (IS_ERR(uid_keyring)) { ret = PTR_ERR(uid_keyring); goto error; } } /* get a default session keyring (which might also exist * already) */ sprintf(buf, "_uid_ses.%u", uid); session_keyring = find_keyring_by_name(buf, true); if (IS_ERR(session_keyring)) { session_keyring = keyring_alloc(buf, user->uid, INVALID_GID, cred, user_keyring_perm, KEY_ALLOC_IN_QUOTA, NULL); if (IS_ERR(session_keyring)) { ret = PTR_ERR(session_keyring); goto error_release; } /* we install a link from the user session keyring to * the user keyring */ ret = key_link(session_keyring, uid_keyring); if (ret < 0) goto error_release_both; } /* install the keyrings */ user->uid_keyring = uid_keyring; user->session_keyring = session_keyring; } mutex_unlock(&key_user_keyring_mutex); kleave(" = 0"); return 0; error_release_both: key_put(session_keyring); error_release: key_put(uid_keyring); error: mutex_unlock(&key_user_keyring_mutex); kleave(" = %d", ret); return ret; } /* * Install a fresh thread keyring directly to new credentials. This keyring is * allowed to overrun the quota. */ int install_thread_keyring_to_cred(struct cred *new) { struct key *keyring; keyring = keyring_alloc("_tid", new->uid, new->gid, new, KEY_POS_ALL | KEY_USR_VIEW, KEY_ALLOC_QUOTA_OVERRUN, NULL); if (IS_ERR(keyring)) return PTR_ERR(keyring); new->thread_keyring = keyring; return 0; } /* * Install a fresh thread keyring, discarding the old one. 
*/ static int install_thread_keyring(void) { struct cred *new; int ret; new = prepare_creds(); if (!new) return -ENOMEM; BUG_ON(new->thread_keyring); ret = install_thread_keyring_to_cred(new); if (ret < 0) { abort_creds(new); return ret; } return commit_creds(new); } /* * Install a process keyring directly to a credentials struct. * * Returns -EEXIST if there was already a process keyring, 0 if one installed, * and other value on any other error */ int install_process_keyring_to_cred(struct cred *new) { struct key *keyring; if (new->process_keyring) return -EEXIST; keyring = keyring_alloc("_pid", new->uid, new->gid, new, KEY_POS_ALL | KEY_USR_VIEW, KEY_ALLOC_QUOTA_OVERRUN, NULL); if (IS_ERR(keyring)) return PTR_ERR(keyring); new->process_keyring = keyring; return 0; } /* * Make sure a process keyring is installed for the current process. The * existing process keyring is not replaced. * * Returns 0 if there is a process keyring by the end of this function, some * error otherwise. */ static int install_process_keyring(void) { struct cred *new; int ret; new = prepare_creds(); if (!new) return -ENOMEM; ret = install_process_keyring_to_cred(new); if (ret < 0) { abort_creds(new); return ret != -EEXIST ? ret : 0; } return commit_creds(new); } /* * Install a session keyring directly to a credentials struct. */ int install_session_keyring_to_cred(struct cred *cred, struct key *keyring) { unsigned long flags; struct key *old; might_sleep(); /* create an empty session keyring */ if (!keyring) { flags = KEY_ALLOC_QUOTA_OVERRUN; if (cred->session_keyring) flags = KEY_ALLOC_IN_QUOTA; keyring = keyring_alloc("_ses", cred->uid, cred->gid, cred, KEY_POS_ALL | KEY_USR_VIEW | KEY_USR_READ, flags, NULL); if (IS_ERR(keyring)) return PTR_ERR(keyring); } else { atomic_inc(&keyring->usage); } /* install the keyring */ old = cred->session_keyring; rcu_assign_pointer(cred->session_keyring, keyring); if (old) key_put(old); return 0; } /* * Install a session keyring, discarding the old one. If a keyring is not * supplied, an empty one is invented. */ static int install_session_keyring(struct key *keyring) { struct cred *new; int ret; new = prepare_creds(); if (!new) return -ENOMEM; ret = install_session_keyring_to_cred(new, keyring); if (ret < 0) { abort_creds(new); return ret; } return commit_creds(new); } /* * Handle the fsuid changing. */ void key_fsuid_changed(struct task_struct *tsk) { /* update the ownership of the thread keyring */ BUG_ON(!tsk->cred); if (tsk->cred->thread_keyring) { down_write(&tsk->cred->thread_keyring->sem); tsk->cred->thread_keyring->uid = tsk->cred->fsuid; up_write(&tsk->cred->thread_keyring->sem); } } /* * Handle the fsgid changing. */ void key_fsgid_changed(struct task_struct *tsk) { /* update the ownership of the thread keyring */ BUG_ON(!tsk->cred); if (tsk->cred->thread_keyring) { down_write(&tsk->cred->thread_keyring->sem); tsk->cred->thread_keyring->gid = tsk->cred->fsgid; up_write(&tsk->cred->thread_keyring->sem); } } /* * Search the process keyrings attached to the supplied cred for the first * matching key. * * The search criteria are the type and the match function. The description is * given to the match function as a parameter, but doesn't otherwise influence * the search. Typically the match function will compare the description * parameter to the key's description. * * This can only search keyrings that grant Search permission to the supplied * credentials. Keyrings linked to searched keyrings will also be searched if * they grant Search permission too. 
Keys can only be found if they grant * Search permission to the credentials. * * Returns a pointer to the key with the key usage count incremented if * successful, -EAGAIN if we didn't find any matching key or -ENOKEY if we only * matched negative keys. * * In the case of a successful return, the possession attribute is set on the * returned key reference. */ key_ref_t search_my_process_keyrings(struct key_type *type, const void *description, key_match_func_t match, bool no_state_check, const struct cred *cred) { key_ref_t key_ref, ret, err; /* we want to return -EAGAIN or -ENOKEY if any of the keyrings were * searchable, but we failed to find a key or we found a negative key; * otherwise we want to return a sample error (probably -EACCES) if * none of the keyrings were searchable * * in terms of priority: success > -ENOKEY > -EAGAIN > other error */ key_ref = NULL; ret = NULL; err = ERR_PTR(-EAGAIN); /* search the thread keyring first */ if (cred->thread_keyring) { key_ref = keyring_search_aux( make_key_ref(cred->thread_keyring, 1), cred, type, description, match, no_state_check); if (!IS_ERR(key_ref)) goto found; switch (PTR_ERR(key_ref)) { case -EAGAIN: /* no key */ case -ENOKEY: /* negative key */ ret = key_ref; break; default: err = key_ref; break; } } /* search the process keyring second */ if (cred->process_keyring) { key_ref = keyring_search_aux( make_key_ref(cred->process_keyring, 1), cred, type, description, match, no_state_check); if (!IS_ERR(key_ref)) goto found; switch (PTR_ERR(key_ref)) { case -EAGAIN: /* no key */ if (ret) break; case -ENOKEY: /* negative key */ ret = key_ref; break; default: err = key_ref; break; } } /* search the session keyring */ if (cred->session_keyring) { rcu_read_lock(); key_ref = keyring_search_aux( make_key_ref(rcu_dereference(cred->session_keyring), 1), cred, type, description, match, no_state_check); rcu_read_unlock(); if (!IS_ERR(key_ref)) goto found; switch (PTR_ERR(key_ref)) { case -EAGAIN: /* no key */ if (ret) break; case -ENOKEY: /* negative key */ ret = key_ref; break; default: err = key_ref; break; } } /* or search the user-session keyring */ else if (cred->user->session_keyring) { key_ref = keyring_search_aux( make_key_ref(cred->user->session_keyring, 1), cred, type, description, match, no_state_check); if (!IS_ERR(key_ref)) goto found; switch (PTR_ERR(key_ref)) { case -EAGAIN: /* no key */ if (ret) break; case -ENOKEY: /* negative key */ ret = key_ref; break; default: err = key_ref; break; } } /* no key - decide on the error we're going to go for */ key_ref = ret ? ret : err; found: return key_ref; } /* * Search the process keyrings attached to the supplied cred for the first * matching key in the manner of search_my_process_keyrings(), but also search * the keys attached to the assumed authorisation key using its credentials if * one is available. * * Return same as search_my_process_keyrings(). 
*/ key_ref_t search_process_keyrings(struct key_type *type, const void *description, key_match_func_t match, const struct cred *cred) { struct request_key_auth *rka; key_ref_t key_ref, ret = ERR_PTR(-EACCES), err; might_sleep(); key_ref = search_my_process_keyrings(type, description, match, false, cred); if (!IS_ERR(key_ref)) goto found; err = key_ref; /* if this process has an instantiation authorisation key, then we also * search the keyrings of the process mentioned there * - we don't permit access to request_key auth keys via this method */ if (cred->request_key_auth && cred == current_cred() && type != &key_type_request_key_auth ) { /* defend against the auth key being revoked */ down_read(&cred->request_key_auth->sem); if (key_validate(cred->request_key_auth) == 0) { rka = cred->request_key_auth->payload.data; key_ref = search_process_keyrings(type, description, match, rka->cred); up_read(&cred->request_key_auth->sem); if (!IS_ERR(key_ref)) goto found; ret = key_ref; } else { up_read(&cred->request_key_auth->sem); } } /* no key - decide on the error we're going to go for */ if (err == ERR_PTR(-ENOKEY) || ret == ERR_PTR(-ENOKEY)) key_ref = ERR_PTR(-ENOKEY); else if (err == ERR_PTR(-EACCES)) key_ref = ret; else key_ref = err; found: return key_ref; } /* * See if the key we're looking at is the target key. */ int lookup_user_key_possessed(const struct key *key, const void *target) { return key == target; } /* * Look up a key ID given us by userspace with a given permissions mask to get * the key it refers to. * * Flags can be passed to request that special keyrings be created if referred * to directly, to permit partially constructed keys to be found and to skip * validity and permission checks on the found key. * * Returns a pointer to the key with an incremented usage count if successful; * -EINVAL if the key ID is invalid; -ENOKEY if the key ID does not correspond * to a key or the best found key was a negative key; -EKEYREVOKED or * -EKEYEXPIRED if the best found key was revoked or expired; -EACCES if the * found key doesn't grant the requested permit or the LSM denied access to it; * or -ENOMEM if a special keyring couldn't be created. * * In the case of a successful return, the possession attribute is set on the * returned key reference. 
*/ key_ref_t lookup_user_key(key_serial_t id, unsigned long lflags, key_perm_t perm) { struct request_key_auth *rka; const struct cred *cred; struct key *key; key_ref_t key_ref, skey_ref; int ret; try_again: cred = get_current_cred(); key_ref = ERR_PTR(-ENOKEY); switch (id) { case KEY_SPEC_THREAD_KEYRING: if (!cred->thread_keyring) { if (!(lflags & KEY_LOOKUP_CREATE)) goto error; ret = install_thread_keyring(); if (ret < 0) { key_ref = ERR_PTR(ret); goto error; } goto reget_creds; } key = cred->thread_keyring; atomic_inc(&key->usage); key_ref = make_key_ref(key, 1); break; case KEY_SPEC_PROCESS_KEYRING: if (!cred->process_keyring) { if (!(lflags & KEY_LOOKUP_CREATE)) goto error; ret = install_process_keyring(); if (ret < 0) { key_ref = ERR_PTR(ret); goto error; } goto reget_creds; } key = cred->process_keyring; atomic_inc(&key->usage); key_ref = make_key_ref(key, 1); break; case KEY_SPEC_SESSION_KEYRING: if (!cred->session_keyring) { /* always install a session keyring upon access if one * doesn't exist yet */ ret = install_user_keyrings(); if (ret < 0) goto error; if (lflags & KEY_LOOKUP_CREATE) ret = join_session_keyring(NULL); else ret = install_session_keyring( cred->user->session_keyring); if (ret < 0) goto error; goto reget_creds; } else if (cred->session_keyring == cred->user->session_keyring && lflags & KEY_LOOKUP_CREATE) { ret = join_session_keyring(NULL); if (ret < 0) goto error; goto reget_creds; } rcu_read_lock(); key = rcu_dereference(cred->session_keyring); atomic_inc(&key->usage); rcu_read_unlock(); key_ref = make_key_ref(key, 1); break; case KEY_SPEC_USER_KEYRING: if (!cred->user->uid_keyring) { ret = install_user_keyrings(); if (ret < 0) goto error; } key = cred->user->uid_keyring; atomic_inc(&key->usage); key_ref = make_key_ref(key, 1); break; case KEY_SPEC_USER_SESSION_KEYRING: if (!cred->user->session_keyring) { ret = install_user_keyrings(); if (ret < 0) goto error; } key = cred->user->session_keyring; atomic_inc(&key->usage); key_ref = make_key_ref(key, 1); break; case KEY_SPEC_GROUP_KEYRING: /* group keyrings are not yet supported */ key_ref = ERR_PTR(-EINVAL); goto error; case KEY_SPEC_REQKEY_AUTH_KEY: key = cred->request_key_auth; if (!key) goto error; atomic_inc(&key->usage); key_ref = make_key_ref(key, 1); break; case KEY_SPEC_REQUESTOR_KEYRING: if (!cred->request_key_auth) goto error; down_read(&cred->request_key_auth->sem); if (test_bit(KEY_FLAG_REVOKED, &cred->request_key_auth->flags)) { key_ref = ERR_PTR(-EKEYREVOKED); key = NULL; } else { rka = cred->request_key_auth->payload.data; key = rka->dest_keyring; atomic_inc(&key->usage); } up_read(&cred->request_key_auth->sem); if (!key) goto error; key_ref = make_key_ref(key, 1); break; default: key_ref = ERR_PTR(-EINVAL); if (id < 1) goto error; key = key_lookup(id); if (IS_ERR(key)) { key_ref = ERR_CAST(key); goto error; } key_ref = make_key_ref(key, 0); /* check to see if we possess the key */ skey_ref = search_process_keyrings(key->type, key, lookup_user_key_possessed, cred); if (!IS_ERR(skey_ref)) { key_put(key); key_ref = skey_ref; } break; } /* unlink does not use the nominated key in any way, so can skip all * the permission checks as it is only concerned with the keyring */ if (lflags & KEY_LOOKUP_FOR_UNLINK) { ret = 0; goto error; } if (!(lflags & KEY_LOOKUP_PARTIAL)) { ret = wait_for_key_construction(key, true); switch (ret) { case -ERESTARTSYS: goto invalid_key; default: if (perm) goto invalid_key; case 0: break; } } else if (perm) { ret = key_validate(key); if (ret < 0) goto invalid_key; } ret = -EIO; 
if (!(lflags & KEY_LOOKUP_PARTIAL) && !test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) goto invalid_key; /* check the permissions */ ret = key_task_permission(key_ref, cred, perm); if (ret < 0) goto invalid_key; key->last_used_at = current_kernel_time().tv_sec; error: put_cred(cred); return key_ref; invalid_key: key_ref_put(key_ref); key_ref = ERR_PTR(ret); goto error; /* if we attempted to install a keyring, then it may have caused new * creds to be installed */ reget_creds: put_cred(cred); goto try_again; } /* * Join the named keyring as the session keyring if possible else attempt to * create a new one of that name and join that. * * If the name is NULL, an empty anonymous keyring will be installed as the * session keyring. * * Named session keyrings are joined with a semaphore held to prevent the * keyrings from going away whilst the attempt is made to going them and also * to prevent a race in creating compatible session keyrings. */ long join_session_keyring(const char *name) { const struct cred *old; struct cred *new; struct key *keyring; long ret, serial; new = prepare_creds(); if (!new) return -ENOMEM; old = current_cred(); /* if no name is provided, install an anonymous keyring */ if (!name) { ret = install_session_keyring_to_cred(new, NULL); if (ret < 0) goto error; serial = new->session_keyring->serial; ret = commit_creds(new); if (ret == 0) ret = serial; goto okay; } /* allow the user to join or create a named keyring */ mutex_lock(&key_session_mutex); /* look for an existing keyring of this name */ keyring = find_keyring_by_name(name, false); if (PTR_ERR(keyring) == -ENOKEY) { /* not found - try and create a new one */ keyring = keyring_alloc( name, old->uid, old->gid, old, KEY_POS_ALL | KEY_USR_VIEW | KEY_USR_READ | KEY_USR_LINK, KEY_ALLOC_IN_QUOTA, NULL); if (IS_ERR(keyring)) { ret = PTR_ERR(keyring); goto error2; } } else if (IS_ERR(keyring)) { ret = PTR_ERR(keyring); goto error2; } else if (keyring == new->session_keyring) { ret = 0; goto error2; } /* we've got a keyring - now to install it */ ret = install_session_keyring_to_cred(new, keyring); if (ret < 0) goto error2; commit_creds(new); mutex_unlock(&key_session_mutex); ret = keyring->serial; key_put(keyring); okay: return ret; error2: mutex_unlock(&key_session_mutex); error: abort_creds(new); return ret; } /* * Replace a process's session keyring on behalf of one of its children when * the target process is about to resume userspace execution. */ void key_change_session_keyring(struct callback_head *twork) { const struct cred *old = current_cred(); struct cred *new = container_of(twork, struct cred, rcu); if (unlikely(current->flags & PF_EXITING)) { put_cred(new); return; } new-> uid = old-> uid; new-> euid = old-> euid; new-> suid = old-> suid; new->fsuid = old->fsuid; new-> gid = old-> gid; new-> egid = old-> egid; new-> sgid = old-> sgid; new->fsgid = old->fsgid; new->user = get_uid(old->user); new->user_ns = get_user_ns(old->user_ns); new->group_info = get_group_info(old->group_info); new->securebits = old->securebits; new->cap_inheritable = old->cap_inheritable; new->cap_permitted = old->cap_permitted; new->cap_effective = old->cap_effective; new->cap_bset = old->cap_bset; new->jit_keyring = old->jit_keyring; new->thread_keyring = key_get(old->thread_keyring); new->process_keyring = key_get(old->process_keyring); security_transfer_creds(new, old); commit_creds(new); }
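/*
 * Illustrative userspace sketch — not part of the kernel file above. It
 * exercises the process-keyring paths shown here: joining a named session
 * keyring (join_session_keyring()) and then resolving the special
 * KEY_SPEC_SESSION_KEYRING id through lookup_user_key() when a key is
 * added and read back. Raw syscalls are used because glibc does not wrap
 * add_key()/keyctl(); the libkeyutils wrappers would work equally well.
 * The keyring, description and payload strings are arbitrary examples.
 */
#include <linux/keyctl.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	long keyring, key, len;
	char payload[64];

	/* Join (or create) a session keyring named "demo-session". */
	keyring = syscall(SYS_keyctl, KEYCTL_JOIN_SESSION_KEYRING,
			  "demo-session");
	if (keyring < 0) {
		perror("keyctl(JOIN_SESSION_KEYRING)");
		return 1;
	}

	/* Add a "user"-type key to the session keyring just installed. */
	key = syscall(SYS_add_key, "user", "demo:token", "s3cret",
		      strlen("s3cret"), KEY_SPEC_SESSION_KEYRING);
	if (key < 0) {
		perror("add_key");
		return 1;
	}

	/* Read the payload back; possession comes via the session keyring. */
	len = syscall(SYS_keyctl, KEYCTL_READ, key, payload, sizeof(payload));
	if (len < 0) {
		perror("keyctl(READ)");
		return 1;
	}
	printf("keyring %ld, key %ld, payload %.*s\n",
	       keyring, key, (int)len, payload);
	return 0;
}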
./CrossVul/dataset_final_sorted/CWE-362/c/bad_5592_0
crossvul-cpp_data_bad_2290_0
/* * Kernel-based Virtual Machine driver for Linux * * derived from drivers/kvm/kvm_main.c * * Copyright (C) 2006 Qumranet, Inc. * Copyright (C) 2008 Qumranet, Inc. * Copyright IBM Corporation, 2008 * Copyright 2010 Red Hat, Inc. and/or its affiliates. * * Authors: * Avi Kivity <avi@qumranet.com> * Yaniv Kamay <yaniv@qumranet.com> * Amit Shah <amit.shah@qumranet.com> * Ben-Ami Yassour <benami@il.ibm.com> * * This work is licensed under the terms of the GNU GPL, version 2. See * the COPYING file in the top-level directory. * */ #include <linux/kvm_host.h> #include "irq.h" #include "mmu.h" #include "i8254.h" #include "tss.h" #include "kvm_cache_regs.h" #include "x86.h" #include "cpuid.h" #include <linux/clocksource.h> #include <linux/interrupt.h> #include <linux/kvm.h> #include <linux/fs.h> #include <linux/vmalloc.h> #include <linux/module.h> #include <linux/mman.h> #include <linux/highmem.h> #include <linux/iommu.h> #include <linux/intel-iommu.h> #include <linux/cpufreq.h> #include <linux/user-return-notifier.h> #include <linux/srcu.h> #include <linux/slab.h> #include <linux/perf_event.h> #include <linux/uaccess.h> #include <linux/hash.h> #include <linux/pci.h> #include <linux/timekeeper_internal.h> #include <linux/pvclock_gtod.h> #include <trace/events/kvm.h> #define CREATE_TRACE_POINTS #include "trace.h" #include <asm/debugreg.h> #include <asm/msr.h> #include <asm/desc.h> #include <asm/mtrr.h> #include <asm/mce.h> #include <asm/i387.h> #include <asm/fpu-internal.h> /* Ugh! */ #include <asm/xcr.h> #include <asm/pvclock.h> #include <asm/div64.h> #define MAX_IO_MSRS 256 #define KVM_MAX_MCE_BANKS 32 #define KVM_MCE_CAP_SUPPORTED (MCG_CTL_P | MCG_SER_P) #define emul_to_vcpu(ctxt) \ container_of(ctxt, struct kvm_vcpu, arch.emulate_ctxt) /* EFER defaults: * - enable syscall per default because its emulated by KVM * - enable LME and LMA per default on 64 bit KVM */ #ifdef CONFIG_X86_64 static u64 __read_mostly efer_reserved_bits = ~((u64)(EFER_SCE | EFER_LME | EFER_LMA)); #else static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE); #endif #define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU static void update_cr8_intercept(struct kvm_vcpu *vcpu); static void process_nmi(struct kvm_vcpu *vcpu); static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags); struct kvm_x86_ops *kvm_x86_ops; EXPORT_SYMBOL_GPL(kvm_x86_ops); static bool ignore_msrs = 0; module_param(ignore_msrs, bool, S_IRUGO | S_IWUSR); unsigned int min_timer_period_us = 500; module_param(min_timer_period_us, uint, S_IRUGO | S_IWUSR); bool kvm_has_tsc_control; EXPORT_SYMBOL_GPL(kvm_has_tsc_control); u32 kvm_max_guest_tsc_khz; EXPORT_SYMBOL_GPL(kvm_max_guest_tsc_khz); /* tsc tolerance in parts per million - default to 1/2 of the NTP threshold */ static u32 tsc_tolerance_ppm = 250; module_param(tsc_tolerance_ppm, uint, S_IRUGO | S_IWUSR); static bool backwards_tsc_observed = false; #define KVM_NR_SHARED_MSRS 16 struct kvm_shared_msrs_global { int nr; u32 msrs[KVM_NR_SHARED_MSRS]; }; struct kvm_shared_msrs { struct user_return_notifier urn; bool registered; struct kvm_shared_msr_values { u64 host; u64 curr; } values[KVM_NR_SHARED_MSRS]; }; static struct kvm_shared_msrs_global __read_mostly shared_msrs_global; static struct kvm_shared_msrs __percpu *shared_msrs; struct kvm_stats_debugfs_item debugfs_entries[] = { { "pf_fixed", VCPU_STAT(pf_fixed) }, { "pf_guest", VCPU_STAT(pf_guest) }, { "tlb_flush", VCPU_STAT(tlb_flush) }, { "invlpg", 
VCPU_STAT(invlpg) }, { "exits", VCPU_STAT(exits) }, { "io_exits", VCPU_STAT(io_exits) }, { "mmio_exits", VCPU_STAT(mmio_exits) }, { "signal_exits", VCPU_STAT(signal_exits) }, { "irq_window", VCPU_STAT(irq_window_exits) }, { "nmi_window", VCPU_STAT(nmi_window_exits) }, { "halt_exits", VCPU_STAT(halt_exits) }, { "halt_wakeup", VCPU_STAT(halt_wakeup) }, { "hypercalls", VCPU_STAT(hypercalls) }, { "request_irq", VCPU_STAT(request_irq_exits) }, { "irq_exits", VCPU_STAT(irq_exits) }, { "host_state_reload", VCPU_STAT(host_state_reload) }, { "efer_reload", VCPU_STAT(efer_reload) }, { "fpu_reload", VCPU_STAT(fpu_reload) }, { "insn_emulation", VCPU_STAT(insn_emulation) }, { "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) }, { "irq_injections", VCPU_STAT(irq_injections) }, { "nmi_injections", VCPU_STAT(nmi_injections) }, { "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) }, { "mmu_pte_write", VM_STAT(mmu_pte_write) }, { "mmu_pte_updated", VM_STAT(mmu_pte_updated) }, { "mmu_pde_zapped", VM_STAT(mmu_pde_zapped) }, { "mmu_flooded", VM_STAT(mmu_flooded) }, { "mmu_recycled", VM_STAT(mmu_recycled) }, { "mmu_cache_miss", VM_STAT(mmu_cache_miss) }, { "mmu_unsync", VM_STAT(mmu_unsync) }, { "remote_tlb_flush", VM_STAT(remote_tlb_flush) }, { "largepages", VM_STAT(lpages) }, { NULL } }; u64 __read_mostly host_xcr0; static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt); static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu) { int i; for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU); i++) vcpu->arch.apf.gfns[i] = ~0; } static void kvm_on_user_return(struct user_return_notifier *urn) { unsigned slot; struct kvm_shared_msrs *locals = container_of(urn, struct kvm_shared_msrs, urn); struct kvm_shared_msr_values *values; for (slot = 0; slot < shared_msrs_global.nr; ++slot) { values = &locals->values[slot]; if (values->host != values->curr) { wrmsrl(shared_msrs_global.msrs[slot], values->host); values->curr = values->host; } } locals->registered = false; user_return_notifier_unregister(urn); } static void shared_msr_update(unsigned slot, u32 msr) { u64 value; unsigned int cpu = smp_processor_id(); struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu); /* only read, and nobody should modify it at this time, * so don't need lock */ if (slot >= shared_msrs_global.nr) { printk(KERN_ERR "kvm: invalid MSR slot!"); return; } rdmsrl_safe(msr, &value); smsr->values[slot].host = value; smsr->values[slot].curr = value; } void kvm_define_shared_msr(unsigned slot, u32 msr) { BUG_ON(slot >= KVM_NR_SHARED_MSRS); if (slot >= shared_msrs_global.nr) shared_msrs_global.nr = slot + 1; shared_msrs_global.msrs[slot] = msr; /* we need ensured the shared_msr_global have been updated */ smp_wmb(); } EXPORT_SYMBOL_GPL(kvm_define_shared_msr); static void kvm_shared_msr_cpu_online(void) { unsigned i; for (i = 0; i < shared_msrs_global.nr; ++i) shared_msr_update(i, shared_msrs_global.msrs[i]); } void kvm_set_shared_msr(unsigned slot, u64 value, u64 mask) { unsigned int cpu = smp_processor_id(); struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu); if (((value ^ smsr->values[slot].curr) & mask) == 0) return; smsr->values[slot].curr = value; wrmsrl(shared_msrs_global.msrs[slot], value); if (!smsr->registered) { smsr->urn.on_user_return = kvm_on_user_return; user_return_notifier_register(&smsr->urn); smsr->registered = true; } } EXPORT_SYMBOL_GPL(kvm_set_shared_msr); static void drop_user_return_notifiers(void) { unsigned int cpu = smp_processor_id(); struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, 
cpu); if (smsr->registered) kvm_on_user_return(&smsr->urn); } u64 kvm_get_apic_base(struct kvm_vcpu *vcpu) { return vcpu->arch.apic_base; } EXPORT_SYMBOL_GPL(kvm_get_apic_base); int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info) { u64 old_state = vcpu->arch.apic_base & (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE); u64 new_state = msr_info->data & (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE); u64 reserved_bits = ((~0ULL) << cpuid_maxphyaddr(vcpu)) | 0x2ff | (guest_cpuid_has_x2apic(vcpu) ? 0 : X2APIC_ENABLE); if (!msr_info->host_initiated && ((msr_info->data & reserved_bits) != 0 || new_state == X2APIC_ENABLE || (new_state == MSR_IA32_APICBASE_ENABLE && old_state == (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE)) || (new_state == (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE) && old_state == 0))) return 1; kvm_lapic_set_base(vcpu, msr_info->data); return 0; } EXPORT_SYMBOL_GPL(kvm_set_apic_base); asmlinkage __visible void kvm_spurious_fault(void) { /* Fault while not rebooting. We want the trace. */ BUG(); } EXPORT_SYMBOL_GPL(kvm_spurious_fault); #define EXCPT_BENIGN 0 #define EXCPT_CONTRIBUTORY 1 #define EXCPT_PF 2 static int exception_class(int vector) { switch (vector) { case PF_VECTOR: return EXCPT_PF; case DE_VECTOR: case TS_VECTOR: case NP_VECTOR: case SS_VECTOR: case GP_VECTOR: return EXCPT_CONTRIBUTORY; default: break; } return EXCPT_BENIGN; } #define EXCPT_FAULT 0 #define EXCPT_TRAP 1 #define EXCPT_ABORT 2 #define EXCPT_INTERRUPT 3 static int exception_type(int vector) { unsigned int mask; if (WARN_ON(vector > 31 || vector == NMI_VECTOR)) return EXCPT_INTERRUPT; mask = 1 << vector; /* #DB is trap, as instruction watchpoints are handled elsewhere */ if (mask & ((1 << DB_VECTOR) | (1 << BP_VECTOR) | (1 << OF_VECTOR))) return EXCPT_TRAP; if (mask & ((1 << DF_VECTOR) | (1 << MC_VECTOR))) return EXCPT_ABORT; /* Reserved exceptions will result in fault */ return EXCPT_FAULT; } static void kvm_multiple_exception(struct kvm_vcpu *vcpu, unsigned nr, bool has_error, u32 error_code, bool reinject) { u32 prev_nr; int class1, class2; kvm_make_request(KVM_REQ_EVENT, vcpu); if (!vcpu->arch.exception.pending) { queue: vcpu->arch.exception.pending = true; vcpu->arch.exception.has_error_code = has_error; vcpu->arch.exception.nr = nr; vcpu->arch.exception.error_code = error_code; vcpu->arch.exception.reinject = reinject; return; } /* to check exception */ prev_nr = vcpu->arch.exception.nr; if (prev_nr == DF_VECTOR) { /* triple fault -> shutdown */ kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); return; } class1 = exception_class(prev_nr); class2 = exception_class(nr); if ((class1 == EXCPT_CONTRIBUTORY && class2 == EXCPT_CONTRIBUTORY) || (class1 == EXCPT_PF && class2 != EXCPT_BENIGN)) { /* generate double fault per SDM Table 5-5 */ vcpu->arch.exception.pending = true; vcpu->arch.exception.has_error_code = true; vcpu->arch.exception.nr = DF_VECTOR; vcpu->arch.exception.error_code = 0; } else /* replace previous exception with a new one in a hope that instruction re-execution will regenerate lost exception */ goto queue; } void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr) { kvm_multiple_exception(vcpu, nr, false, 0, false); } EXPORT_SYMBOL_GPL(kvm_queue_exception); void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr) { kvm_multiple_exception(vcpu, nr, false, 0, true); } EXPORT_SYMBOL_GPL(kvm_requeue_exception); void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err) { if (err) kvm_inject_gp(vcpu, 0); else kvm_x86_ops->skip_emulated_instruction(vcpu); } 
EXPORT_SYMBOL_GPL(kvm_complete_insn_gp); void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault) { ++vcpu->stat.pf_guest; vcpu->arch.cr2 = fault->address; kvm_queue_exception_e(vcpu, PF_VECTOR, fault->error_code); } EXPORT_SYMBOL_GPL(kvm_inject_page_fault); static bool kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault) { if (mmu_is_nested(vcpu) && !fault->nested_page_fault) vcpu->arch.nested_mmu.inject_page_fault(vcpu, fault); else vcpu->arch.mmu.inject_page_fault(vcpu, fault); return fault->nested_page_fault; } void kvm_inject_nmi(struct kvm_vcpu *vcpu) { atomic_inc(&vcpu->arch.nmi_queued); kvm_make_request(KVM_REQ_NMI, vcpu); } EXPORT_SYMBOL_GPL(kvm_inject_nmi); void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code) { kvm_multiple_exception(vcpu, nr, true, error_code, false); } EXPORT_SYMBOL_GPL(kvm_queue_exception_e); void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code) { kvm_multiple_exception(vcpu, nr, true, error_code, true); } EXPORT_SYMBOL_GPL(kvm_requeue_exception_e); /* * Checks if cpl <= required_cpl; if true, return true. Otherwise queue * a #GP and return false. */ bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl) { if (kvm_x86_ops->get_cpl(vcpu) <= required_cpl) return true; kvm_queue_exception_e(vcpu, GP_VECTOR, 0); return false; } EXPORT_SYMBOL_GPL(kvm_require_cpl); /* * This function will be used to read from the physical memory of the currently * running guest. The difference to kvm_read_guest_page is that this function * can read from guest physical or from the guest's guest physical memory. */ int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, gfn_t ngfn, void *data, int offset, int len, u32 access) { struct x86_exception exception; gfn_t real_gfn; gpa_t ngpa; ngpa = gfn_to_gpa(ngfn); real_gfn = mmu->translate_gpa(vcpu, ngpa, access, &exception); if (real_gfn == UNMAPPED_GVA) return -EFAULT; real_gfn = gpa_to_gfn(real_gfn); return kvm_read_guest_page(vcpu->kvm, real_gfn, data, offset, len); } EXPORT_SYMBOL_GPL(kvm_read_guest_page_mmu); int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset, int len, u32 access) { return kvm_read_guest_page_mmu(vcpu, vcpu->arch.walk_mmu, gfn, data, offset, len, access); } /* * Load the pae pdptrs. Return true is they are all valid. 
*/ int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3) { gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT; unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2; int i; int ret; u64 pdpte[ARRAY_SIZE(mmu->pdptrs)]; ret = kvm_read_guest_page_mmu(vcpu, mmu, pdpt_gfn, pdpte, offset * sizeof(u64), sizeof(pdpte), PFERR_USER_MASK|PFERR_WRITE_MASK); if (ret < 0) { ret = 0; goto out; } for (i = 0; i < ARRAY_SIZE(pdpte); ++i) { if (is_present_gpte(pdpte[i]) && (pdpte[i] & vcpu->arch.mmu.rsvd_bits_mask[0][2])) { ret = 0; goto out; } } ret = 1; memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs)); __set_bit(VCPU_EXREG_PDPTR, (unsigned long *)&vcpu->arch.regs_avail); __set_bit(VCPU_EXREG_PDPTR, (unsigned long *)&vcpu->arch.regs_dirty); out: return ret; } EXPORT_SYMBOL_GPL(load_pdptrs); static bool pdptrs_changed(struct kvm_vcpu *vcpu) { u64 pdpte[ARRAY_SIZE(vcpu->arch.walk_mmu->pdptrs)]; bool changed = true; int offset; gfn_t gfn; int r; if (is_long_mode(vcpu) || !is_pae(vcpu)) return false; if (!test_bit(VCPU_EXREG_PDPTR, (unsigned long *)&vcpu->arch.regs_avail)) return true; gfn = (kvm_read_cr3(vcpu) & ~31u) >> PAGE_SHIFT; offset = (kvm_read_cr3(vcpu) & ~31u) & (PAGE_SIZE - 1); r = kvm_read_nested_guest_page(vcpu, gfn, pdpte, offset, sizeof(pdpte), PFERR_USER_MASK | PFERR_WRITE_MASK); if (r < 0) goto out; changed = memcmp(pdpte, vcpu->arch.walk_mmu->pdptrs, sizeof(pdpte)) != 0; out: return changed; } int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) { unsigned long old_cr0 = kvm_read_cr0(vcpu); unsigned long update_bits = X86_CR0_PG | X86_CR0_WP | X86_CR0_CD | X86_CR0_NW; cr0 |= X86_CR0_ET; #ifdef CONFIG_X86_64 if (cr0 & 0xffffffff00000000UL) return 1; #endif cr0 &= ~CR0_RESERVED_BITS; if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) return 1; if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) return 1; if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) { #ifdef CONFIG_X86_64 if ((vcpu->arch.efer & EFER_LME)) { int cs_db, cs_l; if (!is_pae(vcpu)) return 1; kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l); if (cs_l) return 1; } else #endif if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu))) return 1; } if (!(cr0 & X86_CR0_PG) && kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE)) return 1; kvm_x86_ops->set_cr0(vcpu, cr0); if ((cr0 ^ old_cr0) & X86_CR0_PG) { kvm_clear_async_pf_completion_queue(vcpu); kvm_async_pf_hash_reset(vcpu); } if ((cr0 ^ old_cr0) & update_bits) kvm_mmu_reset_context(vcpu); return 0; } EXPORT_SYMBOL_GPL(kvm_set_cr0); void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw) { (void)kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0eul) | (msw & 0x0f)); } EXPORT_SYMBOL_GPL(kvm_lmsw); static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu) { if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) && !vcpu->guest_xcr0_loaded) { /* kvm_set_xcr() also depends on this */ xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0); vcpu->guest_xcr0_loaded = 1; } } static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu) { if (vcpu->guest_xcr0_loaded) { if (vcpu->arch.xcr0 != host_xcr0) xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0); vcpu->guest_xcr0_loaded = 0; } } int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) { u64 xcr0 = xcr; u64 old_xcr0 = vcpu->arch.xcr0; u64 valid_bits; /* Only support XCR_XFEATURE_ENABLED_MASK(xcr0) now */ if (index != XCR_XFEATURE_ENABLED_MASK) return 1; if (!(xcr0 & XSTATE_FP)) return 1; if ((xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE)) return 1; /* * Do not allow the guest to set bits that we do not support * saving. 
However, xcr0 bit 0 is always set, even if the * emulated CPU does not support XSAVE (see fx_init). */ valid_bits = vcpu->arch.guest_supported_xcr0 | XSTATE_FP; if (xcr0 & ~valid_bits) return 1; if ((!(xcr0 & XSTATE_BNDREGS)) != (!(xcr0 & XSTATE_BNDCSR))) return 1; kvm_put_guest_xcr0(vcpu); vcpu->arch.xcr0 = xcr0; if ((xcr0 ^ old_xcr0) & XSTATE_EXTEND_MASK) kvm_update_cpuid(vcpu); return 0; } int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) { if (kvm_x86_ops->get_cpl(vcpu) != 0 || __kvm_set_xcr(vcpu, index, xcr)) { kvm_inject_gp(vcpu, 0); return 1; } return 0; } EXPORT_SYMBOL_GPL(kvm_set_xcr); int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) { unsigned long old_cr4 = kvm_read_cr4(vcpu); unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_SMEP; if (cr4 & CR4_RESERVED_BITS) return 1; if (!guest_cpuid_has_xsave(vcpu) && (cr4 & X86_CR4_OSXSAVE)) return 1; if (!guest_cpuid_has_smep(vcpu) && (cr4 & X86_CR4_SMEP)) return 1; if (!guest_cpuid_has_smap(vcpu) && (cr4 & X86_CR4_SMAP)) return 1; if (!guest_cpuid_has_fsgsbase(vcpu) && (cr4 & X86_CR4_FSGSBASE)) return 1; if (is_long_mode(vcpu)) { if (!(cr4 & X86_CR4_PAE)) return 1; } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE) && ((cr4 ^ old_cr4) & pdptr_bits) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu))) return 1; if ((cr4 & X86_CR4_PCIDE) && !(old_cr4 & X86_CR4_PCIDE)) { if (!guest_cpuid_has_pcid(vcpu)) return 1; /* PCID can not be enabled when cr3[11:0]!=000H or EFER.LMA=0 */ if ((kvm_read_cr3(vcpu) & X86_CR3_PCID_MASK) || !is_long_mode(vcpu)) return 1; } if (kvm_x86_ops->set_cr4(vcpu, cr4)) return 1; if (((cr4 ^ old_cr4) & pdptr_bits) || (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE))) kvm_mmu_reset_context(vcpu); if ((cr4 ^ old_cr4) & X86_CR4_SMAP) update_permission_bitmask(vcpu, vcpu->arch.walk_mmu, false); if ((cr4 ^ old_cr4) & X86_CR4_OSXSAVE) kvm_update_cpuid(vcpu); return 0; } EXPORT_SYMBOL_GPL(kvm_set_cr4); int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) { if (cr3 == kvm_read_cr3(vcpu) && !pdptrs_changed(vcpu)) { kvm_mmu_sync_roots(vcpu); kvm_mmu_flush_tlb(vcpu); return 0; } if (is_long_mode(vcpu)) { if (cr3 & CR3_L_MODE_RESERVED_BITS) return 1; } else if (is_pae(vcpu) && is_paging(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3)) return 1; vcpu->arch.cr3 = cr3; __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail); kvm_mmu_new_cr3(vcpu); return 0; } EXPORT_SYMBOL_GPL(kvm_set_cr3); int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8) { if (cr8 & CR8_RESERVED_BITS) return 1; if (irqchip_in_kernel(vcpu->kvm)) kvm_lapic_set_tpr(vcpu, cr8); else vcpu->arch.cr8 = cr8; return 0; } EXPORT_SYMBOL_GPL(kvm_set_cr8); unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu) { if (irqchip_in_kernel(vcpu->kvm)) return kvm_lapic_get_cr8(vcpu); else return vcpu->arch.cr8; } EXPORT_SYMBOL_GPL(kvm_get_cr8); static void kvm_update_dr6(struct kvm_vcpu *vcpu) { if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) kvm_x86_ops->set_dr6(vcpu, vcpu->arch.dr6); } static void kvm_update_dr7(struct kvm_vcpu *vcpu) { unsigned long dr7; if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) dr7 = vcpu->arch.guest_debug_dr7; else dr7 = vcpu->arch.dr7; kvm_x86_ops->set_dr7(vcpu, dr7); vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_BP_ENABLED; if (dr7 & DR7_BP_EN_MASK) vcpu->arch.switch_db_regs |= KVM_DEBUGREG_BP_ENABLED; } static u64 kvm_dr6_fixed(struct kvm_vcpu *vcpu) { u64 fixed = DR6_FIXED_1; if (!guest_cpuid_has_rtm(vcpu)) fixed |= DR6_RTM; return fixed; } static int __kvm_set_dr(struct kvm_vcpu 
*vcpu, int dr, unsigned long val) { switch (dr) { case 0 ... 3: vcpu->arch.db[dr] = val; if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) vcpu->arch.eff_db[dr] = val; break; case 4: if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) return 1; /* #UD */ /* fall through */ case 6: if (val & 0xffffffff00000000ULL) return -1; /* #GP */ vcpu->arch.dr6 = (val & DR6_VOLATILE) | kvm_dr6_fixed(vcpu); kvm_update_dr6(vcpu); break; case 5: if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) return 1; /* #UD */ /* fall through */ default: /* 7 */ if (val & 0xffffffff00000000ULL) return -1; /* #GP */ vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1; kvm_update_dr7(vcpu); break; } return 0; } int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val) { int res; res = __kvm_set_dr(vcpu, dr, val); if (res > 0) kvm_queue_exception(vcpu, UD_VECTOR); else if (res < 0) kvm_inject_gp(vcpu, 0); return res; } EXPORT_SYMBOL_GPL(kvm_set_dr); static int _kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val) { switch (dr) { case 0 ... 3: *val = vcpu->arch.db[dr]; break; case 4: if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) return 1; /* fall through */ case 6: if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) *val = vcpu->arch.dr6; else *val = kvm_x86_ops->get_dr6(vcpu); break; case 5: if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) return 1; /* fall through */ default: /* 7 */ *val = vcpu->arch.dr7; break; } return 0; } int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val) { if (_kvm_get_dr(vcpu, dr, val)) { kvm_queue_exception(vcpu, UD_VECTOR); return 1; } return 0; } EXPORT_SYMBOL_GPL(kvm_get_dr); bool kvm_rdpmc(struct kvm_vcpu *vcpu) { u32 ecx = kvm_register_read(vcpu, VCPU_REGS_RCX); u64 data; int err; err = kvm_pmu_read_pmc(vcpu, ecx, &data); if (err) return err; kvm_register_write(vcpu, VCPU_REGS_RAX, (u32)data); kvm_register_write(vcpu, VCPU_REGS_RDX, data >> 32); return err; } EXPORT_SYMBOL_GPL(kvm_rdpmc); /* * List of msr numbers which we expose to userspace through KVM_GET_MSRS * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST. * * This list is modified at module load time to reflect the * capabilities of the host cpu. This capabilities test skips MSRs that are * kvm-specific. Those are put in the beginning of the list. 
*/ #define KVM_SAVE_MSRS_BEGIN 12 static u32 msrs_to_save[] = { MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK, MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW, HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL, HV_X64_MSR_TIME_REF_COUNT, HV_X64_MSR_REFERENCE_TSC, HV_X64_MSR_APIC_ASSIST_PAGE, MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME, MSR_KVM_PV_EOI_EN, MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP, MSR_STAR, #ifdef CONFIG_X86_64 MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR, #endif MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA, MSR_IA32_FEATURE_CONTROL, MSR_IA32_BNDCFGS }; static unsigned num_msrs_to_save; static const u32 emulated_msrs[] = { MSR_IA32_TSC_ADJUST, MSR_IA32_TSCDEADLINE, MSR_IA32_MISC_ENABLE, MSR_IA32_MCG_STATUS, MSR_IA32_MCG_CTL, }; bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer) { if (efer & efer_reserved_bits) return false; if (efer & EFER_FFXSR) { struct kvm_cpuid_entry2 *feat; feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0); if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT))) return false; } if (efer & EFER_SVME) { struct kvm_cpuid_entry2 *feat; feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0); if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM))) return false; } return true; } EXPORT_SYMBOL_GPL(kvm_valid_efer); static int set_efer(struct kvm_vcpu *vcpu, u64 efer) { u64 old_efer = vcpu->arch.efer; if (!kvm_valid_efer(vcpu, efer)) return 1; if (is_paging(vcpu) && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME)) return 1; efer &= ~EFER_LMA; efer |= vcpu->arch.efer & EFER_LMA; kvm_x86_ops->set_efer(vcpu, efer); /* Update reserved bits */ if ((efer ^ old_efer) & EFER_NX) kvm_mmu_reset_context(vcpu); return 0; } void kvm_enable_efer_bits(u64 mask) { efer_reserved_bits &= ~mask; } EXPORT_SYMBOL_GPL(kvm_enable_efer_bits); /* * Writes msr value into the appropriate "register". * Returns 0 on success, non-0 otherwise. * Assumes vcpu_load() was already called.
*/ int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) { return kvm_x86_ops->set_msr(vcpu, msr); } /* * Adapt set_msr() to msr_io()'s calling convention */ static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data) { struct msr_data msr; msr.data = *data; msr.index = index; msr.host_initiated = true; return kvm_set_msr(vcpu, &msr); } #ifdef CONFIG_X86_64 struct pvclock_gtod_data { seqcount_t seq; struct { /* extract of a clocksource struct */ int vclock_mode; cycle_t cycle_last; cycle_t mask; u32 mult; u32 shift; } clock; u64 boot_ns; u64 nsec_base; }; static struct pvclock_gtod_data pvclock_gtod_data; static void update_pvclock_gtod(struct timekeeper *tk) { struct pvclock_gtod_data *vdata = &pvclock_gtod_data; u64 boot_ns; boot_ns = ktime_to_ns(ktime_add(tk->tkr.base_mono, tk->offs_boot)); write_seqcount_begin(&vdata->seq); /* copy pvclock gtod data */ vdata->clock.vclock_mode = tk->tkr.clock->archdata.vclock_mode; vdata->clock.cycle_last = tk->tkr.cycle_last; vdata->clock.mask = tk->tkr.mask; vdata->clock.mult = tk->tkr.mult; vdata->clock.shift = tk->tkr.shift; vdata->boot_ns = boot_ns; vdata->nsec_base = tk->tkr.xtime_nsec; write_seqcount_end(&vdata->seq); } #endif static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock) { int version; int r; struct pvclock_wall_clock wc; struct timespec boot; if (!wall_clock) return; r = kvm_read_guest(kvm, wall_clock, &version, sizeof(version)); if (r) return; if (version & 1) ++version; /* first time write, random junk */ ++version; kvm_write_guest(kvm, wall_clock, &version, sizeof(version)); /* * The guest calculates current wall clock time by adding * system time (updated by kvm_guest_time_update below) to the * wall clock specified here. guest system time equals host * system time for us, thus we must fill in host boot time here. 
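 *
 * (Descriptive note: the "version" field written above acts like a
 * seqcount for the guest: it is left at an odd value while the wall
 * clock structure is being rewritten and is bumped back to an even
 * value once the update below completes, so the guest can detect and
 * retry a torn read.)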
*/ getboottime(&boot); if (kvm->arch.kvmclock_offset) { struct timespec ts = ns_to_timespec(kvm->arch.kvmclock_offset); boot = timespec_sub(boot, ts); } wc.sec = boot.tv_sec; wc.nsec = boot.tv_nsec; wc.version = version; kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc)); version++; kvm_write_guest(kvm, wall_clock, &version, sizeof(version)); } static uint32_t div_frac(uint32_t dividend, uint32_t divisor) { uint32_t quotient, remainder; /* Don't try to replace with do_div(), this one calculates * "(dividend << 32) / divisor" */ __asm__ ( "divl %4" : "=a" (quotient), "=d" (remainder) : "0" (0), "1" (dividend), "r" (divisor) ); return quotient; } static void kvm_get_time_scale(uint32_t scaled_khz, uint32_t base_khz, s8 *pshift, u32 *pmultiplier) { uint64_t scaled64; int32_t shift = 0; uint64_t tps64; uint32_t tps32; tps64 = base_khz * 1000LL; scaled64 = scaled_khz * 1000LL; while (tps64 > scaled64*2 || tps64 & 0xffffffff00000000ULL) { tps64 >>= 1; shift--; } tps32 = (uint32_t)tps64; while (tps32 <= scaled64 || scaled64 & 0xffffffff00000000ULL) { if (scaled64 & 0xffffffff00000000ULL || tps32 & 0x80000000) scaled64 >>= 1; else tps32 <<= 1; shift++; } *pshift = shift; *pmultiplier = div_frac(scaled64, tps32); pr_debug("%s: base_khz %u => %u, shift %d, mul %u\n", __func__, base_khz, scaled_khz, shift, *pmultiplier); } static inline u64 get_kernel_ns(void) { return ktime_get_boot_ns(); } #ifdef CONFIG_X86_64 static atomic_t kvm_guest_has_master_clock = ATOMIC_INIT(0); #endif static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz); unsigned long max_tsc_khz; static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec) { return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult, vcpu->arch.virtual_tsc_shift); } static u32 adjust_tsc_khz(u32 khz, s32 ppm) { u64 v = (u64)khz * (1000000 + ppm); do_div(v, 1000000); return v; } static void kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 this_tsc_khz) { u32 thresh_lo, thresh_hi; int use_scaling = 0; /* tsc_khz can be zero if TSC calibration fails */ if (this_tsc_khz == 0) return; /* Compute a scale to convert nanoseconds in TSC cycles */ kvm_get_time_scale(this_tsc_khz, NSEC_PER_SEC / 1000, &vcpu->arch.virtual_tsc_shift, &vcpu->arch.virtual_tsc_mult); vcpu->arch.virtual_tsc_khz = this_tsc_khz; /* * Compute the variation in TSC rate which is acceptable * within the range of tolerance and decide if the * rate being applied is within that bounds of the hardware * rate. If so, no scaling or compensation need be done. 
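 *
 * (tsc_tolerance_ppm, used below, expresses that tolerance in parts
 * per million of the host TSC frequency, tsc_khz.)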
*/ thresh_lo = adjust_tsc_khz(tsc_khz, -tsc_tolerance_ppm); thresh_hi = adjust_tsc_khz(tsc_khz, tsc_tolerance_ppm); if (this_tsc_khz < thresh_lo || this_tsc_khz > thresh_hi) { pr_debug("kvm: requested TSC rate %u falls outside tolerance [%u,%u]\n", this_tsc_khz, thresh_lo, thresh_hi); use_scaling = 1; } kvm_x86_ops->set_tsc_khz(vcpu, this_tsc_khz, use_scaling); } static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns) { u64 tsc = pvclock_scale_delta(kernel_ns-vcpu->arch.this_tsc_nsec, vcpu->arch.virtual_tsc_mult, vcpu->arch.virtual_tsc_shift); tsc += vcpu->arch.this_tsc_write; return tsc; } void kvm_track_tsc_matching(struct kvm_vcpu *vcpu) { #ifdef CONFIG_X86_64 bool vcpus_matched; bool do_request = false; struct kvm_arch *ka = &vcpu->kvm->arch; struct pvclock_gtod_data *gtod = &pvclock_gtod_data; vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 == atomic_read(&vcpu->kvm->online_vcpus)); if (vcpus_matched && gtod->clock.vclock_mode == VCLOCK_TSC) if (!ka->use_master_clock) do_request = 1; if (!vcpus_matched && ka->use_master_clock) do_request = 1; if (do_request) kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu); trace_kvm_track_tsc(vcpu->vcpu_id, ka->nr_vcpus_matched_tsc, atomic_read(&vcpu->kvm->online_vcpus), ka->use_master_clock, gtod->clock.vclock_mode); #endif } static void update_ia32_tsc_adjust_msr(struct kvm_vcpu *vcpu, s64 offset) { u64 curr_offset = kvm_x86_ops->read_tsc_offset(vcpu); vcpu->arch.ia32_tsc_adjust_msr += offset - curr_offset; } void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr) { struct kvm *kvm = vcpu->kvm; u64 offset, ns, elapsed; unsigned long flags; s64 usdiff; bool matched; bool already_matched; u64 data = msr->data; raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags); offset = kvm_x86_ops->compute_tsc_offset(vcpu, data); ns = get_kernel_ns(); elapsed = ns - kvm->arch.last_tsc_nsec; if (vcpu->arch.virtual_tsc_khz) { int faulted = 0; /* n.b - signed multiplication and division required */ usdiff = data - kvm->arch.last_tsc_write; #ifdef CONFIG_X86_64 usdiff = (usdiff * 1000) / vcpu->arch.virtual_tsc_khz; #else /* do_div() only does unsigned */ asm("1: idivl %[divisor]\n" "2: xor %%edx, %%edx\n" " movl $0, %[faulted]\n" "3:\n" ".section .fixup,\"ax\"\n" "4: movl $1, %[faulted]\n" " jmp 3b\n" ".previous\n" _ASM_EXTABLE(1b, 4b) : "=A"(usdiff), [faulted] "=r" (faulted) : "A"(usdiff * 1000), [divisor] "rm"(vcpu->arch.virtual_tsc_khz)); #endif do_div(elapsed, 1000); usdiff -= elapsed; if (usdiff < 0) usdiff = -usdiff; /* idivl overflow => difference is larger than USEC_PER_SEC */ if (faulted) usdiff = USEC_PER_SEC; } else usdiff = USEC_PER_SEC; /* disable TSC match window below */ /* * Special case: TSC write with a small delta (1 second) of virtual * cycle time against real time is interpreted as an attempt to * synchronize the CPU. * * For a reliable TSC, we can match TSC offsets, and for an unstable * TSC, we add elapsed time in this computation. We could let the * compensation code attempt to catch up if we fall behind, but * it's better to try to match offsets from the beginning. 
*/ if (usdiff < USEC_PER_SEC && vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) { if (!check_tsc_unstable()) { offset = kvm->arch.cur_tsc_offset; pr_debug("kvm: matched tsc offset for %llu\n", data); } else { u64 delta = nsec_to_cycles(vcpu, elapsed); data += delta; offset = kvm_x86_ops->compute_tsc_offset(vcpu, data); pr_debug("kvm: adjusted tsc offset by %llu\n", delta); } matched = true; already_matched = (vcpu->arch.this_tsc_generation == kvm->arch.cur_tsc_generation); } else { /* * We split periods of matched TSC writes into generations. * For each generation, we track the original measured * nanosecond time, offset, and write, so if TSCs are in * sync, we can match exact offset, and if not, we can match * exact software computation in compute_guest_tsc() * * These values are tracked in kvm->arch.cur_xxx variables. */ kvm->arch.cur_tsc_generation++; kvm->arch.cur_tsc_nsec = ns; kvm->arch.cur_tsc_write = data; kvm->arch.cur_tsc_offset = offset; matched = false; pr_debug("kvm: new tsc generation %llu, clock %llu\n", kvm->arch.cur_tsc_generation, data); } /* * We also track the most recent recorded KHZ, write and time to * allow the matching interval to be extended at each write. */ kvm->arch.last_tsc_nsec = ns; kvm->arch.last_tsc_write = data; kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz; vcpu->arch.last_guest_tsc = data; /* Keep track of which generation this VCPU has synchronized to */ vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation; vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec; vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write; if (guest_cpuid_has_tsc_adjust(vcpu) && !msr->host_initiated) update_ia32_tsc_adjust_msr(vcpu, offset); kvm_x86_ops->write_tsc_offset(vcpu, offset); raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags); spin_lock(&kvm->arch.pvclock_gtod_sync_lock); if (!matched) { kvm->arch.nr_vcpus_matched_tsc = 0; } else if (!already_matched) { kvm->arch.nr_vcpus_matched_tsc++; } kvm_track_tsc_matching(vcpu); spin_unlock(&kvm->arch.pvclock_gtod_sync_lock); } EXPORT_SYMBOL_GPL(kvm_write_tsc); #ifdef CONFIG_X86_64 static cycle_t read_tsc(void) { cycle_t ret; u64 last; /* * Empirically, a fence (of type that depends on the CPU) * before rdtsc is enough to ensure that rdtsc is ordered * with respect to loads. The various CPU manuals are unclear * as to whether rdtsc can be reordered with later loads, * but no one has ever seen it happen. */ rdtsc_barrier(); ret = (cycle_t)vget_cycles(); last = pvclock_gtod_data.clock.cycle_last; if (likely(ret >= last)) return ret; /* * GCC likes to generate cmov here, but this branch is extremely * predictable (it's just a function of time and the likely is * very likely) and there's a data dependence, so force GCC * to generate a branch instead. I don't barrier() because * we don't actually need a barrier, and if this function * ever gets inlined it will generate worse code.
*/ asm volatile (""); return last; } static inline u64 vgettsc(cycle_t *cycle_now) { long v; struct pvclock_gtod_data *gtod = &pvclock_gtod_data; *cycle_now = read_tsc(); v = (*cycle_now - gtod->clock.cycle_last) & gtod->clock.mask; return v * gtod->clock.mult; } static int do_monotonic_boot(s64 *t, cycle_t *cycle_now) { struct pvclock_gtod_data *gtod = &pvclock_gtod_data; unsigned long seq; int mode; u64 ns; do { seq = read_seqcount_begin(&gtod->seq); mode = gtod->clock.vclock_mode; ns = gtod->nsec_base; ns += vgettsc(cycle_now); ns >>= gtod->clock.shift; ns += gtod->boot_ns; } while (unlikely(read_seqcount_retry(&gtod->seq, seq))); *t = ns; return mode; } /* returns true if host is using tsc clocksource */ static bool kvm_get_time_and_clockread(s64 *kernel_ns, cycle_t *cycle_now) { /* checked again under seqlock below */ if (pvclock_gtod_data.clock.vclock_mode != VCLOCK_TSC) return false; return do_monotonic_boot(kernel_ns, cycle_now) == VCLOCK_TSC; } #endif /* * * Assuming a stable TSC across physical CPUs, and a stable TSC * across virtual CPUs, the following condition is possible. * Each numbered line represents an event visible to both * CPUs at the next numbered event. * * "timespecX" represents host monotonic time. "tscX" represents * RDTSC value. * * VCPU0 on CPU0 | VCPU1 on CPU1 * * 1. read timespec0,tsc0 * 2. | timespec1 = timespec0 + N * | tsc1 = tsc0 + M * 3. transition to guest | transition to guest * 4. ret0 = timespec0 + (rdtsc - tsc0) | * 5. | ret1 = timespec1 + (rdtsc - tsc1) * | ret1 = timespec0 + N + (rdtsc - (tsc0 + M)) * * Since ret0 update is visible to VCPU1 at time 5, to obey monotonicity: * * - ret0 < ret1 * - timespec0 + (rdtsc - tsc0) < timespec0 + N + (rdtsc - (tsc0 + M)) * ... * - 0 < N - M => M < N * * That is, when timespec0 != timespec1, M < N. Unfortunately that is not * always the case (the difference between two distinct xtime instances * might be smaller than the difference between corresponding TSC reads, * when updating guest vcpus pvclock areas). * * To avoid that problem, do not allow visibility of distinct * system_timestamp/tsc_timestamp values simultaneously: use a master * copy of host monotonic time values. Update that master copy * in lockstep. * * Rely on synchronization of host TSCs and guest TSCs for monotonicity. * */ static void pvclock_update_vm_gtod_copy(struct kvm *kvm) { #ifdef CONFIG_X86_64 struct kvm_arch *ka = &kvm->arch; int vclock_mode; bool host_tsc_clocksource, vcpus_matched; vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 == atomic_read(&kvm->online_vcpus)); /* * If the host uses TSC clock, then passthrough TSC as stable * to the guest.
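 *
 * (The master clock is only enabled below when the host clocksource is
 * TSC based, every vCPU's TSC has been matched to the current
 * generation, and no backwards TSC has been observed.)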
*/ host_tsc_clocksource = kvm_get_time_and_clockread( &ka->master_kernel_ns, &ka->master_cycle_now); ka->use_master_clock = host_tsc_clocksource && vcpus_matched && !backwards_tsc_observed; if (ka->use_master_clock) atomic_set(&kvm_guest_has_master_clock, 1); vclock_mode = pvclock_gtod_data.clock.vclock_mode; trace_kvm_update_master_clock(ka->use_master_clock, vclock_mode, vcpus_matched); #endif } static void kvm_gen_update_masterclock(struct kvm *kvm) { #ifdef CONFIG_X86_64 int i; struct kvm_vcpu *vcpu; struct kvm_arch *ka = &kvm->arch; spin_lock(&ka->pvclock_gtod_sync_lock); kvm_make_mclock_inprogress_request(kvm); /* no guest entries from this point */ pvclock_update_vm_gtod_copy(kvm); kvm_for_each_vcpu(i, vcpu, kvm) kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); /* guest entries allowed */ kvm_for_each_vcpu(i, vcpu, kvm) clear_bit(KVM_REQ_MCLOCK_INPROGRESS, &vcpu->requests); spin_unlock(&ka->pvclock_gtod_sync_lock); #endif } static int kvm_guest_time_update(struct kvm_vcpu *v) { unsigned long flags, this_tsc_khz; struct kvm_vcpu_arch *vcpu = &v->arch; struct kvm_arch *ka = &v->kvm->arch; s64 kernel_ns; u64 tsc_timestamp, host_tsc; struct pvclock_vcpu_time_info guest_hv_clock; u8 pvclock_flags; bool use_master_clock; kernel_ns = 0; host_tsc = 0; /* * If the host uses TSC clock, then passthrough TSC as stable * to the guest. */ spin_lock(&ka->pvclock_gtod_sync_lock); use_master_clock = ka->use_master_clock; if (use_master_clock) { host_tsc = ka->master_cycle_now; kernel_ns = ka->master_kernel_ns; } spin_unlock(&ka->pvclock_gtod_sync_lock); /* Keep irq disabled to prevent changes to the clock */ local_irq_save(flags); this_tsc_khz = __get_cpu_var(cpu_tsc_khz); if (unlikely(this_tsc_khz == 0)) { local_irq_restore(flags); kvm_make_request(KVM_REQ_CLOCK_UPDATE, v); return 1; } if (!use_master_clock) { host_tsc = native_read_tsc(); kernel_ns = get_kernel_ns(); } tsc_timestamp = kvm_x86_ops->read_l1_tsc(v, host_tsc); /* * We may have to catch up the TSC to match elapsed wall clock * time for two reasons, even if kvmclock is used. * 1) CPU could have been running below the maximum TSC rate * 2) Broken TSC compensation resets the base at each VCPU * entry to avoid unknown leaps of TSC even when running * again on the same CPU. This may cause apparent elapsed * time to disappear, and the guest to stand still or run * very slowly. */ if (vcpu->tsc_catchup) { u64 tsc = compute_guest_tsc(v, kernel_ns); if (tsc > tsc_timestamp) { adjust_tsc_offset_guest(v, tsc - tsc_timestamp); tsc_timestamp = tsc; } } local_irq_restore(flags); if (!vcpu->pv_time_enabled) return 0; if (unlikely(vcpu->hw_tsc_khz != this_tsc_khz)) { kvm_get_time_scale(NSEC_PER_SEC / 1000, this_tsc_khz, &vcpu->hv_clock.tsc_shift, &vcpu->hv_clock.tsc_to_system_mul); vcpu->hw_tsc_khz = this_tsc_khz; } /* With all the info we got, fill in the values */ vcpu->hv_clock.tsc_timestamp = tsc_timestamp; vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset; vcpu->last_guest_tsc = tsc_timestamp; /* * The interface expects us to write an even number signaling that the * update is finished. Since the guest won't see the intermediate * state, we just increase by 2 at the end. 
*/ vcpu->hv_clock.version += 2; if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time, &guest_hv_clock, sizeof(guest_hv_clock)))) return 0; /* retain PVCLOCK_GUEST_STOPPED if set in guest copy */ pvclock_flags = (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED); if (vcpu->pvclock_set_guest_stopped_request) { pvclock_flags |= PVCLOCK_GUEST_STOPPED; vcpu->pvclock_set_guest_stopped_request = false; } /* If the host uses TSC clocksource, then it is stable */ if (use_master_clock) pvclock_flags |= PVCLOCK_TSC_STABLE_BIT; vcpu->hv_clock.flags = pvclock_flags; kvm_write_guest_cached(v->kvm, &vcpu->pv_time, &vcpu->hv_clock, sizeof(vcpu->hv_clock)); return 0; } /* * kvmclock updates which are isolated to a given vcpu, such as * vcpu->cpu migration, should not allow system_timestamp from * the rest of the vcpus to remain static. Otherwise ntp frequency * correction applies to one vcpu's system_timestamp but not * the others. * * So in those cases, request a kvmclock update for all vcpus. * We need to rate-limit these requests though, as they can * considerably slow guests that have a large number of vcpus. * The time for a remote vcpu to update its kvmclock is bound * by the delay we use to rate-limit the updates. */ #define KVMCLOCK_UPDATE_DELAY msecs_to_jiffies(100) static void kvmclock_update_fn(struct work_struct *work) { int i; struct delayed_work *dwork = to_delayed_work(work); struct kvm_arch *ka = container_of(dwork, struct kvm_arch, kvmclock_update_work); struct kvm *kvm = container_of(ka, struct kvm, arch); struct kvm_vcpu *vcpu; kvm_for_each_vcpu(i, vcpu, kvm) { kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); kvm_vcpu_kick(vcpu); } } static void kvm_gen_kvmclock_update(struct kvm_vcpu *v) { struct kvm *kvm = v->kvm; kvm_make_request(KVM_REQ_CLOCK_UPDATE, v); schedule_delayed_work(&kvm->arch.kvmclock_update_work, KVMCLOCK_UPDATE_DELAY); } #define KVMCLOCK_SYNC_PERIOD (300 * HZ) static void kvmclock_sync_fn(struct work_struct *work) { struct delayed_work *dwork = to_delayed_work(work); struct kvm_arch *ka = container_of(dwork, struct kvm_arch, kvmclock_sync_work); struct kvm *kvm = container_of(ka, struct kvm, arch); schedule_delayed_work(&kvm->arch.kvmclock_update_work, 0); schedule_delayed_work(&kvm->arch.kvmclock_sync_work, KVMCLOCK_SYNC_PERIOD); } static bool msr_mtrr_valid(unsigned msr) { switch (msr) { case 0x200 ... 
0x200 + 2 * KVM_NR_VAR_MTRR - 1: case MSR_MTRRfix64K_00000: case MSR_MTRRfix16K_80000: case MSR_MTRRfix16K_A0000: case MSR_MTRRfix4K_C0000: case MSR_MTRRfix4K_C8000: case MSR_MTRRfix4K_D0000: case MSR_MTRRfix4K_D8000: case MSR_MTRRfix4K_E0000: case MSR_MTRRfix4K_E8000: case MSR_MTRRfix4K_F0000: case MSR_MTRRfix4K_F8000: case MSR_MTRRdefType: case MSR_IA32_CR_PAT: return true; case 0x2f8: return true; } return false; } static bool valid_pat_type(unsigned t) { return t < 8 && (1 << t) & 0xf3; /* 0, 1, 4, 5, 6, 7 */ } static bool valid_mtrr_type(unsigned t) { return t < 8 && (1 << t) & 0x73; /* 0, 1, 4, 5, 6 */ } static bool mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data) { int i; u64 mask; if (!msr_mtrr_valid(msr)) return false; if (msr == MSR_IA32_CR_PAT) { for (i = 0; i < 8; i++) if (!valid_pat_type((data >> (i * 8)) & 0xff)) return false; return true; } else if (msr == MSR_MTRRdefType) { if (data & ~0xcff) return false; return valid_mtrr_type(data & 0xff); } else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) { for (i = 0; i < 8 ; i++) if (!valid_mtrr_type((data >> (i * 8)) & 0xff)) return false; return true; } /* variable MTRRs */ WARN_ON(!(msr >= 0x200 && msr < 0x200 + 2 * KVM_NR_VAR_MTRR)); mask = (~0ULL) << cpuid_maxphyaddr(vcpu); if ((msr & 1) == 0) { /* MTRR base */ if (!valid_mtrr_type(data & 0xff)) return false; mask |= 0xf00; } else /* MTRR mask */ mask |= 0x7ff; if (data & mask) { kvm_inject_gp(vcpu, 0); return false; } return true; } static int set_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 data) { u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges; if (!mtrr_valid(vcpu, msr, data)) return 1; if (msr == MSR_MTRRdefType) { vcpu->arch.mtrr_state.def_type = data; vcpu->arch.mtrr_state.enabled = (data & 0xc00) >> 10; } else if (msr == MSR_MTRRfix64K_00000) p[0] = data; else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000) p[1 + msr - MSR_MTRRfix16K_80000] = data; else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000) p[3 + msr - MSR_MTRRfix4K_C0000] = data; else if (msr == MSR_IA32_CR_PAT) vcpu->arch.pat = data; else { /* Variable MTRRs */ int idx, is_mtrr_mask; u64 *pt; idx = (msr - 0x200) / 2; is_mtrr_mask = msr - 0x200 - 2 * idx; if (!is_mtrr_mask) pt = (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo; else pt = (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo; *pt = data; } kvm_mmu_reset_context(vcpu); return 0; } static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data) { u64 mcg_cap = vcpu->arch.mcg_cap; unsigned bank_num = mcg_cap & 0xff; switch (msr) { case MSR_IA32_MCG_STATUS: vcpu->arch.mcg_status = data; break; case MSR_IA32_MCG_CTL: if (!(mcg_cap & MCG_CTL_P)) return 1; if (data != 0 && data != ~(u64)0) return -1; vcpu->arch.mcg_ctl = data; break; default: if (msr >= MSR_IA32_MC0_CTL && msr < MSR_IA32_MC0_CTL + 4 * bank_num) { u32 offset = msr - MSR_IA32_MC0_CTL; /* only 0 or all 1s can be written to IA32_MCi_CTL * some Linux kernels though clear bit 10 in bank 4 to * workaround a BIOS/GART TBL issue on AMD K8s, ignore * this to avoid an uncaught #GP in the guest */ if ((offset & 0x3) == 0 && data != 0 && (data | (1 << 10)) != ~(u64)0) return -1; vcpu->arch.mce_banks[offset] = data; break; } return 1; } return 0; } static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data) { struct kvm *kvm = vcpu->kvm; int lm = is_long_mode(vcpu); u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64 : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32; u8 blob_size = lm ?
kvm->arch.xen_hvm_config.blob_size_64 : kvm->arch.xen_hvm_config.blob_size_32; u32 page_num = data & ~PAGE_MASK; u64 page_addr = data & PAGE_MASK; u8 *page; int r; r = -E2BIG; if (page_num >= blob_size) goto out; r = -ENOMEM; page = memdup_user(blob_addr + (page_num * PAGE_SIZE), PAGE_SIZE); if (IS_ERR(page)) { r = PTR_ERR(page); goto out; } if (kvm_write_guest(kvm, page_addr, page, PAGE_SIZE)) goto out_free; r = 0; out_free: kfree(page); out: return r; } static bool kvm_hv_hypercall_enabled(struct kvm *kvm) { return kvm->arch.hv_hypercall & HV_X64_MSR_HYPERCALL_ENABLE; } static bool kvm_hv_msr_partition_wide(u32 msr) { bool r = false; switch (msr) { case HV_X64_MSR_GUEST_OS_ID: case HV_X64_MSR_HYPERCALL: case HV_X64_MSR_REFERENCE_TSC: case HV_X64_MSR_TIME_REF_COUNT: r = true; break; } return r; } static int set_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data) { struct kvm *kvm = vcpu->kvm; switch (msr) { case HV_X64_MSR_GUEST_OS_ID: kvm->arch.hv_guest_os_id = data; /* setting guest os id to zero disables hypercall page */ if (!kvm->arch.hv_guest_os_id) kvm->arch.hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE; break; case HV_X64_MSR_HYPERCALL: { u64 gfn; unsigned long addr; u8 instructions[4]; /* if guest os id is not set hypercall should remain disabled */ if (!kvm->arch.hv_guest_os_id) break; if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) { kvm->arch.hv_hypercall = data; break; } gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT; addr = gfn_to_hva(kvm, gfn); if (kvm_is_error_hva(addr)) return 1; kvm_x86_ops->patch_hypercall(vcpu, instructions); ((unsigned char *)instructions)[3] = 0xc3; /* ret */ if (__copy_to_user((void __user *)addr, instructions, 4)) return 1; kvm->arch.hv_hypercall = data; mark_page_dirty(kvm, gfn); break; } case HV_X64_MSR_REFERENCE_TSC: { u64 gfn; HV_REFERENCE_TSC_PAGE tsc_ref; memset(&tsc_ref, 0, sizeof(tsc_ref)); kvm->arch.hv_tsc_page = data; if (!(data & HV_X64_MSR_TSC_REFERENCE_ENABLE)) break; gfn = data >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT; if (kvm_write_guest(kvm, gfn << HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT, &tsc_ref, sizeof(tsc_ref))) return 1; mark_page_dirty(kvm, gfn); break; } default: vcpu_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x " "data 0x%llx\n", msr, data); return 1; } return 0; } static int set_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 data) { switch (msr) { case HV_X64_MSR_APIC_ASSIST_PAGE: { u64 gfn; unsigned long addr; if (!(data & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE)) { vcpu->arch.hv_vapic = data; if (kvm_lapic_enable_pv_eoi(vcpu, 0)) return 1; break; } gfn = data >> HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT; addr = gfn_to_hva(vcpu->kvm, gfn); if (kvm_is_error_hva(addr)) return 1; if (__clear_user((void __user *)addr, PAGE_SIZE)) return 1; vcpu->arch.hv_vapic = data; mark_page_dirty(vcpu->kvm, gfn); if (kvm_lapic_enable_pv_eoi(vcpu, gfn_to_gpa(gfn) | KVM_MSR_ENABLED)) return 1; break; } case HV_X64_MSR_EOI: return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data); case HV_X64_MSR_ICR: return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data); case HV_X64_MSR_TPR: return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data); default: vcpu_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x " "data 0x%llx\n", msr, data); return 1; } return 0; } static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data) { gpa_t gpa = data & ~0x3f; /* Bits 2:5 are reserved, Should be zero */ if (data & 0x3c) return 1; vcpu->arch.apf.msr_val = data; if (!(data & KVM_ASYNC_PF_ENABLED)) { kvm_clear_async_pf_completion_queue(vcpu); 
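/*
 * Also drop the hash of gfns that still have an async page fault
 * outstanding, so stale entries cannot match later completions; it is
 * repopulated as new faults are injected once async PF is re-enabled.
 */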
kvm_async_pf_hash_reset(vcpu); return 0; } if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa, sizeof(u32))) return 1; vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS); kvm_async_pf_wakeup_all(vcpu); return 0; } static void kvmclock_reset(struct kvm_vcpu *vcpu) { vcpu->arch.pv_time_enabled = false; } static void accumulate_steal_time(struct kvm_vcpu *vcpu) { u64 delta; if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) return; delta = current->sched_info.run_delay - vcpu->arch.st.last_steal; vcpu->arch.st.last_steal = current->sched_info.run_delay; vcpu->arch.st.accum_steal = delta; } static void record_steal_time(struct kvm_vcpu *vcpu) { if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) return; if (unlikely(kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.st.stime, &vcpu->arch.st.steal, sizeof(struct kvm_steal_time)))) return; vcpu->arch.st.steal.steal += vcpu->arch.st.accum_steal; vcpu->arch.st.steal.version += 2; vcpu->arch.st.accum_steal = 0; kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime, &vcpu->arch.st.steal, sizeof(struct kvm_steal_time)); } int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) { bool pr = false; u32 msr = msr_info->index; u64 data = msr_info->data; switch (msr) { case MSR_AMD64_NB_CFG: case MSR_IA32_UCODE_REV: case MSR_IA32_UCODE_WRITE: case MSR_VM_HSAVE_PA: case MSR_AMD64_PATCH_LOADER: case MSR_AMD64_BU_CFG2: break; case MSR_EFER: return set_efer(vcpu, data); case MSR_K7_HWCR: data &= ~(u64)0x40; /* ignore flush filter disable */ data &= ~(u64)0x100; /* ignore ignne emulation enable */ data &= ~(u64)0x8; /* ignore TLB cache disable */ data &= ~(u64)0x40000; /* ignore Mc status write enable */ if (data != 0) { vcpu_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n", data); return 1; } break; case MSR_FAM10H_MMIO_CONF_BASE: if (data != 0) { vcpu_unimpl(vcpu, "unimplemented MMIO_CONF_BASE wrmsr: " "0x%llx\n", data); return 1; } break; case MSR_IA32_DEBUGCTLMSR: if (!data) { /* We support the non-activated case already */ break; } else if (data & ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_BTF)) { /* Values other than LBR and BTF are vendor-specific, thus reserved and should throw a #GP */ return 1; } vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n", __func__, data); break; case 0x200 ... 0x2ff: return set_msr_mtrr(vcpu, msr, data); case MSR_IA32_APICBASE: return kvm_set_apic_base(vcpu, msr_info); case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff: return kvm_x2apic_msr_write(vcpu, msr, data); case MSR_IA32_TSCDEADLINE: kvm_set_lapic_tscdeadline_msr(vcpu, data); break; case MSR_IA32_TSC_ADJUST: if (guest_cpuid_has_tsc_adjust(vcpu)) { if (!msr_info->host_initiated) { u64 adj = data - vcpu->arch.ia32_tsc_adjust_msr; kvm_x86_ops->adjust_tsc_offset(vcpu, adj, true); } vcpu->arch.ia32_tsc_adjust_msr = data; } break; case MSR_IA32_MISC_ENABLE: vcpu->arch.ia32_misc_enable_msr = data; break; case MSR_KVM_WALL_CLOCK_NEW: case MSR_KVM_WALL_CLOCK: vcpu->kvm->arch.wall_clock = data; kvm_write_wall_clock(vcpu->kvm, data); break; case MSR_KVM_SYSTEM_TIME_NEW: case MSR_KVM_SYSTEM_TIME: { u64 gpa_offset; kvmclock_reset(vcpu); vcpu->arch.time = data; kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu); /* we verify if the enable bit is set... 
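 * (Bit 0 of the MSR value is the enable flag; the remaining bits hold
 * the guest physical address of the pvclock_vcpu_time_info structure
 * used below.)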
*/ if (!(data & 1)) break; gpa_offset = data & ~(PAGE_MASK | 1); if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.pv_time, data & ~1ULL, sizeof(struct pvclock_vcpu_time_info))) vcpu->arch.pv_time_enabled = false; else vcpu->arch.pv_time_enabled = true; break; } case MSR_KVM_ASYNC_PF_EN: if (kvm_pv_enable_async_pf(vcpu, data)) return 1; break; case MSR_KVM_STEAL_TIME: if (unlikely(!sched_info_on())) return 1; if (data & KVM_STEAL_RESERVED_MASK) return 1; if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime, data & KVM_STEAL_VALID_BITS, sizeof(struct kvm_steal_time))) return 1; vcpu->arch.st.msr_val = data; if (!(data & KVM_MSR_ENABLED)) break; vcpu->arch.st.last_steal = current->sched_info.run_delay; preempt_disable(); accumulate_steal_time(vcpu); preempt_enable(); kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu); break; case MSR_KVM_PV_EOI_EN: if (kvm_lapic_enable_pv_eoi(vcpu, data)) return 1; break; case MSR_IA32_MCG_CTL: case MSR_IA32_MCG_STATUS: case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1: return set_msr_mce(vcpu, msr, data); /* Performance counters are not protected by a CPUID bit, * so we should check all of them in the generic path for the sake of * cross vendor migration. * Writing a zero into the event select MSRs disables them, * which we perfectly emulate ;-). Any other value should be at least * reported, some guests depend on them. */ case MSR_K7_EVNTSEL0: case MSR_K7_EVNTSEL1: case MSR_K7_EVNTSEL2: case MSR_K7_EVNTSEL3: if (data != 0) vcpu_unimpl(vcpu, "unimplemented perfctr wrmsr: " "0x%x data 0x%llx\n", msr, data); break; /* at least RHEL 4 unconditionally writes to the perfctr registers, * so we ignore writes to make it happy. */ case MSR_K7_PERFCTR0: case MSR_K7_PERFCTR1: case MSR_K7_PERFCTR2: case MSR_K7_PERFCTR3: vcpu_unimpl(vcpu, "unimplemented perfctr wrmsr: " "0x%x data 0x%llx\n", msr, data); break; case MSR_P6_PERFCTR0: case MSR_P6_PERFCTR1: pr = true; case MSR_P6_EVNTSEL0: case MSR_P6_EVNTSEL1: if (kvm_pmu_msr(vcpu, msr)) return kvm_pmu_set_msr(vcpu, msr_info); if (pr || data != 0) vcpu_unimpl(vcpu, "disabled perfctr wrmsr: " "0x%x data 0x%llx\n", msr, data); break; case MSR_K7_CLK_CTL: /* * Ignore all writes to this no longer documented MSR. * Writes are only relevant for old K7 processors, * all pre-dating SVM, but a recommended workaround from * AMD for these chips. It is possible to specify the * affected processor models on the command line, hence * the need to ignore the workaround. */ break; case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15: if (kvm_hv_msr_partition_wide(msr)) { int r; mutex_lock(&vcpu->kvm->lock); r = set_msr_hyperv_pw(vcpu, msr, data); mutex_unlock(&vcpu->kvm->lock); return r; } else return set_msr_hyperv(vcpu, msr, data); break; case MSR_IA32_BBL_CR_CTL3: /* Drop writes to this legacy MSR -- see rdmsr * counterpart for further detail. 
*/ vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n", msr, data); break; case MSR_AMD64_OSVW_ID_LENGTH: if (!guest_cpuid_has_osvw(vcpu)) return 1; vcpu->arch.osvw.length = data; break; case MSR_AMD64_OSVW_STATUS: if (!guest_cpuid_has_osvw(vcpu)) return 1; vcpu->arch.osvw.status = data; break; default: if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr)) return xen_hvm_config(vcpu, data); if (kvm_pmu_msr(vcpu, msr)) return kvm_pmu_set_msr(vcpu, msr_info); if (!ignore_msrs) { vcpu_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n", msr, data); return 1; } else { vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n", msr, data); break; } } return 0; } EXPORT_SYMBOL_GPL(kvm_set_msr_common); /* * Reads an msr value (of 'msr_index') into 'pdata'. * Returns 0 on success, non-0 otherwise. * Assumes vcpu_load() was already called. */ int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata) { return kvm_x86_ops->get_msr(vcpu, msr_index, pdata); } static int get_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) { u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges; if (!msr_mtrr_valid(msr)) return 1; if (msr == MSR_MTRRdefType) *pdata = vcpu->arch.mtrr_state.def_type + (vcpu->arch.mtrr_state.enabled << 10); else if (msr == MSR_MTRRfix64K_00000) *pdata = p[0]; else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000) *pdata = p[1 + msr - MSR_MTRRfix16K_80000]; else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000) *pdata = p[3 + msr - MSR_MTRRfix4K_C0000]; else if (msr == MSR_IA32_CR_PAT) *pdata = vcpu->arch.pat; else { /* Variable MTRRs */ int idx, is_mtrr_mask; u64 *pt; idx = (msr - 0x200) / 2; is_mtrr_mask = msr - 0x200 - 2 * idx; if (!is_mtrr_mask) pt = (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo; else pt = (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo; *pdata = *pt; } return 0; } static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) { u64 data; u64 mcg_cap = vcpu->arch.mcg_cap; unsigned bank_num = mcg_cap & 0xff; switch (msr) { case MSR_IA32_P5_MC_ADDR: case MSR_IA32_P5_MC_TYPE: data = 0; break; case MSR_IA32_MCG_CAP: data = vcpu->arch.mcg_cap; break; case MSR_IA32_MCG_CTL: if (!(mcg_cap & MCG_CTL_P)) return 1; data = vcpu->arch.mcg_ctl; break; case MSR_IA32_MCG_STATUS: data = vcpu->arch.mcg_status; break; default: if (msr >= MSR_IA32_MC0_CTL && msr < MSR_IA32_MC0_CTL + 4 * bank_num) { u32 offset = msr - MSR_IA32_MC0_CTL; data = vcpu->arch.mce_banks[offset]; break; } return 1; } *pdata = data; return 0; } static int get_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) { u64 data = 0; struct kvm *kvm = vcpu->kvm; switch (msr) { case HV_X64_MSR_GUEST_OS_ID: data = kvm->arch.hv_guest_os_id; break; case HV_X64_MSR_HYPERCALL: data = kvm->arch.hv_hypercall; break; case HV_X64_MSR_TIME_REF_COUNT: { data = div_u64(get_kernel_ns() + kvm->arch.kvmclock_offset, 100); break; } case HV_X64_MSR_REFERENCE_TSC: data = kvm->arch.hv_tsc_page; break; default: vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr); return 1; } *pdata = data; return 0; } static int get_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) { u64 data = 0; switch (msr) { case HV_X64_MSR_VP_INDEX: { int r; struct kvm_vcpu *v; kvm_for_each_vcpu(r, v, vcpu->kvm) { if (v == vcpu) { data = r; break; } } break; } case HV_X64_MSR_EOI: return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata); case HV_X64_MSR_ICR: return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata); case HV_X64_MSR_TPR: return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata); case 
HV_X64_MSR_APIC_ASSIST_PAGE: data = vcpu->arch.hv_vapic; break; default: vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr); return 1; } *pdata = data; return 0; } int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) { u64 data; switch (msr) { case MSR_IA32_PLATFORM_ID: case MSR_IA32_EBL_CR_POWERON: case MSR_IA32_DEBUGCTLMSR: case MSR_IA32_LASTBRANCHFROMIP: case MSR_IA32_LASTBRANCHTOIP: case MSR_IA32_LASTINTFROMIP: case MSR_IA32_LASTINTTOIP: case MSR_K8_SYSCFG: case MSR_K7_HWCR: case MSR_VM_HSAVE_PA: case MSR_K7_EVNTSEL0: case MSR_K7_EVNTSEL1: case MSR_K7_EVNTSEL2: case MSR_K7_EVNTSEL3: case MSR_K7_PERFCTR0: case MSR_K7_PERFCTR1: case MSR_K7_PERFCTR2: case MSR_K7_PERFCTR3: case MSR_K8_INT_PENDING_MSG: case MSR_AMD64_NB_CFG: case MSR_FAM10H_MMIO_CONF_BASE: case MSR_AMD64_BU_CFG2: data = 0; break; case MSR_P6_PERFCTR0: case MSR_P6_PERFCTR1: case MSR_P6_EVNTSEL0: case MSR_P6_EVNTSEL1: if (kvm_pmu_msr(vcpu, msr)) return kvm_pmu_get_msr(vcpu, msr, pdata); data = 0; break; case MSR_IA32_UCODE_REV: data = 0x100000000ULL; break; case MSR_MTRRcap: data = 0x500 | KVM_NR_VAR_MTRR; break; case 0x200 ... 0x2ff: return get_msr_mtrr(vcpu, msr, pdata); case 0xcd: /* fsb frequency */ data = 3; break; /* * MSR_EBC_FREQUENCY_ID * Conservative value valid for even the basic CPU models. * Models 0,1: 000 in bits 23:21 indicating a bus speed of * 100MHz, model 2 000 in bits 18:16 indicating 100MHz, * and 266MHz for model 3, or 4. Set Core Clock * Frequency to System Bus Frequency Ratio to 1 (bits * 31:24) even though these are only valid for CPU * models > 2, however guests may end up dividing or * multiplying by zero otherwise. */ case MSR_EBC_FREQUENCY_ID: data = 1 << 24; break; case MSR_IA32_APICBASE: data = kvm_get_apic_base(vcpu); break; case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff: return kvm_x2apic_msr_read(vcpu, msr, pdata); break; case MSR_IA32_TSCDEADLINE: data = kvm_get_lapic_tscdeadline_msr(vcpu); break; case MSR_IA32_TSC_ADJUST: data = (u64)vcpu->arch.ia32_tsc_adjust_msr; break; case MSR_IA32_MISC_ENABLE: data = vcpu->arch.ia32_misc_enable_msr; break; case MSR_IA32_PERF_STATUS: /* TSC increment by tick */ data = 1000ULL; /* CPU multiplier */ data |= (((uint64_t)4ULL) << 40); break; case MSR_EFER: data = vcpu->arch.efer; break; case MSR_KVM_WALL_CLOCK: case MSR_KVM_WALL_CLOCK_NEW: data = vcpu->kvm->arch.wall_clock; break; case MSR_KVM_SYSTEM_TIME: case MSR_KVM_SYSTEM_TIME_NEW: data = vcpu->arch.time; break; case MSR_KVM_ASYNC_PF_EN: data = vcpu->arch.apf.msr_val; break; case MSR_KVM_STEAL_TIME: data = vcpu->arch.st.msr_val; break; case MSR_KVM_PV_EOI_EN: data = vcpu->arch.pv_eoi.msr_val; break; case MSR_IA32_P5_MC_ADDR: case MSR_IA32_P5_MC_TYPE: case MSR_IA32_MCG_CAP: case MSR_IA32_MCG_CTL: case MSR_IA32_MCG_STATUS: case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1: return get_msr_mce(vcpu, msr, pdata); case MSR_K7_CLK_CTL: /* * Provide expected ramp-up count for K7. All other * are set to zero, indicating minimum divisors for * every field. * * This prevents guest kernels on AMD host with CPU * type 6, model 8 and higher from exploding due to * the rdmsr failing. */ data = 0x20000000; break; case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15: if (kvm_hv_msr_partition_wide(msr)) { int r; mutex_lock(&vcpu->kvm->lock); r = get_msr_hyperv_pw(vcpu, msr, pdata); mutex_unlock(&vcpu->kvm->lock); return r; } else return get_msr_hyperv(vcpu, msr, pdata); break; case MSR_IA32_BBL_CR_CTL3: /* This legacy MSR exists but isn't fully documented in current * silicon. 
It is however accessed by winxp in very narrow * scenarios where it sets bit #19, itself documented as * a "reserved" bit. Best effort attempt to source coherent * read data here should the balance of the register be * interpreted by the guest: * * L2 cache control register 3: 64GB range, 256KB size, * enabled, latency 0x1, configured */ data = 0xbe702111; break; case MSR_AMD64_OSVW_ID_LENGTH: if (!guest_cpuid_has_osvw(vcpu)) return 1; data = vcpu->arch.osvw.length; break; case MSR_AMD64_OSVW_STATUS: if (!guest_cpuid_has_osvw(vcpu)) return 1; data = vcpu->arch.osvw.status; break; default: if (kvm_pmu_msr(vcpu, msr)) return kvm_pmu_get_msr(vcpu, msr, pdata); if (!ignore_msrs) { vcpu_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr); return 1; } else { vcpu_unimpl(vcpu, "ignored rdmsr: 0x%x\n", msr); data = 0; } break; } *pdata = data; return 0; } EXPORT_SYMBOL_GPL(kvm_get_msr_common); /* * Read or write a bunch of msrs. All parameters are kernel addresses. * * @return number of msrs set successfully. */ static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs, struct kvm_msr_entry *entries, int (*do_msr)(struct kvm_vcpu *vcpu, unsigned index, u64 *data)) { int i, idx; idx = srcu_read_lock(&vcpu->kvm->srcu); for (i = 0; i < msrs->nmsrs; ++i) if (do_msr(vcpu, entries[i].index, &entries[i].data)) break; srcu_read_unlock(&vcpu->kvm->srcu, idx); return i; } /* * Read or write a bunch of msrs. Parameters are user addresses. * * @return number of msrs set successfully. */ static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs, int (*do_msr)(struct kvm_vcpu *vcpu, unsigned index, u64 *data), int writeback) { struct kvm_msrs msrs; struct kvm_msr_entry *entries; int r, n; unsigned size; r = -EFAULT; if (copy_from_user(&msrs, user_msrs, sizeof msrs)) goto out; r = -E2BIG; if (msrs.nmsrs >= MAX_IO_MSRS) goto out; size = sizeof(struct kvm_msr_entry) * msrs.nmsrs; entries = memdup_user(user_msrs->entries, size); if (IS_ERR(entries)) { r = PTR_ERR(entries); goto out; } r = n = __msr_io(vcpu, &msrs, entries, do_msr); if (r < 0) goto out_free; r = -EFAULT; if (writeback && copy_to_user(user_msrs->entries, entries, size)) goto out_free; r = n; out_free: kfree(entries); out: return r; } int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) { int r; switch (ext) { case KVM_CAP_IRQCHIP: case KVM_CAP_HLT: case KVM_CAP_MMU_SHADOW_CACHE_CONTROL: case KVM_CAP_SET_TSS_ADDR: case KVM_CAP_EXT_CPUID: case KVM_CAP_EXT_EMUL_CPUID: case KVM_CAP_CLOCKSOURCE: case KVM_CAP_PIT: case KVM_CAP_NOP_IO_DELAY: case KVM_CAP_MP_STATE: case KVM_CAP_SYNC_MMU: case KVM_CAP_USER_NMI: case KVM_CAP_REINJECT_CONTROL: case KVM_CAP_IRQ_INJECT_STATUS: case KVM_CAP_IRQFD: case KVM_CAP_IOEVENTFD: case KVM_CAP_IOEVENTFD_NO_LENGTH: case KVM_CAP_PIT2: case KVM_CAP_PIT_STATE2: case KVM_CAP_SET_IDENTITY_MAP_ADDR: case KVM_CAP_XEN_HVM: case KVM_CAP_ADJUST_CLOCK: case KVM_CAP_VCPU_EVENTS: case KVM_CAP_HYPERV: case KVM_CAP_HYPERV_VAPIC: case KVM_CAP_HYPERV_SPIN: case KVM_CAP_PCI_SEGMENT: case KVM_CAP_DEBUGREGS: case KVM_CAP_X86_ROBUST_SINGLESTEP: case KVM_CAP_XSAVE: case KVM_CAP_ASYNC_PF: case KVM_CAP_GET_TSC_KHZ: case KVM_CAP_KVMCLOCK_CTRL: case KVM_CAP_READONLY_MEM: case KVM_CAP_HYPERV_TIME: case KVM_CAP_IOAPIC_POLARITY_IGNORED: #ifdef CONFIG_KVM_DEVICE_ASSIGNMENT case KVM_CAP_ASSIGN_DEV_IRQ: case KVM_CAP_PCI_2_3: #endif r = 1; break; case KVM_CAP_COALESCED_MMIO: r = KVM_COALESCED_MMIO_PAGE_OFFSET; break; case KVM_CAP_VAPIC: r = !kvm_x86_ops->cpu_has_accelerated_tpr(); break; case KVM_CAP_NR_VCPUS: r = KVM_SOFT_MAX_VCPUS; 
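		/*
		 * KVM_CAP_NR_VCPUS reports the recommended (soft) vCPU
		 * limit; the absolute maximum is reported separately via
		 * KVM_CAP_MAX_VCPUS below.
		 */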
break; case KVM_CAP_MAX_VCPUS: r = KVM_MAX_VCPUS; break; case KVM_CAP_NR_MEMSLOTS: r = KVM_USER_MEM_SLOTS; break; case KVM_CAP_PV_MMU: /* obsolete */ r = 0; break; #ifdef CONFIG_KVM_DEVICE_ASSIGNMENT case KVM_CAP_IOMMU: r = iommu_present(&pci_bus_type); break; #endif case KVM_CAP_MCE: r = KVM_MAX_MCE_BANKS; break; case KVM_CAP_XCRS: r = cpu_has_xsave; break; case KVM_CAP_TSC_CONTROL: r = kvm_has_tsc_control; break; case KVM_CAP_TSC_DEADLINE_TIMER: r = boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER); break; default: r = 0; break; } return r; } long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { void __user *argp = (void __user *)arg; long r; switch (ioctl) { case KVM_GET_MSR_INDEX_LIST: { struct kvm_msr_list __user *user_msr_list = argp; struct kvm_msr_list msr_list; unsigned n; r = -EFAULT; if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list)) goto out; n = msr_list.nmsrs; msr_list.nmsrs = num_msrs_to_save + ARRAY_SIZE(emulated_msrs); if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list)) goto out; r = -E2BIG; if (n < msr_list.nmsrs) goto out; r = -EFAULT; if (copy_to_user(user_msr_list->indices, &msrs_to_save, num_msrs_to_save * sizeof(u32))) goto out; if (copy_to_user(user_msr_list->indices + num_msrs_to_save, &emulated_msrs, ARRAY_SIZE(emulated_msrs) * sizeof(u32))) goto out; r = 0; break; } case KVM_GET_SUPPORTED_CPUID: case KVM_GET_EMULATED_CPUID: { struct kvm_cpuid2 __user *cpuid_arg = argp; struct kvm_cpuid2 cpuid; r = -EFAULT; if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid)) goto out; r = kvm_dev_ioctl_get_cpuid(&cpuid, cpuid_arg->entries, ioctl); if (r) goto out; r = -EFAULT; if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid)) goto out; r = 0; break; } case KVM_X86_GET_MCE_CAP_SUPPORTED: { u64 mce_cap; mce_cap = KVM_MCE_CAP_SUPPORTED; r = -EFAULT; if (copy_to_user(argp, &mce_cap, sizeof mce_cap)) goto out; r = 0; break; } default: r = -EINVAL; } out: return r; } static void wbinvd_ipi(void *garbage) { wbinvd(); } static bool need_emulate_wbinvd(struct kvm_vcpu *vcpu) { return kvm_arch_has_noncoherent_dma(vcpu->kvm); } void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) { /* Address WBINVD may be executed by guest */ if (need_emulate_wbinvd(vcpu)) { if (kvm_x86_ops->has_wbinvd_exit()) cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask); else if (vcpu->cpu != -1 && vcpu->cpu != cpu) smp_call_function_single(vcpu->cpu, wbinvd_ipi, NULL, 1); } kvm_x86_ops->vcpu_load(vcpu, cpu); /* Apply any externally detected TSC adjustments (due to suspend) */ if (unlikely(vcpu->arch.tsc_offset_adjustment)) { adjust_tsc_offset_host(vcpu, vcpu->arch.tsc_offset_adjustment); vcpu->arch.tsc_offset_adjustment = 0; kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); } if (unlikely(vcpu->cpu != cpu) || check_tsc_unstable()) { s64 tsc_delta = !vcpu->arch.last_host_tsc ? 
0 : native_read_tsc() - vcpu->arch.last_host_tsc; if (tsc_delta < 0) mark_tsc_unstable("KVM discovered backwards TSC"); if (check_tsc_unstable()) { u64 offset = kvm_x86_ops->compute_tsc_offset(vcpu, vcpu->arch.last_guest_tsc); kvm_x86_ops->write_tsc_offset(vcpu, offset); vcpu->arch.tsc_catchup = 1; } /* * On a host with synchronized TSC, there is no need to update * kvmclock on vcpu->cpu migration */ if (!vcpu->kvm->arch.use_master_clock || vcpu->cpu == -1) kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu); if (vcpu->cpu != cpu) kvm_migrate_timers(vcpu); vcpu->cpu = cpu; } accumulate_steal_time(vcpu); kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu); } void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) { kvm_x86_ops->vcpu_put(vcpu); kvm_put_guest_fpu(vcpu); vcpu->arch.last_host_tsc = native_read_tsc(); } static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s) { kvm_x86_ops->sync_pir_to_irr(vcpu); memcpy(s->regs, vcpu->arch.apic->regs, sizeof *s); return 0; } static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s) { kvm_apic_post_state_restore(vcpu, s); update_cr8_intercept(vcpu); return 0; } static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq) { if (irq->irq >= KVM_NR_INTERRUPTS) return -EINVAL; if (irqchip_in_kernel(vcpu->kvm)) return -ENXIO; kvm_queue_interrupt(vcpu, irq->irq, false); kvm_make_request(KVM_REQ_EVENT, vcpu); return 0; } static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu) { kvm_inject_nmi(vcpu); return 0; } static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu, struct kvm_tpr_access_ctl *tac) { if (tac->flags) return -EINVAL; vcpu->arch.tpr_access_reporting = !!tac->enabled; return 0; } static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu, u64 mcg_cap) { int r; unsigned bank_num = mcg_cap & 0xff, bank; r = -EINVAL; if (!bank_num || bank_num >= KVM_MAX_MCE_BANKS) goto out; if (mcg_cap & ~(KVM_MCE_CAP_SUPPORTED | 0xff | 0xff0000)) goto out; r = 0; vcpu->arch.mcg_cap = mcg_cap; /* Init IA32_MCG_CTL to all 1s */ if (mcg_cap & MCG_CTL_P) vcpu->arch.mcg_ctl = ~(u64)0; /* Init IA32_MCi_CTL to all 1s */ for (bank = 0; bank < bank_num; bank++) vcpu->arch.mce_banks[bank*4] = ~(u64)0; out: return r; } static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu, struct kvm_x86_mce *mce) { u64 mcg_cap = vcpu->arch.mcg_cap; unsigned bank_num = mcg_cap & 0xff; u64 *banks = vcpu->arch.mce_banks; if (mce->bank >= bank_num || !(mce->status & MCI_STATUS_VAL)) return -EINVAL; /* * if IA32_MCG_CTL is not all 1s, the uncorrected error * reporting is disabled */ if ((mce->status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) && vcpu->arch.mcg_ctl != ~(u64)0) return 0; banks += 4 * mce->bank; /* * if IA32_MCi_CTL is not all 1s, the uncorrected error * reporting is disabled for the bank */ if ((mce->status & MCI_STATUS_UC) && banks[0] != ~(u64)0) return 0; if (mce->status & MCI_STATUS_UC) { if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) || !kvm_read_cr4_bits(vcpu, X86_CR4_MCE)) { kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); return 0; } if (banks[1] & MCI_STATUS_VAL) mce->status |= MCI_STATUS_OVER; banks[2] = mce->addr; banks[3] = mce->misc; vcpu->arch.mcg_status = mce->mcg_status; banks[1] = mce->status; kvm_queue_exception(vcpu, MC_VECTOR); } else if (!(banks[1] & MCI_STATUS_VAL) || !(banks[1] & MCI_STATUS_UC)) { if (banks[1] & MCI_STATUS_VAL) mce->status |= MCI_STATUS_OVER; banks[2] = mce->addr; banks[3] = mce->misc; banks[1] = mce->status; } else banks[1] |= MCI_STATUS_OVER; return 0; } static 
void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu, struct kvm_vcpu_events *events) { process_nmi(vcpu); events->exception.injected = vcpu->arch.exception.pending && !kvm_exception_is_soft(vcpu->arch.exception.nr); events->exception.nr = vcpu->arch.exception.nr; events->exception.has_error_code = vcpu->arch.exception.has_error_code; events->exception.pad = 0; events->exception.error_code = vcpu->arch.exception.error_code; events->interrupt.injected = vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft; events->interrupt.nr = vcpu->arch.interrupt.nr; events->interrupt.soft = 0; events->interrupt.shadow = kvm_x86_ops->get_interrupt_shadow(vcpu); events->nmi.injected = vcpu->arch.nmi_injected; events->nmi.pending = vcpu->arch.nmi_pending != 0; events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu); events->nmi.pad = 0; events->sipi_vector = 0; /* never valid when reporting to user space */ events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING | KVM_VCPUEVENT_VALID_SHADOW); memset(&events->reserved, 0, sizeof(events->reserved)); } static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu, struct kvm_vcpu_events *events) { if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING | KVM_VCPUEVENT_VALID_SIPI_VECTOR | KVM_VCPUEVENT_VALID_SHADOW)) return -EINVAL; process_nmi(vcpu); vcpu->arch.exception.pending = events->exception.injected; vcpu->arch.exception.nr = events->exception.nr; vcpu->arch.exception.has_error_code = events->exception.has_error_code; vcpu->arch.exception.error_code = events->exception.error_code; vcpu->arch.interrupt.pending = events->interrupt.injected; vcpu->arch.interrupt.nr = events->interrupt.nr; vcpu->arch.interrupt.soft = events->interrupt.soft; if (events->flags & KVM_VCPUEVENT_VALID_SHADOW) kvm_x86_ops->set_interrupt_shadow(vcpu, events->interrupt.shadow); vcpu->arch.nmi_injected = events->nmi.injected; if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING) vcpu->arch.nmi_pending = events->nmi.pending; kvm_x86_ops->set_nmi_mask(vcpu, events->nmi.masked); if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR && kvm_vcpu_has_lapic(vcpu)) vcpu->arch.apic->sipi_vector = events->sipi_vector; kvm_make_request(KVM_REQ_EVENT, vcpu); return 0; } static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu, struct kvm_debugregs *dbgregs) { unsigned long val; memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db)); _kvm_get_dr(vcpu, 6, &val); dbgregs->dr6 = val; dbgregs->dr7 = vcpu->arch.dr7; dbgregs->flags = 0; memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved)); } static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu, struct kvm_debugregs *dbgregs) { if (dbgregs->flags) return -EINVAL; memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db)); vcpu->arch.dr6 = dbgregs->dr6; kvm_update_dr6(vcpu); vcpu->arch.dr7 = dbgregs->dr7; kvm_update_dr7(vcpu); return 0; } static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu, struct kvm_xsave *guest_xsave) { if (cpu_has_xsave) { memcpy(guest_xsave->region, &vcpu->arch.guest_fpu.state->xsave, vcpu->arch.guest_xstate_size); *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] &= vcpu->arch.guest_supported_xcr0 | XSTATE_FPSSE; } else { memcpy(guest_xsave->region, &vcpu->arch.guest_fpu.state->fxsave, sizeof(struct i387_fxsave_struct)); *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] = XSTATE_FPSSE; } } static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu, struct kvm_xsave *guest_xsave) { u64 xstate_bv = *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)]; 
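	/*
	 * xstate_bv is the XSTATE_BV bitmap from the XSAVE header supplied
	 * by userspace; it declares which state components the buffer
	 * claims to contain and is checked against what KVM supports below.
	 */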
if (cpu_has_xsave) { /* * Here we allow setting states that are not present in * CPUID leaf 0xD, index 0, EDX:EAX. This is for compatibility * with old userspace. */ if (xstate_bv & ~kvm_supported_xcr0()) return -EINVAL; memcpy(&vcpu->arch.guest_fpu.state->xsave, guest_xsave->region, vcpu->arch.guest_xstate_size); } else { if (xstate_bv & ~XSTATE_FPSSE) return -EINVAL; memcpy(&vcpu->arch.guest_fpu.state->fxsave, guest_xsave->region, sizeof(struct i387_fxsave_struct)); } return 0; } static void kvm_vcpu_ioctl_x86_get_xcrs(struct kvm_vcpu *vcpu, struct kvm_xcrs *guest_xcrs) { if (!cpu_has_xsave) { guest_xcrs->nr_xcrs = 0; return; } guest_xcrs->nr_xcrs = 1; guest_xcrs->flags = 0; guest_xcrs->xcrs[0].xcr = XCR_XFEATURE_ENABLED_MASK; guest_xcrs->xcrs[0].value = vcpu->arch.xcr0; } static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu, struct kvm_xcrs *guest_xcrs) { int i, r = 0; if (!cpu_has_xsave) return -EINVAL; if (guest_xcrs->nr_xcrs > KVM_MAX_XCRS || guest_xcrs->flags) return -EINVAL; for (i = 0; i < guest_xcrs->nr_xcrs; i++) /* Only support XCR0 currently */ if (guest_xcrs->xcrs[i].xcr == XCR_XFEATURE_ENABLED_MASK) { r = __kvm_set_xcr(vcpu, XCR_XFEATURE_ENABLED_MASK, guest_xcrs->xcrs[i].value); break; } if (r) r = -EINVAL; return r; } /* * kvm_set_guest_paused() indicates to the guest kernel that it has been * stopped by the hypervisor. This function will be called from the host only. * EINVAL is returned when the host attempts to set the flag for a guest that * does not support pv clocks. */ static int kvm_set_guest_paused(struct kvm_vcpu *vcpu) { if (!vcpu->arch.pv_time_enabled) return -EINVAL; vcpu->arch.pvclock_set_guest_stopped_request = true; kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); return 0; } long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { struct kvm_vcpu *vcpu = filp->private_data; void __user *argp = (void __user *)arg; int r; union { struct kvm_lapic_state *lapic; struct kvm_xsave *xsave; struct kvm_xcrs *xcrs; void *buffer; } u; u.buffer = NULL; switch (ioctl) { case KVM_GET_LAPIC: { r = -EINVAL; if (!vcpu->arch.apic) goto out; u.lapic = kzalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL); r = -ENOMEM; if (!u.lapic) goto out; r = kvm_vcpu_ioctl_get_lapic(vcpu, u.lapic); if (r) goto out; r = -EFAULT; if (copy_to_user(argp, u.lapic, sizeof(struct kvm_lapic_state))) goto out; r = 0; break; } case KVM_SET_LAPIC: { r = -EINVAL; if (!vcpu->arch.apic) goto out; u.lapic = memdup_user(argp, sizeof(*u.lapic)); if (IS_ERR(u.lapic)) return PTR_ERR(u.lapic); r = kvm_vcpu_ioctl_set_lapic(vcpu, u.lapic); break; } case KVM_INTERRUPT: { struct kvm_interrupt irq; r = -EFAULT; if (copy_from_user(&irq, argp, sizeof irq)) goto out; r = kvm_vcpu_ioctl_interrupt(vcpu, &irq); break; } case KVM_NMI: { r = kvm_vcpu_ioctl_nmi(vcpu); break; } case KVM_SET_CPUID: { struct kvm_cpuid __user *cpuid_arg = argp; struct kvm_cpuid cpuid; r = -EFAULT; if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid)) goto out; r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries); break; } case KVM_SET_CPUID2: { struct kvm_cpuid2 __user *cpuid_arg = argp; struct kvm_cpuid2 cpuid; r = -EFAULT; if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid)) goto out; r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid, cpuid_arg->entries); break; } case KVM_GET_CPUID2: { struct kvm_cpuid2 __user *cpuid_arg = argp; struct kvm_cpuid2 cpuid; r = -EFAULT; if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid)) goto out; r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid, cpuid_arg->entries); if (r) goto 
out; r = -EFAULT; if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid)) goto out; r = 0; break; } case KVM_GET_MSRS: r = msr_io(vcpu, argp, kvm_get_msr, 1); break; case KVM_SET_MSRS: r = msr_io(vcpu, argp, do_set_msr, 0); break; case KVM_TPR_ACCESS_REPORTING: { struct kvm_tpr_access_ctl tac; r = -EFAULT; if (copy_from_user(&tac, argp, sizeof tac)) goto out; r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac); if (r) goto out; r = -EFAULT; if (copy_to_user(argp, &tac, sizeof tac)) goto out; r = 0; break; }; case KVM_SET_VAPIC_ADDR: { struct kvm_vapic_addr va; r = -EINVAL; if (!irqchip_in_kernel(vcpu->kvm)) goto out; r = -EFAULT; if (copy_from_user(&va, argp, sizeof va)) goto out; r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); break; } case KVM_X86_SETUP_MCE: { u64 mcg_cap; r = -EFAULT; if (copy_from_user(&mcg_cap, argp, sizeof mcg_cap)) goto out; r = kvm_vcpu_ioctl_x86_setup_mce(vcpu, mcg_cap); break; } case KVM_X86_SET_MCE: { struct kvm_x86_mce mce; r = -EFAULT; if (copy_from_user(&mce, argp, sizeof mce)) goto out; r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce); break; } case KVM_GET_VCPU_EVENTS: { struct kvm_vcpu_events events; kvm_vcpu_ioctl_x86_get_vcpu_events(vcpu, &events); r = -EFAULT; if (copy_to_user(argp, &events, sizeof(struct kvm_vcpu_events))) break; r = 0; break; } case KVM_SET_VCPU_EVENTS: { struct kvm_vcpu_events events; r = -EFAULT; if (copy_from_user(&events, argp, sizeof(struct kvm_vcpu_events))) break; r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events); break; } case KVM_GET_DEBUGREGS: { struct kvm_debugregs dbgregs; kvm_vcpu_ioctl_x86_get_debugregs(vcpu, &dbgregs); r = -EFAULT; if (copy_to_user(argp, &dbgregs, sizeof(struct kvm_debugregs))) break; r = 0; break; } case KVM_SET_DEBUGREGS: { struct kvm_debugregs dbgregs; r = -EFAULT; if (copy_from_user(&dbgregs, argp, sizeof(struct kvm_debugregs))) break; r = kvm_vcpu_ioctl_x86_set_debugregs(vcpu, &dbgregs); break; } case KVM_GET_XSAVE: { u.xsave = kzalloc(sizeof(struct kvm_xsave), GFP_KERNEL); r = -ENOMEM; if (!u.xsave) break; kvm_vcpu_ioctl_x86_get_xsave(vcpu, u.xsave); r = -EFAULT; if (copy_to_user(argp, u.xsave, sizeof(struct kvm_xsave))) break; r = 0; break; } case KVM_SET_XSAVE: { u.xsave = memdup_user(argp, sizeof(*u.xsave)); if (IS_ERR(u.xsave)) return PTR_ERR(u.xsave); r = kvm_vcpu_ioctl_x86_set_xsave(vcpu, u.xsave); break; } case KVM_GET_XCRS: { u.xcrs = kzalloc(sizeof(struct kvm_xcrs), GFP_KERNEL); r = -ENOMEM; if (!u.xcrs) break; kvm_vcpu_ioctl_x86_get_xcrs(vcpu, u.xcrs); r = -EFAULT; if (copy_to_user(argp, u.xcrs, sizeof(struct kvm_xcrs))) break; r = 0; break; } case KVM_SET_XCRS: { u.xcrs = memdup_user(argp, sizeof(*u.xcrs)); if (IS_ERR(u.xcrs)) return PTR_ERR(u.xcrs); r = kvm_vcpu_ioctl_x86_set_xcrs(vcpu, u.xcrs); break; } case KVM_SET_TSC_KHZ: { u32 user_tsc_khz; r = -EINVAL; user_tsc_khz = (u32)arg; if (user_tsc_khz >= kvm_max_guest_tsc_khz) goto out; if (user_tsc_khz == 0) user_tsc_khz = tsc_khz; kvm_set_tsc_khz(vcpu, user_tsc_khz); r = 0; goto out; } case KVM_GET_TSC_KHZ: { r = vcpu->arch.virtual_tsc_khz; goto out; } case KVM_KVMCLOCK_CTRL: { r = kvm_set_guest_paused(vcpu); goto out; } default: r = -EINVAL; } out: kfree(u.buffer); return r; } int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) { return VM_FAULT_SIGBUS; } static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr) { int ret; if (addr > (unsigned int)(-3 * PAGE_SIZE)) return -EINVAL; ret = kvm_x86_ops->set_tss_addr(kvm, addr); return ret; } static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm, u64 
ident_addr) { kvm->arch.ept_identity_map_addr = ident_addr; return 0; } static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm, u32 kvm_nr_mmu_pages) { if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES) return -EINVAL; mutex_lock(&kvm->slots_lock); kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages); kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages; mutex_unlock(&kvm->slots_lock); return 0; } static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm) { return kvm->arch.n_max_mmu_pages; } static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip) { int r; r = 0; switch (chip->chip_id) { case KVM_IRQCHIP_PIC_MASTER: memcpy(&chip->chip.pic, &pic_irqchip(kvm)->pics[0], sizeof(struct kvm_pic_state)); break; case KVM_IRQCHIP_PIC_SLAVE: memcpy(&chip->chip.pic, &pic_irqchip(kvm)->pics[1], sizeof(struct kvm_pic_state)); break; case KVM_IRQCHIP_IOAPIC: r = kvm_get_ioapic(kvm, &chip->chip.ioapic); break; default: r = -EINVAL; break; } return r; } static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip) { int r; r = 0; switch (chip->chip_id) { case KVM_IRQCHIP_PIC_MASTER: spin_lock(&pic_irqchip(kvm)->lock); memcpy(&pic_irqchip(kvm)->pics[0], &chip->chip.pic, sizeof(struct kvm_pic_state)); spin_unlock(&pic_irqchip(kvm)->lock); break; case KVM_IRQCHIP_PIC_SLAVE: spin_lock(&pic_irqchip(kvm)->lock); memcpy(&pic_irqchip(kvm)->pics[1], &chip->chip.pic, sizeof(struct kvm_pic_state)); spin_unlock(&pic_irqchip(kvm)->lock); break; case KVM_IRQCHIP_IOAPIC: r = kvm_set_ioapic(kvm, &chip->chip.ioapic); break; default: r = -EINVAL; break; } kvm_pic_update_irq(pic_irqchip(kvm)); return r; } static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps) { int r = 0; mutex_lock(&kvm->arch.vpit->pit_state.lock); memcpy(ps, &kvm->arch.vpit->pit_state, sizeof(struct kvm_pit_state)); mutex_unlock(&kvm->arch.vpit->pit_state.lock); return r; } static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps) { int r = 0; mutex_lock(&kvm->arch.vpit->pit_state.lock); memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state)); kvm_pit_load_count(kvm, 0, ps->channels[0].count, 0); mutex_unlock(&kvm->arch.vpit->pit_state.lock); return r; } static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps) { int r = 0; mutex_lock(&kvm->arch.vpit->pit_state.lock); memcpy(ps->channels, &kvm->arch.vpit->pit_state.channels, sizeof(ps->channels)); ps->flags = kvm->arch.vpit->pit_state.flags; mutex_unlock(&kvm->arch.vpit->pit_state.lock); memset(&ps->reserved, 0, sizeof(ps->reserved)); return r; } static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps) { int r = 0, start = 0; u32 prev_legacy, cur_legacy; mutex_lock(&kvm->arch.vpit->pit_state.lock); prev_legacy = kvm->arch.vpit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY; cur_legacy = ps->flags & KVM_PIT_FLAGS_HPET_LEGACY; if (!prev_legacy && cur_legacy) start = 1; memcpy(&kvm->arch.vpit->pit_state.channels, &ps->channels, sizeof(kvm->arch.vpit->pit_state.channels)); kvm->arch.vpit->pit_state.flags = ps->flags; kvm_pit_load_count(kvm, 0, kvm->arch.vpit->pit_state.channels[0].count, start); mutex_unlock(&kvm->arch.vpit->pit_state.lock); return r; } static int kvm_vm_ioctl_reinject(struct kvm *kvm, struct kvm_reinject_control *control) { if (!kvm->arch.vpit) return -ENXIO; mutex_lock(&kvm->arch.vpit->pit_state.lock); kvm->arch.vpit->pit_state.reinject = control->pit_reinject; mutex_unlock(&kvm->arch.vpit->pit_state.lock); return 0; } /** * kvm_vm_ioctl_get_dirty_log - get and clear the log 
of dirty pages in a slot * @kvm: kvm instance * @log: slot id and address to which we copy the log * * We need to keep it in mind that VCPU threads can write to the bitmap * concurrently. So, to avoid losing data, we keep the following order for * each bit: * * 1. Take a snapshot of the bit and clear it if needed. * 2. Write protect the corresponding page. * 3. Flush TLB's if needed. * 4. Copy the snapshot to the userspace. * * Between 2 and 3, the guest may write to the page using the remaining TLB * entry. This is not a problem because the page will be reported dirty at * step 4 using the snapshot taken before and step 3 ensures that successive * writes will be logged for the next call. */ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) { int r; struct kvm_memory_slot *memslot; unsigned long n, i; unsigned long *dirty_bitmap; unsigned long *dirty_bitmap_buffer; bool is_dirty = false; mutex_lock(&kvm->slots_lock); r = -EINVAL; if (log->slot >= KVM_USER_MEM_SLOTS) goto out; memslot = id_to_memslot(kvm->memslots, log->slot); dirty_bitmap = memslot->dirty_bitmap; r = -ENOENT; if (!dirty_bitmap) goto out; n = kvm_dirty_bitmap_bytes(memslot); dirty_bitmap_buffer = dirty_bitmap + n / sizeof(long); memset(dirty_bitmap_buffer, 0, n); spin_lock(&kvm->mmu_lock); for (i = 0; i < n / sizeof(long); i++) { unsigned long mask; gfn_t offset; if (!dirty_bitmap[i]) continue; is_dirty = true; mask = xchg(&dirty_bitmap[i], 0); dirty_bitmap_buffer[i] = mask; offset = i * BITS_PER_LONG; kvm_mmu_write_protect_pt_masked(kvm, memslot, offset, mask); } spin_unlock(&kvm->mmu_lock); /* See the comments in kvm_mmu_slot_remove_write_access(). */ lockdep_assert_held(&kvm->slots_lock); /* * All the TLBs can be flushed out of mmu lock, see the comments in * kvm_mmu_slot_remove_write_access(). */ if (is_dirty) kvm_flush_remote_tlbs(kvm); r = -EFAULT; if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n)) goto out; r = 0; out: mutex_unlock(&kvm->slots_lock); return r; } int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event, bool line_status) { if (!irqchip_in_kernel(kvm)) return -ENXIO; irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irq_event->irq, irq_event->level, line_status); return 0; } long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { struct kvm *kvm = filp->private_data; void __user *argp = (void __user *)arg; int r = -ENOTTY; /* * This union makes it completely explicit to gcc-3.x * that these two variables' stack usage should be * combined, not added together. 
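 * Only one union member is live at a time; it merely stages the PIT * ioctl payloads handled further down.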
*/ union { struct kvm_pit_state ps; struct kvm_pit_state2 ps2; struct kvm_pit_config pit_config; } u; switch (ioctl) { case KVM_SET_TSS_ADDR: r = kvm_vm_ioctl_set_tss_addr(kvm, arg); break; case KVM_SET_IDENTITY_MAP_ADDR: { u64 ident_addr; r = -EFAULT; if (copy_from_user(&ident_addr, argp, sizeof ident_addr)) goto out; r = kvm_vm_ioctl_set_identity_map_addr(kvm, ident_addr); break; } case KVM_SET_NR_MMU_PAGES: r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg); break; case KVM_GET_NR_MMU_PAGES: r = kvm_vm_ioctl_get_nr_mmu_pages(kvm); break; case KVM_CREATE_IRQCHIP: { struct kvm_pic *vpic; mutex_lock(&kvm->lock); r = -EEXIST; if (kvm->arch.vpic) goto create_irqchip_unlock; r = -EINVAL; if (atomic_read(&kvm->online_vcpus)) goto create_irqchip_unlock; r = -ENOMEM; vpic = kvm_create_pic(kvm); if (vpic) { r = kvm_ioapic_init(kvm); if (r) { mutex_lock(&kvm->slots_lock); kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &vpic->dev_master); kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &vpic->dev_slave); kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &vpic->dev_eclr); mutex_unlock(&kvm->slots_lock); kfree(vpic); goto create_irqchip_unlock; } } else goto create_irqchip_unlock; smp_wmb(); kvm->arch.vpic = vpic; smp_wmb(); r = kvm_setup_default_irq_routing(kvm); if (r) { mutex_lock(&kvm->slots_lock); mutex_lock(&kvm->irq_lock); kvm_ioapic_destroy(kvm); kvm_destroy_pic(kvm); mutex_unlock(&kvm->irq_lock); mutex_unlock(&kvm->slots_lock); } create_irqchip_unlock: mutex_unlock(&kvm->lock); break; } case KVM_CREATE_PIT: u.pit_config.flags = KVM_PIT_SPEAKER_DUMMY; goto create_pit; case KVM_CREATE_PIT2: r = -EFAULT; if (copy_from_user(&u.pit_config, argp, sizeof(struct kvm_pit_config))) goto out; create_pit: mutex_lock(&kvm->slots_lock); r = -EEXIST; if (kvm->arch.vpit) goto create_pit_unlock; r = -ENOMEM; kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags); if (kvm->arch.vpit) r = 0; create_pit_unlock: mutex_unlock(&kvm->slots_lock); break; case KVM_GET_IRQCHIP: { /* 0: PIC master, 1: PIC slave, 2: IOAPIC */ struct kvm_irqchip *chip; chip = memdup_user(argp, sizeof(*chip)); if (IS_ERR(chip)) { r = PTR_ERR(chip); goto out; } r = -ENXIO; if (!irqchip_in_kernel(kvm)) goto get_irqchip_out; r = kvm_vm_ioctl_get_irqchip(kvm, chip); if (r) goto get_irqchip_out; r = -EFAULT; if (copy_to_user(argp, chip, sizeof *chip)) goto get_irqchip_out; r = 0; get_irqchip_out: kfree(chip); break; } case KVM_SET_IRQCHIP: { /* 0: PIC master, 1: PIC slave, 2: IOAPIC */ struct kvm_irqchip *chip; chip = memdup_user(argp, sizeof(*chip)); if (IS_ERR(chip)) { r = PTR_ERR(chip); goto out; } r = -ENXIO; if (!irqchip_in_kernel(kvm)) goto set_irqchip_out; r = kvm_vm_ioctl_set_irqchip(kvm, chip); if (r) goto set_irqchip_out; r = 0; set_irqchip_out: kfree(chip); break; } case KVM_GET_PIT: { r = -EFAULT; if (copy_from_user(&u.ps, argp, sizeof(struct kvm_pit_state))) goto out; r = -ENXIO; if (!kvm->arch.vpit) goto out; r = kvm_vm_ioctl_get_pit(kvm, &u.ps); if (r) goto out; r = -EFAULT; if (copy_to_user(argp, &u.ps, sizeof(struct kvm_pit_state))) goto out; r = 0; break; } case KVM_SET_PIT: { r = -EFAULT; if (copy_from_user(&u.ps, argp, sizeof u.ps)) goto out; r = -ENXIO; if (!kvm->arch.vpit) goto out; r = kvm_vm_ioctl_set_pit(kvm, &u.ps); break; } case KVM_GET_PIT2: { r = -ENXIO; if (!kvm->arch.vpit) goto out; r = kvm_vm_ioctl_get_pit2(kvm, &u.ps2); if (r) goto out; r = -EFAULT; if (copy_to_user(argp, &u.ps2, sizeof(u.ps2))) goto out; r = 0; break; } case KVM_SET_PIT2: { r = -EFAULT; if (copy_from_user(&u.ps2, argp, sizeof(u.ps2))) goto out; r = -ENXIO; if 
(!kvm->arch.vpit) goto out; r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2); break; } case KVM_REINJECT_CONTROL: { struct kvm_reinject_control control; r = -EFAULT; if (copy_from_user(&control, argp, sizeof(control))) goto out; r = kvm_vm_ioctl_reinject(kvm, &control); break; } case KVM_XEN_HVM_CONFIG: { r = -EFAULT; if (copy_from_user(&kvm->arch.xen_hvm_config, argp, sizeof(struct kvm_xen_hvm_config))) goto out; r = -EINVAL; if (kvm->arch.xen_hvm_config.flags) goto out; r = 0; break; } case KVM_SET_CLOCK: { struct kvm_clock_data user_ns; u64 now_ns; s64 delta; r = -EFAULT; if (copy_from_user(&user_ns, argp, sizeof(user_ns))) goto out; r = -EINVAL; if (user_ns.flags) goto out; r = 0; local_irq_disable(); now_ns = get_kernel_ns(); delta = user_ns.clock - now_ns; local_irq_enable(); kvm->arch.kvmclock_offset = delta; kvm_gen_update_masterclock(kvm); break; } case KVM_GET_CLOCK: { struct kvm_clock_data user_ns; u64 now_ns; local_irq_disable(); now_ns = get_kernel_ns(); user_ns.clock = kvm->arch.kvmclock_offset + now_ns; local_irq_enable(); user_ns.flags = 0; memset(&user_ns.pad, 0, sizeof(user_ns.pad)); r = -EFAULT; if (copy_to_user(argp, &user_ns, sizeof(user_ns))) goto out; r = 0; break; } default: ; } out: return r; } static void kvm_init_msr_list(void) { u32 dummy[2]; unsigned i, j; /* skip the first msrs in the list. KVM-specific */ for (i = j = KVM_SAVE_MSRS_BEGIN; i < ARRAY_SIZE(msrs_to_save); i++) { if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0) continue; /* * Even MSRs that are valid in the host may not be exposed * to the guests in some cases. We could work around this * in VMX with the generic MSR save/load machinery, but it * is not really worthwhile since it will really only * happen with nested virtualization. */ switch (msrs_to_save[i]) { case MSR_IA32_BNDCFGS: if (!kvm_x86_ops->mpx_supported()) continue; break; default: break; } if (j < i) msrs_to_save[j] = msrs_to_save[i]; j++; } num_msrs_to_save = j; } static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len, const void *v) { int handled = 0; int n; do { n = min(len, 8); if (!(vcpu->arch.apic && !kvm_iodevice_write(&vcpu->arch.apic->dev, addr, n, v)) && kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, addr, n, v)) break; handled += n; addr += n; len -= n; v += n; } while (len); return handled; } static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v) { int handled = 0; int n; do { n = min(len, 8); if (!(vcpu->arch.apic && !kvm_iodevice_read(&vcpu->arch.apic->dev, addr, n, v)) && kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, addr, n, v)) break; trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, *(u64 *)v); handled += n; addr += n; len -= n; v += n; } while (len); return handled; } static void kvm_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg) { kvm_x86_ops->set_segment(vcpu, var, seg); } void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg) { kvm_x86_ops->get_segment(vcpu, var, seg); } gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access, struct x86_exception *exception) { gpa_t t_gpa; BUG_ON(!mmu_is_nested(vcpu)); /* NPT walks are always user-walks */ access |= PFERR_USER_MASK; t_gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, exception); return t_gpa; } gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, struct x86_exception *exception) { u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? 
PFERR_USER_MASK : 0; return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); } gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, struct x86_exception *exception) { u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; access |= PFERR_FETCH_MASK; return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); } gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, struct x86_exception *exception) { u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; access |= PFERR_WRITE_MASK; return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); } /* uses this to access any guest's mapped memory without checking CPL */ gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, struct x86_exception *exception) { return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, exception); } static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes, struct kvm_vcpu *vcpu, u32 access, struct x86_exception *exception) { void *data = val; int r = X86EMUL_CONTINUE; while (bytes) { gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access, exception); unsigned offset = addr & (PAGE_SIZE-1); unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset); int ret; if (gpa == UNMAPPED_GVA) return X86EMUL_PROPAGATE_FAULT; ret = kvm_read_guest_page(vcpu->kvm, gpa >> PAGE_SHIFT, data, offset, toread); if (ret < 0) { r = X86EMUL_IO_NEEDED; goto out; } bytes -= toread; data += toread; addr += toread; } out: return r; } /* used for instruction fetching */ static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt, gva_t addr, void *val, unsigned int bytes, struct x86_exception *exception) { struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; unsigned offset; int ret; /* Inline kvm_read_guest_virt_helper for speed. */ gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access|PFERR_FETCH_MASK, exception); if (unlikely(gpa == UNMAPPED_GVA)) return X86EMUL_PROPAGATE_FAULT; offset = addr & (PAGE_SIZE-1); if (WARN_ON(offset + bytes > PAGE_SIZE)) bytes = (unsigned)PAGE_SIZE - offset; ret = kvm_read_guest_page(vcpu->kvm, gpa >> PAGE_SHIFT, val, offset, bytes); if (unlikely(ret < 0)) return X86EMUL_IO_NEEDED; return X86EMUL_CONTINUE; } int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt, gva_t addr, void *val, unsigned int bytes, struct x86_exception *exception) { struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? 
PFERR_USER_MASK : 0; return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, exception); } EXPORT_SYMBOL_GPL(kvm_read_guest_virt); static int kvm_read_guest_virt_system(struct x86_emulate_ctxt *ctxt, gva_t addr, void *val, unsigned int bytes, struct x86_exception *exception) { struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, exception); } int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt, gva_t addr, void *val, unsigned int bytes, struct x86_exception *exception) { struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); void *data = val; int r = X86EMUL_CONTINUE; while (bytes) { gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, PFERR_WRITE_MASK, exception); unsigned offset = addr & (PAGE_SIZE-1); unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset); int ret; if (gpa == UNMAPPED_GVA) return X86EMUL_PROPAGATE_FAULT; ret = kvm_write_guest(vcpu->kvm, gpa, data, towrite); if (ret < 0) { r = X86EMUL_IO_NEEDED; goto out; } bytes -= towrite; data += towrite; addr += towrite; } out: return r; } EXPORT_SYMBOL_GPL(kvm_write_guest_virt_system); static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva, gpa_t *gpa, struct x86_exception *exception, bool write) { u32 access = ((kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0) | (write ? PFERR_WRITE_MASK : 0); if (vcpu_match_mmio_gva(vcpu, gva) && !permission_fault(vcpu, vcpu->arch.walk_mmu, vcpu->arch.access, access)) { *gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT | (gva & (PAGE_SIZE - 1)); trace_vcpu_match_mmio(gva, *gpa, write, false); return 1; } *gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); if (*gpa == UNMAPPED_GVA) return -1; /* For APIC access vmexit */ if ((*gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE) return 1; if (vcpu_match_mmio_gpa(vcpu, *gpa)) { trace_vcpu_match_mmio(gva, *gpa, write, true); return 1; } return 0; } int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa, const void *val, int bytes) { int ret; ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes); if (ret < 0) return 0; kvm_mmu_pte_write(vcpu, gpa, val, bytes); return 1; } struct read_write_emulator_ops { int (*read_write_prepare)(struct kvm_vcpu *vcpu, void *val, int bytes); int (*read_write_emulate)(struct kvm_vcpu *vcpu, gpa_t gpa, void *val, int bytes); int (*read_write_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes, void *val); int (*read_write_exit_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa, void *val, int bytes); bool write; }; static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes) { if (vcpu->mmio_read_completed) { trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes, vcpu->mmio_fragments[0].gpa, *(u64 *)val); vcpu->mmio_read_completed = 0; return 1; } return 0; } static int read_emulate(struct kvm_vcpu *vcpu, gpa_t gpa, void *val, int bytes) { return !kvm_read_guest(vcpu->kvm, gpa, val, bytes); } static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa, void *val, int bytes) { return emulator_write_phys(vcpu, gpa, val, bytes); } static int write_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes, void *val) { trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, *(u64 *)val); return vcpu_mmio_write(vcpu, gpa, bytes, val); } static int read_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, void *val, int bytes) { trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, 0); return X86EMUL_IO_NEEDED; } static int write_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, void *val, int bytes) { struct kvm_mmio_fragment *frag = &vcpu->mmio_fragments[0]; 
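/* Only the first pending fragment is exposed to userspace here; at most eight bytes travel per KVM_EXIT_MMIO round trip. */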
memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len)); return X86EMUL_CONTINUE; } static const struct read_write_emulator_ops read_emultor = { .read_write_prepare = read_prepare, .read_write_emulate = read_emulate, .read_write_mmio = vcpu_mmio_read, .read_write_exit_mmio = read_exit_mmio, }; static const struct read_write_emulator_ops write_emultor = { .read_write_emulate = write_emulate, .read_write_mmio = write_mmio, .read_write_exit_mmio = write_exit_mmio, .write = true, }; static int emulator_read_write_onepage(unsigned long addr, void *val, unsigned int bytes, struct x86_exception *exception, struct kvm_vcpu *vcpu, const struct read_write_emulator_ops *ops) { gpa_t gpa; int handled, ret; bool write = ops->write; struct kvm_mmio_fragment *frag; ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, write); if (ret < 0) return X86EMUL_PROPAGATE_FAULT; /* For APIC access vmexit */ if (ret) goto mmio; if (ops->read_write_emulate(vcpu, gpa, val, bytes)) return X86EMUL_CONTINUE; mmio: /* * Is this MMIO handled locally? */ handled = ops->read_write_mmio(vcpu, gpa, bytes, val); if (handled == bytes) return X86EMUL_CONTINUE; gpa += handled; bytes -= handled; val += handled; WARN_ON(vcpu->mmio_nr_fragments >= KVM_MAX_MMIO_FRAGMENTS); frag = &vcpu->mmio_fragments[vcpu->mmio_nr_fragments++]; frag->gpa = gpa; frag->data = val; frag->len = bytes; return X86EMUL_CONTINUE; } int emulator_read_write(struct x86_emulate_ctxt *ctxt, unsigned long addr, void *val, unsigned int bytes, struct x86_exception *exception, const struct read_write_emulator_ops *ops) { struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); gpa_t gpa; int rc; if (ops->read_write_prepare && ops->read_write_prepare(vcpu, val, bytes)) return X86EMUL_CONTINUE; vcpu->mmio_nr_fragments = 0; /* Crossing a page boundary? 
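If so, split the access: the bytes on the first page are emulated separately below, then the remainder follows.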
*/ if (((addr + bytes - 1) ^ addr) & PAGE_MASK) { int now; now = -addr & ~PAGE_MASK; rc = emulator_read_write_onepage(addr, val, now, exception, vcpu, ops); if (rc != X86EMUL_CONTINUE) return rc; addr += now; val += now; bytes -= now; } rc = emulator_read_write_onepage(addr, val, bytes, exception, vcpu, ops); if (rc != X86EMUL_CONTINUE) return rc; if (!vcpu->mmio_nr_fragments) return rc; gpa = vcpu->mmio_fragments[0].gpa; vcpu->mmio_needed = 1; vcpu->mmio_cur_fragment = 0; vcpu->run->mmio.len = min(8u, vcpu->mmio_fragments[0].len); vcpu->run->mmio.is_write = vcpu->mmio_is_write = ops->write; vcpu->run->exit_reason = KVM_EXIT_MMIO; vcpu->run->mmio.phys_addr = gpa; return ops->read_write_exit_mmio(vcpu, gpa, val, bytes); } static int emulator_read_emulated(struct x86_emulate_ctxt *ctxt, unsigned long addr, void *val, unsigned int bytes, struct x86_exception *exception) { return emulator_read_write(ctxt, addr, val, bytes, exception, &read_emultor); } int emulator_write_emulated(struct x86_emulate_ctxt *ctxt, unsigned long addr, const void *val, unsigned int bytes, struct x86_exception *exception) { return emulator_read_write(ctxt, addr, (void *)val, bytes, exception, &write_emultor); } #define CMPXCHG_TYPE(t, ptr, old, new) \ (cmpxchg((t *)(ptr), *(t *)(old), *(t *)(new)) == *(t *)(old)) #ifdef CONFIG_X86_64 # define CMPXCHG64(ptr, old, new) CMPXCHG_TYPE(u64, ptr, old, new) #else # define CMPXCHG64(ptr, old, new) \ (cmpxchg64((u64 *)(ptr), *(u64 *)(old), *(u64 *)(new)) == *(u64 *)(old)) #endif static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt, unsigned long addr, const void *old, const void *new, unsigned int bytes, struct x86_exception *exception) { struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); gpa_t gpa; struct page *page; char *kaddr; bool exchanged; /* guests cmpxchg8b have to be emulated atomically */ if (bytes > 8 || (bytes & (bytes - 1))) goto emul_write; gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL); if (gpa == UNMAPPED_GVA || (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE) goto emul_write; if (((gpa + bytes - 1) & PAGE_MASK) != (gpa & PAGE_MASK)) goto emul_write; page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT); if (is_error_page(page)) goto emul_write; kaddr = kmap_atomic(page); kaddr += offset_in_page(gpa); switch (bytes) { case 1: exchanged = CMPXCHG_TYPE(u8, kaddr, old, new); break; case 2: exchanged = CMPXCHG_TYPE(u16, kaddr, old, new); break; case 4: exchanged = CMPXCHG_TYPE(u32, kaddr, old, new); break; case 8: exchanged = CMPXCHG64(kaddr, old, new); break; default: BUG(); } kunmap_atomic(kaddr); kvm_release_page_dirty(page); if (!exchanged) return X86EMUL_CMPXCHG_FAILED; mark_page_dirty(vcpu->kvm, gpa >> PAGE_SHIFT); kvm_mmu_pte_write(vcpu, gpa, new, bytes); return X86EMUL_CONTINUE; emul_write: printk_once(KERN_WARNING "kvm: emulating exchange as write\n"); return emulator_write_emulated(ctxt, addr, new, bytes, exception); } static int kernel_pio(struct kvm_vcpu *vcpu, void *pd) { /* TODO: String I/O for in kernel device */ int r; if (vcpu->arch.pio.in) r = kvm_io_bus_read(vcpu->kvm, KVM_PIO_BUS, vcpu->arch.pio.port, vcpu->arch.pio.size, pd); else r = kvm_io_bus_write(vcpu->kvm, KVM_PIO_BUS, vcpu->arch.pio.port, vcpu->arch.pio.size, pd); return r; } static int emulator_pio_in_out(struct kvm_vcpu *vcpu, int size, unsigned short port, void *val, unsigned int count, bool in) { vcpu->arch.pio.port = port; vcpu->arch.pio.in = in; vcpu->arch.pio.count = count; vcpu->arch.pio.size = size; if (!kernel_pio(vcpu, vcpu->arch.pio_data)) { vcpu->arch.pio.count = 0; return 
1; } vcpu->run->exit_reason = KVM_EXIT_IO; vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT; vcpu->run->io.size = size; vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE; vcpu->run->io.count = count; vcpu->run->io.port = port; return 0; } static int emulator_pio_in_emulated(struct x86_emulate_ctxt *ctxt, int size, unsigned short port, void *val, unsigned int count) { struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); int ret; if (vcpu->arch.pio.count) goto data_avail; ret = emulator_pio_in_out(vcpu, size, port, val, count, true); if (ret) { data_avail: memcpy(val, vcpu->arch.pio_data, size * count); trace_kvm_pio(KVM_PIO_IN, port, size, count, vcpu->arch.pio_data); vcpu->arch.pio.count = 0; return 1; } return 0; } static int emulator_pio_out_emulated(struct x86_emulate_ctxt *ctxt, int size, unsigned short port, const void *val, unsigned int count) { struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); memcpy(vcpu->arch.pio_data, val, size * count); trace_kvm_pio(KVM_PIO_OUT, port, size, count, vcpu->arch.pio_data); return emulator_pio_in_out(vcpu, size, port, (void *)val, count, false); } static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg) { return kvm_x86_ops->get_segment_base(vcpu, seg); } static void emulator_invlpg(struct x86_emulate_ctxt *ctxt, ulong address) { kvm_mmu_invlpg(emul_to_vcpu(ctxt), address); } int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu) { if (!need_emulate_wbinvd(vcpu)) return X86EMUL_CONTINUE; if (kvm_x86_ops->has_wbinvd_exit()) { int cpu = get_cpu(); cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask); smp_call_function_many(vcpu->arch.wbinvd_dirty_mask, wbinvd_ipi, NULL, 1); put_cpu(); cpumask_clear(vcpu->arch.wbinvd_dirty_mask); } else wbinvd(); return X86EMUL_CONTINUE; } EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd); static void emulator_wbinvd(struct x86_emulate_ctxt *ctxt) { kvm_emulate_wbinvd(emul_to_vcpu(ctxt)); } int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest) { return _kvm_get_dr(emul_to_vcpu(ctxt), dr, dest); } int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value) { return __kvm_set_dr(emul_to_vcpu(ctxt), dr, value); } static u64 mk_cr_64(u64 curr_cr, u32 new_val) { return (curr_cr & ~((1ULL << 32) - 1)) | new_val; } static unsigned long emulator_get_cr(struct x86_emulate_ctxt *ctxt, int cr) { struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); unsigned long value; switch (cr) { case 0: value = kvm_read_cr0(vcpu); break; case 2: value = vcpu->arch.cr2; break; case 3: value = kvm_read_cr3(vcpu); break; case 4: value = kvm_read_cr4(vcpu); break; case 8: value = kvm_get_cr8(vcpu); break; default: kvm_err("%s: unexpected cr %u\n", __func__, cr); return 0; } return value; } static int emulator_set_cr(struct x86_emulate_ctxt *ctxt, int cr, ulong val) { struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); int res = 0; switch (cr) { case 0: res = kvm_set_cr0(vcpu, mk_cr_64(kvm_read_cr0(vcpu), val)); break; case 2: vcpu->arch.cr2 = val; break; case 3: res = kvm_set_cr3(vcpu, val); break; case 4: res = kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val)); break; case 8: res = kvm_set_cr8(vcpu, val); break; default: kvm_err("%s: unexpected cr %u\n", __func__, cr); res = -1; } return res; } static int emulator_get_cpl(struct x86_emulate_ctxt *ctxt) { return kvm_x86_ops->get_cpl(emul_to_vcpu(ctxt)); } static void emulator_get_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt) { kvm_x86_ops->get_gdt(emul_to_vcpu(ctxt), dt); } static void emulator_get_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt) { 
kvm_x86_ops->get_idt(emul_to_vcpu(ctxt), dt); } static void emulator_set_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt) { kvm_x86_ops->set_gdt(emul_to_vcpu(ctxt), dt); } static void emulator_set_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt) { kvm_x86_ops->set_idt(emul_to_vcpu(ctxt), dt); } static unsigned long emulator_get_cached_segment_base( struct x86_emulate_ctxt *ctxt, int seg) { return get_segment_base(emul_to_vcpu(ctxt), seg); } static bool emulator_get_segment(struct x86_emulate_ctxt *ctxt, u16 *selector, struct desc_struct *desc, u32 *base3, int seg) { struct kvm_segment var; kvm_get_segment(emul_to_vcpu(ctxt), &var, seg); *selector = var.selector; if (var.unusable) { memset(desc, 0, sizeof(*desc)); return false; } if (var.g) var.limit >>= 12; set_desc_limit(desc, var.limit); set_desc_base(desc, (unsigned long)var.base); #ifdef CONFIG_X86_64 if (base3) *base3 = var.base >> 32; #endif desc->type = var.type; desc->s = var.s; desc->dpl = var.dpl; desc->p = var.present; desc->avl = var.avl; desc->l = var.l; desc->d = var.db; desc->g = var.g; return true; } static void emulator_set_segment(struct x86_emulate_ctxt *ctxt, u16 selector, struct desc_struct *desc, u32 base3, int seg) { struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); struct kvm_segment var; var.selector = selector; var.base = get_desc_base(desc); #ifdef CONFIG_X86_64 var.base |= ((u64)base3) << 32; #endif var.limit = get_desc_limit(desc); if (desc->g) var.limit = (var.limit << 12) | 0xfff; var.type = desc->type; var.dpl = desc->dpl; var.db = desc->d; var.s = desc->s; var.l = desc->l; var.g = desc->g; var.avl = desc->avl; var.present = desc->p; var.unusable = !var.present; var.padding = 0; kvm_set_segment(vcpu, &var, seg); return; } static int emulator_get_msr(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 *pdata) { return kvm_get_msr(emul_to_vcpu(ctxt), msr_index, pdata); } static int emulator_set_msr(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 data) { struct msr_data msr; msr.data = data; msr.index = msr_index; msr.host_initiated = false; return kvm_set_msr(emul_to_vcpu(ctxt), &msr); } static int emulator_check_pmc(struct x86_emulate_ctxt *ctxt, u32 pmc) { return kvm_pmu_check_pmc(emul_to_vcpu(ctxt), pmc); } static int emulator_read_pmc(struct x86_emulate_ctxt *ctxt, u32 pmc, u64 *pdata) { return kvm_pmu_read_pmc(emul_to_vcpu(ctxt), pmc, pdata); } static void emulator_halt(struct x86_emulate_ctxt *ctxt) { emul_to_vcpu(ctxt)->arch.halt_request = 1; } static void emulator_get_fpu(struct x86_emulate_ctxt *ctxt) { preempt_disable(); kvm_load_guest_fpu(emul_to_vcpu(ctxt)); /* * CR0.TS may reference the host fpu state, not the guest fpu state, * so it may be clear at this point. 
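 * Clearing TS below keeps the FPU instructions issued while emulating * from faulting with #NM.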
*/ clts(); } static void emulator_put_fpu(struct x86_emulate_ctxt *ctxt) { preempt_enable(); } static int emulator_intercept(struct x86_emulate_ctxt *ctxt, struct x86_instruction_info *info, enum x86_intercept_stage stage) { return kvm_x86_ops->check_intercept(emul_to_vcpu(ctxt), info, stage); } static void emulator_get_cpuid(struct x86_emulate_ctxt *ctxt, u32 *eax, u32 *ebx, u32 *ecx, u32 *edx) { kvm_cpuid(emul_to_vcpu(ctxt), eax, ebx, ecx, edx); } static ulong emulator_read_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg) { return kvm_register_read(emul_to_vcpu(ctxt), reg); } static void emulator_write_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg, ulong val) { kvm_register_write(emul_to_vcpu(ctxt), reg, val); } static const struct x86_emulate_ops emulate_ops = { .read_gpr = emulator_read_gpr, .write_gpr = emulator_write_gpr, .read_std = kvm_read_guest_virt_system, .write_std = kvm_write_guest_virt_system, .fetch = kvm_fetch_guest_virt, .read_emulated = emulator_read_emulated, .write_emulated = emulator_write_emulated, .cmpxchg_emulated = emulator_cmpxchg_emulated, .invlpg = emulator_invlpg, .pio_in_emulated = emulator_pio_in_emulated, .pio_out_emulated = emulator_pio_out_emulated, .get_segment = emulator_get_segment, .set_segment = emulator_set_segment, .get_cached_segment_base = emulator_get_cached_segment_base, .get_gdt = emulator_get_gdt, .get_idt = emulator_get_idt, .set_gdt = emulator_set_gdt, .set_idt = emulator_set_idt, .get_cr = emulator_get_cr, .set_cr = emulator_set_cr, .cpl = emulator_get_cpl, .get_dr = emulator_get_dr, .set_dr = emulator_set_dr, .set_msr = emulator_set_msr, .get_msr = emulator_get_msr, .check_pmc = emulator_check_pmc, .read_pmc = emulator_read_pmc, .halt = emulator_halt, .wbinvd = emulator_wbinvd, .fix_hypercall = emulator_fix_hypercall, .get_fpu = emulator_get_fpu, .put_fpu = emulator_put_fpu, .intercept = emulator_intercept, .get_cpuid = emulator_get_cpuid, }; static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask) { u32 int_shadow = kvm_x86_ops->get_interrupt_shadow(vcpu); /* * an sti; sti; sequence only disable interrupts for the first * instruction. So, if the last instruction, be it emulated or * not, left the system with the INT_STI flag enabled, it * means that the last instruction is an sti. We should not * leave the flag on in this case. The same goes for mov ss */ if (int_shadow & mask) mask = 0; if (unlikely(int_shadow || mask)) { kvm_x86_ops->set_interrupt_shadow(vcpu, mask); if (!mask) kvm_make_request(KVM_REQ_EVENT, vcpu); } } static bool inject_emulated_exception(struct kvm_vcpu *vcpu) { struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; if (ctxt->exception.vector == PF_VECTOR) return kvm_propagate_fault(vcpu, &ctxt->exception); if (ctxt->exception.error_code_valid) kvm_queue_exception_e(vcpu, ctxt->exception.vector, ctxt->exception.error_code); else kvm_queue_exception(vcpu, ctxt->exception.vector); return false; } static void init_emulate_ctxt(struct kvm_vcpu *vcpu) { struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; int cs_db, cs_l; kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l); ctxt->eflags = kvm_get_rflags(vcpu); ctxt->eip = kvm_rip_read(vcpu); ctxt->mode = (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL : (ctxt->eflags & X86_EFLAGS_VM) ? X86EMUL_MODE_VM86 : (cs_l && is_long_mode(vcpu)) ? X86EMUL_MODE_PROT64 : cs_db ? 
X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16; ctxt->guest_mode = is_guest_mode(vcpu); init_decode_cache(ctxt); vcpu->arch.emulate_regs_need_sync_from_vcpu = false; } int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip) { struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; int ret; init_emulate_ctxt(vcpu); ctxt->op_bytes = 2; ctxt->ad_bytes = 2; ctxt->_eip = ctxt->eip + inc_eip; ret = emulate_int_real(ctxt, irq); if (ret != X86EMUL_CONTINUE) return EMULATE_FAIL; ctxt->eip = ctxt->_eip; kvm_rip_write(vcpu, ctxt->eip); kvm_set_rflags(vcpu, ctxt->eflags); if (irq == NMI_VECTOR) vcpu->arch.nmi_pending = 0; else vcpu->arch.interrupt.pending = false; return EMULATE_DONE; } EXPORT_SYMBOL_GPL(kvm_inject_realmode_interrupt); static int handle_emulation_failure(struct kvm_vcpu *vcpu) { int r = EMULATE_DONE; ++vcpu->stat.insn_emulation_fail; trace_kvm_emulate_insn_failed(vcpu); if (!is_guest_mode(vcpu)) { vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; vcpu->run->internal.ndata = 0; r = EMULATE_FAIL; } kvm_queue_exception(vcpu, UD_VECTOR); return r; } static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2, bool write_fault_to_shadow_pgtable, int emulation_type) { gpa_t gpa = cr2; pfn_t pfn; if (emulation_type & EMULTYPE_NO_REEXECUTE) return false; if (!vcpu->arch.mmu.direct_map) { /* * Write permission should be allowed since only * write access needs to be emulated. */ gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL); /* * If the mapping is invalid in the guest, let the cpu retry * it to generate the fault. */ if (gpa == UNMAPPED_GVA) return true; } /* * Do not retry the unhandleable instruction if it faults on the * readonly host memory, otherwise it will go into an infinite loop: * retry instruction -> write #PF -> emulation fail -> retry * instruction -> ... */ pfn = gfn_to_pfn(vcpu->kvm, gpa_to_gfn(gpa)); /* * If the instruction failed on the error pfn, it cannot be fixed; * report the error to userspace. */ if (is_error_noslot_pfn(pfn)) return false; kvm_release_pfn_clean(pfn); /* The instructions are well-emulated on direct mmu. */ if (vcpu->arch.mmu.direct_map) { unsigned int indirect_shadow_pages; spin_lock(&vcpu->kvm->mmu_lock); indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages; spin_unlock(&vcpu->kvm->mmu_lock); if (indirect_shadow_pages) kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); return true; } /* * If emulation was due to access to a shadowed page table * and it failed, try to unshadow the page and re-enter the * guest to let the CPU execute the instruction. */ kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); /* * If the access faults on its page table, it cannot * be fixed by unprotecting the shadow page and it should * be reported to userspace. */ return !write_fault_to_shadow_pgtable; } static bool retry_instruction(struct x86_emulate_ctxt *ctxt, unsigned long cr2, int emulation_type) { struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); unsigned long last_retry_eip, last_retry_addr, gpa = cr2; last_retry_eip = vcpu->arch.last_retry_eip; last_retry_addr = vcpu->arch.last_retry_addr; /* * If the emulation is caused by #PF and it is a non-page_table * writing instruction, it means the VM-EXIT is caused by shadow * page protection; we can zap the shadow page and retry this * instruction directly. * * Note: if the guest uses a non-page-table modifying instruction * on the PDE that points to the instruction, then we will unmap * the instruction and go to an infinite loop.
So, we cache the * last retried eip and the last fault address, if we meet the eip * and the address again, we can break out of the potential infinite * loop. */ vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0; if (!(emulation_type & EMULTYPE_RETRY)) return false; if (x86_page_table_writing_insn(ctxt)) return false; if (ctxt->eip == last_retry_eip && last_retry_addr == cr2) return false; vcpu->arch.last_retry_eip = ctxt->eip; vcpu->arch.last_retry_addr = cr2; if (!vcpu->arch.mmu.direct_map) gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL); kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); return true; } static int complete_emulated_mmio(struct kvm_vcpu *vcpu); static int complete_emulated_pio(struct kvm_vcpu *vcpu); static int kvm_vcpu_check_hw_bp(unsigned long addr, u32 type, u32 dr7, unsigned long *db) { u32 dr6 = 0; int i; u32 enable, rwlen; enable = dr7; rwlen = dr7 >> 16; for (i = 0; i < 4; i++, enable >>= 2, rwlen >>= 4) if ((enable & 3) && (rwlen & 15) == type && db[i] == addr) dr6 |= (1 << i); return dr6; } static void kvm_vcpu_check_singlestep(struct kvm_vcpu *vcpu, unsigned long rflags, int *r) { struct kvm_run *kvm_run = vcpu->run; /* * rflags is the old, "raw" value of the flags. The new value has * not been saved yet. * * This is correct even for TF set by the guest, because "the * processor will not generate this exception after the instruction * that sets the TF flag". */ if (unlikely(rflags & X86_EFLAGS_TF)) { if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) { kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1 | DR6_RTM; kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip; kvm_run->debug.arch.exception = DB_VECTOR; kvm_run->exit_reason = KVM_EXIT_DEBUG; *r = EMULATE_USER_EXIT; } else { vcpu->arch.emulate_ctxt.eflags &= ~X86_EFLAGS_TF; /* * "Certain debug exceptions may clear bit 0-3. The * remaining contents of the DR6 register are never * cleared by the processor". */ vcpu->arch.dr6 &= ~15; vcpu->arch.dr6 |= DR6_BS | DR6_RTM; kvm_queue_exception(vcpu, DB_VECTOR); } } } static bool kvm_vcpu_check_breakpoint(struct kvm_vcpu *vcpu, int *r) { struct kvm_run *kvm_run = vcpu->run; unsigned long eip = vcpu->arch.emulate_ctxt.eip; u32 dr6 = 0; if (unlikely(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) && (vcpu->arch.guest_debug_dr7 & DR7_BP_EN_MASK)) { dr6 = kvm_vcpu_check_hw_bp(eip, 0, vcpu->arch.guest_debug_dr7, vcpu->arch.eff_db); if (dr6 != 0) { kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1 | DR6_RTM; kvm_run->debug.arch.pc = kvm_rip_read(vcpu) + get_segment_base(vcpu, VCPU_SREG_CS); kvm_run->debug.arch.exception = DB_VECTOR; kvm_run->exit_reason = KVM_EXIT_DEBUG; *r = EMULATE_USER_EXIT; return true; } } if (unlikely(vcpu->arch.dr7 & DR7_BP_EN_MASK) && !(kvm_get_rflags(vcpu) & X86_EFLAGS_RF)) { dr6 = kvm_vcpu_check_hw_bp(eip, 0, vcpu->arch.dr7, vcpu->arch.db); if (dr6 != 0) { vcpu->arch.dr6 &= ~15; vcpu->arch.dr6 |= dr6 | DR6_RTM; kvm_queue_exception(vcpu, DB_VECTOR); *r = EMULATE_DONE; return true; } } return false; } int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2, int emulation_type, void *insn, int insn_len) { int r; struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; bool writeback = true; bool write_fault_to_spt = vcpu->arch.write_fault_to_shadow_pgtable; /* * Clear write_fault_to_shadow_pgtable here to ensure it is * never reused. 
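 * The flag was already latched into write_fault_to_spt above, so * clearing it cannot lose information for this emulation.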
*/ vcpu->arch.write_fault_to_shadow_pgtable = false; kvm_clear_exception_queue(vcpu); if (!(emulation_type & EMULTYPE_NO_DECODE)) { init_emulate_ctxt(vcpu); /* * We will reenter on the same instruction since * we do not set complete_userspace_io. This does not * handle watchpoints yet, those would be handled in * the emulate_ops. */ if (kvm_vcpu_check_breakpoint(vcpu, &r)) return r; ctxt->interruptibility = 0; ctxt->have_exception = false; ctxt->exception.vector = -1; ctxt->perm_ok = false; ctxt->ud = emulation_type & EMULTYPE_TRAP_UD; r = x86_decode_insn(ctxt, insn, insn_len); trace_kvm_emulate_insn_start(vcpu); ++vcpu->stat.insn_emulation; if (r != EMULATION_OK) { if (emulation_type & EMULTYPE_TRAP_UD) return EMULATE_FAIL; if (reexecute_instruction(vcpu, cr2, write_fault_to_spt, emulation_type)) return EMULATE_DONE; if (emulation_type & EMULTYPE_SKIP) return EMULATE_FAIL; return handle_emulation_failure(vcpu); } } if (emulation_type & EMULTYPE_SKIP) { kvm_rip_write(vcpu, ctxt->_eip); if (ctxt->eflags & X86_EFLAGS_RF) kvm_set_rflags(vcpu, ctxt->eflags & ~X86_EFLAGS_RF); return EMULATE_DONE; } if (retry_instruction(ctxt, cr2, emulation_type)) return EMULATE_DONE; /* this is needed for vmware backdoor interface to work since it changes registers values during IO operation */ if (vcpu->arch.emulate_regs_need_sync_from_vcpu) { vcpu->arch.emulate_regs_need_sync_from_vcpu = false; emulator_invalidate_register_cache(ctxt); } restart: r = x86_emulate_insn(ctxt); if (r == EMULATION_INTERCEPTED) return EMULATE_DONE; if (r == EMULATION_FAILED) { if (reexecute_instruction(vcpu, cr2, write_fault_to_spt, emulation_type)) return EMULATE_DONE; return handle_emulation_failure(vcpu); } if (ctxt->have_exception) { r = EMULATE_DONE; if (inject_emulated_exception(vcpu)) return r; } else if (vcpu->arch.pio.count) { if (!vcpu->arch.pio.in) { /* FIXME: return into emulator if single-stepping. */ vcpu->arch.pio.count = 0; } else { writeback = false; vcpu->arch.complete_userspace_io = complete_emulated_pio; } r = EMULATE_USER_EXIT; } else if (vcpu->mmio_needed) { if (!vcpu->mmio_is_write) writeback = false; r = EMULATE_USER_EXIT; vcpu->arch.complete_userspace_io = complete_emulated_mmio; } else if (r == EMULATION_RESTART) goto restart; else r = EMULATE_DONE; if (writeback) { unsigned long rflags = kvm_x86_ops->get_rflags(vcpu); toggle_interruptibility(vcpu, ctxt->interruptibility); vcpu->arch.emulate_regs_need_sync_to_vcpu = false; kvm_rip_write(vcpu, ctxt->eip); if (r == EMULATE_DONE) kvm_vcpu_check_singlestep(vcpu, rflags, &r); __kvm_set_rflags(vcpu, ctxt->eflags); /* * For STI, interrupts are shadowed; so KVM_REQ_EVENT will * do nothing, and it will be requested again as soon as * the shadow expires. But we still need to check here, * because POPF has no interrupt shadow. 
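 * The check below requests event injection only when the emulated * instruction turned IF on while it was previously clear.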
*/ if (unlikely((ctxt->eflags & ~rflags) & X86_EFLAGS_IF)) kvm_make_request(KVM_REQ_EVENT, vcpu); } else vcpu->arch.emulate_regs_need_sync_to_vcpu = true; return r; } EXPORT_SYMBOL_GPL(x86_emulate_instruction); int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port) { unsigned long val = kvm_register_read(vcpu, VCPU_REGS_RAX); int ret = emulator_pio_out_emulated(&vcpu->arch.emulate_ctxt, size, port, &val, 1); /* do not return to emulator after return from userspace */ vcpu->arch.pio.count = 0; return ret; } EXPORT_SYMBOL_GPL(kvm_fast_pio_out); static void tsc_bad(void *info) { __this_cpu_write(cpu_tsc_khz, 0); } static void tsc_khz_changed(void *data) { struct cpufreq_freqs *freq = data; unsigned long khz = 0; if (data) khz = freq->new; else if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) khz = cpufreq_quick_get(raw_smp_processor_id()); if (!khz) khz = tsc_khz; __this_cpu_write(cpu_tsc_khz, khz); } static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data) { struct cpufreq_freqs *freq = data; struct kvm *kvm; struct kvm_vcpu *vcpu; int i, send_ipi = 0; /* * We allow guests to temporarily run on slowing clocks, * provided we notify them after, or to run on accelerating * clocks, provided we notify them before. Thus time never * goes backwards. * * However, we have a problem. We can't atomically update * the frequency of a given CPU from this function; it is * merely a notifier, which can be called from any CPU. * Changing the TSC frequency at arbitrary points in time * requires a recomputation of local variables related to * the TSC for each VCPU. We must flag these local variables * to be updated and be sure the update takes place with the * new frequency before any guests proceed. * * Unfortunately, the combination of hotplug CPU and frequency * change creates an intractable locking scenario; the order * of when these callouts happen is undefined with respect to * CPU hotplug, and they can race with each other. As such, * merely setting per_cpu(cpu_tsc_khz) = X during a hotadd is * undefined; you can actually have a CPU frequency change take * place in between the computation of X and the setting of the * variable. To protect against this problem, all updates of * the per_cpu tsc_khz variable are done in an interrupt * protected IPI, and all callers wishing to update the value * must wait for a synchronous IPI to complete (which is trivial * if the caller is on the CPU already). This establishes the * necessary total order on variable updates. * * Note that because a guest time update may take place * anytime after the setting of the VCPU's request bit, the * correct TSC value must be set before the request. However, * to ensure the update actually makes it to any guest which * starts running in hardware virtualization between the set * and the acquisition of the spinlock, we must also ping the * CPU after setting the request bit. * */ if (val == CPUFREQ_PRECHANGE && freq->old > freq->new) return 0; if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new) return 0; smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1); spin_lock(&kvm_lock); list_for_each_entry(kvm, &vm_list, vm_list) { kvm_for_each_vcpu(i, vcpu, kvm) { if (vcpu->cpu != freq->cpu) continue; kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); if (vcpu->cpu != smp_processor_id()) send_ipi = 1; } } spin_unlock(&kvm_lock); if (freq->old < freq->new && send_ipi) { /* * We upscale the frequency. 
Must make sure the guest * doesn't see old kvmclock values while running with * the new frequency, otherwise we risk the guest seeing * time go backwards. * * In case we update the frequency for another cpu * (which might be in guest context) send an interrupt * to kick the cpu out of guest context. Next time * guest context is entered kvmclock will be updated, * so the guest will not see stale values. */ smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1); } return 0; } static struct notifier_block kvmclock_cpufreq_notifier_block = { .notifier_call = kvmclock_cpufreq_notifier }; static int kvmclock_cpu_notifier(struct notifier_block *nfb, unsigned long action, void *hcpu) { unsigned int cpu = (unsigned long)hcpu; switch (action) { case CPU_ONLINE: case CPU_DOWN_FAILED: smp_call_function_single(cpu, tsc_khz_changed, NULL, 1); break; case CPU_DOWN_PREPARE: smp_call_function_single(cpu, tsc_bad, NULL, 1); break; } return NOTIFY_OK; } static struct notifier_block kvmclock_cpu_notifier_block = { .notifier_call = kvmclock_cpu_notifier, .priority = -INT_MAX }; static void kvm_timer_init(void) { int cpu; max_tsc_khz = tsc_khz; cpu_notifier_register_begin(); if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) { #ifdef CONFIG_CPU_FREQ struct cpufreq_policy policy; memset(&policy, 0, sizeof(policy)); cpu = get_cpu(); cpufreq_get_policy(&policy, cpu); if (policy.cpuinfo.max_freq) max_tsc_khz = policy.cpuinfo.max_freq; put_cpu(); #endif cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER); } pr_debug("kvm: max_tsc_khz = %ld\n", max_tsc_khz); for_each_online_cpu(cpu) smp_call_function_single(cpu, tsc_khz_changed, NULL, 1); __register_hotcpu_notifier(&kvmclock_cpu_notifier_block); cpu_notifier_register_done(); } static DEFINE_PER_CPU(struct kvm_vcpu *, current_vcpu); int kvm_is_in_guest(void) { return __this_cpu_read(current_vcpu) != NULL; } static int kvm_is_user_mode(void) { int user_mode = 3; if (__this_cpu_read(current_vcpu)) user_mode = kvm_x86_ops->get_cpl(__this_cpu_read(current_vcpu)); return user_mode != 0; } static unsigned long kvm_get_guest_ip(void) { unsigned long ip = 0; if (__this_cpu_read(current_vcpu)) ip = kvm_rip_read(__this_cpu_read(current_vcpu)); return ip; } static struct perf_guest_info_callbacks kvm_guest_cbs = { .is_in_guest = kvm_is_in_guest, .is_user_mode = kvm_is_user_mode, .get_guest_ip = kvm_get_guest_ip, }; void kvm_before_handle_nmi(struct kvm_vcpu *vcpu) { __this_cpu_write(current_vcpu, vcpu); } EXPORT_SYMBOL_GPL(kvm_before_handle_nmi); void kvm_after_handle_nmi(struct kvm_vcpu *vcpu) { __this_cpu_write(current_vcpu, NULL); } EXPORT_SYMBOL_GPL(kvm_after_handle_nmi); static void kvm_set_mmio_spte_mask(void) { u64 mask; int maxphyaddr = boot_cpu_data.x86_phys_bits; /* * Set the reserved bits and the present bit of a paging-structure * entry to generate a page fault with PFER.RSV = 1. */ /* Mask the reserved physical address bits. */ mask = rsvd_bits(maxphyaddr, 51); /* Bit 62 is always reserved for a 32-bit host. */ mask |= 0x3ull << 62; /* Set the present bit. */ mask |= 1ull; #ifdef CONFIG_X86_64 /* * If reserved bit is not supported, clear the present bit to disable * mmio page fault.
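 * With 52 physical address bits there is no reserved address bit left * to set, so the reserved-bit based MMIO fault is simply disabled.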
*/ if (maxphyaddr == 52) mask &= ~1ull; #endif kvm_mmu_set_mmio_spte_mask(mask); } #ifdef CONFIG_X86_64 static void pvclock_gtod_update_fn(struct work_struct *work) { struct kvm *kvm; struct kvm_vcpu *vcpu; int i; spin_lock(&kvm_lock); list_for_each_entry(kvm, &vm_list, vm_list) kvm_for_each_vcpu(i, vcpu, kvm) kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu); atomic_set(&kvm_guest_has_master_clock, 0); spin_unlock(&kvm_lock); } static DECLARE_WORK(pvclock_gtod_work, pvclock_gtod_update_fn); /* * Notification about pvclock gtod data update. */ static int pvclock_gtod_notify(struct notifier_block *nb, unsigned long unused, void *priv) { struct pvclock_gtod_data *gtod = &pvclock_gtod_data; struct timekeeper *tk = priv; update_pvclock_gtod(tk); /* disable master clock if host does not trust, or does not * use, TSC clocksource */ if (gtod->clock.vclock_mode != VCLOCK_TSC && atomic_read(&kvm_guest_has_master_clock) != 0) queue_work(system_long_wq, &pvclock_gtod_work); return 0; } static struct notifier_block pvclock_gtod_notifier = { .notifier_call = pvclock_gtod_notify, }; #endif int kvm_arch_init(void *opaque) { int r; struct kvm_x86_ops *ops = opaque; if (kvm_x86_ops) { printk(KERN_ERR "kvm: already loaded the other module\n"); r = -EEXIST; goto out; } if (!ops->cpu_has_kvm_support()) { printk(KERN_ERR "kvm: no hardware support\n"); r = -EOPNOTSUPP; goto out; } if (ops->disabled_by_bios()) { printk(KERN_ERR "kvm: disabled by bios\n"); r = -EOPNOTSUPP; goto out; } r = -ENOMEM; shared_msrs = alloc_percpu(struct kvm_shared_msrs); if (!shared_msrs) { printk(KERN_ERR "kvm: failed to allocate percpu kvm_shared_msrs\n"); goto out; } r = kvm_mmu_module_init(); if (r) goto out_free_percpu; kvm_set_mmio_spte_mask(); kvm_x86_ops = ops; kvm_init_msr_list(); kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK, PT_DIRTY_MASK, PT64_NX_MASK, 0); kvm_timer_init(); perf_register_guest_info_callbacks(&kvm_guest_cbs); if (cpu_has_xsave) host_xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK); kvm_lapic_init(); #ifdef CONFIG_X86_64 pvclock_gtod_register_notifier(&pvclock_gtod_notifier); #endif return 0; out_free_percpu: free_percpu(shared_msrs); out: return r; } void kvm_arch_exit(void) { perf_unregister_guest_info_callbacks(&kvm_guest_cbs); if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER); unregister_hotcpu_notifier(&kvmclock_cpu_notifier_block); #ifdef CONFIG_X86_64 pvclock_gtod_unregister_notifier(&pvclock_gtod_notifier); #endif kvm_x86_ops = NULL; kvm_mmu_module_exit(); free_percpu(shared_msrs); } int kvm_emulate_halt(struct kvm_vcpu *vcpu) { ++vcpu->stat.halt_exits; if (irqchip_in_kernel(vcpu->kvm)) { vcpu->arch.mp_state = KVM_MP_STATE_HALTED; return 1; } else { vcpu->run->exit_reason = KVM_EXIT_HLT; return 0; } } EXPORT_SYMBOL_GPL(kvm_emulate_halt); int kvm_hv_hypercall(struct kvm_vcpu *vcpu) { u64 param, ingpa, outgpa, ret; uint16_t code, rep_idx, rep_cnt, res = HV_STATUS_SUCCESS, rep_done = 0; bool fast, longmode; /* * hypercall generates UD from non zero cpl and real mode * per HYPER-V spec */ if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) { kvm_queue_exception(vcpu, UD_VECTOR); return 0; } longmode = is_64_bit_mode(vcpu); if (!longmode) { param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) | (kvm_register_read(vcpu, VCPU_REGS_RAX) & 0xffffffff); ingpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RBX) << 32) | (kvm_register_read(vcpu, VCPU_REGS_RCX) & 0xffffffff); outgpa = ((u64)kvm_register_read(vcpu, 
VCPU_REGS_RDI) << 32) | (kvm_register_read(vcpu, VCPU_REGS_RSI) & 0xffffffff); } #ifdef CONFIG_X86_64 else { param = kvm_register_read(vcpu, VCPU_REGS_RCX); ingpa = kvm_register_read(vcpu, VCPU_REGS_RDX); outgpa = kvm_register_read(vcpu, VCPU_REGS_R8); } #endif code = param & 0xffff; fast = (param >> 16) & 0x1; rep_cnt = (param >> 32) & 0xfff; rep_idx = (param >> 48) & 0xfff; trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa); switch (code) { case HV_X64_HV_NOTIFY_LONG_SPIN_WAIT: kvm_vcpu_on_spin(vcpu); break; default: res = HV_STATUS_INVALID_HYPERCALL_CODE; break; } ret = res | (((u64)rep_done & 0xfff) << 32); if (longmode) { kvm_register_write(vcpu, VCPU_REGS_RAX, ret); } else { kvm_register_write(vcpu, VCPU_REGS_RDX, ret >> 32); kvm_register_write(vcpu, VCPU_REGS_RAX, ret & 0xffffffff); } return 1; } /* * kvm_pv_kick_cpu_op: Kick a vcpu. * * @apicid - apicid of vcpu to be kicked. */ static void kvm_pv_kick_cpu_op(struct kvm *kvm, unsigned long flags, int apicid) { struct kvm_lapic_irq lapic_irq; lapic_irq.shorthand = 0; lapic_irq.dest_mode = 0; lapic_irq.dest_id = apicid; lapic_irq.delivery_mode = APIC_DM_REMRD; kvm_irq_delivery_to_apic(kvm, 0, &lapic_irq, NULL); } int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) { unsigned long nr, a0, a1, a2, a3, ret; int op_64_bit, r = 1; if (kvm_hv_hypercall_enabled(vcpu->kvm)) return kvm_hv_hypercall(vcpu); nr = kvm_register_read(vcpu, VCPU_REGS_RAX); a0 = kvm_register_read(vcpu, VCPU_REGS_RBX); a1 = kvm_register_read(vcpu, VCPU_REGS_RCX); a2 = kvm_register_read(vcpu, VCPU_REGS_RDX); a3 = kvm_register_read(vcpu, VCPU_REGS_RSI); trace_kvm_hypercall(nr, a0, a1, a2, a3); op_64_bit = is_64_bit_mode(vcpu); if (!op_64_bit) { nr &= 0xFFFFFFFF; a0 &= 0xFFFFFFFF; a1 &= 0xFFFFFFFF; a2 &= 0xFFFFFFFF; a3 &= 0xFFFFFFFF; } if (kvm_x86_ops->get_cpl(vcpu) != 0) { ret = -KVM_EPERM; goto out; } switch (nr) { case KVM_HC_VAPIC_POLL_IRQ: ret = 0; break; case KVM_HC_KICK_CPU: kvm_pv_kick_cpu_op(vcpu->kvm, a0, a1); ret = 0; break; default: ret = -KVM_ENOSYS; break; } out: if (!op_64_bit) ret = (u32)ret; kvm_register_write(vcpu, VCPU_REGS_RAX, ret); ++vcpu->stat.hypercalls; return r; } EXPORT_SYMBOL_GPL(kvm_emulate_hypercall); static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt) { struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); char instruction[3]; unsigned long rip = kvm_rip_read(vcpu); kvm_x86_ops->patch_hypercall(vcpu, instruction); return emulator_write_emulated(ctxt, rip, instruction, 3, NULL); } /* * Check if userspace requested an interrupt window, and that the * interrupt window is open. * * No need to exit to userspace if we already have an interrupt queued. 
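 *
 * "Open" here is a hedged shorthand for the conditions tested below:
 * userspace (no in-kernel irqchip) asked for the window via
 * run->request_interrupt_window, and kvm_arch_interrupt_allowed()
 * reports that an interrupt could be injected right now.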
*/ static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu) { return (!irqchip_in_kernel(vcpu->kvm) && !kvm_cpu_has_interrupt(vcpu) && vcpu->run->request_interrupt_window && kvm_arch_interrupt_allowed(vcpu)); } static void post_kvm_run_save(struct kvm_vcpu *vcpu) { struct kvm_run *kvm_run = vcpu->run; kvm_run->if_flag = (kvm_get_rflags(vcpu) & X86_EFLAGS_IF) != 0; kvm_run->cr8 = kvm_get_cr8(vcpu); kvm_run->apic_base = kvm_get_apic_base(vcpu); if (irqchip_in_kernel(vcpu->kvm)) kvm_run->ready_for_interrupt_injection = 1; else kvm_run->ready_for_interrupt_injection = kvm_arch_interrupt_allowed(vcpu) && !kvm_cpu_has_interrupt(vcpu) && !kvm_event_needs_reinjection(vcpu); } static void update_cr8_intercept(struct kvm_vcpu *vcpu) { int max_irr, tpr; if (!kvm_x86_ops->update_cr8_intercept) return; if (!vcpu->arch.apic) return; if (!vcpu->arch.apic->vapic_addr) max_irr = kvm_lapic_find_highest_irr(vcpu); else max_irr = -1; if (max_irr != -1) max_irr >>= 4; tpr = kvm_lapic_get_cr8(vcpu); kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr); } static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win) { int r; /* try to reinject previous events if any */ if (vcpu->arch.exception.pending) { trace_kvm_inj_exception(vcpu->arch.exception.nr, vcpu->arch.exception.has_error_code, vcpu->arch.exception.error_code); if (exception_type(vcpu->arch.exception.nr) == EXCPT_FAULT) __kvm_set_rflags(vcpu, kvm_get_rflags(vcpu) | X86_EFLAGS_RF); kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr, vcpu->arch.exception.has_error_code, vcpu->arch.exception.error_code, vcpu->arch.exception.reinject); return 0; } if (vcpu->arch.nmi_injected) { kvm_x86_ops->set_nmi(vcpu); return 0; } if (vcpu->arch.interrupt.pending) { kvm_x86_ops->set_irq(vcpu); return 0; } if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) { r = kvm_x86_ops->check_nested_events(vcpu, req_int_win); if (r != 0) return r; } /* try to inject new event if pending */ if (vcpu->arch.nmi_pending) { if (kvm_x86_ops->nmi_allowed(vcpu)) { --vcpu->arch.nmi_pending; vcpu->arch.nmi_injected = true; kvm_x86_ops->set_nmi(vcpu); } } else if (kvm_cpu_has_injectable_intr(vcpu)) { /* * Because interrupts can be injected asynchronously, we are * calling check_nested_events again here to avoid a race condition. * See https://lkml.org/lkml/2014/7/2/60 for discussion about this * proposal and current concerns. Perhaps we should be setting * KVM_REQ_EVENT only on certain events and not unconditionally? */ if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) { r = kvm_x86_ops->check_nested_events(vcpu, req_int_win); if (r != 0) return r; } if (kvm_x86_ops->interrupt_allowed(vcpu)) { kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu), false); kvm_x86_ops->set_irq(vcpu); } } return 0; } static void process_nmi(struct kvm_vcpu *vcpu) { unsigned limit = 2; /* * x86 is limited to one NMI running, and one NMI pending after it. * If an NMI is already in progress, limit further NMIs to just one. * Otherwise, allow two (and we'll inject the first one immediately). 
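 *
 * A hedged illustration with made-up numbers: if nmi_queued is 3 and
 * nmi_pending was 0, the atomic_xchg() below drains all three into
 * nmi_pending; min() then clamps it to 2 when no NMI is in flight, or
 * to 1 when one is already masked or injected.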
*/ if (kvm_x86_ops->get_nmi_mask(vcpu) || vcpu->arch.nmi_injected) limit = 1; vcpu->arch.nmi_pending += atomic_xchg(&vcpu->arch.nmi_queued, 0); vcpu->arch.nmi_pending = min(vcpu->arch.nmi_pending, limit); kvm_make_request(KVM_REQ_EVENT, vcpu); } static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu) { u64 eoi_exit_bitmap[4]; u32 tmr[8]; if (!kvm_apic_hw_enabled(vcpu->arch.apic)) return; memset(eoi_exit_bitmap, 0, 32); memset(tmr, 0, 32); kvm_ioapic_scan_entry(vcpu, eoi_exit_bitmap, tmr); kvm_x86_ops->load_eoi_exitmap(vcpu, eoi_exit_bitmap); kvm_apic_update_tmr(vcpu, tmr); } /* * Returns 1 to let __vcpu_run() continue the guest execution loop without * exiting to the userspace. Otherwise, the value will be returned to the * userspace. */ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) { int r; bool req_int_win = !irqchip_in_kernel(vcpu->kvm) && vcpu->run->request_interrupt_window; bool req_immediate_exit = false; if (vcpu->requests) { if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) kvm_mmu_unload(vcpu); if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu)) __kvm_migrate_timers(vcpu); if (kvm_check_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu)) kvm_gen_update_masterclock(vcpu->kvm); if (kvm_check_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu)) kvm_gen_kvmclock_update(vcpu); if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu)) { r = kvm_guest_time_update(vcpu); if (unlikely(r)) goto out; } if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu)) kvm_mmu_sync_roots(vcpu); if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) kvm_x86_ops->tlb_flush(vcpu); if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) { vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS; r = 0; goto out; } if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) { vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN; r = 0; goto out; } if (kvm_check_request(KVM_REQ_DEACTIVATE_FPU, vcpu)) { vcpu->fpu_active = 0; kvm_x86_ops->fpu_deactivate(vcpu); } if (kvm_check_request(KVM_REQ_APF_HALT, vcpu)) { /* Page is swapped out. Do synthetic halt */ vcpu->arch.apf.halted = true; r = 1; goto out; } if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu)) record_steal_time(vcpu); if (kvm_check_request(KVM_REQ_NMI, vcpu)) process_nmi(vcpu); if (kvm_check_request(KVM_REQ_PMU, vcpu)) kvm_handle_pmu_event(vcpu); if (kvm_check_request(KVM_REQ_PMI, vcpu)) kvm_deliver_pmi(vcpu); if (kvm_check_request(KVM_REQ_SCAN_IOAPIC, vcpu)) vcpu_scan_ioapic(vcpu); } if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) { kvm_apic_accept_events(vcpu); if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) { r = 1; goto out; } if (inject_pending_event(vcpu, req_int_win) != 0) req_immediate_exit = true; /* enable NMI/IRQ window open exits if needed */ else if (vcpu->arch.nmi_pending) kvm_x86_ops->enable_nmi_window(vcpu); else if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win) kvm_x86_ops->enable_irq_window(vcpu); if (kvm_lapic_enabled(vcpu)) { /* * Update architecture specific hints for APIC * virtual interrupt delivery. */ if (kvm_x86_ops->hwapic_irr_update) kvm_x86_ops->hwapic_irr_update(vcpu, kvm_lapic_find_highest_irr(vcpu)); update_cr8_intercept(vcpu); kvm_lapic_sync_to_vapic(vcpu); } } r = kvm_mmu_reload(vcpu); if (unlikely(r)) { goto cancel_injection; } preempt_disable(); kvm_x86_ops->prepare_guest_switch(vcpu); if (vcpu->fpu_active) kvm_load_guest_fpu(vcpu); kvm_load_guest_xcr0(vcpu); vcpu->mode = IN_GUEST_MODE; srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); /* We should set ->mode before check ->requests, * see the comment in make_all_cpus_request. 
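 *
 * Loosely, as a summary rather than a quote of that comment: the
 * smp_mb__after_srcu_read_unlock() below is intended to pair with the
 * barrier in the request path, so that either this CPU observes a
 * freshly posted request in the check that follows, or the requester
 * observes IN_GUEST_MODE and sends a kick IPI.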
*/ smp_mb__after_srcu_read_unlock(); local_irq_disable(); if (vcpu->mode == EXITING_GUEST_MODE || vcpu->requests || need_resched() || signal_pending(current)) { vcpu->mode = OUTSIDE_GUEST_MODE; smp_wmb(); local_irq_enable(); preempt_enable(); vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); r = 1; goto cancel_injection; } if (req_immediate_exit) smp_send_reschedule(vcpu->cpu); kvm_guest_enter(); if (unlikely(vcpu->arch.switch_db_regs)) { set_debugreg(0, 7); set_debugreg(vcpu->arch.eff_db[0], 0); set_debugreg(vcpu->arch.eff_db[1], 1); set_debugreg(vcpu->arch.eff_db[2], 2); set_debugreg(vcpu->arch.eff_db[3], 3); set_debugreg(vcpu->arch.dr6, 6); } trace_kvm_entry(vcpu->vcpu_id); kvm_x86_ops->run(vcpu); /* * Do this here before restoring debug registers on the host. And * since we do this before handling the vmexit, a DR access vmexit * can (a) read the correct value of the debug registers, (b) set * KVM_DEBUGREG_WONT_EXIT again. */ if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) { int i; WARN_ON(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP); kvm_x86_ops->sync_dirty_debug_regs(vcpu); for (i = 0; i < KVM_NR_DB_REGS; i++) vcpu->arch.eff_db[i] = vcpu->arch.db[i]; } /* * If the guest has used debug registers, at least dr7 * will be disabled while returning to the host. * If we don't have active breakpoints in the host, we don't * care about the messed up debug address registers. But if * we have some of them active, restore the old state. */ if (hw_breakpoint_active()) hw_breakpoint_restore(); vcpu->arch.last_guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, native_read_tsc()); vcpu->mode = OUTSIDE_GUEST_MODE; smp_wmb(); /* Interrupt is enabled by handle_external_intr() */ kvm_x86_ops->handle_external_intr(vcpu); ++vcpu->stat.exits; /* * We must have an instruction between local_irq_enable() and * kvm_guest_exit(), so the timer interrupt isn't delayed by * the interrupt shadow. The stat.exits increment will do nicely. 
* But we need to prevent reordering, hence this barrier(): */ barrier(); kvm_guest_exit(); preempt_enable(); vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); /* * Profile KVM exit RIPs: */ if (unlikely(prof_on == KVM_PROFILING)) { unsigned long rip = kvm_rip_read(vcpu); profile_hit(KVM_PROFILING, (void *)rip); } if (unlikely(vcpu->arch.tsc_always_catchup)) kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); if (vcpu->arch.apic_attention) kvm_lapic_sync_from_vapic(vcpu); r = kvm_x86_ops->handle_exit(vcpu); return r; cancel_injection: kvm_x86_ops->cancel_injection(vcpu); if (unlikely(vcpu->arch.apic_attention)) kvm_lapic_sync_from_vapic(vcpu); out: return r; } static int __vcpu_run(struct kvm_vcpu *vcpu) { int r; struct kvm *kvm = vcpu->kvm; vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); r = 1; while (r > 0) { if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE && !vcpu->arch.apf.halted) r = vcpu_enter_guest(vcpu); else { srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); kvm_vcpu_block(vcpu); vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) { kvm_apic_accept_events(vcpu); switch(vcpu->arch.mp_state) { case KVM_MP_STATE_HALTED: vcpu->arch.pv.pv_unhalted = false; vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; case KVM_MP_STATE_RUNNABLE: vcpu->arch.apf.halted = false; break; case KVM_MP_STATE_INIT_RECEIVED: break; default: r = -EINTR; break; } } } if (r <= 0) break; clear_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests); if (kvm_cpu_has_pending_timer(vcpu)) kvm_inject_pending_timer_irqs(vcpu); if (dm_request_for_irq_injection(vcpu)) { r = -EINTR; vcpu->run->exit_reason = KVM_EXIT_INTR; ++vcpu->stat.request_irq_exits; } kvm_check_async_pf_completion(vcpu); if (signal_pending(current)) { r = -EINTR; vcpu->run->exit_reason = KVM_EXIT_INTR; ++vcpu->stat.signal_exits; } if (need_resched()) { srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); cond_resched(); vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); } } srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); return r; } static inline int complete_emulated_io(struct kvm_vcpu *vcpu) { int r; vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); r = emulate_instruction(vcpu, EMULTYPE_NO_DECODE); srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); if (r != EMULATE_DONE) return 0; return 1; } static int complete_emulated_pio(struct kvm_vcpu *vcpu) { BUG_ON(!vcpu->arch.pio.count); return complete_emulated_io(vcpu); } /* * Implements the following, as a state machine: * * read: * for each fragment * for each mmio piece in the fragment * write gpa, len * exit * copy data * execute insn * * write: * for each fragment * for each mmio piece in the fragment * write gpa, len * copy data * exit */ static int complete_emulated_mmio(struct kvm_vcpu *vcpu) { struct kvm_run *run = vcpu->run; struct kvm_mmio_fragment *frag; unsigned len; BUG_ON(!vcpu->mmio_needed); /* Complete previous fragment */ frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment]; len = min(8u, frag->len); if (!vcpu->mmio_is_write) memcpy(frag->data, run->mmio.data, len); if (frag->len <= 8) { /* Switch to the next fragment. */ frag++; vcpu->mmio_cur_fragment++; } else { /* Go forward to the next mmio piece. */ frag->data += len; frag->gpa += len; frag->len -= len; } if (vcpu->mmio_cur_fragment >= vcpu->mmio_nr_fragments) { vcpu->mmio_needed = 0; /* FIXME: return into emulator if single-stepping. 
*/ if (vcpu->mmio_is_write) return 1; vcpu->mmio_read_completed = 1; return complete_emulated_io(vcpu); } run->exit_reason = KVM_EXIT_MMIO; run->mmio.phys_addr = frag->gpa; if (vcpu->mmio_is_write) memcpy(run->mmio.data, frag->data, min(8u, frag->len)); run->mmio.len = min(8u, frag->len); run->mmio.is_write = vcpu->mmio_is_write; vcpu->arch.complete_userspace_io = complete_emulated_mmio; return 0; } int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { int r; sigset_t sigsaved; if (!tsk_used_math(current) && init_fpu(current)) return -ENOMEM; if (vcpu->sigset_active) sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) { kvm_vcpu_block(vcpu); kvm_apic_accept_events(vcpu); clear_bit(KVM_REQ_UNHALT, &vcpu->requests); r = -EAGAIN; goto out; } /* re-sync apic's tpr */ if (!irqchip_in_kernel(vcpu->kvm)) { if (kvm_set_cr8(vcpu, kvm_run->cr8) != 0) { r = -EINVAL; goto out; } } if (unlikely(vcpu->arch.complete_userspace_io)) { int (*cui)(struct kvm_vcpu *) = vcpu->arch.complete_userspace_io; vcpu->arch.complete_userspace_io = NULL; r = cui(vcpu); if (r <= 0) goto out; } else WARN_ON(vcpu->arch.pio.count || vcpu->mmio_needed); r = __vcpu_run(vcpu); out: post_kvm_run_save(vcpu); if (vcpu->sigset_active) sigprocmask(SIG_SETMASK, &sigsaved, NULL); return r; } int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) { if (vcpu->arch.emulate_regs_need_sync_to_vcpu) { /* * We are here if userspace calls get_regs() in the middle of * instruction emulation. Registers state needs to be copied * back from emulation context to vcpu. Userspace shouldn't do * that usually, but some bad designed PV devices (vmware * backdoor interface) need this to work */ emulator_writeback_register_cache(&vcpu->arch.emulate_ctxt); vcpu->arch.emulate_regs_need_sync_to_vcpu = false; } regs->rax = kvm_register_read(vcpu, VCPU_REGS_RAX); regs->rbx = kvm_register_read(vcpu, VCPU_REGS_RBX); regs->rcx = kvm_register_read(vcpu, VCPU_REGS_RCX); regs->rdx = kvm_register_read(vcpu, VCPU_REGS_RDX); regs->rsi = kvm_register_read(vcpu, VCPU_REGS_RSI); regs->rdi = kvm_register_read(vcpu, VCPU_REGS_RDI); regs->rsp = kvm_register_read(vcpu, VCPU_REGS_RSP); regs->rbp = kvm_register_read(vcpu, VCPU_REGS_RBP); #ifdef CONFIG_X86_64 regs->r8 = kvm_register_read(vcpu, VCPU_REGS_R8); regs->r9 = kvm_register_read(vcpu, VCPU_REGS_R9); regs->r10 = kvm_register_read(vcpu, VCPU_REGS_R10); regs->r11 = kvm_register_read(vcpu, VCPU_REGS_R11); regs->r12 = kvm_register_read(vcpu, VCPU_REGS_R12); regs->r13 = kvm_register_read(vcpu, VCPU_REGS_R13); regs->r14 = kvm_register_read(vcpu, VCPU_REGS_R14); regs->r15 = kvm_register_read(vcpu, VCPU_REGS_R15); #endif regs->rip = kvm_rip_read(vcpu); regs->rflags = kvm_get_rflags(vcpu); return 0; } int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) { vcpu->arch.emulate_regs_need_sync_from_vcpu = true; vcpu->arch.emulate_regs_need_sync_to_vcpu = false; kvm_register_write(vcpu, VCPU_REGS_RAX, regs->rax); kvm_register_write(vcpu, VCPU_REGS_RBX, regs->rbx); kvm_register_write(vcpu, VCPU_REGS_RCX, regs->rcx); kvm_register_write(vcpu, VCPU_REGS_RDX, regs->rdx); kvm_register_write(vcpu, VCPU_REGS_RSI, regs->rsi); kvm_register_write(vcpu, VCPU_REGS_RDI, regs->rdi); kvm_register_write(vcpu, VCPU_REGS_RSP, regs->rsp); kvm_register_write(vcpu, VCPU_REGS_RBP, regs->rbp); #ifdef CONFIG_X86_64 kvm_register_write(vcpu, VCPU_REGS_R8, regs->r8); kvm_register_write(vcpu, VCPU_REGS_R9, regs->r9); 
kvm_register_write(vcpu, VCPU_REGS_R10, regs->r10); kvm_register_write(vcpu, VCPU_REGS_R11, regs->r11); kvm_register_write(vcpu, VCPU_REGS_R12, regs->r12); kvm_register_write(vcpu, VCPU_REGS_R13, regs->r13); kvm_register_write(vcpu, VCPU_REGS_R14, regs->r14); kvm_register_write(vcpu, VCPU_REGS_R15, regs->r15); #endif kvm_rip_write(vcpu, regs->rip); kvm_set_rflags(vcpu, regs->rflags); vcpu->arch.exception.pending = false; kvm_make_request(KVM_REQ_EVENT, vcpu); return 0; } void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l) { struct kvm_segment cs; kvm_get_segment(vcpu, &cs, VCPU_SREG_CS); *db = cs.db; *l = cs.l; } EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits); int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { struct desc_ptr dt; kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS); kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS); kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES); kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS); kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS); kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS); kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR); kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR); kvm_x86_ops->get_idt(vcpu, &dt); sregs->idt.limit = dt.size; sregs->idt.base = dt.address; kvm_x86_ops->get_gdt(vcpu, &dt); sregs->gdt.limit = dt.size; sregs->gdt.base = dt.address; sregs->cr0 = kvm_read_cr0(vcpu); sregs->cr2 = vcpu->arch.cr2; sregs->cr3 = kvm_read_cr3(vcpu); sregs->cr4 = kvm_read_cr4(vcpu); sregs->cr8 = kvm_get_cr8(vcpu); sregs->efer = vcpu->arch.efer; sregs->apic_base = kvm_get_apic_base(vcpu); memset(sregs->interrupt_bitmap, 0, sizeof sregs->interrupt_bitmap); if (vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft) set_bit(vcpu->arch.interrupt.nr, (unsigned long *)sregs->interrupt_bitmap); return 0; } int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, struct kvm_mp_state *mp_state) { kvm_apic_accept_events(vcpu); if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED && vcpu->arch.pv.pv_unhalted) mp_state->mp_state = KVM_MP_STATE_RUNNABLE; else mp_state->mp_state = vcpu->arch.mp_state; return 0; } int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, struct kvm_mp_state *mp_state) { if (!kvm_vcpu_has_lapic(vcpu) && mp_state->mp_state != KVM_MP_STATE_RUNNABLE) return -EINVAL; if (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED) { vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED; set_bit(KVM_APIC_SIPI, &vcpu->arch.apic->pending_events); } else vcpu->arch.mp_state = mp_state->mp_state; kvm_make_request(KVM_REQ_EVENT, vcpu); return 0; } int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index, int reason, bool has_error_code, u32 error_code) { struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; int ret; init_emulate_ctxt(vcpu); ret = emulator_task_switch(ctxt, tss_selector, idt_index, reason, has_error_code, error_code); if (ret) return EMULATE_FAIL; kvm_rip_write(vcpu, ctxt->eip); kvm_set_rflags(vcpu, ctxt->eflags); kvm_make_request(KVM_REQ_EVENT, vcpu); return EMULATE_DONE; } EXPORT_SYMBOL_GPL(kvm_task_switch); int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { struct msr_data apic_base_msr; int mmu_reset_needed = 0; int pending_vec, max_bits, idx; struct desc_ptr dt; if (!guest_cpuid_has_xsave(vcpu) && (sregs->cr4 & X86_CR4_OSXSAVE)) return -EINVAL; dt.size = sregs->idt.limit; dt.address = sregs->idt.base; kvm_x86_ops->set_idt(vcpu, &dt); dt.size = sregs->gdt.limit; dt.address = sregs->gdt.base; kvm_x86_ops->set_gdt(vcpu, &dt); vcpu->arch.cr2 = 
sregs->cr2; mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3; vcpu->arch.cr3 = sregs->cr3; __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail); kvm_set_cr8(vcpu, sregs->cr8); mmu_reset_needed |= vcpu->arch.efer != sregs->efer; kvm_x86_ops->set_efer(vcpu, sregs->efer); apic_base_msr.data = sregs->apic_base; apic_base_msr.host_initiated = true; kvm_set_apic_base(vcpu, &apic_base_msr); mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0; kvm_x86_ops->set_cr0(vcpu, sregs->cr0); vcpu->arch.cr0 = sregs->cr0; mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4; kvm_x86_ops->set_cr4(vcpu, sregs->cr4); if (sregs->cr4 & X86_CR4_OSXSAVE) kvm_update_cpuid(vcpu); idx = srcu_read_lock(&vcpu->kvm->srcu); if (!is_long_mode(vcpu) && is_pae(vcpu)) { load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu)); mmu_reset_needed = 1; } srcu_read_unlock(&vcpu->kvm->srcu, idx); if (mmu_reset_needed) kvm_mmu_reset_context(vcpu); max_bits = KVM_NR_INTERRUPTS; pending_vec = find_first_bit( (const unsigned long *)sregs->interrupt_bitmap, max_bits); if (pending_vec < max_bits) { kvm_queue_interrupt(vcpu, pending_vec, false); pr_debug("Set back pending irq %d\n", pending_vec); } kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS); kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS); kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES); kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS); kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS); kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS); kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR); kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR); update_cr8_intercept(vcpu); /* Older userspace won't unhalt the vcpu on reset. */ if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 && sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 && !is_protmode(vcpu)) vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; kvm_make_request(KVM_REQ_EVENT, vcpu); return 0; } int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg) { unsigned long rflags; int i, r; if (dbg->control & (KVM_GUESTDBG_INJECT_DB | KVM_GUESTDBG_INJECT_BP)) { r = -EBUSY; if (vcpu->arch.exception.pending) goto out; if (dbg->control & KVM_GUESTDBG_INJECT_DB) kvm_queue_exception(vcpu, DB_VECTOR); else kvm_queue_exception(vcpu, BP_VECTOR); } /* * Read rflags as long as potentially injected trace flags are still * filtered out. */ rflags = kvm_get_rflags(vcpu); vcpu->guest_debug = dbg->control; if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE)) vcpu->guest_debug = 0; if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) { for (i = 0; i < KVM_NR_DB_REGS; ++i) vcpu->arch.eff_db[i] = dbg->arch.debugreg[i]; vcpu->arch.guest_debug_dr7 = dbg->arch.debugreg[7]; } else { for (i = 0; i < KVM_NR_DB_REGS; i++) vcpu->arch.eff_db[i] = vcpu->arch.db[i]; } kvm_update_dr7(vcpu); if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) vcpu->arch.singlestep_rip = kvm_rip_read(vcpu) + get_segment_base(vcpu, VCPU_SREG_CS); /* * Trigger an rflags update that will inject or remove the trace * flags. */ kvm_set_rflags(vcpu, rflags); kvm_x86_ops->update_db_bp_intercept(vcpu); r = 0; out: return r; } /* * Translate a guest virtual address to a guest physical address. 
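 *
 * A minimal userspace sketch (hedged: assumes a vcpu fd obtained from
 * KVM_CREATE_VCPU and a caller-supplied gva; error handling omitted):
 *
 *   struct kvm_translation tr = { .linear_address = gva };
 *   if (ioctl(vcpu_fd, KVM_TRANSLATE, &tr) == 0 && tr.valid)
 *       printf("gva %#llx -> gpa %#llx\n", gva, tr.physical_address);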
*/ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, struct kvm_translation *tr) { unsigned long vaddr = tr->linear_address; gpa_t gpa; int idx; idx = srcu_read_lock(&vcpu->kvm->srcu); gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL); srcu_read_unlock(&vcpu->kvm->srcu, idx); tr->physical_address = gpa; tr->valid = gpa != UNMAPPED_GVA; tr->writeable = 1; tr->usermode = 0; return 0; } int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) { struct i387_fxsave_struct *fxsave = &vcpu->arch.guest_fpu.state->fxsave; memcpy(fpu->fpr, fxsave->st_space, 128); fpu->fcw = fxsave->cwd; fpu->fsw = fxsave->swd; fpu->ftwx = fxsave->twd; fpu->last_opcode = fxsave->fop; fpu->last_ip = fxsave->rip; fpu->last_dp = fxsave->rdp; memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space); return 0; } int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) { struct i387_fxsave_struct *fxsave = &vcpu->arch.guest_fpu.state->fxsave; memcpy(fxsave->st_space, fpu->fpr, 128); fxsave->cwd = fpu->fcw; fxsave->swd = fpu->fsw; fxsave->twd = fpu->ftwx; fxsave->fop = fpu->last_opcode; fxsave->rip = fpu->last_ip; fxsave->rdp = fpu->last_dp; memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space); return 0; } int fx_init(struct kvm_vcpu *vcpu) { int err; err = fpu_alloc(&vcpu->arch.guest_fpu); if (err) return err; fpu_finit(&vcpu->arch.guest_fpu); /* * Ensure guest xcr0 is valid for loading */ vcpu->arch.xcr0 = XSTATE_FP; vcpu->arch.cr0 |= X86_CR0_ET; return 0; } EXPORT_SYMBOL_GPL(fx_init); static void fx_free(struct kvm_vcpu *vcpu) { fpu_free(&vcpu->arch.guest_fpu); } void kvm_load_guest_fpu(struct kvm_vcpu *vcpu) { if (vcpu->guest_fpu_loaded) return; /* * Restore all possible states in the guest, * and assume host would use all available bits. * Guest xcr0 would be loaded later. 
*/ kvm_put_guest_xcr0(vcpu); vcpu->guest_fpu_loaded = 1; __kernel_fpu_begin(); fpu_restore_checking(&vcpu->arch.guest_fpu); trace_kvm_fpu(1); } void kvm_put_guest_fpu(struct kvm_vcpu *vcpu) { kvm_put_guest_xcr0(vcpu); if (!vcpu->guest_fpu_loaded) return; vcpu->guest_fpu_loaded = 0; fpu_save_init(&vcpu->arch.guest_fpu); __kernel_fpu_end(); ++vcpu->stat.fpu_reload; kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu); trace_kvm_fpu(0); } void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) { kvmclock_reset(vcpu); free_cpumask_var(vcpu->arch.wbinvd_dirty_mask); fx_free(vcpu); kvm_x86_ops->vcpu_free(vcpu); } struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) { if (check_tsc_unstable() && atomic_read(&kvm->online_vcpus) != 0) printk_once(KERN_WARNING "kvm: SMP vm created on host with unstable TSC; " "guest TSC will not be reliable\n"); return kvm_x86_ops->vcpu_create(kvm, id); } int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) { int r; vcpu->arch.mtrr_state.have_fixed = 1; r = vcpu_load(vcpu); if (r) return r; kvm_vcpu_reset(vcpu); kvm_mmu_setup(vcpu); vcpu_put(vcpu); return r; } int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) { int r; struct msr_data msr; struct kvm *kvm = vcpu->kvm; r = vcpu_load(vcpu); if (r) return r; msr.data = 0x0; msr.index = MSR_IA32_TSC; msr.host_initiated = true; kvm_write_tsc(vcpu, &msr); vcpu_put(vcpu); schedule_delayed_work(&kvm->arch.kvmclock_sync_work, KVMCLOCK_SYNC_PERIOD); return r; } void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) { int r; vcpu->arch.apf.msr_val = 0; r = vcpu_load(vcpu); BUG_ON(r); kvm_mmu_unload(vcpu); vcpu_put(vcpu); fx_free(vcpu); kvm_x86_ops->vcpu_free(vcpu); } void kvm_vcpu_reset(struct kvm_vcpu *vcpu) { atomic_set(&vcpu->arch.nmi_queued, 0); vcpu->arch.nmi_pending = 0; vcpu->arch.nmi_injected = false; kvm_clear_interrupt_queue(vcpu); kvm_clear_exception_queue(vcpu); memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db)); vcpu->arch.dr6 = DR6_INIT; kvm_update_dr6(vcpu); vcpu->arch.dr7 = DR7_FIXED_1; kvm_update_dr7(vcpu); kvm_make_request(KVM_REQ_EVENT, vcpu); vcpu->arch.apf.msr_val = 0; vcpu->arch.st.msr_val = 0; kvmclock_reset(vcpu); kvm_clear_async_pf_completion_queue(vcpu); kvm_async_pf_hash_reset(vcpu); vcpu->arch.apf.halted = false; kvm_pmu_reset(vcpu); memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs)); vcpu->arch.regs_avail = ~0; vcpu->arch.regs_dirty = ~0; kvm_x86_ops->vcpu_reset(vcpu); } void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, unsigned int vector) { struct kvm_segment cs; kvm_get_segment(vcpu, &cs, VCPU_SREG_CS); cs.selector = vector << 8; cs.base = vector << 12; kvm_set_segment(vcpu, &cs, VCPU_SREG_CS); kvm_rip_write(vcpu, 0); } int kvm_arch_hardware_enable(void) { struct kvm *kvm; struct kvm_vcpu *vcpu; int i; int ret; u64 local_tsc; u64 max_tsc = 0; bool stable, backwards_tsc = false; kvm_shared_msr_cpu_online(); ret = kvm_x86_ops->hardware_enable(); if (ret != 0) return ret; local_tsc = native_read_tsc(); stable = !check_tsc_unstable(); list_for_each_entry(kvm, &vm_list, vm_list) { kvm_for_each_vcpu(i, vcpu, kvm) { if (!stable && vcpu->cpu == smp_processor_id()) kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); if (stable && vcpu->arch.last_host_tsc > local_tsc) { backwards_tsc = true; if (vcpu->arch.last_host_tsc > max_tsc) max_tsc = vcpu->arch.last_host_tsc; } } } /* * Sometimes, even reliable TSCs go backwards. This happens on * platforms that reset TSC during suspend or hibernate actions, but * maintain synchronization. We must compensate. 
Fortunately, we can * detect that condition here, which happens early in CPU bringup, * before any KVM threads can be running. Unfortunately, we can't * bring the TSCs fully up to date with real time, as we aren't yet far * enough into CPU bringup that we know how much real time has actually * elapsed; our helper function, get_kernel_ns() will be using boot * variables that haven't been updated yet. * * So we simply find the maximum observed TSC above, then record the * adjustment to TSC in each VCPU. When the VCPU later gets loaded, * the adjustment will be applied. Note that we accumulate * adjustments, in case multiple suspend cycles happen before some VCPU * gets a chance to run again. In the event that no KVM threads get a * chance to run, we will miss the entire elapsed period, as we'll have * reset last_host_tsc, so VCPUs will not have the TSC adjusted and may * loose cycle time. This isn't too big a deal, since the loss will be * uniform across all VCPUs (not to mention the scenario is extremely * unlikely). It is possible that a second hibernate recovery happens * much faster than a first, causing the observed TSC here to be * smaller; this would require additional padding adjustment, which is * why we set last_host_tsc to the local tsc observed here. * * N.B. - this code below runs only on platforms with reliable TSC, * as that is the only way backwards_tsc is set above. Also note * that this runs for ALL vcpus, which is not a bug; all VCPUs should * have the same delta_cyc adjustment applied if backwards_tsc * is detected. Note further, this adjustment is only done once, * as we reset last_host_tsc on all VCPUs to stop this from being * called multiple times (one for each physical CPU bringup). * * Platforms with unreliable TSCs don't have to deal with this, they * will be compensated by the logic in vcpu_load, which sets the TSC to * catchup mode. This will catchup all VCPUs to real time, but cannot * guarantee that they stay in perfect synchronization. */ if (backwards_tsc) { u64 delta_cyc = max_tsc - local_tsc; backwards_tsc_observed = true; list_for_each_entry(kvm, &vm_list, vm_list) { kvm_for_each_vcpu(i, vcpu, kvm) { vcpu->arch.tsc_offset_adjustment += delta_cyc; vcpu->arch.last_host_tsc = local_tsc; kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu); } /* * We have to disable TSC offset matching.. if you were * booting a VM while issuing an S4 host suspend.... * you may have some problem. Solving this issue is * left as an exercise to the reader. 
*/ kvm->arch.last_tsc_nsec = 0; kvm->arch.last_tsc_write = 0; } } return 0; } void kvm_arch_hardware_disable(void) { kvm_x86_ops->hardware_disable(); drop_user_return_notifiers(); } int kvm_arch_hardware_setup(void) { return kvm_x86_ops->hardware_setup(); } void kvm_arch_hardware_unsetup(void) { kvm_x86_ops->hardware_unsetup(); } void kvm_arch_check_processor_compat(void *rtn) { kvm_x86_ops->check_processor_compatibility(rtn); } bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) { return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL); } struct static_key kvm_no_apic_vcpu __read_mostly; int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) { struct page *page; struct kvm *kvm; int r; BUG_ON(vcpu->kvm == NULL); kvm = vcpu->kvm; vcpu->arch.pv.pv_unhalted = false; vcpu->arch.emulate_ctxt.ops = &emulate_ops; if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_bsp(vcpu)) vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; else vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED; page = alloc_page(GFP_KERNEL | __GFP_ZERO); if (!page) { r = -ENOMEM; goto fail; } vcpu->arch.pio_data = page_address(page); kvm_set_tsc_khz(vcpu, max_tsc_khz); r = kvm_mmu_create(vcpu); if (r < 0) goto fail_free_pio_data; if (irqchip_in_kernel(kvm)) { r = kvm_create_lapic(vcpu); if (r < 0) goto fail_mmu_destroy; } else static_key_slow_inc(&kvm_no_apic_vcpu); vcpu->arch.mce_banks = kzalloc(KVM_MAX_MCE_BANKS * sizeof(u64) * 4, GFP_KERNEL); if (!vcpu->arch.mce_banks) { r = -ENOMEM; goto fail_free_lapic; } vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS; if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, GFP_KERNEL)) { r = -ENOMEM; goto fail_free_mce_banks; } r = fx_init(vcpu); if (r) goto fail_free_wbinvd_dirty_mask; vcpu->arch.ia32_tsc_adjust_msr = 0x0; vcpu->arch.pv_time_enabled = false; vcpu->arch.guest_supported_xcr0 = 0; vcpu->arch.guest_xstate_size = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET; kvm_async_pf_hash_reset(vcpu); kvm_pmu_init(vcpu); return 0; fail_free_wbinvd_dirty_mask: free_cpumask_var(vcpu->arch.wbinvd_dirty_mask); fail_free_mce_banks: kfree(vcpu->arch.mce_banks); fail_free_lapic: kvm_free_lapic(vcpu); fail_mmu_destroy: kvm_mmu_destroy(vcpu); fail_free_pio_data: free_page((unsigned long)vcpu->arch.pio_data); fail: return r; } void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) { int idx; kvm_pmu_destroy(vcpu); kfree(vcpu->arch.mce_banks); kvm_free_lapic(vcpu); idx = srcu_read_lock(&vcpu->kvm->srcu); kvm_mmu_destroy(vcpu); srcu_read_unlock(&vcpu->kvm->srcu, idx); free_page((unsigned long)vcpu->arch.pio_data); if (!irqchip_in_kernel(vcpu->kvm)) static_key_slow_dec(&kvm_no_apic_vcpu); } void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) { kvm_x86_ops->sched_in(vcpu, cpu); } int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) { if (type) return -EINVAL; INIT_LIST_HEAD(&kvm->arch.active_mmu_pages); INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages); INIT_LIST_HEAD(&kvm->arch.assigned_dev_head); atomic_set(&kvm->arch.noncoherent_dma_count, 0); /* Reserve bit 0 of irq_sources_bitmap for userspace irq source */ set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap); /* Reserve bit 1 of irq_sources_bitmap for irqfd-resampler */ set_bit(KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap); raw_spin_lock_init(&kvm->arch.tsc_write_lock); mutex_init(&kvm->arch.apic_map_lock); spin_lock_init(&kvm->arch.pvclock_gtod_sync_lock); pvclock_update_vm_gtod_copy(kvm); INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn); INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn); return 0; } static 
void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu) { int r; r = vcpu_load(vcpu); BUG_ON(r); kvm_mmu_unload(vcpu); vcpu_put(vcpu); } static void kvm_free_vcpus(struct kvm *kvm) { unsigned int i; struct kvm_vcpu *vcpu; /* * Unpin any mmu pages first. */ kvm_for_each_vcpu(i, vcpu, kvm) { kvm_clear_async_pf_completion_queue(vcpu); kvm_unload_vcpu_mmu(vcpu); } kvm_for_each_vcpu(i, vcpu, kvm) kvm_arch_vcpu_free(vcpu); mutex_lock(&kvm->lock); for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) kvm->vcpus[i] = NULL; atomic_set(&kvm->online_vcpus, 0); mutex_unlock(&kvm->lock); } void kvm_arch_sync_events(struct kvm *kvm) { cancel_delayed_work_sync(&kvm->arch.kvmclock_sync_work); cancel_delayed_work_sync(&kvm->arch.kvmclock_update_work); kvm_free_all_assigned_devices(kvm); kvm_free_pit(kvm); } void kvm_arch_destroy_vm(struct kvm *kvm) { if (current->mm == kvm->mm) { /* * Free memory regions allocated on behalf of userspace, * unless the the memory map has changed due to process exit * or fd copying. */ struct kvm_userspace_memory_region mem; memset(&mem, 0, sizeof(mem)); mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT; kvm_set_memory_region(kvm, &mem); mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT; kvm_set_memory_region(kvm, &mem); mem.slot = TSS_PRIVATE_MEMSLOT; kvm_set_memory_region(kvm, &mem); } kvm_iommu_unmap_guest(kvm); kfree(kvm->arch.vpic); kfree(kvm->arch.vioapic); kvm_free_vcpus(kvm); if (kvm->arch.apic_access_page) put_page(kvm->arch.apic_access_page); kfree(rcu_dereference_check(kvm->arch.apic_map, 1)); } void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free, struct kvm_memory_slot *dont) { int i; for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) { if (!dont || free->arch.rmap[i] != dont->arch.rmap[i]) { kvm_kvfree(free->arch.rmap[i]); free->arch.rmap[i] = NULL; } if (i == 0) continue; if (!dont || free->arch.lpage_info[i - 1] != dont->arch.lpage_info[i - 1]) { kvm_kvfree(free->arch.lpage_info[i - 1]); free->arch.lpage_info[i - 1] = NULL; } } } int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, unsigned long npages) { int i; for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) { unsigned long ugfn; int lpages; int level = i + 1; lpages = gfn_to_index(slot->base_gfn + npages - 1, slot->base_gfn, level) + 1; slot->arch.rmap[i] = kvm_kvzalloc(lpages * sizeof(*slot->arch.rmap[i])); if (!slot->arch.rmap[i]) goto out_free; if (i == 0) continue; slot->arch.lpage_info[i - 1] = kvm_kvzalloc(lpages * sizeof(*slot->arch.lpage_info[i - 1])); if (!slot->arch.lpage_info[i - 1]) goto out_free; if (slot->base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1)) slot->arch.lpage_info[i - 1][0].write_count = 1; if ((slot->base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1)) slot->arch.lpage_info[i - 1][lpages - 1].write_count = 1; ugfn = slot->userspace_addr >> PAGE_SHIFT; /* * If the gfn and userspace address are not aligned wrt each * other, or if explicitly asked to, disable large page * support for this slot */ if ((slot->base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1) || !kvm_largepages_enabled()) { unsigned long j; for (j = 0; j < lpages; ++j) slot->arch.lpage_info[i - 1][j].write_count = 1; } } return 0; out_free: for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) { kvm_kvfree(slot->arch.rmap[i]); slot->arch.rmap[i] = NULL; if (i == 0) continue; kvm_kvfree(slot->arch.lpage_info[i - 1]); slot->arch.lpage_info[i - 1] = NULL; } return -ENOMEM; } void kvm_arch_memslots_updated(struct kvm *kvm) { /* * memslots->generation has been incremented. * mmio generation may have reached its maximum value. 
*/ kvm_mmu_invalidate_mmio_sptes(kvm); } int kvm_arch_prepare_memory_region(struct kvm *kvm, struct kvm_memory_slot *memslot, struct kvm_userspace_memory_region *mem, enum kvm_mr_change change) { /* * Only private memory slots need to be mapped here since * KVM_SET_MEMORY_REGION ioctl is no longer supported. */ if ((memslot->id >= KVM_USER_MEM_SLOTS) && (change == KVM_MR_CREATE)) { unsigned long userspace_addr; /* * MAP_SHARED to prevent internal slot pages from being moved * by fork()/COW. */ userspace_addr = vm_mmap(NULL, 0, memslot->npages * PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, 0); if (IS_ERR((void *)userspace_addr)) return PTR_ERR((void *)userspace_addr); memslot->userspace_addr = userspace_addr; } return 0; } void kvm_arch_commit_memory_region(struct kvm *kvm, struct kvm_userspace_memory_region *mem, const struct kvm_memory_slot *old, enum kvm_mr_change change) { int nr_mmu_pages = 0; if ((mem->slot >= KVM_USER_MEM_SLOTS) && (change == KVM_MR_DELETE)) { int ret; ret = vm_munmap(old->userspace_addr, old->npages * PAGE_SIZE); if (ret < 0) printk(KERN_WARNING "kvm_vm_ioctl_set_memory_region: " "failed to munmap memory\n"); } if (!kvm->arch.n_requested_mmu_pages) nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm); if (nr_mmu_pages) kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages); /* * Write protect all pages for dirty logging. * * All the sptes including the large sptes which point to this * slot are set to readonly. We can not create any new large * spte on this slot until the end of the logging. * * See the comments in fast_page_fault(). */ if ((change != KVM_MR_DELETE) && (mem->flags & KVM_MEM_LOG_DIRTY_PAGES)) kvm_mmu_slot_remove_write_access(kvm, mem->slot); } void kvm_arch_flush_shadow_all(struct kvm *kvm) { kvm_mmu_invalidate_zap_all_pages(kvm); } void kvm_arch_flush_shadow_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) { kvm_mmu_invalidate_zap_all_pages(kvm); } int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) { if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) kvm_x86_ops->check_nested_events(vcpu, false); return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE && !vcpu->arch.apf.halted) || !list_empty_careful(&vcpu->async_pf.done) || kvm_apic_has_events(vcpu) || vcpu->arch.pv.pv_unhalted || atomic_read(&vcpu->arch.nmi_queued) || (kvm_arch_interrupt_allowed(vcpu) && kvm_cpu_has_interrupt(vcpu)); } int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) { return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE; } int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu) { return kvm_x86_ops->interrupt_allowed(vcpu); } bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip) { unsigned long current_rip = kvm_rip_read(vcpu) + get_segment_base(vcpu, VCPU_SREG_CS); return current_rip == linear_rip; } EXPORT_SYMBOL_GPL(kvm_is_linear_rip); unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu) { unsigned long rflags; rflags = kvm_x86_ops->get_rflags(vcpu); if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) rflags &= ~X86_EFLAGS_TF; return rflags; } EXPORT_SYMBOL_GPL(kvm_get_rflags); static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) { if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP && kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip)) rflags |= X86_EFLAGS_TF; kvm_x86_ops->set_rflags(vcpu, rflags); } void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) { __kvm_set_rflags(vcpu, rflags); kvm_make_request(KVM_REQ_EVENT, vcpu); } EXPORT_SYMBOL_GPL(kvm_set_rflags); void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, 
struct kvm_async_pf *work) { int r; if ((vcpu->arch.mmu.direct_map != work->arch.direct_map) || work->wakeup_all) return; r = kvm_mmu_reload(vcpu); if (unlikely(r)) return; if (!vcpu->arch.mmu.direct_map && work->arch.cr3 != vcpu->arch.mmu.get_cr3(vcpu)) return; vcpu->arch.mmu.page_fault(vcpu, work->gva, 0, true); } static inline u32 kvm_async_pf_hash_fn(gfn_t gfn) { return hash_32(gfn & 0xffffffff, order_base_2(ASYNC_PF_PER_VCPU)); } static inline u32 kvm_async_pf_next_probe(u32 key) { return (key + 1) & (roundup_pow_of_two(ASYNC_PF_PER_VCPU) - 1); } static void kvm_add_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) { u32 key = kvm_async_pf_hash_fn(gfn); while (vcpu->arch.apf.gfns[key] != ~0) key = kvm_async_pf_next_probe(key); vcpu->arch.apf.gfns[key] = gfn; } static u32 kvm_async_pf_gfn_slot(struct kvm_vcpu *vcpu, gfn_t gfn) { int i; u32 key = kvm_async_pf_hash_fn(gfn); for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU) && (vcpu->arch.apf.gfns[key] != gfn && vcpu->arch.apf.gfns[key] != ~0); i++) key = kvm_async_pf_next_probe(key); return key; } bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) { return vcpu->arch.apf.gfns[kvm_async_pf_gfn_slot(vcpu, gfn)] == gfn; } static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) { u32 i, j, k; i = j = kvm_async_pf_gfn_slot(vcpu, gfn); while (true) { vcpu->arch.apf.gfns[i] = ~0; do { j = kvm_async_pf_next_probe(j); if (vcpu->arch.apf.gfns[j] == ~0) return; k = kvm_async_pf_hash_fn(vcpu->arch.apf.gfns[j]); /* * k lies cyclically in ]i,j] * | i.k.j | * |....j i.k.| or |.k..j i...| */ } while ((i <= j) ? (i < k && k <= j) : (i < k || k <= j)); vcpu->arch.apf.gfns[i] = vcpu->arch.apf.gfns[j]; i = j; } } static int apf_put_user(struct kvm_vcpu *vcpu, u32 val) { return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &val, sizeof(val)); } void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu, struct kvm_async_pf *work) { struct x86_exception fault; trace_kvm_async_pf_not_present(work->arch.token, work->gva); kvm_add_async_pf_gfn(vcpu, work->arch.gfn); if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) || (vcpu->arch.apf.send_user_only && kvm_x86_ops->get_cpl(vcpu) == 0)) kvm_make_request(KVM_REQ_APF_HALT, vcpu); else if (!apf_put_user(vcpu, KVM_PV_REASON_PAGE_NOT_PRESENT)) { fault.vector = PF_VECTOR; fault.error_code_valid = true; fault.error_code = 0; fault.nested_page_fault = false; fault.address = work->arch.token; kvm_inject_page_fault(vcpu, &fault); } } void kvm_arch_async_page_present(struct kvm_vcpu *vcpu, struct kvm_async_pf *work) { struct x86_exception fault; trace_kvm_async_pf_ready(work->arch.token, work->gva); if (work->wakeup_all) work->arch.token = ~0; /* broadcast wakeup */ else kvm_del_async_pf_gfn(vcpu, work->arch.gfn); if ((vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) && !apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) { fault.vector = PF_VECTOR; fault.error_code_valid = true; fault.error_code = 0; fault.nested_page_fault = false; fault.address = work->arch.token; kvm_inject_page_fault(vcpu, &fault); } vcpu->arch.apf.halted = false; vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; } bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu) { if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED)) return true; else return !kvm_event_needs_reinjection(vcpu) && kvm_x86_ops->interrupt_allowed(vcpu); } void kvm_arch_register_noncoherent_dma(struct kvm *kvm) { atomic_inc(&kvm->arch.noncoherent_dma_count); } EXPORT_SYMBOL_GPL(kvm_arch_register_noncoherent_dma); void 
kvm_arch_unregister_noncoherent_dma(struct kvm *kvm) { atomic_dec(&kvm->arch.noncoherent_dma_count); } EXPORT_SYMBOL_GPL(kvm_arch_unregister_noncoherent_dma); bool kvm_arch_has_noncoherent_dma(struct kvm *kvm) { return atomic_read(&kvm->arch.noncoherent_dma_count); } EXPORT_SYMBOL_GPL(kvm_arch_has_noncoherent_dma); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_msr); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_cr); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmrun); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit_inject); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intr_vmexit); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_invlpga); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_skinit); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intercepts); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_write_tsc_offset); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ple_window);
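/*
 * A hedged userspace sketch of the loop that consumes the KVM_EXIT_MMIO
 * fragments produced by complete_emulated_mmio() above. It assumes a
 * vcpu fd and the mmap'ed struct kvm_run *run for that vcpu;
 * handle_mmio_read()/handle_mmio_write() are hypothetical helpers named
 * only for this sketch, and all error handling is omitted:
 *
 *   for (;;) {
 *       ioctl(vcpu_fd, KVM_RUN, 0);
 *       if (run->exit_reason != KVM_EXIT_MMIO)
 *           break;                          // other exit reasons not shown
 *       if (run->mmio.is_write)
 *           handle_mmio_write(run->mmio.phys_addr, run->mmio.data,
 *                             run->mmio.len);
 *       else                                // fill run->mmio.data for the guest
 *           handle_mmio_read(run->mmio.phys_addr, run->mmio.data,
 *                            run->mmio.len);
 *   }
 */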
/* auditsc.c -- System-call auditing support * Handles all system-call specific auditing features. * * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina. * Copyright 2005 Hewlett-Packard Development Company, L.P. * Copyright (C) 2005, 2006 IBM Corporation * All Rights Reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Written by Rickard E. (Rik) Faith <faith@redhat.com> * * Many of the ideas implemented here are from Stephen C. Tweedie, * especially the idea of avoiding a copy by using getname. * * The method for actual interception of syscall entry and exit (not in * this file -- see entry.S) is based on a GPL'd patch written by * okir@suse.de and Copyright 2003 SuSE Linux AG. * * POSIX message queue support added by George Wilson <ltcgcw@us.ibm.com>, * 2006. * * The support of additional filter rules compares (>, <, >=, <=) was * added by Dustin Kirkland <dustin.kirkland@us.ibm.com>, 2005. * * Modified by Amy Griffis <amy.griffis@hp.com> to collect additional * filesystem information. * * Subject and object context labeling support added by <danjones@us.ibm.com> * and <dustin.kirkland@us.ibm.com> for LSPP certification compliance. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/init.h> #include <asm/types.h> #include <linux/atomic.h> #include <linux/fs.h> #include <linux/namei.h> #include <linux/mm.h> #include <linux/export.h> #include <linux/slab.h> #include <linux/mount.h> #include <linux/socket.h> #include <linux/mqueue.h> #include <linux/audit.h> #include <linux/personality.h> #include <linux/time.h> #include <linux/netlink.h> #include <linux/compiler.h> #include <asm/unistd.h> #include <linux/security.h> #include <linux/list.h> #include <linux/tty.h> #include <linux/binfmts.h> #include <linux/highmem.h> #include <linux/syscalls.h> #include <asm/syscall.h> #include <linux/capability.h> #include <linux/fs_struct.h> #include <linux/compat.h> #include <linux/ctype.h> #include <linux/string.h> #include <uapi/linux/limits.h> #include "audit.h" /* flags stating the success for a syscall */ #define AUDITSC_INVALID 0 #define AUDITSC_SUCCESS 1 #define AUDITSC_FAILURE 2 /* no execve audit message should be longer than this (userspace limits) */ #define MAX_EXECVE_AUDIT_LEN 7500 /* max length to print of cmdline/proctitle value during audit */ #define MAX_PROCTITLE_AUDIT_LEN 128 /* number of audit rules */ int audit_n_rules; /* determines whether we collect data for signals sent */ int audit_signals; struct audit_aux_data { struct audit_aux_data *next; int type; }; #define AUDIT_AUX_IPCPERM 0 /* Number of target pids per aux struct. 
*/ #define AUDIT_AUX_PIDS 16 struct audit_aux_data_pids { struct audit_aux_data d; pid_t target_pid[AUDIT_AUX_PIDS]; kuid_t target_auid[AUDIT_AUX_PIDS]; kuid_t target_uid[AUDIT_AUX_PIDS]; unsigned int target_sessionid[AUDIT_AUX_PIDS]; u32 target_sid[AUDIT_AUX_PIDS]; char target_comm[AUDIT_AUX_PIDS][TASK_COMM_LEN]; int pid_count; }; struct audit_aux_data_bprm_fcaps { struct audit_aux_data d; struct audit_cap_data fcap; unsigned int fcap_ver; struct audit_cap_data old_pcap; struct audit_cap_data new_pcap; }; struct audit_tree_refs { struct audit_tree_refs *next; struct audit_chunk *c[31]; }; static int audit_match_perm(struct audit_context *ctx, int mask) { unsigned n; if (unlikely(!ctx)) return 0; n = ctx->major; switch (audit_classify_syscall(ctx->arch, n)) { case 0: /* native */ if ((mask & AUDIT_PERM_WRITE) && audit_match_class(AUDIT_CLASS_WRITE, n)) return 1; if ((mask & AUDIT_PERM_READ) && audit_match_class(AUDIT_CLASS_READ, n)) return 1; if ((mask & AUDIT_PERM_ATTR) && audit_match_class(AUDIT_CLASS_CHATTR, n)) return 1; return 0; case 1: /* 32bit on biarch */ if ((mask & AUDIT_PERM_WRITE) && audit_match_class(AUDIT_CLASS_WRITE_32, n)) return 1; if ((mask & AUDIT_PERM_READ) && audit_match_class(AUDIT_CLASS_READ_32, n)) return 1; if ((mask & AUDIT_PERM_ATTR) && audit_match_class(AUDIT_CLASS_CHATTR_32, n)) return 1; return 0; case 2: /* open */ return mask & ACC_MODE(ctx->argv[1]); case 3: /* openat */ return mask & ACC_MODE(ctx->argv[2]); case 4: /* socketcall */ return ((mask & AUDIT_PERM_WRITE) && ctx->argv[0] == SYS_BIND); case 5: /* execve */ return mask & AUDIT_PERM_EXEC; default: return 0; } } static int audit_match_filetype(struct audit_context *ctx, int val) { struct audit_names *n; umode_t mode = (umode_t)val; if (unlikely(!ctx)) return 0; list_for_each_entry(n, &ctx->names_list, list) { if ((n->ino != AUDIT_INO_UNSET) && ((n->mode & S_IFMT) == mode)) return 1; } return 0; } /* * We keep a linked list of fixed-sized (31 pointer) arrays of audit_chunk *; * ->first_trees points to its beginning, ->trees - to the current end of data. * ->tree_count is the number of free entries in array pointed to by ->trees. * Original condition is (NULL, NULL, 0); as soon as it grows we never revert to NULL, * "empty" becomes (p, p, 31) afterwards. We don't shrink the list (and seriously, * it's going to remain 1-element for almost any setup) until we free context itself. * References in it _are_ dropped - at the same time we free/drop aux stuff. 
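 *
 * A hedged walk-through with made-up numbers: after grow_tree_refs() the
 * triple is (p, p, 31); each put_tree_ref() stores a chunk at
 * p->c[--tree_count], so four insertions leave (p, p, 27); when
 * tree_count reaches 0 a fresh array q is linked in and the triple
 * becomes (p, q, 31), filling again from index 30 downwards.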
*/ #ifdef CONFIG_AUDIT_TREE static void audit_set_auditable(struct audit_context *ctx) { if (!ctx->prio) { ctx->prio = 1; ctx->current_state = AUDIT_RECORD_CONTEXT; } } static int put_tree_ref(struct audit_context *ctx, struct audit_chunk *chunk) { struct audit_tree_refs *p = ctx->trees; int left = ctx->tree_count; if (likely(left)) { p->c[--left] = chunk; ctx->tree_count = left; return 1; } if (!p) return 0; p = p->next; if (p) { p->c[30] = chunk; ctx->trees = p; ctx->tree_count = 30; return 1; } return 0; } static int grow_tree_refs(struct audit_context *ctx) { struct audit_tree_refs *p = ctx->trees; ctx->trees = kzalloc(sizeof(struct audit_tree_refs), GFP_KERNEL); if (!ctx->trees) { ctx->trees = p; return 0; } if (p) p->next = ctx->trees; else ctx->first_trees = ctx->trees; ctx->tree_count = 31; return 1; } #endif static void unroll_tree_refs(struct audit_context *ctx, struct audit_tree_refs *p, int count) { #ifdef CONFIG_AUDIT_TREE struct audit_tree_refs *q; int n; if (!p) { /* we started with empty chain */ p = ctx->first_trees; count = 31; /* if the very first allocation has failed, nothing to do */ if (!p) return; } n = count; for (q = p; q != ctx->trees; q = q->next, n = 31) { while (n--) { audit_put_chunk(q->c[n]); q->c[n] = NULL; } } while (n-- > ctx->tree_count) { audit_put_chunk(q->c[n]); q->c[n] = NULL; } ctx->trees = p; ctx->tree_count = count; #endif } static void free_tree_refs(struct audit_context *ctx) { struct audit_tree_refs *p, *q; for (p = ctx->first_trees; p; p = q) { q = p->next; kfree(p); } } static int match_tree_refs(struct audit_context *ctx, struct audit_tree *tree) { #ifdef CONFIG_AUDIT_TREE struct audit_tree_refs *p; int n; if (!tree) return 0; /* full ones */ for (p = ctx->first_trees; p != ctx->trees; p = p->next) { for (n = 0; n < 31; n++) if (audit_tree_match(p->c[n], tree)) return 1; } /* partial */ if (p) { for (n = ctx->tree_count; n < 31; n++) if (audit_tree_match(p->c[n], tree)) return 1; } #endif return 0; } static int audit_compare_uid(kuid_t uid, struct audit_names *name, struct audit_field *f, struct audit_context *ctx) { struct audit_names *n; int rc; if (name) { rc = audit_uid_comparator(uid, f->op, name->uid); if (rc) return rc; } if (ctx) { list_for_each_entry(n, &ctx->names_list, list) { rc = audit_uid_comparator(uid, f->op, n->uid); if (rc) return rc; } } return 0; } static int audit_compare_gid(kgid_t gid, struct audit_names *name, struct audit_field *f, struct audit_context *ctx) { struct audit_names *n; int rc; if (name) { rc = audit_gid_comparator(gid, f->op, name->gid); if (rc) return rc; } if (ctx) { list_for_each_entry(n, &ctx->names_list, list) { rc = audit_gid_comparator(gid, f->op, n->gid); if (rc) return rc; } } return 0; } static int audit_field_compare(struct task_struct *tsk, const struct cred *cred, struct audit_field *f, struct audit_context *ctx, struct audit_names *name) { switch (f->val) { /* process to file object comparisons */ case AUDIT_COMPARE_UID_TO_OBJ_UID: return audit_compare_uid(cred->uid, name, f, ctx); case AUDIT_COMPARE_GID_TO_OBJ_GID: return audit_compare_gid(cred->gid, name, f, ctx); case AUDIT_COMPARE_EUID_TO_OBJ_UID: return audit_compare_uid(cred->euid, name, f, ctx); case AUDIT_COMPARE_EGID_TO_OBJ_GID: return audit_compare_gid(cred->egid, name, f, ctx); case AUDIT_COMPARE_AUID_TO_OBJ_UID: return audit_compare_uid(tsk->loginuid, name, f, ctx); case AUDIT_COMPARE_SUID_TO_OBJ_UID: return audit_compare_uid(cred->suid, name, f, ctx); case AUDIT_COMPARE_SGID_TO_OBJ_GID: return audit_compare_gid(cred->sgid, 
name, f, ctx); case AUDIT_COMPARE_FSUID_TO_OBJ_UID: return audit_compare_uid(cred->fsuid, name, f, ctx); case AUDIT_COMPARE_FSGID_TO_OBJ_GID: return audit_compare_gid(cred->fsgid, name, f, ctx); /* uid comparisons */ case AUDIT_COMPARE_UID_TO_AUID: return audit_uid_comparator(cred->uid, f->op, tsk->loginuid); case AUDIT_COMPARE_UID_TO_EUID: return audit_uid_comparator(cred->uid, f->op, cred->euid); case AUDIT_COMPARE_UID_TO_SUID: return audit_uid_comparator(cred->uid, f->op, cred->suid); case AUDIT_COMPARE_UID_TO_FSUID: return audit_uid_comparator(cred->uid, f->op, cred->fsuid); /* auid comparisons */ case AUDIT_COMPARE_AUID_TO_EUID: return audit_uid_comparator(tsk->loginuid, f->op, cred->euid); case AUDIT_COMPARE_AUID_TO_SUID: return audit_uid_comparator(tsk->loginuid, f->op, cred->suid); case AUDIT_COMPARE_AUID_TO_FSUID: return audit_uid_comparator(tsk->loginuid, f->op, cred->fsuid); /* euid comparisons */ case AUDIT_COMPARE_EUID_TO_SUID: return audit_uid_comparator(cred->euid, f->op, cred->suid); case AUDIT_COMPARE_EUID_TO_FSUID: return audit_uid_comparator(cred->euid, f->op, cred->fsuid); /* suid comparisons */ case AUDIT_COMPARE_SUID_TO_FSUID: return audit_uid_comparator(cred->suid, f->op, cred->fsuid); /* gid comparisons */ case AUDIT_COMPARE_GID_TO_EGID: return audit_gid_comparator(cred->gid, f->op, cred->egid); case AUDIT_COMPARE_GID_TO_SGID: return audit_gid_comparator(cred->gid, f->op, cred->sgid); case AUDIT_COMPARE_GID_TO_FSGID: return audit_gid_comparator(cred->gid, f->op, cred->fsgid); /* egid comparisons */ case AUDIT_COMPARE_EGID_TO_SGID: return audit_gid_comparator(cred->egid, f->op, cred->sgid); case AUDIT_COMPARE_EGID_TO_FSGID: return audit_gid_comparator(cred->egid, f->op, cred->fsgid); /* sgid comparison */ case AUDIT_COMPARE_SGID_TO_FSGID: return audit_gid_comparator(cred->sgid, f->op, cred->fsgid); default: WARN(1, "Missing AUDIT_COMPARE define. Report as a bug\n"); return 0; } return 0; } /* Determine if any context name data matches a rule's watch data */ /* Compare a task_struct with an audit_rule. Return 1 on match, 0 * otherwise. * * If task_creation is true, this is an explicit indication that we are * filtering a task rule at task creation time. This and tsk == current are * the only situations where tsk->cred may be accessed without an rcu read lock. 
*/ static int audit_filter_rules(struct task_struct *tsk, struct audit_krule *rule, struct audit_context *ctx, struct audit_names *name, enum audit_state *state, bool task_creation) { const struct cred *cred; int i, need_sid = 1; u32 sid; cred = rcu_dereference_check(tsk->cred, tsk == current || task_creation); for (i = 0; i < rule->field_count; i++) { struct audit_field *f = &rule->fields[i]; struct audit_names *n; int result = 0; pid_t pid; switch (f->type) { case AUDIT_PID: pid = task_pid_nr(tsk); result = audit_comparator(pid, f->op, f->val); break; case AUDIT_PPID: if (ctx) { if (!ctx->ppid) ctx->ppid = task_ppid_nr(tsk); result = audit_comparator(ctx->ppid, f->op, f->val); } break; case AUDIT_EXE: result = audit_exe_compare(tsk, rule->exe); break; case AUDIT_UID: result = audit_uid_comparator(cred->uid, f->op, f->uid); break; case AUDIT_EUID: result = audit_uid_comparator(cred->euid, f->op, f->uid); break; case AUDIT_SUID: result = audit_uid_comparator(cred->suid, f->op, f->uid); break; case AUDIT_FSUID: result = audit_uid_comparator(cred->fsuid, f->op, f->uid); break; case AUDIT_GID: result = audit_gid_comparator(cred->gid, f->op, f->gid); if (f->op == Audit_equal) { if (!result) result = in_group_p(f->gid); } else if (f->op == Audit_not_equal) { if (result) result = !in_group_p(f->gid); } break; case AUDIT_EGID: result = audit_gid_comparator(cred->egid, f->op, f->gid); if (f->op == Audit_equal) { if (!result) result = in_egroup_p(f->gid); } else if (f->op == Audit_not_equal) { if (result) result = !in_egroup_p(f->gid); } break; case AUDIT_SGID: result = audit_gid_comparator(cred->sgid, f->op, f->gid); break; case AUDIT_FSGID: result = audit_gid_comparator(cred->fsgid, f->op, f->gid); break; case AUDIT_PERS: result = audit_comparator(tsk->personality, f->op, f->val); break; case AUDIT_ARCH: if (ctx) result = audit_comparator(ctx->arch, f->op, f->val); break; case AUDIT_EXIT: if (ctx && ctx->return_valid) result = audit_comparator(ctx->return_code, f->op, f->val); break; case AUDIT_SUCCESS: if (ctx && ctx->return_valid) { if (f->val) result = audit_comparator(ctx->return_valid, f->op, AUDITSC_SUCCESS); else result = audit_comparator(ctx->return_valid, f->op, AUDITSC_FAILURE); } break; case AUDIT_DEVMAJOR: if (name) { if (audit_comparator(MAJOR(name->dev), f->op, f->val) || audit_comparator(MAJOR(name->rdev), f->op, f->val)) ++result; } else if (ctx) { list_for_each_entry(n, &ctx->names_list, list) { if (audit_comparator(MAJOR(n->dev), f->op, f->val) || audit_comparator(MAJOR(n->rdev), f->op, f->val)) { ++result; break; } } } break; case AUDIT_DEVMINOR: if (name) { if (audit_comparator(MINOR(name->dev), f->op, f->val) || audit_comparator(MINOR(name->rdev), f->op, f->val)) ++result; } else if (ctx) { list_for_each_entry(n, &ctx->names_list, list) { if (audit_comparator(MINOR(n->dev), f->op, f->val) || audit_comparator(MINOR(n->rdev), f->op, f->val)) { ++result; break; } } } break; case AUDIT_INODE: if (name) result = audit_comparator(name->ino, f->op, f->val); else if (ctx) { list_for_each_entry(n, &ctx->names_list, list) { if (audit_comparator(n->ino, f->op, f->val)) { ++result; break; } } } break; case AUDIT_OBJ_UID: if (name) { result = audit_uid_comparator(name->uid, f->op, f->uid); } else if (ctx) { list_for_each_entry(n, &ctx->names_list, list) { if (audit_uid_comparator(n->uid, f->op, f->uid)) { ++result; break; } } } break; case AUDIT_OBJ_GID: if (name) { result = audit_gid_comparator(name->gid, f->op, f->gid); } else if (ctx) { list_for_each_entry(n, &ctx->names_list, list) { 
if (audit_gid_comparator(n->gid, f->op, f->gid)) { ++result; break; } } } break; case AUDIT_WATCH: if (name) result = audit_watch_compare(rule->watch, name->ino, name->dev); break; case AUDIT_DIR: if (ctx) result = match_tree_refs(ctx, rule->tree); break; case AUDIT_LOGINUID: result = audit_uid_comparator(tsk->loginuid, f->op, f->uid); break; case AUDIT_LOGINUID_SET: result = audit_comparator(audit_loginuid_set(tsk), f->op, f->val); break; case AUDIT_SUBJ_USER: case AUDIT_SUBJ_ROLE: case AUDIT_SUBJ_TYPE: case AUDIT_SUBJ_SEN: case AUDIT_SUBJ_CLR: /* NOTE: this may return negative values indicating a temporary error. We simply treat this as a match for now to avoid losing information that may be wanted. An error message will also be logged upon error */ if (f->lsm_rule) { if (need_sid) { security_task_getsecid(tsk, &sid); need_sid = 0; } result = security_audit_rule_match(sid, f->type, f->op, f->lsm_rule, ctx); } break; case AUDIT_OBJ_USER: case AUDIT_OBJ_ROLE: case AUDIT_OBJ_TYPE: case AUDIT_OBJ_LEV_LOW: case AUDIT_OBJ_LEV_HIGH: /* The above note for AUDIT_SUBJ_USER...AUDIT_SUBJ_CLR also applies here */ if (f->lsm_rule) { /* Find files that match */ if (name) { result = security_audit_rule_match( name->osid, f->type, f->op, f->lsm_rule, ctx); } else if (ctx) { list_for_each_entry(n, &ctx->names_list, list) { if (security_audit_rule_match(n->osid, f->type, f->op, f->lsm_rule, ctx)) { ++result; break; } } } /* Find ipc objects that match */ if (!ctx || ctx->type != AUDIT_IPC) break; if (security_audit_rule_match(ctx->ipc.osid, f->type, f->op, f->lsm_rule, ctx)) ++result; } break; case AUDIT_ARG0: case AUDIT_ARG1: case AUDIT_ARG2: case AUDIT_ARG3: if (ctx) result = audit_comparator(ctx->argv[f->type-AUDIT_ARG0], f->op, f->val); break; case AUDIT_FILTERKEY: /* ignore this field for filtering */ result = 1; break; case AUDIT_PERM: result = audit_match_perm(ctx, f->val); break; case AUDIT_FILETYPE: result = audit_match_filetype(ctx, f->val); break; case AUDIT_FIELD_COMPARE: result = audit_field_compare(tsk, cred, f, ctx, name); break; } if (!result) return 0; } if (ctx) { if (rule->prio <= ctx->prio) return 0; if (rule->filterkey) { kfree(ctx->filterkey); ctx->filterkey = kstrdup(rule->filterkey, GFP_ATOMIC); } ctx->prio = rule->prio; } switch (rule->action) { case AUDIT_NEVER: *state = AUDIT_DISABLED; break; case AUDIT_ALWAYS: *state = AUDIT_RECORD_CONTEXT; break; } return 1; } /* At process creation time, we can determine if system-call auditing is * completely disabled for this task. Since we only have the task * structure at this point, we can only check uid and gid. 
*/ static enum audit_state audit_filter_task(struct task_struct *tsk, char **key) { struct audit_entry *e; enum audit_state state; rcu_read_lock(); list_for_each_entry_rcu(e, &audit_filter_list[AUDIT_FILTER_TASK], list) { if (audit_filter_rules(tsk, &e->rule, NULL, NULL, &state, true)) { if (state == AUDIT_RECORD_CONTEXT) *key = kstrdup(e->rule.filterkey, GFP_ATOMIC); rcu_read_unlock(); return state; } } rcu_read_unlock(); return AUDIT_BUILD_CONTEXT; } static int audit_in_mask(const struct audit_krule *rule, unsigned long val) { int word, bit; if (val > 0xffffffff) return false; word = AUDIT_WORD(val); if (word >= AUDIT_BITMASK_SIZE) return false; bit = AUDIT_BIT(val); return rule->mask[word] & bit; } /* At syscall entry and exit time, this filter is called if the * audit_state is not low enough that auditing cannot take place, but is * also not high enough that we already know we have to write an audit * record (i.e., the state is AUDIT_SETUP_CONTEXT or AUDIT_BUILD_CONTEXT). */ static enum audit_state audit_filter_syscall(struct task_struct *tsk, struct audit_context *ctx, struct list_head *list) { struct audit_entry *e; enum audit_state state; if (audit_pid && tsk->tgid == audit_pid) return AUDIT_DISABLED; rcu_read_lock(); if (!list_empty(list)) { list_for_each_entry_rcu(e, list, list) { if (audit_in_mask(&e->rule, ctx->major) && audit_filter_rules(tsk, &e->rule, ctx, NULL, &state, false)) { rcu_read_unlock(); ctx->current_state = state; return state; } } } rcu_read_unlock(); return AUDIT_BUILD_CONTEXT; } /* * Given an audit_name check the inode hash table to see if they match. * Called holding the rcu read lock to protect the use of audit_inode_hash */ static int audit_filter_inode_name(struct task_struct *tsk, struct audit_names *n, struct audit_context *ctx) { int h = audit_hash_ino((u32)n->ino); struct list_head *list = &audit_inode_hash[h]; struct audit_entry *e; enum audit_state state; if (list_empty(list)) return 0; list_for_each_entry_rcu(e, list, list) { if (audit_in_mask(&e->rule, ctx->major) && audit_filter_rules(tsk, &e->rule, ctx, n, &state, false)) { ctx->current_state = state; return 1; } } return 0; } /* At syscall exit time, this filter is called if any audit_names have been * collected during syscall processing. We only check rules in sublists at hash * buckets applicable to the inode numbers in audit_names. * Regarding audit_state, same rules apply as for audit_filter_syscall(). 
*/ void audit_filter_inodes(struct task_struct *tsk, struct audit_context *ctx) { struct audit_names *n; if (audit_pid && tsk->tgid == audit_pid) return; rcu_read_lock(); list_for_each_entry(n, &ctx->names_list, list) { if (audit_filter_inode_name(tsk, n, ctx)) break; } rcu_read_unlock(); } /* Transfer the audit context pointer to the caller, clearing it in the tsk's struct */ static inline struct audit_context *audit_take_context(struct task_struct *tsk, int return_valid, long return_code) { struct audit_context *context = tsk->audit_context; if (!context) return NULL; context->return_valid = return_valid; /* * we need to fix up the return code in the audit logs if the actual * return codes are later going to be fixed up by the arch specific * signal handlers * * This is actually a test for: * (rc == ERESTARTSYS ) || (rc == ERESTARTNOINTR) || * (rc == ERESTARTNOHAND) || (rc == ERESTART_RESTARTBLOCK) * * but is faster than a bunch of || */ if (unlikely(return_code <= -ERESTARTSYS) && (return_code >= -ERESTART_RESTARTBLOCK) && (return_code != -ENOIOCTLCMD)) context->return_code = -EINTR; else context->return_code = return_code; if (context->in_syscall && !context->dummy) { audit_filter_syscall(tsk, context, &audit_filter_list[AUDIT_FILTER_EXIT]); audit_filter_inodes(tsk, context); } tsk->audit_context = NULL; return context; } static inline void audit_proctitle_free(struct audit_context *context) { kfree(context->proctitle.value); context->proctitle.value = NULL; context->proctitle.len = 0; } static inline void audit_free_names(struct audit_context *context) { struct audit_names *n, *next; list_for_each_entry_safe(n, next, &context->names_list, list) { list_del(&n->list); if (n->name) putname(n->name); if (n->should_free) kfree(n); } context->name_count = 0; path_put(&context->pwd); context->pwd.dentry = NULL; context->pwd.mnt = NULL; } static inline void audit_free_aux(struct audit_context *context) { struct audit_aux_data *aux; while ((aux = context->aux)) { context->aux = aux->next; kfree(aux); } while ((aux = context->aux_pids)) { context->aux_pids = aux->next; kfree(aux); } } static inline struct audit_context *audit_alloc_context(enum audit_state state) { struct audit_context *context; context = kzalloc(sizeof(*context), GFP_KERNEL); if (!context) return NULL; context->state = state; context->prio = state == AUDIT_RECORD_CONTEXT ? ~0ULL : 0; INIT_LIST_HEAD(&context->killed_trees); INIT_LIST_HEAD(&context->names_list); return context; } /** * audit_alloc - allocate an audit context block for a task * @tsk: task * * Filter on the task information and allocate a per-task audit context * if necessary. Doing so turns on system call auditing for the * specified task. This is called from copy_process, so no lock is * needed. */ int audit_alloc(struct task_struct *tsk) { struct audit_context *context; enum audit_state state; char *key = NULL; if (likely(!audit_ever_enabled)) return 0; /* Return if not auditing. 
*/ state = audit_filter_task(tsk, &key); if (state == AUDIT_DISABLED) { clear_tsk_thread_flag(tsk, TIF_SYSCALL_AUDIT); return 0; } if (!(context = audit_alloc_context(state))) { kfree(key); audit_log_lost("out of memory in audit_alloc"); return -ENOMEM; } context->filterkey = key; tsk->audit_context = context; set_tsk_thread_flag(tsk, TIF_SYSCALL_AUDIT); return 0; } static inline void audit_free_context(struct audit_context *context) { audit_free_names(context); unroll_tree_refs(context, NULL, 0); free_tree_refs(context); audit_free_aux(context); kfree(context->filterkey); kfree(context->sockaddr); audit_proctitle_free(context); kfree(context); } static int audit_log_pid_context(struct audit_context *context, pid_t pid, kuid_t auid, kuid_t uid, unsigned int sessionid, u32 sid, char *comm) { struct audit_buffer *ab; char *ctx = NULL; u32 len; int rc = 0; ab = audit_log_start(context, GFP_KERNEL, AUDIT_OBJ_PID); if (!ab) return rc; audit_log_format(ab, "opid=%d oauid=%d ouid=%d oses=%d", pid, from_kuid(&init_user_ns, auid), from_kuid(&init_user_ns, uid), sessionid); if (sid) { if (security_secid_to_secctx(sid, &ctx, &len)) { audit_log_format(ab, " obj=(none)"); rc = 1; } else { audit_log_format(ab, " obj=%s", ctx); security_release_secctx(ctx, len); } } audit_log_format(ab, " ocomm="); audit_log_untrustedstring(ab, comm); audit_log_end(ab); return rc; } /* * to_send and len_sent accounting are very loose estimates. We aren't * really worried about a hard cap to MAX_EXECVE_AUDIT_LEN so much as being * within about 500 bytes (next page boundary) * * why snprintf? an int is up to 12 digits long. if we just assumed when * logging that a[%d]= was going to be 16 characters long we would be wasting * space in every audit message. In one 7500 byte message we can log up to * about 1000 min size arguments. That comes down to about 50% waste of space * if we didn't do the snprintf to find out how long arg_num_len was. */ static int audit_log_single_execve_arg(struct audit_context *context, struct audit_buffer **ab, int arg_num, size_t *len_sent, const char __user *p, char *buf) { char arg_num_len_buf[12]; const char __user *tmp_p = p; /* how many digits are in arg_num? 5 is the length of ' a=""' */ size_t arg_num_len = snprintf(arg_num_len_buf, 12, "%d", arg_num) + 5; size_t len, len_left, to_send; size_t max_execve_audit_len = MAX_EXECVE_AUDIT_LEN; unsigned int i, has_cntl = 0, too_long = 0; int ret; /* strnlen_user includes the null we don't want to send */ len_left = len = strnlen_user(p, MAX_ARG_STRLEN) - 1; /* * We just created this mm, if we can't find the strings * we just copied into it something is _very_ wrong. Similar * for strings that are too long, we should not have created * any. */ if (WARN_ON_ONCE(len < 0 || len > MAX_ARG_STRLEN - 1)) { send_sig(SIGKILL, current, 0); return -1; } /* walk the whole argument looking for non-ascii chars */ do { if (len_left > MAX_EXECVE_AUDIT_LEN) to_send = MAX_EXECVE_AUDIT_LEN; else to_send = len_left; ret = copy_from_user(buf, tmp_p, to_send); /* * There is no reason for this copy to be short. We just * copied them here, and the mm hasn't been exposed to user- * space yet. 
*/ if (ret) { WARN_ON(1); send_sig(SIGKILL, current, 0); return -1; } buf[to_send] = '\0'; has_cntl = audit_string_contains_control(buf, to_send); if (has_cntl) { /* * hex messages get logged as 2 bytes, so we can only * send half as much in each message */ max_execve_audit_len = MAX_EXECVE_AUDIT_LEN / 2; break; } len_left -= to_send; tmp_p += to_send; } while (len_left > 0); len_left = len; if (len > max_execve_audit_len) too_long = 1; /* rewalk the argument actually logging the message */ for (i = 0; len_left > 0; i++) { int room_left; if (len_left > max_execve_audit_len) to_send = max_execve_audit_len; else to_send = len_left; /* do we have space left to send this argument in this ab? */ room_left = MAX_EXECVE_AUDIT_LEN - arg_num_len - *len_sent; if (has_cntl) room_left -= (to_send * 2); else room_left -= to_send; if (room_left < 0) { *len_sent = 0; audit_log_end(*ab); *ab = audit_log_start(context, GFP_KERNEL, AUDIT_EXECVE); if (!*ab) return 0; } /* * first record needs to say how long the original string was * so we can be sure nothing was lost. */ if ((i == 0) && (too_long)) audit_log_format(*ab, " a%d_len=%zu", arg_num, has_cntl ? 2*len : len); /* * normally arguments are small enough to fit and we already * filled buf above when we checked for control characters * so don't bother with another copy_from_user */ if (len >= max_execve_audit_len) ret = copy_from_user(buf, p, to_send); else ret = 0; if (ret) { WARN_ON(1); send_sig(SIGKILL, current, 0); return -1; } buf[to_send] = '\0'; /* actually log it */ audit_log_format(*ab, " a%d", arg_num); if (too_long) audit_log_format(*ab, "[%d]", i); audit_log_format(*ab, "="); if (has_cntl) audit_log_n_hex(*ab, buf, to_send); else audit_log_string(*ab, buf); p += to_send; len_left -= to_send; *len_sent += arg_num_len; if (has_cntl) *len_sent += to_send * 2; else *len_sent += to_send; } /* include the null we didn't log */ return len + 1; } static void audit_log_execve_info(struct audit_context *context, struct audit_buffer **ab) { int i, len; size_t len_sent = 0; const char __user *p; char *buf; p = (const char __user *)current->mm->arg_start; audit_log_format(*ab, "argc=%d", context->execve.argc); /* * we need some kernel buffer to hold the userspace args. Just * allocate one big one rather than allocating one of the right size * for every single argument inside audit_log_single_execve_arg() * should be <8k allocation so should be pretty safe. 
*/ buf = kmalloc(MAX_EXECVE_AUDIT_LEN + 1, GFP_KERNEL); if (!buf) { audit_panic("out of memory for argv string"); return; } for (i = 0; i < context->execve.argc; i++) { len = audit_log_single_execve_arg(context, ab, i, &len_sent, p, buf); if (len <= 0) break; p += len; } kfree(buf); } static void show_special(struct audit_context *context, int *call_panic) { struct audit_buffer *ab; int i; ab = audit_log_start(context, GFP_KERNEL, context->type); if (!ab) return; switch (context->type) { case AUDIT_SOCKETCALL: { int nargs = context->socketcall.nargs; audit_log_format(ab, "nargs=%d", nargs); for (i = 0; i < nargs; i++) audit_log_format(ab, " a%d=%lx", i, context->socketcall.args[i]); break; } case AUDIT_IPC: { u32 osid = context->ipc.osid; audit_log_format(ab, "ouid=%u ogid=%u mode=%#ho", from_kuid(&init_user_ns, context->ipc.uid), from_kgid(&init_user_ns, context->ipc.gid), context->ipc.mode); if (osid) { char *ctx = NULL; u32 len; if (security_secid_to_secctx(osid, &ctx, &len)) { audit_log_format(ab, " osid=%u", osid); *call_panic = 1; } else { audit_log_format(ab, " obj=%s", ctx); security_release_secctx(ctx, len); } } if (context->ipc.has_perm) { audit_log_end(ab); ab = audit_log_start(context, GFP_KERNEL, AUDIT_IPC_SET_PERM); if (unlikely(!ab)) return; audit_log_format(ab, "qbytes=%lx ouid=%u ogid=%u mode=%#ho", context->ipc.qbytes, context->ipc.perm_uid, context->ipc.perm_gid, context->ipc.perm_mode); } break; } case AUDIT_MQ_OPEN: { audit_log_format(ab, "oflag=0x%x mode=%#ho mq_flags=0x%lx mq_maxmsg=%ld " "mq_msgsize=%ld mq_curmsgs=%ld", context->mq_open.oflag, context->mq_open.mode, context->mq_open.attr.mq_flags, context->mq_open.attr.mq_maxmsg, context->mq_open.attr.mq_msgsize, context->mq_open.attr.mq_curmsgs); break; } case AUDIT_MQ_SENDRECV: { audit_log_format(ab, "mqdes=%d msg_len=%zd msg_prio=%u " "abs_timeout_sec=%ld abs_timeout_nsec=%ld", context->mq_sendrecv.mqdes, context->mq_sendrecv.msg_len, context->mq_sendrecv.msg_prio, context->mq_sendrecv.abs_timeout.tv_sec, context->mq_sendrecv.abs_timeout.tv_nsec); break; } case AUDIT_MQ_NOTIFY: { audit_log_format(ab, "mqdes=%d sigev_signo=%d", context->mq_notify.mqdes, context->mq_notify.sigev_signo); break; } case AUDIT_MQ_GETSETATTR: { struct mq_attr *attr = &context->mq_getsetattr.mqstat; audit_log_format(ab, "mqdes=%d mq_flags=0x%lx mq_maxmsg=%ld mq_msgsize=%ld " "mq_curmsgs=%ld ", context->mq_getsetattr.mqdes, attr->mq_flags, attr->mq_maxmsg, attr->mq_msgsize, attr->mq_curmsgs); break; } case AUDIT_CAPSET: { audit_log_format(ab, "pid=%d", context->capset.pid); audit_log_cap(ab, "cap_pi", &context->capset.cap.inheritable); audit_log_cap(ab, "cap_pp", &context->capset.cap.permitted); audit_log_cap(ab, "cap_pe", &context->capset.cap.effective); break; } case AUDIT_MMAP: { audit_log_format(ab, "fd=%d flags=0x%x", context->mmap.fd, context->mmap.flags); break; } case AUDIT_EXECVE: { audit_log_execve_info(context, &ab); break; } } audit_log_end(ab); } static inline int audit_proctitle_rtrim(char *proctitle, int len) { char *end = proctitle + len - 1; while (end > proctitle && !isprint(*end)) end--; /* catch the case where proctitle is only 1 non-print character */ len = end - proctitle + 1; len -= isprint(proctitle[len-1]) == 0; return len; } static void audit_log_proctitle(struct task_struct *tsk, struct audit_context *context) { int res; char *buf; char *msg = "(null)"; int len = strlen(msg); struct audit_buffer *ab; ab = audit_log_start(context, GFP_KERNEL, AUDIT_PROCTITLE); if (!ab) return; /* audit_panic or being filtered */ 
audit_log_format(ab, "proctitle="); /* Not cached */ if (!context->proctitle.value) { buf = kmalloc(MAX_PROCTITLE_AUDIT_LEN, GFP_KERNEL); if (!buf) goto out; /* Historically called this from procfs naming */ res = get_cmdline(tsk, buf, MAX_PROCTITLE_AUDIT_LEN); if (res == 0) { kfree(buf); goto out; } res = audit_proctitle_rtrim(buf, res); if (res == 0) { kfree(buf); goto out; } context->proctitle.value = buf; context->proctitle.len = res; } msg = context->proctitle.value; len = context->proctitle.len; out: audit_log_n_untrustedstring(ab, msg, len); audit_log_end(ab); } static void audit_log_exit(struct audit_context *context, struct task_struct *tsk) { int i, call_panic = 0; struct audit_buffer *ab; struct audit_aux_data *aux; struct audit_names *n; /* tsk == current */ context->personality = tsk->personality; ab = audit_log_start(context, GFP_KERNEL, AUDIT_SYSCALL); if (!ab) return; /* audit_panic has been called */ audit_log_format(ab, "arch=%x syscall=%d", context->arch, context->major); if (context->personality != PER_LINUX) audit_log_format(ab, " per=%lx", context->personality); if (context->return_valid) audit_log_format(ab, " success=%s exit=%ld", (context->return_valid==AUDITSC_SUCCESS)?"yes":"no", context->return_code); audit_log_format(ab, " a0=%lx a1=%lx a2=%lx a3=%lx items=%d", context->argv[0], context->argv[1], context->argv[2], context->argv[3], context->name_count); audit_log_task_info(ab, tsk); audit_log_key(ab, context->filterkey); audit_log_end(ab); for (aux = context->aux; aux; aux = aux->next) { ab = audit_log_start(context, GFP_KERNEL, aux->type); if (!ab) continue; /* audit_panic has been called */ switch (aux->type) { case AUDIT_BPRM_FCAPS: { struct audit_aux_data_bprm_fcaps *axs = (void *)aux; audit_log_format(ab, "fver=%x", axs->fcap_ver); audit_log_cap(ab, "fp", &axs->fcap.permitted); audit_log_cap(ab, "fi", &axs->fcap.inheritable); audit_log_format(ab, " fe=%d", axs->fcap.fE); audit_log_cap(ab, "old_pp", &axs->old_pcap.permitted); audit_log_cap(ab, "old_pi", &axs->old_pcap.inheritable); audit_log_cap(ab, "old_pe", &axs->old_pcap.effective); audit_log_cap(ab, "new_pp", &axs->new_pcap.permitted); audit_log_cap(ab, "new_pi", &axs->new_pcap.inheritable); audit_log_cap(ab, "new_pe", &axs->new_pcap.effective); break; } } audit_log_end(ab); } if (context->type) show_special(context, &call_panic); if (context->fds[0] >= 0) { ab = audit_log_start(context, GFP_KERNEL, AUDIT_FD_PAIR); if (ab) { audit_log_format(ab, "fd0=%d fd1=%d", context->fds[0], context->fds[1]); audit_log_end(ab); } } if (context->sockaddr_len) { ab = audit_log_start(context, GFP_KERNEL, AUDIT_SOCKADDR); if (ab) { audit_log_format(ab, "saddr="); audit_log_n_hex(ab, (void *)context->sockaddr, context->sockaddr_len); audit_log_end(ab); } } for (aux = context->aux_pids; aux; aux = aux->next) { struct audit_aux_data_pids *axs = (void *)aux; for (i = 0; i < axs->pid_count; i++) if (audit_log_pid_context(context, axs->target_pid[i], axs->target_auid[i], axs->target_uid[i], axs->target_sessionid[i], axs->target_sid[i], axs->target_comm[i])) call_panic = 1; } if (context->target_pid && audit_log_pid_context(context, context->target_pid, context->target_auid, context->target_uid, context->target_sessionid, context->target_sid, context->target_comm)) call_panic = 1; if (context->pwd.dentry && context->pwd.mnt) { ab = audit_log_start(context, GFP_KERNEL, AUDIT_CWD); if (ab) { audit_log_d_path(ab, "cwd=", &context->pwd); audit_log_end(ab); } } i = 0; list_for_each_entry(n, &context->names_list, list) { if 
(n->hidden) continue; audit_log_name(context, n, NULL, i++, &call_panic); } audit_log_proctitle(tsk, context); /* Send end of event record to help user space know we are finished */ ab = audit_log_start(context, GFP_KERNEL, AUDIT_EOE); if (ab) audit_log_end(ab); if (call_panic) audit_panic("error converting sid to string"); } /** * audit_free - free a per-task audit context * @tsk: task whose audit context block to free * * Called from copy_process and do_exit */ void __audit_free(struct task_struct *tsk) { struct audit_context *context; context = audit_take_context(tsk, 0, 0); if (!context) return; /* Check for system calls that do not go through the exit * function (e.g., exit_group), then free context block. * We use GFP_ATOMIC here because we might be doing this * in the context of the idle thread */ /* that can happen only if we are called from do_exit() */ if (context->in_syscall && context->current_state == AUDIT_RECORD_CONTEXT) audit_log_exit(context, tsk); if (!list_empty(&context->killed_trees)) audit_kill_trees(&context->killed_trees); audit_free_context(context); } /** * audit_syscall_entry - fill in an audit record at syscall entry * @major: major syscall type (function) * @a1: additional syscall register 1 * @a2: additional syscall register 2 * @a3: additional syscall register 3 * @a4: additional syscall register 4 * * Fill in audit context at syscall entry. This only happens if the * audit context was created when the task was created and the state or * filters demand the audit context be built. If the state from the * per-task filter or from the per-syscall filter is AUDIT_RECORD_CONTEXT, * then the record will be written at syscall exit time (otherwise, it * will only be written if another part of the kernel requests that it * be written). */ void __audit_syscall_entry(int major, unsigned long a1, unsigned long a2, unsigned long a3, unsigned long a4) { struct task_struct *tsk = current; struct audit_context *context = tsk->audit_context; enum audit_state state; if (!context) return; BUG_ON(context->in_syscall || context->name_count); if (!audit_enabled) return; context->arch = syscall_get_arch(); context->major = major; context->argv[0] = a1; context->argv[1] = a2; context->argv[2] = a3; context->argv[3] = a4; state = context->state; context->dummy = !audit_n_rules; if (!context->dummy && state == AUDIT_BUILD_CONTEXT) { context->prio = 0; state = audit_filter_syscall(tsk, context, &audit_filter_list[AUDIT_FILTER_ENTRY]); } if (state == AUDIT_DISABLED) return; context->serial = 0; context->ctime = CURRENT_TIME; context->in_syscall = 1; context->current_state = state; context->ppid = 0; } /** * audit_syscall_exit - deallocate audit context after a system call * @success: success value of the syscall * @return_code: return value of the syscall * * Tear down after system call. If the audit context has been marked as * auditable (either because of the AUDIT_RECORD_CONTEXT state from * filtering, or because some other part of the kernel wrote an audit * message), then write out the syscall information. In call cases, * free the names stored from getname(). 
*/ void __audit_syscall_exit(int success, long return_code) { struct task_struct *tsk = current; struct audit_context *context; if (success) success = AUDITSC_SUCCESS; else success = AUDITSC_FAILURE; context = audit_take_context(tsk, success, return_code); if (!context) return; if (context->in_syscall && context->current_state == AUDIT_RECORD_CONTEXT) audit_log_exit(context, tsk); context->in_syscall = 0; context->prio = context->state == AUDIT_RECORD_CONTEXT ? ~0ULL : 0; if (!list_empty(&context->killed_trees)) audit_kill_trees(&context->killed_trees); audit_free_names(context); unroll_tree_refs(context, NULL, 0); audit_free_aux(context); context->aux = NULL; context->aux_pids = NULL; context->target_pid = 0; context->target_sid = 0; context->sockaddr_len = 0; context->type = 0; context->fds[0] = -1; if (context->state != AUDIT_RECORD_CONTEXT) { kfree(context->filterkey); context->filterkey = NULL; } tsk->audit_context = context; } static inline void handle_one(const struct inode *inode) { #ifdef CONFIG_AUDIT_TREE struct audit_context *context; struct audit_tree_refs *p; struct audit_chunk *chunk; int count; if (likely(hlist_empty(&inode->i_fsnotify_marks))) return; context = current->audit_context; p = context->trees; count = context->tree_count; rcu_read_lock(); chunk = audit_tree_lookup(inode); rcu_read_unlock(); if (!chunk) return; if (likely(put_tree_ref(context, chunk))) return; if (unlikely(!grow_tree_refs(context))) { pr_warn("out of memory, audit has lost a tree reference\n"); audit_set_auditable(context); audit_put_chunk(chunk); unroll_tree_refs(context, p, count); return; } put_tree_ref(context, chunk); #endif } static void handle_path(const struct dentry *dentry) { #ifdef CONFIG_AUDIT_TREE struct audit_context *context; struct audit_tree_refs *p; const struct dentry *d, *parent; struct audit_chunk *drop; unsigned long seq; int count; context = current->audit_context; p = context->trees; count = context->tree_count; retry: drop = NULL; d = dentry; rcu_read_lock(); seq = read_seqbegin(&rename_lock); for(;;) { struct inode *inode = d_backing_inode(d); if (inode && unlikely(!hlist_empty(&inode->i_fsnotify_marks))) { struct audit_chunk *chunk; chunk = audit_tree_lookup(inode); if (chunk) { if (unlikely(!put_tree_ref(context, chunk))) { drop = chunk; break; } } } parent = d->d_parent; if (parent == d) break; d = parent; } if (unlikely(read_seqretry(&rename_lock, seq) || drop)) { /* in this order */ rcu_read_unlock(); if (!drop) { /* just a race with rename */ unroll_tree_refs(context, p, count); goto retry; } audit_put_chunk(drop); if (grow_tree_refs(context)) { /* OK, got more space */ unroll_tree_refs(context, p, count); goto retry; } /* too bad */ pr_warn("out of memory, audit has lost a tree reference\n"); unroll_tree_refs(context, p, count); audit_set_auditable(context); return; } rcu_read_unlock(); #endif } static struct audit_names *audit_alloc_name(struct audit_context *context, unsigned char type) { struct audit_names *aname; if (context->name_count < AUDIT_NAMES) { aname = &context->preallocated_names[context->name_count]; memset(aname, 0, sizeof(*aname)); } else { aname = kzalloc(sizeof(*aname), GFP_NOFS); if (!aname) return NULL; aname->should_free = true; } aname->ino = AUDIT_INO_UNSET; aname->type = type; list_add_tail(&aname->list, &context->names_list); context->name_count++; return aname; } /** * audit_reusename - fill out filename with info from existing entry * @uptr: userland ptr to pathname * * Search the audit_names list for the current audit context. 
If there is an * existing entry with a matching "uptr" then return the filename * associated with that audit_name. If not, return NULL. */ struct filename * __audit_reusename(const __user char *uptr) { struct audit_context *context = current->audit_context; struct audit_names *n; list_for_each_entry(n, &context->names_list, list) { if (!n->name) continue; if (n->name->uptr == uptr) { n->name->refcnt++; return n->name; } } return NULL; } /** * audit_getname - add a name to the list * @name: name to add * * Add a name to the list of audit names for this context. * Called from fs/namei.c:getname(). */ void __audit_getname(struct filename *name) { struct audit_context *context = current->audit_context; struct audit_names *n; if (!context->in_syscall) return; n = audit_alloc_name(context, AUDIT_TYPE_UNKNOWN); if (!n) return; n->name = name; n->name_len = AUDIT_NAME_FULL; name->aname = n; name->refcnt++; if (!context->pwd.dentry) get_fs_pwd(current->fs, &context->pwd); } /** * __audit_inode - store the inode and device from a lookup * @name: name being audited * @dentry: dentry being audited * @flags: attributes for this particular entry */ void __audit_inode(struct filename *name, const struct dentry *dentry, unsigned int flags) { struct audit_context *context = current->audit_context; struct inode *inode = d_backing_inode(dentry); struct audit_names *n; bool parent = flags & AUDIT_INODE_PARENT; if (!context->in_syscall) return; if (!name) goto out_alloc; /* * If we have a pointer to an audit_names entry already, then we can * just use it directly if the type is correct. */ n = name->aname; if (n) { if (parent) { if (n->type == AUDIT_TYPE_PARENT || n->type == AUDIT_TYPE_UNKNOWN) goto out; } else { if (n->type != AUDIT_TYPE_PARENT) goto out; } } list_for_each_entry_reverse(n, &context->names_list, list) { if (n->ino) { /* valid inode number, use that for the comparison */ if (n->ino != inode->i_ino || n->dev != inode->i_sb->s_dev) continue; } else if (n->name) { /* inode number has not been set, check the name */ if (strcmp(n->name->name, name->name)) continue; } else /* no inode and no name (?!) ... this is odd ... */ continue; /* match the correct record type */ if (parent) { if (n->type == AUDIT_TYPE_PARENT || n->type == AUDIT_TYPE_UNKNOWN) goto out; } else { if (n->type != AUDIT_TYPE_PARENT) goto out; } } out_alloc: /* unable to find an entry with both a matching name and type */ n = audit_alloc_name(context, AUDIT_TYPE_UNKNOWN); if (!n) return; if (name) { n->name = name; name->refcnt++; } out: if (parent) { n->name_len = n->name ? parent_len(n->name->name) : AUDIT_NAME_FULL; n->type = AUDIT_TYPE_PARENT; if (flags & AUDIT_INODE_HIDDEN) n->hidden = true; } else { n->name_len = AUDIT_NAME_FULL; n->type = AUDIT_TYPE_NORMAL; } handle_path(dentry); audit_copy_inode(n, dentry, inode); } void __audit_file(const struct file *file) { __audit_inode(NULL, file->f_path.dentry, 0); } /** * __audit_inode_child - collect inode info for created/removed objects * @parent: inode of dentry parent * @dentry: dentry being audited * @type: AUDIT_TYPE_* value that we're looking for * * For syscalls that create or remove filesystem objects, audit_inode * can only collect information for the filesystem object's parent. * This call updates the audit context with the child's information. * Syscalls that create a new filesystem object must be hooked after * the object is created. Syscalls that remove a filesystem object * must be hooked prior, in order to capture the target inode during * unsuccessful attempts. 
*/ void __audit_inode_child(struct inode *parent, const struct dentry *dentry, const unsigned char type) { struct audit_context *context = current->audit_context; struct inode *inode = d_backing_inode(dentry); const char *dname = dentry->d_name.name; struct audit_names *n, *found_parent = NULL, *found_child = NULL; if (!context->in_syscall) return; if (inode) handle_one(inode); /* look for a parent entry first */ list_for_each_entry(n, &context->names_list, list) { if (!n->name || (n->type != AUDIT_TYPE_PARENT && n->type != AUDIT_TYPE_UNKNOWN)) continue; if (n->ino == parent->i_ino && n->dev == parent->i_sb->s_dev && !audit_compare_dname_path(dname, n->name->name, n->name_len)) { if (n->type == AUDIT_TYPE_UNKNOWN) n->type = AUDIT_TYPE_PARENT; found_parent = n; break; } } /* is there a matching child entry? */ list_for_each_entry(n, &context->names_list, list) { /* can only match entries that have a name */ if (!n->name || (n->type != type && n->type != AUDIT_TYPE_UNKNOWN)) continue; if (!strcmp(dname, n->name->name) || !audit_compare_dname_path(dname, n->name->name, found_parent ? found_parent->name_len : AUDIT_NAME_FULL)) { if (n->type == AUDIT_TYPE_UNKNOWN) n->type = type; found_child = n; break; } } if (!found_parent) { /* create a new, "anonymous" parent record */ n = audit_alloc_name(context, AUDIT_TYPE_PARENT); if (!n) return; audit_copy_inode(n, NULL, parent); } if (!found_child) { found_child = audit_alloc_name(context, type); if (!found_child) return; /* Re-use the name belonging to the slot for a matching parent * directory. All names for this context are relinquished in * audit_free_names() */ if (found_parent) { found_child->name = found_parent->name; found_child->name_len = AUDIT_NAME_FULL; found_child->name->refcnt++; } } if (inode) audit_copy_inode(found_child, dentry, inode); else found_child->ino = AUDIT_INO_UNSET; } EXPORT_SYMBOL_GPL(__audit_inode_child); /** * auditsc_get_stamp - get local copies of audit_context values * @ctx: audit_context for the task * @t: timespec to store time recorded in the audit_context * @serial: serial value that is recorded in the audit_context * * Also sets the context as auditable. 
*/ int auditsc_get_stamp(struct audit_context *ctx, struct timespec *t, unsigned int *serial) { if (!ctx->in_syscall) return 0; if (!ctx->serial) ctx->serial = audit_serial(); t->tv_sec = ctx->ctime.tv_sec; t->tv_nsec = ctx->ctime.tv_nsec; *serial = ctx->serial; if (!ctx->prio) { ctx->prio = 1; ctx->current_state = AUDIT_RECORD_CONTEXT; } return 1; } /* global counter which is incremented every time something logs in */ static atomic_t session_id = ATOMIC_INIT(0); static int audit_set_loginuid_perm(kuid_t loginuid) { /* if we are unset, we don't need privs */ if (!audit_loginuid_set(current)) return 0; /* if AUDIT_FEATURE_LOGINUID_IMMUTABLE means never ever allow a change*/ if (is_audit_feature_set(AUDIT_FEATURE_LOGINUID_IMMUTABLE)) return -EPERM; /* it is set, you need permission */ if (!capable(CAP_AUDIT_CONTROL)) return -EPERM; /* reject if this is not an unset and we don't allow that */ if (is_audit_feature_set(AUDIT_FEATURE_ONLY_UNSET_LOGINUID) && uid_valid(loginuid)) return -EPERM; return 0; } static void audit_log_set_loginuid(kuid_t koldloginuid, kuid_t kloginuid, unsigned int oldsessionid, unsigned int sessionid, int rc) { struct audit_buffer *ab; uid_t uid, oldloginuid, loginuid; if (!audit_enabled) return; uid = from_kuid(&init_user_ns, task_uid(current)); oldloginuid = from_kuid(&init_user_ns, koldloginuid); loginuid = from_kuid(&init_user_ns, kloginuid), ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_LOGIN); if (!ab) return; audit_log_format(ab, "pid=%d uid=%u", task_pid_nr(current), uid); audit_log_task_context(ab); audit_log_format(ab, " old-auid=%u auid=%u old-ses=%u ses=%u res=%d", oldloginuid, loginuid, oldsessionid, sessionid, !rc); audit_log_end(ab); } /** * audit_set_loginuid - set current task's audit_context loginuid * @loginuid: loginuid value * * Returns 0. * * Called (set) from fs/proc/base.c::proc_loginuid_write(). */ int audit_set_loginuid(kuid_t loginuid) { struct task_struct *task = current; unsigned int oldsessionid, sessionid = (unsigned int)-1; kuid_t oldloginuid; int rc; oldloginuid = audit_get_loginuid(current); oldsessionid = audit_get_sessionid(current); rc = audit_set_loginuid_perm(loginuid); if (rc) goto out; /* are we setting or clearing? 
*/ if (uid_valid(loginuid)) sessionid = (unsigned int)atomic_inc_return(&session_id); task->sessionid = sessionid; task->loginuid = loginuid; out: audit_log_set_loginuid(oldloginuid, loginuid, oldsessionid, sessionid, rc); return rc; } /** * __audit_mq_open - record audit data for a POSIX MQ open * @oflag: open flag * @mode: mode bits * @attr: queue attributes * */ void __audit_mq_open(int oflag, umode_t mode, struct mq_attr *attr) { struct audit_context *context = current->audit_context; if (attr) memcpy(&context->mq_open.attr, attr, sizeof(struct mq_attr)); else memset(&context->mq_open.attr, 0, sizeof(struct mq_attr)); context->mq_open.oflag = oflag; context->mq_open.mode = mode; context->type = AUDIT_MQ_OPEN; } /** * __audit_mq_sendrecv - record audit data for a POSIX MQ timed send/receive * @mqdes: MQ descriptor * @msg_len: Message length * @msg_prio: Message priority * @abs_timeout: Message timeout in absolute time * */ void __audit_mq_sendrecv(mqd_t mqdes, size_t msg_len, unsigned int msg_prio, const struct timespec *abs_timeout) { struct audit_context *context = current->audit_context; struct timespec *p = &context->mq_sendrecv.abs_timeout; if (abs_timeout) memcpy(p, abs_timeout, sizeof(struct timespec)); else memset(p, 0, sizeof(struct timespec)); context->mq_sendrecv.mqdes = mqdes; context->mq_sendrecv.msg_len = msg_len; context->mq_sendrecv.msg_prio = msg_prio; context->type = AUDIT_MQ_SENDRECV; } /** * __audit_mq_notify - record audit data for a POSIX MQ notify * @mqdes: MQ descriptor * @notification: Notification event * */ void __audit_mq_notify(mqd_t mqdes, const struct sigevent *notification) { struct audit_context *context = current->audit_context; if (notification) context->mq_notify.sigev_signo = notification->sigev_signo; else context->mq_notify.sigev_signo = 0; context->mq_notify.mqdes = mqdes; context->type = AUDIT_MQ_NOTIFY; } /** * __audit_mq_getsetattr - record audit data for a POSIX MQ get/set attribute * @mqdes: MQ descriptor * @mqstat: MQ flags * */ void __audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat) { struct audit_context *context = current->audit_context; context->mq_getsetattr.mqdes = mqdes; context->mq_getsetattr.mqstat = *mqstat; context->type = AUDIT_MQ_GETSETATTR; } /** * audit_ipc_obj - record audit data for ipc object * @ipcp: ipc permissions * */ void __audit_ipc_obj(struct kern_ipc_perm *ipcp) { struct audit_context *context = current->audit_context; context->ipc.uid = ipcp->uid; context->ipc.gid = ipcp->gid; context->ipc.mode = ipcp->mode; context->ipc.has_perm = 0; security_ipc_getsecid(ipcp, &context->ipc.osid); context->type = AUDIT_IPC; } /** * audit_ipc_set_perm - record audit data for new ipc permissions * @qbytes: msgq bytes * @uid: msgq user id * @gid: msgq group id * @mode: msgq mode (permissions) * * Called only after audit_ipc_obj(). */ void __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, umode_t mode) { struct audit_context *context = current->audit_context; context->ipc.qbytes = qbytes; context->ipc.perm_uid = uid; context->ipc.perm_gid = gid; context->ipc.perm_mode = mode; context->ipc.has_perm = 1; } void __audit_bprm(struct linux_binprm *bprm) { struct audit_context *context = current->audit_context; context->type = AUDIT_EXECVE; context->execve.argc = bprm->argc; } /** * audit_socketcall - record audit data for sys_socketcall * @nargs: number of args, which should not be more than AUDITSC_ARGS. 
* @args: args array * */ int __audit_socketcall(int nargs, unsigned long *args) { struct audit_context *context = current->audit_context; if (nargs <= 0 || nargs > AUDITSC_ARGS || !args) return -EINVAL; context->type = AUDIT_SOCKETCALL; context->socketcall.nargs = nargs; memcpy(context->socketcall.args, args, nargs * sizeof(unsigned long)); return 0; } /** * __audit_fd_pair - record audit data for pipe and socketpair * @fd1: the first file descriptor * @fd2: the second file descriptor * */ void __audit_fd_pair(int fd1, int fd2) { struct audit_context *context = current->audit_context; context->fds[0] = fd1; context->fds[1] = fd2; } /** * audit_sockaddr - record audit data for sys_bind, sys_connect, sys_sendto * @len: data length in user space * @a: data address in kernel space * * Returns 0 for success or NULL context or < 0 on error. */ int __audit_sockaddr(int len, void *a) { struct audit_context *context = current->audit_context; if (!context->sockaddr) { void *p = kmalloc(sizeof(struct sockaddr_storage), GFP_KERNEL); if (!p) return -ENOMEM; context->sockaddr = p; } context->sockaddr_len = len; memcpy(context->sockaddr, a, len); return 0; } void __audit_ptrace(struct task_struct *t) { struct audit_context *context = current->audit_context; context->target_pid = task_pid_nr(t); context->target_auid = audit_get_loginuid(t); context->target_uid = task_uid(t); context->target_sessionid = audit_get_sessionid(t); security_task_getsecid(t, &context->target_sid); memcpy(context->target_comm, t->comm, TASK_COMM_LEN); } /** * audit_signal_info - record signal info for shutting down audit subsystem * @sig: signal value * @t: task being signaled * * If the audit subsystem is being terminated, record the task (pid) * and uid that is doing that. */ int __audit_signal_info(int sig, struct task_struct *t) { struct audit_aux_data_pids *axp; struct task_struct *tsk = current; struct audit_context *ctx = tsk->audit_context; kuid_t uid = current_uid(), t_uid = task_uid(t); if (audit_pid && t->tgid == audit_pid) { if (sig == SIGTERM || sig == SIGHUP || sig == SIGUSR1 || sig == SIGUSR2) { audit_sig_pid = task_pid_nr(tsk); if (uid_valid(tsk->loginuid)) audit_sig_uid = tsk->loginuid; else audit_sig_uid = uid; security_task_getsecid(tsk, &audit_sig_sid); } if (!audit_signals || audit_dummy_context()) return 0; } /* optimize the common case by putting first signal recipient directly * in audit_context */ if (!ctx->target_pid) { ctx->target_pid = task_tgid_nr(t); ctx->target_auid = audit_get_loginuid(t); ctx->target_uid = t_uid; ctx->target_sessionid = audit_get_sessionid(t); security_task_getsecid(t, &ctx->target_sid); memcpy(ctx->target_comm, t->comm, TASK_COMM_LEN); return 0; } axp = (void *)ctx->aux_pids; if (!axp || axp->pid_count == AUDIT_AUX_PIDS) { axp = kzalloc(sizeof(*axp), GFP_ATOMIC); if (!axp) return -ENOMEM; axp->d.type = AUDIT_OBJ_PID; axp->d.next = ctx->aux_pids; ctx->aux_pids = (void *)axp; } BUG_ON(axp->pid_count >= AUDIT_AUX_PIDS); axp->target_pid[axp->pid_count] = task_tgid_nr(t); axp->target_auid[axp->pid_count] = audit_get_loginuid(t); axp->target_uid[axp->pid_count] = t_uid; axp->target_sessionid[axp->pid_count] = audit_get_sessionid(t); security_task_getsecid(t, &axp->target_sid[axp->pid_count]); memcpy(axp->target_comm[axp->pid_count], t->comm, TASK_COMM_LEN); axp->pid_count++; return 0; } /** * __audit_log_bprm_fcaps - store information about a loading bprm and relevant fcaps * @bprm: pointer to the bprm being processed * @new: the proposed new credentials * @old: the old credentials * * 
Simply check if the proc already has the caps given by the file and if not * store the priv escalation info for later auditing at the end of the syscall * * -Eric */ int __audit_log_bprm_fcaps(struct linux_binprm *bprm, const struct cred *new, const struct cred *old) { struct audit_aux_data_bprm_fcaps *ax; struct audit_context *context = current->audit_context; struct cpu_vfs_cap_data vcaps; ax = kmalloc(sizeof(*ax), GFP_KERNEL); if (!ax) return -ENOMEM; ax->d.type = AUDIT_BPRM_FCAPS; ax->d.next = context->aux; context->aux = (void *)ax; get_vfs_caps_from_disk(bprm->file->f_path.dentry, &vcaps); ax->fcap.permitted = vcaps.permitted; ax->fcap.inheritable = vcaps.inheritable; ax->fcap.fE = !!(vcaps.magic_etc & VFS_CAP_FLAGS_EFFECTIVE); ax->fcap_ver = (vcaps.magic_etc & VFS_CAP_REVISION_MASK) >> VFS_CAP_REVISION_SHIFT; ax->old_pcap.permitted = old->cap_permitted; ax->old_pcap.inheritable = old->cap_inheritable; ax->old_pcap.effective = old->cap_effective; ax->new_pcap.permitted = new->cap_permitted; ax->new_pcap.inheritable = new->cap_inheritable; ax->new_pcap.effective = new->cap_effective; return 0; } /** * __audit_log_capset - store information about the arguments to the capset syscall * @new: the new credentials * @old: the old (current) credentials * * Record the arguments userspace sent to sys_capset for later printing by the * audit system if applicable */ void __audit_log_capset(const struct cred *new, const struct cred *old) { struct audit_context *context = current->audit_context; context->capset.pid = task_pid_nr(current); context->capset.cap.effective = new->cap_effective; context->capset.cap.inheritable = new->cap_effective; context->capset.cap.permitted = new->cap_permitted; context->type = AUDIT_CAPSET; } void __audit_mmap_fd(int fd, int flags) { struct audit_context *context = current->audit_context; context->mmap.fd = fd; context->mmap.flags = flags; context->type = AUDIT_MMAP; } static void audit_log_task(struct audit_buffer *ab) { kuid_t auid, uid; kgid_t gid; unsigned int sessionid; char comm[sizeof(current->comm)]; auid = audit_get_loginuid(current); sessionid = audit_get_sessionid(current); current_uid_gid(&uid, &gid); audit_log_format(ab, "auid=%u uid=%u gid=%u ses=%u", from_kuid(&init_user_ns, auid), from_kuid(&init_user_ns, uid), from_kgid(&init_user_ns, gid), sessionid); audit_log_task_context(ab); audit_log_format(ab, " pid=%d comm=", task_pid_nr(current)); audit_log_untrustedstring(ab, get_task_comm(comm, current)); audit_log_d_path_exe(ab, current->mm); } /** * audit_core_dumps - record information about processes that end abnormally * @signr: signal value * * If a process ends with a core dump, something fishy is going on and we * should record the event for investigation. 
 */
void audit_core_dumps(long signr)
{
	struct audit_buffer *ab;

	if (!audit_enabled)
		return;

	if (signr == SIGQUIT)	/* don't care for those */
		return;

	ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_ANOM_ABEND);
	if (unlikely(!ab))
		return;
	audit_log_task(ab);
	audit_log_format(ab, " sig=%ld", signr);
	audit_log_end(ab);
}

void __audit_seccomp(unsigned long syscall, long signr, int code)
{
	struct audit_buffer *ab;

	ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_SECCOMP);
	if (unlikely(!ab))
		return;
	audit_log_task(ab);
	audit_log_format(ab, " sig=%ld arch=%x syscall=%ld compat=%d ip=0x%lx code=0x%x",
			 signr, syscall_get_arch(), syscall,
			 in_compat_syscall(), KSTK_EIP(current), code);
	audit_log_end(ab);
}

struct list_head *audit_killed_trees(void)
{
	struct audit_context *ctx = current->audit_context;
	if (likely(!ctx || !ctx->in_syscall))
		return NULL;
	return &ctx->killed_trees;
}
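/*
 * Illustrative sketch (not part of the original file): a minimal, standalone
 * user-space demonstration of the word/bit arithmetic that audit_in_mask()
 * above uses to test whether a syscall number is selected by a rule's syscall
 * bitmask. The DEMO_AUDIT_WORD()/DEMO_AUDIT_BIT() definitions and the bitmask
 * size below are assumptions restated here for the sketch; they are intended
 * to mirror the uapi audit header, not to be authoritative.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_AUDIT_BITMASK_SIZE 64			/* assumed, mirrors AUDIT_BITMASK_SIZE */
#define DEMO_AUDIT_WORD(nr) ((uint32_t)((nr) / 32))	/* assumed, mirrors AUDIT_WORD() */
#define DEMO_AUDIT_BIT(nr)  (1U << ((nr) % 32))		/* assumed, mirrors AUDIT_BIT() */

/* Return non-zero if syscall number @nr is set in @mask, as audit_in_mask() does. */
static int demo_in_mask(const uint32_t mask[DEMO_AUDIT_BITMASK_SIZE], unsigned long nr)
{
	uint32_t word, bit;

	if (nr > 0xffffffffUL)
		return 0;
	word = DEMO_AUDIT_WORD(nr);
	if (word >= DEMO_AUDIT_BITMASK_SIZE)
		return 0;
	bit = DEMO_AUDIT_BIT(nr);
	return (mask[word] & bit) != 0;
}

int main(void)
{
	uint32_t mask[DEMO_AUDIT_BITMASK_SIZE] = { 0 };

	/* Mark two hypothetical syscall numbers in the rule's mask. */
	mask[DEMO_AUDIT_WORD(2)]  |= DEMO_AUDIT_BIT(2);
	mask[DEMO_AUDIT_WORD(59)] |= DEMO_AUDIT_BIT(59);

	printf("nr 2:  %d\n", demo_in_mask(mask, 2));	/* prints 1 */
	printf("nr 59: %d\n", demo_in_mask(mask, 59));	/* prints 1 */
	printf("nr 60: %d\n", demo_in_mask(mask, 60));	/* prints 0 */
	return 0;
}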
/* * Kernel-based Virtual Machine driver for Linux * * This module enables machines with Intel VT-x extensions to run virtual * machines without emulation or binary translation. * * Copyright (C) 2006 Qumranet, Inc. * Copyright 2010 Red Hat, Inc. and/or its affiliates. * * Authors: * Avi Kivity <avi@qumranet.com> * Yaniv Kamay <yaniv@qumranet.com> * * This work is licensed under the terms of the GNU GPL, version 2. See * the COPYING file in the top-level directory. * */ #include <kvm/iodev.h> #include <linux/kvm_host.h> #include <linux/kvm.h> #include <linux/module.h> #include <linux/errno.h> #include <linux/percpu.h> #include <linux/mm.h> #include <linux/miscdevice.h> #include <linux/vmalloc.h> #include <linux/reboot.h> #include <linux/debugfs.h> #include <linux/highmem.h> #include <linux/file.h> #include <linux/syscore_ops.h> #include <linux/cpu.h> #include <linux/sched/signal.h> #include <linux/sched/mm.h> #include <linux/sched/stat.h> #include <linux/cpumask.h> #include <linux/smp.h> #include <linux/anon_inodes.h> #include <linux/profile.h> #include <linux/kvm_para.h> #include <linux/pagemap.h> #include <linux/mman.h> #include <linux/swap.h> #include <linux/bitops.h> #include <linux/spinlock.h> #include <linux/compat.h> #include <linux/srcu.h> #include <linux/hugetlb.h> #include <linux/slab.h> #include <linux/sort.h> #include <linux/bsearch.h> #include <asm/processor.h> #include <asm/io.h> #include <asm/ioctl.h> #include <linux/uaccess.h> #include <asm/pgtable.h> #include "coalesced_mmio.h" #include "async_pf.h" #include "vfio.h" #define CREATE_TRACE_POINTS #include <trace/events/kvm.h> /* Worst case buffer size needed for holding an integer. */ #define ITOA_MAX_LEN 12 MODULE_AUTHOR("Qumranet"); MODULE_LICENSE("GPL"); /* Architectures should define their poll value according to the halt latency */ unsigned int halt_poll_ns = KVM_HALT_POLL_NS_DEFAULT; module_param(halt_poll_ns, uint, 0644); EXPORT_SYMBOL_GPL(halt_poll_ns); /* Default doubles per-vcpu halt_poll_ns. */ unsigned int halt_poll_ns_grow = 2; module_param(halt_poll_ns_grow, uint, 0644); EXPORT_SYMBOL_GPL(halt_poll_ns_grow); /* Default resets per-vcpu halt_poll_ns . 
*/ unsigned int halt_poll_ns_shrink; module_param(halt_poll_ns_shrink, uint, 0644); EXPORT_SYMBOL_GPL(halt_poll_ns_shrink); /* * Ordering of locks: * * kvm->lock --> kvm->slots_lock --> kvm->irq_lock */ DEFINE_SPINLOCK(kvm_lock); static DEFINE_RAW_SPINLOCK(kvm_count_lock); LIST_HEAD(vm_list); static cpumask_var_t cpus_hardware_enabled; static int kvm_usage_count; static atomic_t hardware_enable_failed; struct kmem_cache *kvm_vcpu_cache; EXPORT_SYMBOL_GPL(kvm_vcpu_cache); static __read_mostly struct preempt_ops kvm_preempt_ops; struct dentry *kvm_debugfs_dir; EXPORT_SYMBOL_GPL(kvm_debugfs_dir); static int kvm_debugfs_num_entries; static const struct file_operations *stat_fops_per_vm[]; static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl, unsigned long arg); #ifdef CONFIG_KVM_COMPAT static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl, unsigned long arg); #define KVM_COMPAT(c) .compat_ioctl = (c) #else static long kvm_no_compat_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) { return -EINVAL; } #define KVM_COMPAT(c) .compat_ioctl = kvm_no_compat_ioctl #endif static int hardware_enable_all(void); static void hardware_disable_all(void); static void kvm_io_bus_destroy(struct kvm_io_bus *bus); static void mark_page_dirty_in_slot(struct kvm_memory_slot *memslot, gfn_t gfn); __visible bool kvm_rebooting; EXPORT_SYMBOL_GPL(kvm_rebooting); static bool largepages_enabled = true; #define KVM_EVENT_CREATE_VM 0 #define KVM_EVENT_DESTROY_VM 1 static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm); static unsigned long long kvm_createvm_count; static unsigned long long kvm_active_vms; __weak int kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, unsigned long start, unsigned long end, bool blockable) { return 0; } bool kvm_is_reserved_pfn(kvm_pfn_t pfn) { if (pfn_valid(pfn)) return PageReserved(pfn_to_page(pfn)); return true; } /* * Switches to specified vcpu, until a matching vcpu_put() */ void vcpu_load(struct kvm_vcpu *vcpu) { int cpu = get_cpu(); preempt_notifier_register(&vcpu->preempt_notifier); kvm_arch_vcpu_load(vcpu, cpu); put_cpu(); } EXPORT_SYMBOL_GPL(vcpu_load); void vcpu_put(struct kvm_vcpu *vcpu) { preempt_disable(); kvm_arch_vcpu_put(vcpu); preempt_notifier_unregister(&vcpu->preempt_notifier); preempt_enable(); } EXPORT_SYMBOL_GPL(vcpu_put); /* TODO: merge with kvm_arch_vcpu_should_kick */ static bool kvm_request_needs_ipi(struct kvm_vcpu *vcpu, unsigned req) { int mode = kvm_vcpu_exiting_guest_mode(vcpu); /* * We need to wait for the VCPU to reenable interrupts and get out of * READING_SHADOW_PAGE_TABLES mode. */ if (req & KVM_REQUEST_WAIT) return mode != OUTSIDE_GUEST_MODE; /* * Need to kick a running VCPU, but otherwise there is nothing to do. 
*/ return mode == IN_GUEST_MODE; } static void ack_flush(void *_completed) { } static inline bool kvm_kick_many_cpus(const struct cpumask *cpus, bool wait) { if (unlikely(!cpus)) cpus = cpu_online_mask; if (cpumask_empty(cpus)) return false; smp_call_function_many(cpus, ack_flush, NULL, wait); return true; } bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req, unsigned long *vcpu_bitmap, cpumask_var_t tmp) { int i, cpu, me; struct kvm_vcpu *vcpu; bool called; me = get_cpu(); kvm_for_each_vcpu(i, vcpu, kvm) { if (vcpu_bitmap && !test_bit(i, vcpu_bitmap)) continue; kvm_make_request(req, vcpu); cpu = vcpu->cpu; if (!(req & KVM_REQUEST_NO_WAKEUP) && kvm_vcpu_wake_up(vcpu)) continue; if (tmp != NULL && cpu != -1 && cpu != me && kvm_request_needs_ipi(vcpu, req)) __cpumask_set_cpu(cpu, tmp); } called = kvm_kick_many_cpus(tmp, !!(req & KVM_REQUEST_WAIT)); put_cpu(); return called; } bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req) { cpumask_var_t cpus; bool called; zalloc_cpumask_var(&cpus, GFP_ATOMIC); called = kvm_make_vcpus_request_mask(kvm, req, NULL, cpus); free_cpumask_var(cpus); return called; } #ifndef CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL void kvm_flush_remote_tlbs(struct kvm *kvm) { /* * Read tlbs_dirty before setting KVM_REQ_TLB_FLUSH in * kvm_make_all_cpus_request. */ long dirty_count = smp_load_acquire(&kvm->tlbs_dirty); /* * We want to publish modifications to the page tables before reading * mode. Pairs with a memory barrier in arch-specific code. * - x86: smp_mb__after_srcu_read_unlock in vcpu_enter_guest * and smp_mb in walk_shadow_page_lockless_begin/end. * - powerpc: smp_mb in kvmppc_prepare_to_enter. * * There is already an smp_mb__after_atomic() before * kvm_make_all_cpus_request() reads vcpu->mode. We reuse that * barrier here. */ if (!kvm_arch_flush_remote_tlb(kvm) || kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH)) ++kvm->stat.remote_tlb_flush; cmpxchg(&kvm->tlbs_dirty, dirty_count, 0); } EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs); #endif void kvm_reload_remote_mmus(struct kvm *kvm) { kvm_make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD); } int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id) { struct page *page; int r; mutex_init(&vcpu->mutex); vcpu->cpu = -1; vcpu->kvm = kvm; vcpu->vcpu_id = id; vcpu->pid = NULL; init_swait_queue_head(&vcpu->wq); kvm_async_pf_vcpu_init(vcpu); vcpu->pre_pcpu = -1; INIT_LIST_HEAD(&vcpu->blocked_vcpu_list); page = alloc_page(GFP_KERNEL | __GFP_ZERO); if (!page) { r = -ENOMEM; goto fail; } vcpu->run = page_address(page); kvm_vcpu_set_in_spin_loop(vcpu, false); kvm_vcpu_set_dy_eligible(vcpu, false); vcpu->preempted = false; r = kvm_arch_vcpu_init(vcpu); if (r < 0) goto fail_free_run; return 0; fail_free_run: free_page((unsigned long)vcpu->run); fail: return r; } EXPORT_SYMBOL_GPL(kvm_vcpu_init); void kvm_vcpu_uninit(struct kvm_vcpu *vcpu) { /* * no need for rcu_read_lock as VCPU_RUN is the only place that * will change the vcpu->pid pointer and on uninit all file * descriptors are already gone. 
*/ put_pid(rcu_dereference_protected(vcpu->pid, 1)); kvm_arch_vcpu_uninit(vcpu); free_page((unsigned long)vcpu->run); } EXPORT_SYMBOL_GPL(kvm_vcpu_uninit); #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn) { return container_of(mn, struct kvm, mmu_notifier); } static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn, struct mm_struct *mm, unsigned long address, pte_t pte) { struct kvm *kvm = mmu_notifier_to_kvm(mn); int idx; idx = srcu_read_lock(&kvm->srcu); spin_lock(&kvm->mmu_lock); kvm->mmu_notifier_seq++; if (kvm_set_spte_hva(kvm, address, pte)) kvm_flush_remote_tlbs(kvm); spin_unlock(&kvm->mmu_lock); srcu_read_unlock(&kvm->srcu, idx); } static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn, const struct mmu_notifier_range *range) { struct kvm *kvm = mmu_notifier_to_kvm(mn); int need_tlb_flush = 0, idx; int ret; idx = srcu_read_lock(&kvm->srcu); spin_lock(&kvm->mmu_lock); /* * The count increase must become visible at unlock time as no * spte can be established without taking the mmu_lock and * count is also read inside the mmu_lock critical section. */ kvm->mmu_notifier_count++; need_tlb_flush = kvm_unmap_hva_range(kvm, range->start, range->end); need_tlb_flush |= kvm->tlbs_dirty; /* we've to flush the tlb before the pages can be freed */ if (need_tlb_flush) kvm_flush_remote_tlbs(kvm); spin_unlock(&kvm->mmu_lock); ret = kvm_arch_mmu_notifier_invalidate_range(kvm, range->start, range->end, range->blockable); srcu_read_unlock(&kvm->srcu, idx); return ret; } static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn, const struct mmu_notifier_range *range) { struct kvm *kvm = mmu_notifier_to_kvm(mn); spin_lock(&kvm->mmu_lock); /* * This sequence increase will notify the kvm page fault that * the page that is going to be mapped in the spte could have * been freed. */ kvm->mmu_notifier_seq++; smp_wmb(); /* * The above sequence increase must be visible before the * below count decrease, which is ensured by the smp_wmb above * in conjunction with the smp_rmb in mmu_notifier_retry(). */ kvm->mmu_notifier_count--; spin_unlock(&kvm->mmu_lock); BUG_ON(kvm->mmu_notifier_count < 0); } static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn, struct mm_struct *mm, unsigned long start, unsigned long end) { struct kvm *kvm = mmu_notifier_to_kvm(mn); int young, idx; idx = srcu_read_lock(&kvm->srcu); spin_lock(&kvm->mmu_lock); young = kvm_age_hva(kvm, start, end); if (young) kvm_flush_remote_tlbs(kvm); spin_unlock(&kvm->mmu_lock); srcu_read_unlock(&kvm->srcu, idx); return young; } static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn, struct mm_struct *mm, unsigned long start, unsigned long end) { struct kvm *kvm = mmu_notifier_to_kvm(mn); int young, idx; idx = srcu_read_lock(&kvm->srcu); spin_lock(&kvm->mmu_lock); /* * Even though we do not flush TLB, this will still adversely * affect performance on pre-Haswell Intel EPT, where there is * no EPT Access Bit to clear so that we have to tear down EPT * tables instead. If we find this unacceptable, we can always * add a parameter to kvm_age_hva so that it effectively doesn't * do anything on clear_young. * * Also note that currently we never issue secondary TLB flushes * from clear_young, leaving this job up to the regular system * cadence. If we find this inaccurate, we might come up with a * more sophisticated heuristic later. 
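 * Note that, unlike clear_flush_young above, the aging result is returned
 * here without flushing remote TLBs.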
*/ young = kvm_age_hva(kvm, start, end); spin_unlock(&kvm->mmu_lock); srcu_read_unlock(&kvm->srcu, idx); return young; } static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn, struct mm_struct *mm, unsigned long address) { struct kvm *kvm = mmu_notifier_to_kvm(mn); int young, idx; idx = srcu_read_lock(&kvm->srcu); spin_lock(&kvm->mmu_lock); young = kvm_test_age_hva(kvm, address); spin_unlock(&kvm->mmu_lock); srcu_read_unlock(&kvm->srcu, idx); return young; } static void kvm_mmu_notifier_release(struct mmu_notifier *mn, struct mm_struct *mm) { struct kvm *kvm = mmu_notifier_to_kvm(mn); int idx; idx = srcu_read_lock(&kvm->srcu); kvm_arch_flush_shadow_all(kvm); srcu_read_unlock(&kvm->srcu, idx); } static const struct mmu_notifier_ops kvm_mmu_notifier_ops = { .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start, .invalidate_range_end = kvm_mmu_notifier_invalidate_range_end, .clear_flush_young = kvm_mmu_notifier_clear_flush_young, .clear_young = kvm_mmu_notifier_clear_young, .test_young = kvm_mmu_notifier_test_young, .change_pte = kvm_mmu_notifier_change_pte, .release = kvm_mmu_notifier_release, }; static int kvm_init_mmu_notifier(struct kvm *kvm) { kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops; return mmu_notifier_register(&kvm->mmu_notifier, current->mm); } #else /* !(CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER) */ static int kvm_init_mmu_notifier(struct kvm *kvm) { return 0; } #endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */ static struct kvm_memslots *kvm_alloc_memslots(void) { int i; struct kvm_memslots *slots; slots = kvzalloc(sizeof(struct kvm_memslots), GFP_KERNEL); if (!slots) return NULL; for (i = 0; i < KVM_MEM_SLOTS_NUM; i++) slots->id_to_index[i] = slots->memslots[i].id = i; return slots; } static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot) { if (!memslot->dirty_bitmap) return; kvfree(memslot->dirty_bitmap); memslot->dirty_bitmap = NULL; } /* * Free any memory in @free but not in @dont. 
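 * With @dont == NULL everything owned by the slot is released, including
 * the dirty bitmap and any architecture-specific metadata.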
*/ static void kvm_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free, struct kvm_memory_slot *dont) { if (!dont || free->dirty_bitmap != dont->dirty_bitmap) kvm_destroy_dirty_bitmap(free); kvm_arch_free_memslot(kvm, free, dont); free->npages = 0; } static void kvm_free_memslots(struct kvm *kvm, struct kvm_memslots *slots) { struct kvm_memory_slot *memslot; if (!slots) return; kvm_for_each_memslot(memslot, slots) kvm_free_memslot(kvm, memslot, NULL); kvfree(slots); } static void kvm_destroy_vm_debugfs(struct kvm *kvm) { int i; if (!kvm->debugfs_dentry) return; debugfs_remove_recursive(kvm->debugfs_dentry); if (kvm->debugfs_stat_data) { for (i = 0; i < kvm_debugfs_num_entries; i++) kfree(kvm->debugfs_stat_data[i]); kfree(kvm->debugfs_stat_data); } } static int kvm_create_vm_debugfs(struct kvm *kvm, int fd) { char dir_name[ITOA_MAX_LEN * 2]; struct kvm_stat_data *stat_data; struct kvm_stats_debugfs_item *p; if (!debugfs_initialized()) return 0; snprintf(dir_name, sizeof(dir_name), "%d-%d", task_pid_nr(current), fd); kvm->debugfs_dentry = debugfs_create_dir(dir_name, kvm_debugfs_dir); kvm->debugfs_stat_data = kcalloc(kvm_debugfs_num_entries, sizeof(*kvm->debugfs_stat_data), GFP_KERNEL); if (!kvm->debugfs_stat_data) return -ENOMEM; for (p = debugfs_entries; p->name; p++) { stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL); if (!stat_data) return -ENOMEM; stat_data->kvm = kvm; stat_data->offset = p->offset; kvm->debugfs_stat_data[p - debugfs_entries] = stat_data; debugfs_create_file(p->name, 0644, kvm->debugfs_dentry, stat_data, stat_fops_per_vm[p->kind]); } return 0; } static struct kvm *kvm_create_vm(unsigned long type) { int r, i; struct kvm *kvm = kvm_arch_alloc_vm(); if (!kvm) return ERR_PTR(-ENOMEM); spin_lock_init(&kvm->mmu_lock); mmgrab(current->mm); kvm->mm = current->mm; kvm_eventfd_init(kvm); mutex_init(&kvm->lock); mutex_init(&kvm->irq_lock); mutex_init(&kvm->slots_lock); refcount_set(&kvm->users_count, 1); INIT_LIST_HEAD(&kvm->devices); r = kvm_arch_init_vm(kvm, type); if (r) goto out_err_no_disable; r = hardware_enable_all(); if (r) goto out_err_no_disable; #ifdef CONFIG_HAVE_KVM_IRQFD INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list); #endif BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX); r = -ENOMEM; for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { struct kvm_memslots *slots = kvm_alloc_memslots(); if (!slots) goto out_err_no_srcu; /* * Generations must be different for each address space. * Init kvm generation close to the maximum to easily test the * code of handling generation number wrap-around. 
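 * (i * 2 - 150 wraps to a value just below the counter's maximum, so only
 * a handful of memslot updates are needed before the wrap path is hit.)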
*/ slots->generation = i * 2 - 150; rcu_assign_pointer(kvm->memslots[i], slots); } if (init_srcu_struct(&kvm->srcu)) goto out_err_no_srcu; if (init_srcu_struct(&kvm->irq_srcu)) goto out_err_no_irq_srcu; for (i = 0; i < KVM_NR_BUSES; i++) { rcu_assign_pointer(kvm->buses[i], kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL)); if (!kvm->buses[i]) goto out_err; } r = kvm_init_mmu_notifier(kvm); if (r) goto out_err; spin_lock(&kvm_lock); list_add(&kvm->vm_list, &vm_list); spin_unlock(&kvm_lock); preempt_notifier_inc(); return kvm; out_err: cleanup_srcu_struct(&kvm->irq_srcu); out_err_no_irq_srcu: cleanup_srcu_struct(&kvm->srcu); out_err_no_srcu: hardware_disable_all(); out_err_no_disable: refcount_set(&kvm->users_count, 0); for (i = 0; i < KVM_NR_BUSES; i++) kfree(kvm_get_bus(kvm, i)); for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) kvm_free_memslots(kvm, __kvm_memslots(kvm, i)); kvm_arch_free_vm(kvm); mmdrop(current->mm); return ERR_PTR(r); } static void kvm_destroy_devices(struct kvm *kvm) { struct kvm_device *dev, *tmp; /* * We do not need to take the kvm->lock here, because nobody else * has a reference to the struct kvm at this point and therefore * cannot access the devices list anyhow. */ list_for_each_entry_safe(dev, tmp, &kvm->devices, vm_node) { list_del(&dev->vm_node); dev->ops->destroy(dev); } } static void kvm_destroy_vm(struct kvm *kvm) { int i; struct mm_struct *mm = kvm->mm; kvm_uevent_notify_change(KVM_EVENT_DESTROY_VM, kvm); kvm_destroy_vm_debugfs(kvm); kvm_arch_sync_events(kvm); spin_lock(&kvm_lock); list_del(&kvm->vm_list); spin_unlock(&kvm_lock); kvm_free_irq_routing(kvm); for (i = 0; i < KVM_NR_BUSES; i++) { struct kvm_io_bus *bus = kvm_get_bus(kvm, i); if (bus) kvm_io_bus_destroy(bus); kvm->buses[i] = NULL; } kvm_coalesced_mmio_free(kvm); #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm); #else kvm_arch_flush_shadow_all(kvm); #endif kvm_arch_destroy_vm(kvm); kvm_destroy_devices(kvm); for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) kvm_free_memslots(kvm, __kvm_memslots(kvm, i)); cleanup_srcu_struct(&kvm->irq_srcu); cleanup_srcu_struct(&kvm->srcu); kvm_arch_free_vm(kvm); preempt_notifier_dec(); hardware_disable_all(); mmdrop(mm); } void kvm_get_kvm(struct kvm *kvm) { refcount_inc(&kvm->users_count); } EXPORT_SYMBOL_GPL(kvm_get_kvm); void kvm_put_kvm(struct kvm *kvm) { if (refcount_dec_and_test(&kvm->users_count)) kvm_destroy_vm(kvm); } EXPORT_SYMBOL_GPL(kvm_put_kvm); static int kvm_vm_release(struct inode *inode, struct file *filp) { struct kvm *kvm = filp->private_data; kvm_irqfd_release(kvm); kvm_put_kvm(kvm); return 0; } /* * Allocation size is twice as large as the actual dirty bitmap size. * See x86's kvm_vm_ioctl_get_dirty_log() why this is needed. */ static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot) { unsigned long dirty_bytes = 2 * kvm_dirty_bitmap_bytes(memslot); memslot->dirty_bitmap = kvzalloc(dirty_bytes, GFP_KERNEL); if (!memslot->dirty_bitmap) return -ENOMEM; return 0; } /* * Insert memslot and re-sort memslots based on their GFN, * so binary search could be used to lookup GFN. * Sorting algorithm takes advantage of having initially * sorted array and known changed memslot position. 
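 * Memslots are kept sorted by base_gfn in descending order, with unused
 * entries (npages == 0) at the tail, so a single pass of shifts in one
 * direction is enough to restore the ordering.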
*/ static void update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *new, enum kvm_mr_change change) { int id = new->id; int i = slots->id_to_index[id]; struct kvm_memory_slot *mslots = slots->memslots; WARN_ON(mslots[i].id != id); switch (change) { case KVM_MR_CREATE: slots->used_slots++; WARN_ON(mslots[i].npages || !new->npages); break; case KVM_MR_DELETE: slots->used_slots--; WARN_ON(new->npages || !mslots[i].npages); break; default: break; } while (i < KVM_MEM_SLOTS_NUM - 1 && new->base_gfn <= mslots[i + 1].base_gfn) { if (!mslots[i + 1].npages) break; mslots[i] = mslots[i + 1]; slots->id_to_index[mslots[i].id] = i; i++; } /* * The ">=" is needed when creating a slot with base_gfn == 0, * so that it moves before all those with base_gfn == npages == 0. * * On the other hand, if new->npages is zero, the above loop has * already left i pointing to the beginning of the empty part of * mslots, and the ">=" would move the hole backwards in this * case---which is wrong. So skip the loop when deleting a slot. */ if (new->npages) { while (i > 0 && new->base_gfn >= mslots[i - 1].base_gfn) { mslots[i] = mslots[i - 1]; slots->id_to_index[mslots[i].id] = i; i--; } } else WARN_ON_ONCE(i != slots->used_slots); mslots[i] = *new; slots->id_to_index[mslots[i].id] = i; } static int check_memory_region_flags(const struct kvm_userspace_memory_region *mem) { u32 valid_flags = KVM_MEM_LOG_DIRTY_PAGES; #ifdef __KVM_HAVE_READONLY_MEM valid_flags |= KVM_MEM_READONLY; #endif if (mem->flags & ~valid_flags) return -EINVAL; return 0; } static struct kvm_memslots *install_new_memslots(struct kvm *kvm, int as_id, struct kvm_memslots *slots) { struct kvm_memslots *old_memslots = __kvm_memslots(kvm, as_id); /* * Set the low bit in the generation, which disables SPTE caching * until the end of synchronize_srcu_expedited. */ WARN_ON(old_memslots->generation & 1); slots->generation = old_memslots->generation + 1; rcu_assign_pointer(kvm->memslots[as_id], slots); synchronize_srcu_expedited(&kvm->srcu); /* * Increment the new memslot generation a second time. This prevents * vm exits that race with memslot updates from caching a memslot * generation that will (potentially) be valid forever. * * Generations must be unique even across address spaces. We do not need * a global counter for that, instead the generation space is evenly split * across address spaces. For example, with two address spaces, address * space 0 will use generations 0, 4, 8, ... while * address space 1 will * use generations 2, 6, 10, 14, ... */ slots->generation += KVM_ADDRESS_SPACE_NUM * 2 - 1; kvm_arch_memslots_updated(kvm, slots); return old_memslots; } /* * Allocate some memory and give it an address in the guest physical address * space. * * Discontiguous memory is allowed, mostly for framebuffers. * * Must be called holding kvm->slots_lock for write. */ int __kvm_set_memory_region(struct kvm *kvm, const struct kvm_userspace_memory_region *mem) { int r; gfn_t base_gfn; unsigned long npages; struct kvm_memory_slot *slot; struct kvm_memory_slot old, new; struct kvm_memslots *slots = NULL, *old_memslots; int as_id, id; enum kvm_mr_change change; r = check_memory_region_flags(mem); if (r) goto out; r = -EINVAL; as_id = mem->slot >> 16; id = (u16)mem->slot; /* General sanity checks */ if (mem->memory_size & (PAGE_SIZE - 1)) goto out; if (mem->guest_phys_addr & (PAGE_SIZE - 1)) goto out; /* We can read the guest memory with __xxx_user() later on. 
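 * The userspace address range is therefore validated with access_ok() here.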
*/ if ((id < KVM_USER_MEM_SLOTS) && ((mem->userspace_addr & (PAGE_SIZE - 1)) || !access_ok((void __user *)(unsigned long)mem->userspace_addr, mem->memory_size))) goto out; if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_MEM_SLOTS_NUM) goto out; if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr) goto out; slot = id_to_memslot(__kvm_memslots(kvm, as_id), id); base_gfn = mem->guest_phys_addr >> PAGE_SHIFT; npages = mem->memory_size >> PAGE_SHIFT; if (npages > KVM_MEM_MAX_NR_PAGES) goto out; new = old = *slot; new.id = id; new.base_gfn = base_gfn; new.npages = npages; new.flags = mem->flags; if (npages) { if (!old.npages) change = KVM_MR_CREATE; else { /* Modify an existing slot. */ if ((mem->userspace_addr != old.userspace_addr) || (npages != old.npages) || ((new.flags ^ old.flags) & KVM_MEM_READONLY)) goto out; if (base_gfn != old.base_gfn) change = KVM_MR_MOVE; else if (new.flags != old.flags) change = KVM_MR_FLAGS_ONLY; else { /* Nothing to change. */ r = 0; goto out; } } } else { if (!old.npages) goto out; change = KVM_MR_DELETE; new.base_gfn = 0; new.flags = 0; } if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) { /* Check for overlaps */ r = -EEXIST; kvm_for_each_memslot(slot, __kvm_memslots(kvm, as_id)) { if (slot->id == id) continue; if (!((base_gfn + npages <= slot->base_gfn) || (base_gfn >= slot->base_gfn + slot->npages))) goto out; } } /* Free page dirty bitmap if unneeded */ if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES)) new.dirty_bitmap = NULL; r = -ENOMEM; if (change == KVM_MR_CREATE) { new.userspace_addr = mem->userspace_addr; if (kvm_arch_create_memslot(kvm, &new, npages)) goto out_free; } /* Allocate page dirty bitmap if needed */ if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) { if (kvm_create_dirty_bitmap(&new) < 0) goto out_free; } slots = kvzalloc(sizeof(struct kvm_memslots), GFP_KERNEL); if (!slots) goto out_free; memcpy(slots, __kvm_memslots(kvm, as_id), sizeof(struct kvm_memslots)); if ((change == KVM_MR_DELETE) || (change == KVM_MR_MOVE)) { slot = id_to_memslot(slots, id); slot->flags |= KVM_MEMSLOT_INVALID; old_memslots = install_new_memslots(kvm, as_id, slots); /* From this point no new shadow pages pointing to a deleted, * or moved, memslot will be created. * * validation of sp->gfn happens in: * - gfn_to_hva (kvm_read_guest, gfn_to_pfn) * - kvm_is_visible_gfn (mmu_check_roots) */ kvm_arch_flush_shadow_memslot(kvm, slot); /* * We can re-use the old_memslots from above, the only difference * from the currently installed memslots is the invalid flag. This * will get overwritten by update_memslots anyway. 
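 * (Re-using it avoids allocating yet another copy of the memslot array
 * while the slot is marked KVM_MEMSLOT_INVALID.)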
*/ slots = old_memslots; } r = kvm_arch_prepare_memory_region(kvm, &new, mem, change); if (r) goto out_slots; /* actual memory is freed via old in kvm_free_memslot below */ if (change == KVM_MR_DELETE) { new.dirty_bitmap = NULL; memset(&new.arch, 0, sizeof(new.arch)); } update_memslots(slots, &new, change); old_memslots = install_new_memslots(kvm, as_id, slots); kvm_arch_commit_memory_region(kvm, mem, &old, &new, change); kvm_free_memslot(kvm, &old, &new); kvfree(old_memslots); return 0; out_slots: kvfree(slots); out_free: kvm_free_memslot(kvm, &new, &old); out: return r; } EXPORT_SYMBOL_GPL(__kvm_set_memory_region); int kvm_set_memory_region(struct kvm *kvm, const struct kvm_userspace_memory_region *mem) { int r; mutex_lock(&kvm->slots_lock); r = __kvm_set_memory_region(kvm, mem); mutex_unlock(&kvm->slots_lock); return r; } EXPORT_SYMBOL_GPL(kvm_set_memory_region); static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm, struct kvm_userspace_memory_region *mem) { if ((u16)mem->slot >= KVM_USER_MEM_SLOTS) return -EINVAL; return kvm_set_memory_region(kvm, mem); } int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log, int *is_dirty) { struct kvm_memslots *slots; struct kvm_memory_slot *memslot; int i, as_id, id; unsigned long n; unsigned long any = 0; as_id = log->slot >> 16; id = (u16)log->slot; if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS) return -EINVAL; slots = __kvm_memslots(kvm, as_id); memslot = id_to_memslot(slots, id); if (!memslot->dirty_bitmap) return -ENOENT; n = kvm_dirty_bitmap_bytes(memslot); for (i = 0; !any && i < n/sizeof(long); ++i) any = memslot->dirty_bitmap[i]; if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n)) return -EFAULT; if (any) *is_dirty = 1; return 0; } EXPORT_SYMBOL_GPL(kvm_get_dirty_log); #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT /** * kvm_get_dirty_log_protect - get a snapshot of dirty pages, and if any pages * and reenable dirty page tracking for the corresponding pages. * @kvm: pointer to kvm instance * @log: slot id and address to which we copy the log * @is_dirty: flag set if any page is dirty * * We need to keep it in mind that VCPU threads can write to the bitmap * concurrently. So, to avoid losing track of dirty pages we keep the * following order: * * 1. Take a snapshot of the bit and clear it if needed. * 2. Write protect the corresponding page. * 3. Copy the snapshot to the userspace. * 4. Upon return caller flushes TLB's if needed. * * Between 2 and 4, the guest may write to the page using the remaining TLB * entry. This is not a problem because the page is reported dirty using * the snapshot taken before and step 4 ensures that writes done after * exiting to userspace will be logged for the next call. * */ int kvm_get_dirty_log_protect(struct kvm *kvm, struct kvm_dirty_log *log, bool *flush) { struct kvm_memslots *slots; struct kvm_memory_slot *memslot; int i, as_id, id; unsigned long n; unsigned long *dirty_bitmap; unsigned long *dirty_bitmap_buffer; as_id = log->slot >> 16; id = (u16)log->slot; if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS) return -EINVAL; slots = __kvm_memslots(kvm, as_id); memslot = id_to_memslot(slots, id); dirty_bitmap = memslot->dirty_bitmap; if (!dirty_bitmap) return -ENOENT; n = kvm_dirty_bitmap_bytes(memslot); *flush = false; if (kvm->manual_dirty_log_protect) { /* * Unlike kvm_get_dirty_log, we always return false in *flush, * because no flush is needed until KVM_CLEAR_DIRTY_LOG. 
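 * Write protection of the pages happens later, in
 * kvm_clear_dirty_log_protect(), when userspace clears the bits.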
There * is some code duplication between this function and * kvm_get_dirty_log, but hopefully all architecture * transition to kvm_get_dirty_log_protect and kvm_get_dirty_log * can be eliminated. */ dirty_bitmap_buffer = dirty_bitmap; } else { dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot); memset(dirty_bitmap_buffer, 0, n); spin_lock(&kvm->mmu_lock); for (i = 0; i < n / sizeof(long); i++) { unsigned long mask; gfn_t offset; if (!dirty_bitmap[i]) continue; *flush = true; mask = xchg(&dirty_bitmap[i], 0); dirty_bitmap_buffer[i] = mask; if (mask) { offset = i * BITS_PER_LONG; kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot, offset, mask); } } spin_unlock(&kvm->mmu_lock); } if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n)) return -EFAULT; return 0; } EXPORT_SYMBOL_GPL(kvm_get_dirty_log_protect); /** * kvm_clear_dirty_log_protect - clear dirty bits in the bitmap * and reenable dirty page tracking for the corresponding pages. * @kvm: pointer to kvm instance * @log: slot id and address from which to fetch the bitmap of dirty pages */ int kvm_clear_dirty_log_protect(struct kvm *kvm, struct kvm_clear_dirty_log *log, bool *flush) { struct kvm_memslots *slots; struct kvm_memory_slot *memslot; int as_id, id; gfn_t offset; unsigned long i, n; unsigned long *dirty_bitmap; unsigned long *dirty_bitmap_buffer; as_id = log->slot >> 16; id = (u16)log->slot; if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS) return -EINVAL; if ((log->first_page & 63) || (log->num_pages & 63)) return -EINVAL; slots = __kvm_memslots(kvm, as_id); memslot = id_to_memslot(slots, id); dirty_bitmap = memslot->dirty_bitmap; if (!dirty_bitmap) return -ENOENT; n = kvm_dirty_bitmap_bytes(memslot); if (log->first_page > memslot->npages || log->num_pages > memslot->npages - log->first_page) return -EINVAL; *flush = false; dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot); if (copy_from_user(dirty_bitmap_buffer, log->dirty_bitmap, n)) return -EFAULT; spin_lock(&kvm->mmu_lock); for (offset = log->first_page, i = offset / BITS_PER_LONG, n = log->num_pages / BITS_PER_LONG; n--; i++, offset += BITS_PER_LONG) { unsigned long mask = *dirty_bitmap_buffer++; atomic_long_t *p = (atomic_long_t *) &dirty_bitmap[i]; if (!mask) continue; mask &= atomic_long_fetch_andnot(mask, p); /* * mask contains the bits that really have been cleared. This * never includes any bits beyond the length of the memslot (if * the length is not aligned to 64 pages), therefore it is not * a problem if userspace sets them in log->dirty_bitmap. 
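 * (atomic_long_fetch_andnot() returns the previous word, so ANDing it with
 * mask leaves exactly the bits that were set and have just been cleared.)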
*/ if (mask) { *flush = true; kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot, offset, mask); } } spin_unlock(&kvm->mmu_lock); return 0; } EXPORT_SYMBOL_GPL(kvm_clear_dirty_log_protect); #endif bool kvm_largepages_enabled(void) { return largepages_enabled; } void kvm_disable_largepages(void) { largepages_enabled = false; } EXPORT_SYMBOL_GPL(kvm_disable_largepages); struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn) { return __gfn_to_memslot(kvm_memslots(kvm), gfn); } EXPORT_SYMBOL_GPL(gfn_to_memslot); struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn) { return __gfn_to_memslot(kvm_vcpu_memslots(vcpu), gfn); } bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn) { struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn); if (!memslot || memslot->id >= KVM_USER_MEM_SLOTS || memslot->flags & KVM_MEMSLOT_INVALID) return false; return true; } EXPORT_SYMBOL_GPL(kvm_is_visible_gfn); unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn) { struct vm_area_struct *vma; unsigned long addr, size; size = PAGE_SIZE; addr = gfn_to_hva(kvm, gfn); if (kvm_is_error_hva(addr)) return PAGE_SIZE; down_read(&current->mm->mmap_sem); vma = find_vma(current->mm, addr); if (!vma) goto out; size = vma_kernel_pagesize(vma); out: up_read(&current->mm->mmap_sem); return size; } static bool memslot_is_readonly(struct kvm_memory_slot *slot) { return slot->flags & KVM_MEM_READONLY; } static unsigned long __gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn, gfn_t *nr_pages, bool write) { if (!slot || slot->flags & KVM_MEMSLOT_INVALID) return KVM_HVA_ERR_BAD; if (memslot_is_readonly(slot) && write) return KVM_HVA_ERR_RO_BAD; if (nr_pages) *nr_pages = slot->npages - (gfn - slot->base_gfn); return __gfn_to_hva_memslot(slot, gfn); } static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn, gfn_t *nr_pages) { return __gfn_to_hva_many(slot, gfn, nr_pages, true); } unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn) { return gfn_to_hva_many(slot, gfn, NULL); } EXPORT_SYMBOL_GPL(gfn_to_hva_memslot); unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn) { return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL); } EXPORT_SYMBOL_GPL(gfn_to_hva); unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn) { return gfn_to_hva_many(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, NULL); } EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_hva); /* * Return the hva of a @gfn and the R/W attribute if possible. 
* * @slot: the kvm_memory_slot which contains @gfn * @gfn: the gfn to be translated * @writable: used to return the read/write attribute of the @slot if the hva * is valid and @writable is not NULL */ unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, gfn_t gfn, bool *writable) { unsigned long hva = __gfn_to_hva_many(slot, gfn, NULL, false); if (!kvm_is_error_hva(hva) && writable) *writable = !memslot_is_readonly(slot); return hva; } unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable) { struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); return gfn_to_hva_memslot_prot(slot, gfn, writable); } unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable) { struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); return gfn_to_hva_memslot_prot(slot, gfn, writable); } static inline int check_user_page_hwpoison(unsigned long addr) { int rc, flags = FOLL_HWPOISON | FOLL_WRITE; rc = get_user_pages(addr, 1, flags, NULL, NULL); return rc == -EHWPOISON; } /* * The fast path to get the writable pfn which will be stored in @pfn, * true indicates success, otherwise false is returned. It's also the * only part that runs if we can are in atomic context. */ static bool hva_to_pfn_fast(unsigned long addr, bool write_fault, bool *writable, kvm_pfn_t *pfn) { struct page *page[1]; int npages; /* * Fast pin a writable pfn only if it is a write fault request * or the caller allows to map a writable pfn for a read fault * request. */ if (!(write_fault || writable)) return false; npages = __get_user_pages_fast(addr, 1, 1, page); if (npages == 1) { *pfn = page_to_pfn(page[0]); if (writable) *writable = true; return true; } return false; } /* * The slow path to get the pfn of the specified host virtual address, * 1 indicates success, -errno is returned if error is detected. */ static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault, bool *writable, kvm_pfn_t *pfn) { unsigned int flags = FOLL_HWPOISON; struct page *page; int npages = 0; might_sleep(); if (writable) *writable = write_fault; if (write_fault) flags |= FOLL_WRITE; if (async) flags |= FOLL_NOWAIT; npages = get_user_pages_unlocked(addr, 1, &page, flags); if (npages != 1) return npages; /* map read fault as writable if possible */ if (unlikely(!write_fault) && writable) { struct page *wpage; if (__get_user_pages_fast(addr, 1, 1, &wpage) == 1) { *writable = true; put_page(page); page = wpage; } } *pfn = page_to_pfn(page); return npages; } static bool vma_is_valid(struct vm_area_struct *vma, bool write_fault) { if (unlikely(!(vma->vm_flags & VM_READ))) return false; if (write_fault && (unlikely(!(vma->vm_flags & VM_WRITE)))) return false; return true; } static int hva_to_pfn_remapped(struct vm_area_struct *vma, unsigned long addr, bool *async, bool write_fault, bool *writable, kvm_pfn_t *p_pfn) { unsigned long pfn; int r; r = follow_pfn(vma, addr, &pfn); if (r) { /* * get_user_pages fails for VM_IO and VM_PFNMAP vmas and does * not call the fault handler, so do it here. */ bool unlocked = false; r = fixup_user_fault(current, current->mm, addr, (write_fault ? FAULT_FLAG_WRITE : 0), &unlocked); if (unlocked) return -EAGAIN; if (r) return r; r = follow_pfn(vma, addr, &pfn); if (r) return r; } if (writable) *writable = true; /* * Get a reference here because callers of *hva_to_pfn* and * *gfn_to_pfn* ultimately call kvm_release_pfn_clean on the * returned pfn. 
This is only needed if the VMA has VM_MIXEDMAP * set, but the kvm_get_pfn/kvm_release_pfn_clean pair will * simply do nothing for reserved pfns. * * Whoever called remap_pfn_range is also going to call e.g. * unmap_mapping_range before the underlying pages are freed, * causing a call to our MMU notifier. */ kvm_get_pfn(pfn); *p_pfn = pfn; return 0; } /* * Pin guest page in memory and return its pfn. * @addr: host virtual address which maps memory to the guest * @atomic: whether this function can sleep * @async: whether this function need to wait IO complete if the * host page is not in the memory * @write_fault: whether we should get a writable host page * @writable: whether it allows to map a writable host page for !@write_fault * * The function will map a writable host page for these two cases: * 1): @write_fault = true * 2): @write_fault = false && @writable, @writable will tell the caller * whether the mapping is writable. */ static kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async, bool write_fault, bool *writable) { struct vm_area_struct *vma; kvm_pfn_t pfn = 0; int npages, r; /* we can do it either atomically or asynchronously, not both */ BUG_ON(atomic && async); if (hva_to_pfn_fast(addr, write_fault, writable, &pfn)) return pfn; if (atomic) return KVM_PFN_ERR_FAULT; npages = hva_to_pfn_slow(addr, async, write_fault, writable, &pfn); if (npages == 1) return pfn; down_read(&current->mm->mmap_sem); if (npages == -EHWPOISON || (!async && check_user_page_hwpoison(addr))) { pfn = KVM_PFN_ERR_HWPOISON; goto exit; } retry: vma = find_vma_intersection(current->mm, addr, addr + 1); if (vma == NULL) pfn = KVM_PFN_ERR_FAULT; else if (vma->vm_flags & (VM_IO | VM_PFNMAP)) { r = hva_to_pfn_remapped(vma, addr, async, write_fault, writable, &pfn); if (r == -EAGAIN) goto retry; if (r < 0) pfn = KVM_PFN_ERR_FAULT; } else { if (async && vma_is_valid(vma, write_fault)) *async = true; pfn = KVM_PFN_ERR_FAULT; } exit: up_read(&current->mm->mmap_sem); return pfn; } kvm_pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn, bool atomic, bool *async, bool write_fault, bool *writable) { unsigned long addr = __gfn_to_hva_many(slot, gfn, NULL, write_fault); if (addr == KVM_HVA_ERR_RO_BAD) { if (writable) *writable = false; return KVM_PFN_ERR_RO_FAULT; } if (kvm_is_error_hva(addr)) { if (writable) *writable = false; return KVM_PFN_NOSLOT; } /* Do not map writable pfn in the readonly memslot. 
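 * Report !writable to the caller and pass a NULL pointer down so that
 * hva_to_pfn() does not try to map the page writable.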
*/ if (writable && memslot_is_readonly(slot)) { *writable = false; writable = NULL; } return hva_to_pfn(addr, atomic, async, write_fault, writable); } EXPORT_SYMBOL_GPL(__gfn_to_pfn_memslot); kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault, bool *writable) { return __gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn, false, NULL, write_fault, writable); } EXPORT_SYMBOL_GPL(gfn_to_pfn_prot); kvm_pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn) { return __gfn_to_pfn_memslot(slot, gfn, false, NULL, true, NULL); } EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot); kvm_pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn) { return __gfn_to_pfn_memslot(slot, gfn, true, NULL, true, NULL); } EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot_atomic); kvm_pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn) { return gfn_to_pfn_memslot_atomic(gfn_to_memslot(kvm, gfn), gfn); } EXPORT_SYMBOL_GPL(gfn_to_pfn_atomic); kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn) { return gfn_to_pfn_memslot_atomic(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn); } EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn_atomic); kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn) { return gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn); } EXPORT_SYMBOL_GPL(gfn_to_pfn); kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn) { return gfn_to_pfn_memslot(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn); } EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn); int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn, struct page **pages, int nr_pages) { unsigned long addr; gfn_t entry = 0; addr = gfn_to_hva_many(slot, gfn, &entry); if (kvm_is_error_hva(addr)) return -1; if (entry < nr_pages) return 0; return __get_user_pages_fast(addr, nr_pages, 1, pages); } EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic); static struct page *kvm_pfn_to_page(kvm_pfn_t pfn) { if (is_error_noslot_pfn(pfn)) return KVM_ERR_PTR_BAD_PAGE; if (kvm_is_reserved_pfn(pfn)) { WARN_ON(1); return KVM_ERR_PTR_BAD_PAGE; } return pfn_to_page(pfn); } struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn) { kvm_pfn_t pfn; pfn = gfn_to_pfn(kvm, gfn); return kvm_pfn_to_page(pfn); } EXPORT_SYMBOL_GPL(gfn_to_page); struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn) { kvm_pfn_t pfn; pfn = kvm_vcpu_gfn_to_pfn(vcpu, gfn); return kvm_pfn_to_page(pfn); } EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_page); void kvm_release_page_clean(struct page *page) { WARN_ON(is_error_page(page)); kvm_release_pfn_clean(page_to_pfn(page)); } EXPORT_SYMBOL_GPL(kvm_release_page_clean); void kvm_release_pfn_clean(kvm_pfn_t pfn) { if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn)) put_page(pfn_to_page(pfn)); } EXPORT_SYMBOL_GPL(kvm_release_pfn_clean); void kvm_release_page_dirty(struct page *page) { WARN_ON(is_error_page(page)); kvm_release_pfn_dirty(page_to_pfn(page)); } EXPORT_SYMBOL_GPL(kvm_release_page_dirty); void kvm_release_pfn_dirty(kvm_pfn_t pfn) { kvm_set_pfn_dirty(pfn); kvm_release_pfn_clean(pfn); } EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty); void kvm_set_pfn_dirty(kvm_pfn_t pfn) { if (!kvm_is_reserved_pfn(pfn)) { struct page *page = pfn_to_page(pfn); if (!PageReserved(page)) SetPageDirty(page); } } EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty); void kvm_set_pfn_accessed(kvm_pfn_t pfn) { if (!kvm_is_reserved_pfn(pfn)) mark_page_accessed(pfn_to_page(pfn)); } EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed); void kvm_get_pfn(kvm_pfn_t pfn) { if (!kvm_is_reserved_pfn(pfn)) get_page(pfn_to_page(pfn)); } EXPORT_SYMBOL_GPL(kvm_get_pfn); static int next_segment(unsigned 
long len, int offset) { if (len > PAGE_SIZE - offset) return PAGE_SIZE - offset; else return len; } static int __kvm_read_guest_page(struct kvm_memory_slot *slot, gfn_t gfn, void *data, int offset, int len) { int r; unsigned long addr; addr = gfn_to_hva_memslot_prot(slot, gfn, NULL); if (kvm_is_error_hva(addr)) return -EFAULT; r = __copy_from_user(data, (void __user *)addr + offset, len); if (r) return -EFAULT; return 0; } int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset, int len) { struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); return __kvm_read_guest_page(slot, gfn, data, offset, len); } EXPORT_SYMBOL_GPL(kvm_read_guest_page); int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset, int len) { struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); return __kvm_read_guest_page(slot, gfn, data, offset, len); } EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_page); int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len) { gfn_t gfn = gpa >> PAGE_SHIFT; int seg; int offset = offset_in_page(gpa); int ret; while ((seg = next_segment(len, offset)) != 0) { ret = kvm_read_guest_page(kvm, gfn, data, offset, seg); if (ret < 0) return ret; offset = 0; len -= seg; data += seg; ++gfn; } return 0; } EXPORT_SYMBOL_GPL(kvm_read_guest); int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, unsigned long len) { gfn_t gfn = gpa >> PAGE_SHIFT; int seg; int offset = offset_in_page(gpa); int ret; while ((seg = next_segment(len, offset)) != 0) { ret = kvm_vcpu_read_guest_page(vcpu, gfn, data, offset, seg); if (ret < 0) return ret; offset = 0; len -= seg; data += seg; ++gfn; } return 0; } EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest); static int __kvm_read_guest_atomic(struct kvm_memory_slot *slot, gfn_t gfn, void *data, int offset, unsigned long len) { int r; unsigned long addr; addr = gfn_to_hva_memslot_prot(slot, gfn, NULL); if (kvm_is_error_hva(addr)) return -EFAULT; pagefault_disable(); r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len); pagefault_enable(); if (r) return -EFAULT; return 0; } int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len) { gfn_t gfn = gpa >> PAGE_SHIFT; struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); int offset = offset_in_page(gpa); return __kvm_read_guest_atomic(slot, gfn, data, offset, len); } EXPORT_SYMBOL_GPL(kvm_read_guest_atomic); int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, unsigned long len) { gfn_t gfn = gpa >> PAGE_SHIFT; struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); int offset = offset_in_page(gpa); return __kvm_read_guest_atomic(slot, gfn, data, offset, len); } EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_atomic); static int __kvm_write_guest_page(struct kvm_memory_slot *memslot, gfn_t gfn, const void *data, int offset, int len) { int r; unsigned long addr; addr = gfn_to_hva_memslot(memslot, gfn); if (kvm_is_error_hva(addr)) return -EFAULT; r = __copy_to_user((void __user *)addr + offset, data, len); if (r) return -EFAULT; mark_page_dirty_in_slot(memslot, gfn); return 0; } int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data, int offset, int len) { struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); return __kvm_write_guest_page(slot, gfn, data, offset, len); } EXPORT_SYMBOL_GPL(kvm_write_guest_page); int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, const void *data, int offset, int len) { struct kvm_memory_slot *slot = 
kvm_vcpu_gfn_to_memslot(vcpu, gfn); return __kvm_write_guest_page(slot, gfn, data, offset, len); } EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest_page); int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data, unsigned long len) { gfn_t gfn = gpa >> PAGE_SHIFT; int seg; int offset = offset_in_page(gpa); int ret; while ((seg = next_segment(len, offset)) != 0) { ret = kvm_write_guest_page(kvm, gfn, data, offset, seg); if (ret < 0) return ret; offset = 0; len -= seg; data += seg; ++gfn; } return 0; } EXPORT_SYMBOL_GPL(kvm_write_guest); int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data, unsigned long len) { gfn_t gfn = gpa >> PAGE_SHIFT; int seg; int offset = offset_in_page(gpa); int ret; while ((seg = next_segment(len, offset)) != 0) { ret = kvm_vcpu_write_guest_page(vcpu, gfn, data, offset, seg); if (ret < 0) return ret; offset = 0; len -= seg; data += seg; ++gfn; } return 0; } EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest); static int __kvm_gfn_to_hva_cache_init(struct kvm_memslots *slots, struct gfn_to_hva_cache *ghc, gpa_t gpa, unsigned long len) { int offset = offset_in_page(gpa); gfn_t start_gfn = gpa >> PAGE_SHIFT; gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT; gfn_t nr_pages_needed = end_gfn - start_gfn + 1; gfn_t nr_pages_avail; int r = start_gfn <= end_gfn ? 0 : -EINVAL; ghc->gpa = gpa; ghc->generation = slots->generation; ghc->len = len; ghc->hva = KVM_HVA_ERR_BAD; /* * If the requested region crosses two memslots, we still * verify that the entire region is valid here. */ while (!r && start_gfn <= end_gfn) { ghc->memslot = __gfn_to_memslot(slots, start_gfn); ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, &nr_pages_avail); if (kvm_is_error_hva(ghc->hva)) r = -EFAULT; start_gfn += nr_pages_avail; } /* Use the slow path for cross page reads and writes. 
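 * Only a region contained in a single page keeps a cached hva; otherwise
 * memslot is cleared and the cached accessors fall back to
 * kvm_read_guest()/kvm_write_guest().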
*/ if (!r && nr_pages_needed == 1) ghc->hva += offset; else ghc->memslot = NULL; return r; } int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, gpa_t gpa, unsigned long len) { struct kvm_memslots *slots = kvm_memslots(kvm); return __kvm_gfn_to_hva_cache_init(slots, ghc, gpa, len); } EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init); int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, void *data, unsigned int offset, unsigned long len) { struct kvm_memslots *slots = kvm_memslots(kvm); int r; gpa_t gpa = ghc->gpa + offset; BUG_ON(len + offset > ghc->len); if (slots->generation != ghc->generation) __kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len); if (unlikely(!ghc->memslot)) return kvm_write_guest(kvm, gpa, data, len); if (kvm_is_error_hva(ghc->hva)) return -EFAULT; r = __copy_to_user((void __user *)ghc->hva + offset, data, len); if (r) return -EFAULT; mark_page_dirty_in_slot(ghc->memslot, gpa >> PAGE_SHIFT); return 0; } EXPORT_SYMBOL_GPL(kvm_write_guest_offset_cached); int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, void *data, unsigned long len) { return kvm_write_guest_offset_cached(kvm, ghc, data, 0, len); } EXPORT_SYMBOL_GPL(kvm_write_guest_cached); int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, void *data, unsigned long len) { struct kvm_memslots *slots = kvm_memslots(kvm); int r; BUG_ON(len > ghc->len); if (slots->generation != ghc->generation) __kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len); if (unlikely(!ghc->memslot)) return kvm_read_guest(kvm, ghc->gpa, data, len); if (kvm_is_error_hva(ghc->hva)) return -EFAULT; r = __copy_from_user(data, (void __user *)ghc->hva, len); if (r) return -EFAULT; return 0; } EXPORT_SYMBOL_GPL(kvm_read_guest_cached); int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len) { const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0))); return kvm_write_guest_page(kvm, gfn, zero_page, offset, len); } EXPORT_SYMBOL_GPL(kvm_clear_guest_page); int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len) { gfn_t gfn = gpa >> PAGE_SHIFT; int seg; int offset = offset_in_page(gpa); int ret; while ((seg = next_segment(len, offset)) != 0) { ret = kvm_clear_guest_page(kvm, gfn, offset, seg); if (ret < 0) return ret; offset = 0; len -= seg; ++gfn; } return 0; } EXPORT_SYMBOL_GPL(kvm_clear_guest); static void mark_page_dirty_in_slot(struct kvm_memory_slot *memslot, gfn_t gfn) { if (memslot && memslot->dirty_bitmap) { unsigned long rel_gfn = gfn - memslot->base_gfn; set_bit_le(rel_gfn, memslot->dirty_bitmap); } } void mark_page_dirty(struct kvm *kvm, gfn_t gfn) { struct kvm_memory_slot *memslot; memslot = gfn_to_memslot(kvm, gfn); mark_page_dirty_in_slot(memslot, gfn); } EXPORT_SYMBOL_GPL(mark_page_dirty); void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn) { struct kvm_memory_slot *memslot; memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); mark_page_dirty_in_slot(memslot, gfn); } EXPORT_SYMBOL_GPL(kvm_vcpu_mark_page_dirty); void kvm_sigset_activate(struct kvm_vcpu *vcpu) { if (!vcpu->sigset_active) return; /* * This does a lockless modification of ->real_blocked, which is fine * because, only current can change ->real_blocked and all readers of * ->real_blocked don't care as long ->real_blocked is always a subset * of ->blocked. 
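 * kvm_sigset_deactivate() below restores the original mask once the vcpu
 * returns to userspace.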
*/ sigprocmask(SIG_SETMASK, &vcpu->sigset, &current->real_blocked); } void kvm_sigset_deactivate(struct kvm_vcpu *vcpu) { if (!vcpu->sigset_active) return; sigprocmask(SIG_SETMASK, &current->real_blocked, NULL); sigemptyset(&current->real_blocked); } static void grow_halt_poll_ns(struct kvm_vcpu *vcpu) { unsigned int old, val, grow; old = val = vcpu->halt_poll_ns; grow = READ_ONCE(halt_poll_ns_grow); /* 10us base */ if (val == 0 && grow) val = 10000; else val *= grow; if (val > halt_poll_ns) val = halt_poll_ns; vcpu->halt_poll_ns = val; trace_kvm_halt_poll_ns_grow(vcpu->vcpu_id, val, old); } static void shrink_halt_poll_ns(struct kvm_vcpu *vcpu) { unsigned int old, val, shrink; old = val = vcpu->halt_poll_ns; shrink = READ_ONCE(halt_poll_ns_shrink); if (shrink == 0) val = 0; else val /= shrink; vcpu->halt_poll_ns = val; trace_kvm_halt_poll_ns_shrink(vcpu->vcpu_id, val, old); } static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu) { int ret = -EINTR; int idx = srcu_read_lock(&vcpu->kvm->srcu); if (kvm_arch_vcpu_runnable(vcpu)) { kvm_make_request(KVM_REQ_UNHALT, vcpu); goto out; } if (kvm_cpu_has_pending_timer(vcpu)) goto out; if (signal_pending(current)) goto out; ret = 0; out: srcu_read_unlock(&vcpu->kvm->srcu, idx); return ret; } /* * The vCPU has executed a HLT instruction with in-kernel mode enabled. */ void kvm_vcpu_block(struct kvm_vcpu *vcpu) { ktime_t start, cur; DECLARE_SWAITQUEUE(wait); bool waited = false; u64 block_ns; start = cur = ktime_get(); if (vcpu->halt_poll_ns) { ktime_t stop = ktime_add_ns(ktime_get(), vcpu->halt_poll_ns); ++vcpu->stat.halt_attempted_poll; do { /* * This sets KVM_REQ_UNHALT if an interrupt * arrives. */ if (kvm_vcpu_check_block(vcpu) < 0) { ++vcpu->stat.halt_successful_poll; if (!vcpu_valid_wakeup(vcpu)) ++vcpu->stat.halt_poll_invalid; goto out; } cur = ktime_get(); } while (single_task_running() && ktime_before(cur, stop)); } kvm_arch_vcpu_blocking(vcpu); for (;;) { prepare_to_swait_exclusive(&vcpu->wq, &wait, TASK_INTERRUPTIBLE); if (kvm_vcpu_check_block(vcpu) < 0) break; waited = true; schedule(); } finish_swait(&vcpu->wq, &wait); cur = ktime_get(); kvm_arch_vcpu_unblocking(vcpu); out: block_ns = ktime_to_ns(cur) - ktime_to_ns(start); if (!vcpu_valid_wakeup(vcpu)) shrink_halt_poll_ns(vcpu); else if (halt_poll_ns) { if (block_ns <= vcpu->halt_poll_ns) ; /* we had a long block, shrink polling */ else if (vcpu->halt_poll_ns && block_ns > halt_poll_ns) shrink_halt_poll_ns(vcpu); /* we had a short halt and our poll time is too small */ else if (vcpu->halt_poll_ns < halt_poll_ns && block_ns < halt_poll_ns) grow_halt_poll_ns(vcpu); } else vcpu->halt_poll_ns = 0; trace_kvm_vcpu_wakeup(block_ns, waited, vcpu_valid_wakeup(vcpu)); kvm_arch_vcpu_block_finish(vcpu); } EXPORT_SYMBOL_GPL(kvm_vcpu_block); bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu) { struct swait_queue_head *wqp; wqp = kvm_arch_vcpu_wq(vcpu); if (swq_has_sleeper(wqp)) { swake_up_one(wqp); ++vcpu->stat.halt_wakeup; return true; } return false; } EXPORT_SYMBOL_GPL(kvm_vcpu_wake_up); #ifndef CONFIG_S390 /* * Kick a sleeping VCPU, or a guest VCPU in guest mode, into host kernel mode. 
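 * A halted vCPU is simply woken up; a vCPU in guest mode gets a reschedule
 * IPI if the architecture reports that a kick is required.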
*/ void kvm_vcpu_kick(struct kvm_vcpu *vcpu) { int me; int cpu = vcpu->cpu; if (kvm_vcpu_wake_up(vcpu)) return; me = get_cpu(); if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) if (kvm_arch_vcpu_should_kick(vcpu)) smp_send_reschedule(cpu); put_cpu(); } EXPORT_SYMBOL_GPL(kvm_vcpu_kick); #endif /* !CONFIG_S390 */ int kvm_vcpu_yield_to(struct kvm_vcpu *target) { struct pid *pid; struct task_struct *task = NULL; int ret = 0; rcu_read_lock(); pid = rcu_dereference(target->pid); if (pid) task = get_pid_task(pid, PIDTYPE_PID); rcu_read_unlock(); if (!task) return ret; ret = yield_to(task, 1); put_task_struct(task); return ret; } EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to); /* * Helper that checks whether a VCPU is eligible for directed yield. * Most eligible candidate to yield is decided by following heuristics: * * (a) VCPU which has not done pl-exit or cpu relax intercepted recently * (preempted lock holder), indicated by @in_spin_loop. * Set at the beiginning and cleared at the end of interception/PLE handler. * * (b) VCPU which has done pl-exit/ cpu relax intercepted but did not get * chance last time (mostly it has become eligible now since we have probably * yielded to lockholder in last iteration. This is done by toggling * @dy_eligible each time a VCPU checked for eligibility.) * * Yielding to a recently pl-exited/cpu relax intercepted VCPU before yielding * to preempted lock-holder could result in wrong VCPU selection and CPU * burning. Giving priority for a potential lock-holder increases lock * progress. * * Since algorithm is based on heuristics, accessing another VCPU data without * locking does not harm. It may result in trying to yield to same VCPU, fail * and continue with next VCPU and so on. */ static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu) { #ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT bool eligible; eligible = !vcpu->spin_loop.in_spin_loop || vcpu->spin_loop.dy_eligible; if (vcpu->spin_loop.in_spin_loop) kvm_vcpu_set_dy_eligible(vcpu, !vcpu->spin_loop.dy_eligible); return eligible; #else return true; #endif } void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode) { struct kvm *kvm = me->kvm; struct kvm_vcpu *vcpu; int last_boosted_vcpu = me->kvm->last_boosted_vcpu; int yielded = 0; int try = 3; int pass; int i; kvm_vcpu_set_in_spin_loop(me, true); /* * We boost the priority of a VCPU that is runnable but not * currently running, because it got preempted by something * else and called schedule in __vcpu_run. Hopefully that * VCPU is holding the lock that we need and will release it. * We approximate round-robin by starting at the last boosted VCPU. 
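 * At most three failed directed yields are tolerated per call (the 'try'
 * counter below) before giving up.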
*/ for (pass = 0; pass < 2 && !yielded && try; pass++) { kvm_for_each_vcpu(i, vcpu, kvm) { if (!pass && i <= last_boosted_vcpu) { i = last_boosted_vcpu; continue; } else if (pass && i > last_boosted_vcpu) break; if (!READ_ONCE(vcpu->preempted)) continue; if (vcpu == me) continue; if (swait_active(&vcpu->wq) && !kvm_arch_vcpu_runnable(vcpu)) continue; if (yield_to_kernel_mode && !kvm_arch_vcpu_in_kernel(vcpu)) continue; if (!kvm_vcpu_eligible_for_directed_yield(vcpu)) continue; yielded = kvm_vcpu_yield_to(vcpu); if (yielded > 0) { kvm->last_boosted_vcpu = i; break; } else if (yielded < 0) { try--; if (!try) break; } } } kvm_vcpu_set_in_spin_loop(me, false); /* Ensure vcpu is not eligible during next spinloop */ kvm_vcpu_set_dy_eligible(me, false); } EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin); static vm_fault_t kvm_vcpu_fault(struct vm_fault *vmf) { struct kvm_vcpu *vcpu = vmf->vma->vm_file->private_data; struct page *page; if (vmf->pgoff == 0) page = virt_to_page(vcpu->run); #ifdef CONFIG_X86 else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET) page = virt_to_page(vcpu->arch.pio_data); #endif #ifdef CONFIG_KVM_MMIO else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET) page = virt_to_page(vcpu->kvm->coalesced_mmio_ring); #endif else return kvm_arch_vcpu_fault(vcpu, vmf); get_page(page); vmf->page = page; return 0; } static const struct vm_operations_struct kvm_vcpu_vm_ops = { .fault = kvm_vcpu_fault, }; static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma) { vma->vm_ops = &kvm_vcpu_vm_ops; return 0; } static int kvm_vcpu_release(struct inode *inode, struct file *filp) { struct kvm_vcpu *vcpu = filp->private_data; debugfs_remove_recursive(vcpu->debugfs_dentry); kvm_put_kvm(vcpu->kvm); return 0; } static struct file_operations kvm_vcpu_fops = { .release = kvm_vcpu_release, .unlocked_ioctl = kvm_vcpu_ioctl, .mmap = kvm_vcpu_mmap, .llseek = noop_llseek, KVM_COMPAT(kvm_vcpu_compat_ioctl), }; /* * Allocates an inode for the vcpu. */ static int create_vcpu_fd(struct kvm_vcpu *vcpu) { char name[8 + 1 + ITOA_MAX_LEN + 1]; snprintf(name, sizeof(name), "kvm-vcpu:%d", vcpu->vcpu_id); return anon_inode_getfd(name, &kvm_vcpu_fops, vcpu, O_RDWR | O_CLOEXEC); } static int kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu) { char dir_name[ITOA_MAX_LEN * 2]; int ret; if (!kvm_arch_has_vcpu_debugfs()) return 0; if (!debugfs_initialized()) return 0; snprintf(dir_name, sizeof(dir_name), "vcpu%d", vcpu->vcpu_id); vcpu->debugfs_dentry = debugfs_create_dir(dir_name, vcpu->kvm->debugfs_dentry); if (!vcpu->debugfs_dentry) return -ENOMEM; ret = kvm_arch_create_vcpu_debugfs(vcpu); if (ret < 0) { debugfs_remove_recursive(vcpu->debugfs_dentry); return ret; } return 0; } /* * Creates some virtual cpus. Good luck creating more than one. 
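 * The file descriptor is created only after kvm_arch_vcpu_setup() succeeds,
 * and the vcpu is published in kvm->vcpus[] with a write barrier that pairs
 * with the read barrier in kvm_get_vcpu().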
*/ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id) { int r; struct kvm_vcpu *vcpu; if (id >= KVM_MAX_VCPU_ID) return -EINVAL; mutex_lock(&kvm->lock); if (kvm->created_vcpus == KVM_MAX_VCPUS) { mutex_unlock(&kvm->lock); return -EINVAL; } kvm->created_vcpus++; mutex_unlock(&kvm->lock); vcpu = kvm_arch_vcpu_create(kvm, id); if (IS_ERR(vcpu)) { r = PTR_ERR(vcpu); goto vcpu_decrement; } preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops); r = kvm_arch_vcpu_setup(vcpu); if (r) goto vcpu_destroy; r = kvm_create_vcpu_debugfs(vcpu); if (r) goto vcpu_destroy; mutex_lock(&kvm->lock); if (kvm_get_vcpu_by_id(kvm, id)) { r = -EEXIST; goto unlock_vcpu_destroy; } BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]); /* Now it's all set up, let userspace reach it */ kvm_get_kvm(kvm); r = create_vcpu_fd(vcpu); if (r < 0) { kvm_put_kvm(kvm); goto unlock_vcpu_destroy; } kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu; /* * Pairs with smp_rmb() in kvm_get_vcpu. Write kvm->vcpus * before kvm->online_vcpu's incremented value. */ smp_wmb(); atomic_inc(&kvm->online_vcpus); mutex_unlock(&kvm->lock); kvm_arch_vcpu_postcreate(vcpu); return r; unlock_vcpu_destroy: mutex_unlock(&kvm->lock); debugfs_remove_recursive(vcpu->debugfs_dentry); vcpu_destroy: kvm_arch_vcpu_destroy(vcpu); vcpu_decrement: mutex_lock(&kvm->lock); kvm->created_vcpus--; mutex_unlock(&kvm->lock); return r; } static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset) { if (sigset) { sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP)); vcpu->sigset_active = 1; vcpu->sigset = *sigset; } else vcpu->sigset_active = 0; return 0; } static long kvm_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { struct kvm_vcpu *vcpu = filp->private_data; void __user *argp = (void __user *)arg; int r; struct kvm_fpu *fpu = NULL; struct kvm_sregs *kvm_sregs = NULL; if (vcpu->kvm->mm != current->mm) return -EIO; if (unlikely(_IOC_TYPE(ioctl) != KVMIO)) return -EINVAL; /* * Some architectures have vcpu ioctls that are asynchronous to vcpu * execution; mutex_lock() would break them. */ r = kvm_arch_vcpu_async_ioctl(filp, ioctl, arg); if (r != -ENOIOCTLCMD) return r; if (mutex_lock_killable(&vcpu->mutex)) return -EINTR; switch (ioctl) { case KVM_RUN: { struct pid *oldpid; r = -EINVAL; if (arg) goto out; oldpid = rcu_access_pointer(vcpu->pid); if (unlikely(oldpid != task_pid(current))) { /* The thread running this VCPU changed. 
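 * Record the new pid (under RCU) and give the architecture a chance to
 * react via kvm_arch_vcpu_run_pid_change() before entering the run loop.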
*/ struct pid *newpid; r = kvm_arch_vcpu_run_pid_change(vcpu); if (r) break; newpid = get_task_pid(current, PIDTYPE_PID); rcu_assign_pointer(vcpu->pid, newpid); if (oldpid) synchronize_rcu(); put_pid(oldpid); } r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run); trace_kvm_userspace_exit(vcpu->run->exit_reason, r); break; } case KVM_GET_REGS: { struct kvm_regs *kvm_regs; r = -ENOMEM; kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL); if (!kvm_regs) goto out; r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs); if (r) goto out_free1; r = -EFAULT; if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs))) goto out_free1; r = 0; out_free1: kfree(kvm_regs); break; } case KVM_SET_REGS: { struct kvm_regs *kvm_regs; r = -ENOMEM; kvm_regs = memdup_user(argp, sizeof(*kvm_regs)); if (IS_ERR(kvm_regs)) { r = PTR_ERR(kvm_regs); goto out; } r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs); kfree(kvm_regs); break; } case KVM_GET_SREGS: { kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL); r = -ENOMEM; if (!kvm_sregs) goto out; r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs); if (r) goto out; r = -EFAULT; if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs))) goto out; r = 0; break; } case KVM_SET_SREGS: { kvm_sregs = memdup_user(argp, sizeof(*kvm_sregs)); if (IS_ERR(kvm_sregs)) { r = PTR_ERR(kvm_sregs); kvm_sregs = NULL; goto out; } r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs); break; } case KVM_GET_MP_STATE: { struct kvm_mp_state mp_state; r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state); if (r) goto out; r = -EFAULT; if (copy_to_user(argp, &mp_state, sizeof(mp_state))) goto out; r = 0; break; } case KVM_SET_MP_STATE: { struct kvm_mp_state mp_state; r = -EFAULT; if (copy_from_user(&mp_state, argp, sizeof(mp_state))) goto out; r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state); break; } case KVM_TRANSLATE: { struct kvm_translation tr; r = -EFAULT; if (copy_from_user(&tr, argp, sizeof(tr))) goto out; r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr); if (r) goto out; r = -EFAULT; if (copy_to_user(argp, &tr, sizeof(tr))) goto out; r = 0; break; } case KVM_SET_GUEST_DEBUG: { struct kvm_guest_debug dbg; r = -EFAULT; if (copy_from_user(&dbg, argp, sizeof(dbg))) goto out; r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg); break; } case KVM_SET_SIGNAL_MASK: { struct kvm_signal_mask __user *sigmask_arg = argp; struct kvm_signal_mask kvm_sigmask; sigset_t sigset, *p; p = NULL; if (argp) { r = -EFAULT; if (copy_from_user(&kvm_sigmask, argp, sizeof(kvm_sigmask))) goto out; r = -EINVAL; if (kvm_sigmask.len != sizeof(sigset)) goto out; r = -EFAULT; if (copy_from_user(&sigset, sigmask_arg->sigset, sizeof(sigset))) goto out; p = &sigset; } r = kvm_vcpu_ioctl_set_sigmask(vcpu, p); break; } case KVM_GET_FPU: { fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL); r = -ENOMEM; if (!fpu) goto out; r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu); if (r) goto out; r = -EFAULT; if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu))) goto out; r = 0; break; } case KVM_SET_FPU: { fpu = memdup_user(argp, sizeof(*fpu)); if (IS_ERR(fpu)) { r = PTR_ERR(fpu); fpu = NULL; goto out; } r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu); break; } default: r = kvm_arch_vcpu_ioctl(filp, ioctl, arg); } out: mutex_unlock(&vcpu->mutex); kfree(fpu); kfree(kvm_sregs); return r; } #ifdef CONFIG_KVM_COMPAT static long kvm_vcpu_compat_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { struct kvm_vcpu *vcpu = filp->private_data; void __user *argp = compat_ptr(arg); int r; if (vcpu->kvm->mm != current->mm) return -EIO; switch 
(ioctl) { case KVM_SET_SIGNAL_MASK: { struct kvm_signal_mask __user *sigmask_arg = argp; struct kvm_signal_mask kvm_sigmask; sigset_t sigset; if (argp) { r = -EFAULT; if (copy_from_user(&kvm_sigmask, argp, sizeof(kvm_sigmask))) goto out; r = -EINVAL; if (kvm_sigmask.len != sizeof(compat_sigset_t)) goto out; r = -EFAULT; if (get_compat_sigset(&sigset, (void *)sigmask_arg->sigset)) goto out; r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset); } else r = kvm_vcpu_ioctl_set_sigmask(vcpu, NULL); break; } default: r = kvm_vcpu_ioctl(filp, ioctl, arg); } out: return r; } #endif static int kvm_device_ioctl_attr(struct kvm_device *dev, int (*accessor)(struct kvm_device *dev, struct kvm_device_attr *attr), unsigned long arg) { struct kvm_device_attr attr; if (!accessor) return -EPERM; if (copy_from_user(&attr, (void __user *)arg, sizeof(attr))) return -EFAULT; return accessor(dev, &attr); } static long kvm_device_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { struct kvm_device *dev = filp->private_data; switch (ioctl) { case KVM_SET_DEVICE_ATTR: return kvm_device_ioctl_attr(dev, dev->ops->set_attr, arg); case KVM_GET_DEVICE_ATTR: return kvm_device_ioctl_attr(dev, dev->ops->get_attr, arg); case KVM_HAS_DEVICE_ATTR: return kvm_device_ioctl_attr(dev, dev->ops->has_attr, arg); default: if (dev->ops->ioctl) return dev->ops->ioctl(dev, ioctl, arg); return -ENOTTY; } } static int kvm_device_release(struct inode *inode, struct file *filp) { struct kvm_device *dev = filp->private_data; struct kvm *kvm = dev->kvm; kvm_put_kvm(kvm); return 0; } static const struct file_operations kvm_device_fops = { .unlocked_ioctl = kvm_device_ioctl, .release = kvm_device_release, KVM_COMPAT(kvm_device_ioctl), }; struct kvm_device *kvm_device_from_filp(struct file *filp) { if (filp->f_op != &kvm_device_fops) return NULL; return filp->private_data; } static struct kvm_device_ops *kvm_device_ops_table[KVM_DEV_TYPE_MAX] = { #ifdef CONFIG_KVM_MPIC [KVM_DEV_TYPE_FSL_MPIC_20] = &kvm_mpic_ops, [KVM_DEV_TYPE_FSL_MPIC_42] = &kvm_mpic_ops, #endif }; int kvm_register_device_ops(struct kvm_device_ops *ops, u32 type) { if (type >= ARRAY_SIZE(kvm_device_ops_table)) return -ENOSPC; if (kvm_device_ops_table[type] != NULL) return -EEXIST; kvm_device_ops_table[type] = ops; return 0; } void kvm_unregister_device_ops(u32 type) { if (kvm_device_ops_table[type] != NULL) kvm_device_ops_table[type] = NULL; } static int kvm_ioctl_create_device(struct kvm *kvm, struct kvm_create_device *cd) { struct kvm_device_ops *ops = NULL; struct kvm_device *dev; bool test = cd->flags & KVM_CREATE_DEVICE_TEST; int ret; if (cd->type >= ARRAY_SIZE(kvm_device_ops_table)) return -ENODEV; ops = kvm_device_ops_table[cd->type]; if (ops == NULL) return -ENODEV; if (test) return 0; dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) return -ENOMEM; dev->ops = ops; dev->kvm = kvm; mutex_lock(&kvm->lock); ret = ops->create(dev, cd->type); if (ret < 0) { mutex_unlock(&kvm->lock); kfree(dev); return ret; } list_add(&dev->vm_node, &kvm->devices); mutex_unlock(&kvm->lock); if (ops->init) ops->init(dev); ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC); if (ret < 0) { mutex_lock(&kvm->lock); list_del(&dev->vm_node); mutex_unlock(&kvm->lock); ops->destroy(dev); return ret; } kvm_get_kvm(kvm); cd->fd = ret; return 0; } static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg) { switch (arg) { case KVM_CAP_USER_MEMORY: case KVM_CAP_DESTROY_MEMORY_REGION_WORKS: case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS: case 
KVM_CAP_INTERNAL_ERROR_DATA: #ifdef CONFIG_HAVE_KVM_MSI case KVM_CAP_SIGNAL_MSI: #endif #ifdef CONFIG_HAVE_KVM_IRQFD case KVM_CAP_IRQFD: case KVM_CAP_IRQFD_RESAMPLE: #endif case KVM_CAP_IOEVENTFD_ANY_LENGTH: case KVM_CAP_CHECK_EXTENSION_VM: case KVM_CAP_ENABLE_CAP_VM: #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT: #endif return 1; #ifdef CONFIG_KVM_MMIO case KVM_CAP_COALESCED_MMIO: return KVM_COALESCED_MMIO_PAGE_OFFSET; case KVM_CAP_COALESCED_PIO: return 1; #endif #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING case KVM_CAP_IRQ_ROUTING: return KVM_MAX_IRQ_ROUTES; #endif #if KVM_ADDRESS_SPACE_NUM > 1 case KVM_CAP_MULTI_ADDRESS_SPACE: return KVM_ADDRESS_SPACE_NUM; #endif case KVM_CAP_MAX_VCPU_ID: return KVM_MAX_VCPU_ID; default: break; } return kvm_vm_ioctl_check_extension(kvm, arg); } int __attribute__((weak)) kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap) { return -EINVAL; } static int kvm_vm_ioctl_enable_cap_generic(struct kvm *kvm, struct kvm_enable_cap *cap) { switch (cap->cap) { #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT: if (cap->flags || (cap->args[0] & ~1)) return -EINVAL; kvm->manual_dirty_log_protect = cap->args[0]; return 0; #endif default: return kvm_vm_ioctl_enable_cap(kvm, cap); } } static long kvm_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { struct kvm *kvm = filp->private_data; void __user *argp = (void __user *)arg; int r; if (kvm->mm != current->mm) return -EIO; switch (ioctl) { case KVM_CREATE_VCPU: r = kvm_vm_ioctl_create_vcpu(kvm, arg); break; case KVM_ENABLE_CAP: { struct kvm_enable_cap cap; r = -EFAULT; if (copy_from_user(&cap, argp, sizeof(cap))) goto out; r = kvm_vm_ioctl_enable_cap_generic(kvm, &cap); break; } case KVM_SET_USER_MEMORY_REGION: { struct kvm_userspace_memory_region kvm_userspace_mem; r = -EFAULT; if (copy_from_user(&kvm_userspace_mem, argp, sizeof(kvm_userspace_mem))) goto out; r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem); break; } case KVM_GET_DIRTY_LOG: { struct kvm_dirty_log log; r = -EFAULT; if (copy_from_user(&log, argp, sizeof(log))) goto out; r = kvm_vm_ioctl_get_dirty_log(kvm, &log); break; } #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT case KVM_CLEAR_DIRTY_LOG: { struct kvm_clear_dirty_log log; r = -EFAULT; if (copy_from_user(&log, argp, sizeof(log))) goto out; r = kvm_vm_ioctl_clear_dirty_log(kvm, &log); break; } #endif #ifdef CONFIG_KVM_MMIO case KVM_REGISTER_COALESCED_MMIO: { struct kvm_coalesced_mmio_zone zone; r = -EFAULT; if (copy_from_user(&zone, argp, sizeof(zone))) goto out; r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone); break; } case KVM_UNREGISTER_COALESCED_MMIO: { struct kvm_coalesced_mmio_zone zone; r = -EFAULT; if (copy_from_user(&zone, argp, sizeof(zone))) goto out; r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone); break; } #endif case KVM_IRQFD: { struct kvm_irqfd data; r = -EFAULT; if (copy_from_user(&data, argp, sizeof(data))) goto out; r = kvm_irqfd(kvm, &data); break; } case KVM_IOEVENTFD: { struct kvm_ioeventfd data; r = -EFAULT; if (copy_from_user(&data, argp, sizeof(data))) goto out; r = kvm_ioeventfd(kvm, &data); break; } #ifdef CONFIG_HAVE_KVM_MSI case KVM_SIGNAL_MSI: { struct kvm_msi msi; r = -EFAULT; if (copy_from_user(&msi, argp, sizeof(msi))) goto out; r = kvm_send_userspace_msi(kvm, &msi); break; } #endif #ifdef __KVM_HAVE_IRQ_LINE case KVM_IRQ_LINE_STATUS: case KVM_IRQ_LINE: { struct kvm_irq_level irq_event; r = -EFAULT; if (copy_from_user(&irq_event, argp, 
sizeof(irq_event))) goto out; r = kvm_vm_ioctl_irq_line(kvm, &irq_event, ioctl == KVM_IRQ_LINE_STATUS); if (r) goto out; r = -EFAULT; if (ioctl == KVM_IRQ_LINE_STATUS) { if (copy_to_user(argp, &irq_event, sizeof(irq_event))) goto out; } r = 0; break; } #endif #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING case KVM_SET_GSI_ROUTING: { struct kvm_irq_routing routing; struct kvm_irq_routing __user *urouting; struct kvm_irq_routing_entry *entries = NULL; r = -EFAULT; if (copy_from_user(&routing, argp, sizeof(routing))) goto out; r = -EINVAL; if (!kvm_arch_can_set_irq_routing(kvm)) goto out; if (routing.nr > KVM_MAX_IRQ_ROUTES) goto out; if (routing.flags) goto out; if (routing.nr) { r = -ENOMEM; entries = vmalloc(array_size(sizeof(*entries), routing.nr)); if (!entries) goto out; r = -EFAULT; urouting = argp; if (copy_from_user(entries, urouting->entries, routing.nr * sizeof(*entries))) goto out_free_irq_routing; } r = kvm_set_irq_routing(kvm, entries, routing.nr, routing.flags); out_free_irq_routing: vfree(entries); break; } #endif /* CONFIG_HAVE_KVM_IRQ_ROUTING */ case KVM_CREATE_DEVICE: { struct kvm_create_device cd; r = -EFAULT; if (copy_from_user(&cd, argp, sizeof(cd))) goto out; r = kvm_ioctl_create_device(kvm, &cd); if (r) goto out; r = -EFAULT; if (copy_to_user(argp, &cd, sizeof(cd))) goto out; r = 0; break; } case KVM_CHECK_EXTENSION: r = kvm_vm_ioctl_check_extension_generic(kvm, arg); break; default: r = kvm_arch_vm_ioctl(filp, ioctl, arg); } out: return r; } #ifdef CONFIG_KVM_COMPAT struct compat_kvm_dirty_log { __u32 slot; __u32 padding1; union { compat_uptr_t dirty_bitmap; /* one bit per page */ __u64 padding2; }; }; static long kvm_vm_compat_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { struct kvm *kvm = filp->private_data; int r; if (kvm->mm != current->mm) return -EIO; switch (ioctl) { case KVM_GET_DIRTY_LOG: { struct compat_kvm_dirty_log compat_log; struct kvm_dirty_log log; if (copy_from_user(&compat_log, (void __user *)arg, sizeof(compat_log))) return -EFAULT; log.slot = compat_log.slot; log.padding1 = compat_log.padding1; log.padding2 = compat_log.padding2; log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap); r = kvm_vm_ioctl_get_dirty_log(kvm, &log); break; } default: r = kvm_vm_ioctl(filp, ioctl, arg); } return r; } #endif static struct file_operations kvm_vm_fops = { .release = kvm_vm_release, .unlocked_ioctl = kvm_vm_ioctl, .llseek = noop_llseek, KVM_COMPAT(kvm_vm_compat_ioctl), }; static int kvm_dev_ioctl_create_vm(unsigned long type) { int r; struct kvm *kvm; struct file *file; kvm = kvm_create_vm(type); if (IS_ERR(kvm)) return PTR_ERR(kvm); #ifdef CONFIG_KVM_MMIO r = kvm_coalesced_mmio_init(kvm); if (r < 0) goto put_kvm; #endif r = get_unused_fd_flags(O_CLOEXEC); if (r < 0) goto put_kvm; file = anon_inode_getfile("kvm-vm", &kvm_vm_fops, kvm, O_RDWR); if (IS_ERR(file)) { put_unused_fd(r); r = PTR_ERR(file); goto put_kvm; } /* * Don't call kvm_put_kvm anymore at this point; file->f_op is * already set, with ->release() being kvm_vm_release(). In error * cases it will be called by the final fput(file) and will take * care of doing kvm_put_kvm(kvm). 
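 * From here on the reference owned by the file is what pins the VM,
 * which is why the debugfs failure path below uses fput() rather than
 * kvm_put_kvm().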
*/ if (kvm_create_vm_debugfs(kvm, r) < 0) { put_unused_fd(r); fput(file); return -ENOMEM; } kvm_uevent_notify_change(KVM_EVENT_CREATE_VM, kvm); fd_install(r, file); return r; put_kvm: kvm_put_kvm(kvm); return r; } static long kvm_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { long r = -EINVAL; switch (ioctl) { case KVM_GET_API_VERSION: if (arg) goto out; r = KVM_API_VERSION; break; case KVM_CREATE_VM: r = kvm_dev_ioctl_create_vm(arg); break; case KVM_CHECK_EXTENSION: r = kvm_vm_ioctl_check_extension_generic(NULL, arg); break; case KVM_GET_VCPU_MMAP_SIZE: if (arg) goto out; r = PAGE_SIZE; /* struct kvm_run */ #ifdef CONFIG_X86 r += PAGE_SIZE; /* pio data page */ #endif #ifdef CONFIG_KVM_MMIO r += PAGE_SIZE; /* coalesced mmio ring page */ #endif break; case KVM_TRACE_ENABLE: case KVM_TRACE_PAUSE: case KVM_TRACE_DISABLE: r = -EOPNOTSUPP; break; default: return kvm_arch_dev_ioctl(filp, ioctl, arg); } out: return r; } static struct file_operations kvm_chardev_ops = { .unlocked_ioctl = kvm_dev_ioctl, .llseek = noop_llseek, KVM_COMPAT(kvm_dev_ioctl), }; static struct miscdevice kvm_dev = { KVM_MINOR, "kvm", &kvm_chardev_ops, }; static void hardware_enable_nolock(void *junk) { int cpu = raw_smp_processor_id(); int r; if (cpumask_test_cpu(cpu, cpus_hardware_enabled)) return; cpumask_set_cpu(cpu, cpus_hardware_enabled); r = kvm_arch_hardware_enable(); if (r) { cpumask_clear_cpu(cpu, cpus_hardware_enabled); atomic_inc(&hardware_enable_failed); pr_info("kvm: enabling virtualization on CPU%d failed\n", cpu); } } static int kvm_starting_cpu(unsigned int cpu) { raw_spin_lock(&kvm_count_lock); if (kvm_usage_count) hardware_enable_nolock(NULL); raw_spin_unlock(&kvm_count_lock); return 0; } static void hardware_disable_nolock(void *junk) { int cpu = raw_smp_processor_id(); if (!cpumask_test_cpu(cpu, cpus_hardware_enabled)) return; cpumask_clear_cpu(cpu, cpus_hardware_enabled); kvm_arch_hardware_disable(); } static int kvm_dying_cpu(unsigned int cpu) { raw_spin_lock(&kvm_count_lock); if (kvm_usage_count) hardware_disable_nolock(NULL); raw_spin_unlock(&kvm_count_lock); return 0; } static void hardware_disable_all_nolock(void) { BUG_ON(!kvm_usage_count); kvm_usage_count--; if (!kvm_usage_count) on_each_cpu(hardware_disable_nolock, NULL, 1); } static void hardware_disable_all(void) { raw_spin_lock(&kvm_count_lock); hardware_disable_all_nolock(); raw_spin_unlock(&kvm_count_lock); } static int hardware_enable_all(void) { int r = 0; raw_spin_lock(&kvm_count_lock); kvm_usage_count++; if (kvm_usage_count == 1) { atomic_set(&hardware_enable_failed, 0); on_each_cpu(hardware_enable_nolock, NULL, 1); if (atomic_read(&hardware_enable_failed)) { hardware_disable_all_nolock(); r = -EBUSY; } } raw_spin_unlock(&kvm_count_lock); return r; } static int kvm_reboot(struct notifier_block *notifier, unsigned long val, void *v) { /* * Some (well, at least mine) BIOSes hang on reboot if * in vmx root mode. * * And Intel TXT required VMX off for all cpu when system shutdown. 
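 * So disable hardware virtualization on every online CPU and flag
 * kvm_rebooting before the reboot/shutdown path proceeds.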
*/ pr_info("kvm: exiting hardware virtualization\n"); kvm_rebooting = true; on_each_cpu(hardware_disable_nolock, NULL, 1); return NOTIFY_OK; } static struct notifier_block kvm_reboot_notifier = { .notifier_call = kvm_reboot, .priority = 0, }; static void kvm_io_bus_destroy(struct kvm_io_bus *bus) { int i; for (i = 0; i < bus->dev_count; i++) { struct kvm_io_device *pos = bus->range[i].dev; kvm_iodevice_destructor(pos); } kfree(bus); } static inline int kvm_io_bus_cmp(const struct kvm_io_range *r1, const struct kvm_io_range *r2) { gpa_t addr1 = r1->addr; gpa_t addr2 = r2->addr; if (addr1 < addr2) return -1; /* If r2->len == 0, match the exact address. If r2->len != 0, * accept any overlapping write. Any order is acceptable for * overlapping ranges, because kvm_io_bus_get_first_dev ensures * we process all of them. */ if (r2->len) { addr1 += r1->len; addr2 += r2->len; } if (addr1 > addr2) return 1; return 0; } static int kvm_io_bus_sort_cmp(const void *p1, const void *p2) { return kvm_io_bus_cmp(p1, p2); } static int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus, gpa_t addr, int len) { struct kvm_io_range *range, key; int off; key = (struct kvm_io_range) { .addr = addr, .len = len, }; range = bsearch(&key, bus->range, bus->dev_count, sizeof(struct kvm_io_range), kvm_io_bus_sort_cmp); if (range == NULL) return -ENOENT; off = range - bus->range; while (off > 0 && kvm_io_bus_cmp(&key, &bus->range[off-1]) == 0) off--; return off; } static int __kvm_io_bus_write(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus, struct kvm_io_range *range, const void *val) { int idx; idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len); if (idx < 0) return -EOPNOTSUPP; while (idx < bus->dev_count && kvm_io_bus_cmp(range, &bus->range[idx]) == 0) { if (!kvm_iodevice_write(vcpu, bus->range[idx].dev, range->addr, range->len, val)) return idx; idx++; } return -EOPNOTSUPP; } /* kvm_io_bus_write - called under kvm->slots_lock */ int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, int len, const void *val) { struct kvm_io_bus *bus; struct kvm_io_range range; int r; range = (struct kvm_io_range) { .addr = addr, .len = len, }; bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); if (!bus) return -ENOMEM; r = __kvm_io_bus_write(vcpu, bus, &range, val); return r < 0 ? r : 0; } /* kvm_io_bus_write_cookie - called under kvm->slots_lock */ int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, int len, const void *val, long cookie) { struct kvm_io_bus *bus; struct kvm_io_range range; range = (struct kvm_io_range) { .addr = addr, .len = len, }; bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); if (!bus) return -ENOMEM; /* First try the device referenced by cookie. */ if ((cookie >= 0) && (cookie < bus->dev_count) && (kvm_io_bus_cmp(&range, &bus->range[cookie]) == 0)) if (!kvm_iodevice_write(vcpu, bus->range[cookie].dev, addr, len, val)) return cookie; /* * cookie contained garbage; fall back to search and return the * correct cookie value. 
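 * A stale cookie therefore only costs the ordinary device search done
 * by __kvm_io_bus_write().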
*/ return __kvm_io_bus_write(vcpu, bus, &range, val); } static int __kvm_io_bus_read(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus, struct kvm_io_range *range, void *val) { int idx; idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len); if (idx < 0) return -EOPNOTSUPP; while (idx < bus->dev_count && kvm_io_bus_cmp(range, &bus->range[idx]) == 0) { if (!kvm_iodevice_read(vcpu, bus->range[idx].dev, range->addr, range->len, val)) return idx; idx++; } return -EOPNOTSUPP; } EXPORT_SYMBOL_GPL(kvm_io_bus_write); /* kvm_io_bus_read - called under kvm->slots_lock */ int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, int len, void *val) { struct kvm_io_bus *bus; struct kvm_io_range range; int r; range = (struct kvm_io_range) { .addr = addr, .len = len, }; bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); if (!bus) return -ENOMEM; r = __kvm_io_bus_read(vcpu, bus, &range, val); return r < 0 ? r : 0; } /* Caller must hold slots_lock. */ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, int len, struct kvm_io_device *dev) { int i; struct kvm_io_bus *new_bus, *bus; struct kvm_io_range range; bus = kvm_get_bus(kvm, bus_idx); if (!bus) return -ENOMEM; /* exclude ioeventfd which is limited by maximum fd */ if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1) return -ENOSPC; new_bus = kmalloc(sizeof(*bus) + ((bus->dev_count + 1) * sizeof(struct kvm_io_range)), GFP_KERNEL); if (!new_bus) return -ENOMEM; range = (struct kvm_io_range) { .addr = addr, .len = len, .dev = dev, }; for (i = 0; i < bus->dev_count; i++) if (kvm_io_bus_cmp(&bus->range[i], &range) > 0) break; memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range)); new_bus->dev_count++; new_bus->range[i] = range; memcpy(new_bus->range + i + 1, bus->range + i, (bus->dev_count - i) * sizeof(struct kvm_io_range)); rcu_assign_pointer(kvm->buses[bus_idx], new_bus); synchronize_srcu_expedited(&kvm->srcu); kfree(bus); return 0; } /* Caller must hold slots_lock. 
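 * On success the bus is republished without the removed range via RCU;
 * if the smaller copy cannot be allocated the whole bus is dropped.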
*/ void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, struct kvm_io_device *dev) { int i; struct kvm_io_bus *new_bus, *bus; bus = kvm_get_bus(kvm, bus_idx); if (!bus) return; for (i = 0; i < bus->dev_count; i++) if (bus->range[i].dev == dev) { break; } if (i == bus->dev_count) return; new_bus = kmalloc(sizeof(*bus) + ((bus->dev_count - 1) * sizeof(struct kvm_io_range)), GFP_KERNEL); if (!new_bus) { pr_err("kvm: failed to shrink bus, removing it completely\n"); goto broken; } memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range)); new_bus->dev_count--; memcpy(new_bus->range + i, bus->range + i + 1, (new_bus->dev_count - i) * sizeof(struct kvm_io_range)); broken: rcu_assign_pointer(kvm->buses[bus_idx], new_bus); synchronize_srcu_expedited(&kvm->srcu); kfree(bus); return; } struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr) { struct kvm_io_bus *bus; int dev_idx, srcu_idx; struct kvm_io_device *iodev = NULL; srcu_idx = srcu_read_lock(&kvm->srcu); bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu); if (!bus) goto out_unlock; dev_idx = kvm_io_bus_get_first_dev(bus, addr, 1); if (dev_idx < 0) goto out_unlock; iodev = bus->range[dev_idx].dev; out_unlock: srcu_read_unlock(&kvm->srcu, srcu_idx); return iodev; } EXPORT_SYMBOL_GPL(kvm_io_bus_get_dev); static int kvm_debugfs_open(struct inode *inode, struct file *file, int (*get)(void *, u64 *), int (*set)(void *, u64), const char *fmt) { struct kvm_stat_data *stat_data = (struct kvm_stat_data *) inode->i_private; /* The debugfs files are a reference to the kvm struct which * is still valid when kvm_destroy_vm is called. * To avoid the race between open and the removal of the debugfs * directory we test against the users count. */ if (!refcount_inc_not_zero(&stat_data->kvm->users_count)) return -ENOENT; if (simple_attr_open(inode, file, get, set, fmt)) { kvm_put_kvm(stat_data->kvm); return -ENOMEM; } return 0; } static int kvm_debugfs_release(struct inode *inode, struct file *file) { struct kvm_stat_data *stat_data = (struct kvm_stat_data *) inode->i_private; simple_attr_release(inode, file); kvm_put_kvm(stat_data->kvm); return 0; } static int vm_stat_get_per_vm(void *data, u64 *val) { struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data; *val = *(ulong *)((void *)stat_data->kvm + stat_data->offset); return 0; } static int vm_stat_clear_per_vm(void *data, u64 val) { struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data; if (val) return -EINVAL; *(ulong *)((void *)stat_data->kvm + stat_data->offset) = 0; return 0; } static int vm_stat_get_per_vm_open(struct inode *inode, struct file *file) { __simple_attr_check_format("%llu\n", 0ull); return kvm_debugfs_open(inode, file, vm_stat_get_per_vm, vm_stat_clear_per_vm, "%llu\n"); } static const struct file_operations vm_stat_get_per_vm_fops = { .owner = THIS_MODULE, .open = vm_stat_get_per_vm_open, .release = kvm_debugfs_release, .read = simple_attr_read, .write = simple_attr_write, .llseek = no_llseek, }; static int vcpu_stat_get_per_vm(void *data, u64 *val) { int i; struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data; struct kvm_vcpu *vcpu; *val = 0; kvm_for_each_vcpu(i, vcpu, stat_data->kvm) *val += *(u64 *)((void *)vcpu + stat_data->offset); return 0; } static int vcpu_stat_clear_per_vm(void *data, u64 val) { int i; struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data; struct kvm_vcpu *vcpu; if (val) return -EINVAL; kvm_for_each_vcpu(i, vcpu, stat_data->kvm) *(u64 *)((void *)vcpu + 
stat_data->offset) = 0; return 0; } static int vcpu_stat_get_per_vm_open(struct inode *inode, struct file *file) { __simple_attr_check_format("%llu\n", 0ull); return kvm_debugfs_open(inode, file, vcpu_stat_get_per_vm, vcpu_stat_clear_per_vm, "%llu\n"); } static const struct file_operations vcpu_stat_get_per_vm_fops = { .owner = THIS_MODULE, .open = vcpu_stat_get_per_vm_open, .release = kvm_debugfs_release, .read = simple_attr_read, .write = simple_attr_write, .llseek = no_llseek, }; static const struct file_operations *stat_fops_per_vm[] = { [KVM_STAT_VCPU] = &vcpu_stat_get_per_vm_fops, [KVM_STAT_VM] = &vm_stat_get_per_vm_fops, }; static int vm_stat_get(void *_offset, u64 *val) { unsigned offset = (long)_offset; struct kvm *kvm; struct kvm_stat_data stat_tmp = {.offset = offset}; u64 tmp_val; *val = 0; spin_lock(&kvm_lock); list_for_each_entry(kvm, &vm_list, vm_list) { stat_tmp.kvm = kvm; vm_stat_get_per_vm((void *)&stat_tmp, &tmp_val); *val += tmp_val; } spin_unlock(&kvm_lock); return 0; } static int vm_stat_clear(void *_offset, u64 val) { unsigned offset = (long)_offset; struct kvm *kvm; struct kvm_stat_data stat_tmp = {.offset = offset}; if (val) return -EINVAL; spin_lock(&kvm_lock); list_for_each_entry(kvm, &vm_list, vm_list) { stat_tmp.kvm = kvm; vm_stat_clear_per_vm((void *)&stat_tmp, 0); } spin_unlock(&kvm_lock); return 0; } DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, vm_stat_clear, "%llu\n"); static int vcpu_stat_get(void *_offset, u64 *val) { unsigned offset = (long)_offset; struct kvm *kvm; struct kvm_stat_data stat_tmp = {.offset = offset}; u64 tmp_val; *val = 0; spin_lock(&kvm_lock); list_for_each_entry(kvm, &vm_list, vm_list) { stat_tmp.kvm = kvm; vcpu_stat_get_per_vm((void *)&stat_tmp, &tmp_val); *val += tmp_val; } spin_unlock(&kvm_lock); return 0; } static int vcpu_stat_clear(void *_offset, u64 val) { unsigned offset = (long)_offset; struct kvm *kvm; struct kvm_stat_data stat_tmp = {.offset = offset}; if (val) return -EINVAL; spin_lock(&kvm_lock); list_for_each_entry(kvm, &vm_list, vm_list) { stat_tmp.kvm = kvm; vcpu_stat_clear_per_vm((void *)&stat_tmp, 0); } spin_unlock(&kvm_lock); return 0; } DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, vcpu_stat_clear, "%llu\n"); static const struct file_operations *stat_fops[] = { [KVM_STAT_VCPU] = &vcpu_stat_fops, [KVM_STAT_VM] = &vm_stat_fops, }; static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm) { struct kobj_uevent_env *env; unsigned long long created, active; if (!kvm_dev.this_device || !kvm) return; spin_lock(&kvm_lock); if (type == KVM_EVENT_CREATE_VM) { kvm_createvm_count++; kvm_active_vms++; } else if (type == KVM_EVENT_DESTROY_VM) { kvm_active_vms--; } created = kvm_createvm_count; active = kvm_active_vms; spin_unlock(&kvm_lock); env = kzalloc(sizeof(*env), GFP_KERNEL); if (!env) return; add_uevent_var(env, "CREATED=%llu", created); add_uevent_var(env, "COUNT=%llu", active); if (type == KVM_EVENT_CREATE_VM) { add_uevent_var(env, "EVENT=create"); kvm->userspace_pid = task_pid_nr(current); } else if (type == KVM_EVENT_DESTROY_VM) { add_uevent_var(env, "EVENT=destroy"); } add_uevent_var(env, "PID=%d", kvm->userspace_pid); if (kvm->debugfs_dentry) { char *tmp, *p = kmalloc(PATH_MAX, GFP_KERNEL); if (p) { tmp = dentry_path_raw(kvm->debugfs_dentry, p, PATH_MAX); if (!IS_ERR(tmp)) add_uevent_var(env, "STATS_PATH=%s", tmp); kfree(p); } } /* no need for checks, since we are adding at most only 5 keys */ env->envp[env->envp_idx++] = NULL; kobject_uevent_env(&kvm_dev.this_device->kobj, KOBJ_CHANGE, 
env->envp); kfree(env); } static void kvm_init_debug(void) { struct kvm_stats_debugfs_item *p; kvm_debugfs_dir = debugfs_create_dir("kvm", NULL); kvm_debugfs_num_entries = 0; for (p = debugfs_entries; p->name; ++p, kvm_debugfs_num_entries++) { debugfs_create_file(p->name, 0644, kvm_debugfs_dir, (void *)(long)p->offset, stat_fops[p->kind]); } } static int kvm_suspend(void) { if (kvm_usage_count) hardware_disable_nolock(NULL); return 0; } static void kvm_resume(void) { if (kvm_usage_count) { WARN_ON(raw_spin_is_locked(&kvm_count_lock)); hardware_enable_nolock(NULL); } } static struct syscore_ops kvm_syscore_ops = { .suspend = kvm_suspend, .resume = kvm_resume, }; static inline struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn) { return container_of(pn, struct kvm_vcpu, preempt_notifier); } static void kvm_sched_in(struct preempt_notifier *pn, int cpu) { struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn); if (vcpu->preempted) vcpu->preempted = false; kvm_arch_sched_in(vcpu, cpu); kvm_arch_vcpu_load(vcpu, cpu); } static void kvm_sched_out(struct preempt_notifier *pn, struct task_struct *next) { struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn); if (current->state == TASK_RUNNING) vcpu->preempted = true; kvm_arch_vcpu_put(vcpu); } int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align, struct module *module) { int r; int cpu; r = kvm_arch_init(opaque); if (r) goto out_fail; /* * kvm_arch_init makes sure there's at most one caller * for architectures that support multiple implementations, * like intel and amd on x86. * kvm_arch_init must be called before kvm_irqfd_init to avoid creating * conflicts in case kvm is already setup for another implementation. */ r = kvm_irqfd_init(); if (r) goto out_irqfd; if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) { r = -ENOMEM; goto out_free_0; } r = kvm_arch_hardware_setup(); if (r < 0) goto out_free_0a; for_each_online_cpu(cpu) { smp_call_function_single(cpu, kvm_arch_check_processor_compat, &r, 1); if (r < 0) goto out_free_1; } r = cpuhp_setup_state_nocalls(CPUHP_AP_KVM_STARTING, "kvm/cpu:starting", kvm_starting_cpu, kvm_dying_cpu); if (r) goto out_free_2; register_reboot_notifier(&kvm_reboot_notifier); /* A kmem cache lets us meet the alignment requirements of fx_save. 
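 * The usercopy whitelist passed below limits copy_to/from_user() on
 * cache objects to the architecture-specific 'arch' region of
 * struct kvm_vcpu.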
*/ if (!vcpu_align) vcpu_align = __alignof__(struct kvm_vcpu); kvm_vcpu_cache = kmem_cache_create_usercopy("kvm_vcpu", vcpu_size, vcpu_align, SLAB_ACCOUNT, offsetof(struct kvm_vcpu, arch), sizeof_field(struct kvm_vcpu, arch), NULL); if (!kvm_vcpu_cache) { r = -ENOMEM; goto out_free_3; } r = kvm_async_pf_init(); if (r) goto out_free; kvm_chardev_ops.owner = module; kvm_vm_fops.owner = module; kvm_vcpu_fops.owner = module; r = misc_register(&kvm_dev); if (r) { pr_err("kvm: misc device register failed\n"); goto out_unreg; } register_syscore_ops(&kvm_syscore_ops); kvm_preempt_ops.sched_in = kvm_sched_in; kvm_preempt_ops.sched_out = kvm_sched_out; kvm_init_debug(); r = kvm_vfio_ops_init(); WARN_ON(r); return 0; out_unreg: kvm_async_pf_deinit(); out_free: kmem_cache_destroy(kvm_vcpu_cache); out_free_3: unregister_reboot_notifier(&kvm_reboot_notifier); cpuhp_remove_state_nocalls(CPUHP_AP_KVM_STARTING); out_free_2: out_free_1: kvm_arch_hardware_unsetup(); out_free_0a: free_cpumask_var(cpus_hardware_enabled); out_free_0: kvm_irqfd_exit(); out_irqfd: kvm_arch_exit(); out_fail: return r; } EXPORT_SYMBOL_GPL(kvm_init); void kvm_exit(void) { debugfs_remove_recursive(kvm_debugfs_dir); misc_deregister(&kvm_dev); kmem_cache_destroy(kvm_vcpu_cache); kvm_async_pf_deinit(); unregister_syscore_ops(&kvm_syscore_ops); unregister_reboot_notifier(&kvm_reboot_notifier); cpuhp_remove_state_nocalls(CPUHP_AP_KVM_STARTING); on_each_cpu(hardware_disable_nolock, NULL, 1); kvm_arch_hardware_unsetup(); kvm_arch_exit(); kvm_irqfd_exit(); free_cpumask_var(cpus_hardware_enabled); kvm_vfio_ops_exit(); } EXPORT_SYMBOL_GPL(kvm_exit);
./CrossVul/dataset_final_sorted/CWE-362/c/bad_1404_0
crossvul-cpp_data_bad_1664_1
/* BEGIN_ICS_COPYRIGHT5 **************************************** Copyright (c) 2015, Intel Corporation Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ** END_ICS_COPYRIGHT5 ****************************************/ /* * FILE NAME * sm_diag.c * * DESCRIPTION * FMI sm_diag utility used to control and diagnose a local SM instance * * RESPONSIBLE ENGINEER * John Seraphin * */ #include <stdio.h> #include <stdlib.h> #include <sys/types.h> #include <sys/socket.h> #include <sys/un.h> #include <sys/stat.h> #include <signal.h> #include <time.h> #include "hsm_config_client_api.h" #include "hsm_config_client_data.h" extern int getopt(int, char *const *, const char *); typedef struct command_s{ char *name; int (*cmdPtr)(p_fm_config_conx_hdlt hdl, fm_mgr_type_t mgr, int argc, char *argv[]); fm_mgr_type_t mgr; char *desc; }command_t; /* function prototypes */ int mgr_force_sweep(p_fm_config_conx_hdlt hdl, fm_mgr_type_t mgr, int argc, char *argv[]); int sm_restore_priority(p_fm_config_conx_hdlt hdl, fm_mgr_type_t mgr, int argc, char *argv[]); int sm_broadcast_xml_config(p_fm_config_conx_hdlt hdl, fm_mgr_type_t mgr, int argc, char *argv[]); int sm_get_counters(p_fm_config_conx_hdlt hdl, fm_mgr_type_t mgr, int argc, char *argv[]); int sm_reset_counters(p_fm_config_conx_hdlt hdl, fm_mgr_type_t mgr, int argc, char *argv[]); int pm_get_counters(p_fm_config_conx_hdlt hdl, fm_mgr_type_t mgr, int argc, char *argv[]); int pm_reset_counters(p_fm_config_conx_hdlt hdl, fm_mgr_type_t mgr, int argc, char *argv[]); int sm_state_dump(p_fm_config_conx_hdlt hdl, fm_mgr_type_t mgr, int argc, char *argv[]); int pm_restore_priority(p_fm_config_conx_hdlt hdl, fm_mgr_type_t mgr, int argc, char *argv[]); int mgr_log_level(p_fm_config_conx_hdlt hdl, fm_mgr_type_t mgr, int argc, char *argv[]); int mgr_log_mode(p_fm_config_conx_hdlt hdl, fm_mgr_type_t mgr, int argc, char *argv[]); int mgr_log_mask(p_fm_config_conx_hdlt hdl, fm_mgr_type_t mgr, int argc, char *argv[]); int sm_perf_debug_toggle(p_fm_config_conx_hdlt hdl, fm_mgr_type_t mgr, int argc, char *argv[]); int sa_perf_debug_toggle(p_fm_config_conx_hdlt hdl, fm_mgr_type_t mgr, int argc, char *argv[]); int 
mgr_rmpp_debug_toggle(p_fm_config_conx_hdlt hdl, fm_mgr_type_t mgr, int argc, char *argv[]); int mgr_debug_toggle(p_fm_config_conx_hdlt hdl, fm_mgr_type_t mgr, int argc, char *argv[]); int sm_force_rebalance_toggle(p_fm_config_conx_hdlt hdl, fm_mgr_type_t mgr, int argc, char *argv[]); int sm_adaptive_routing(p_fm_config_conx_hdlt hdl, fm_mgr_type_t mgr, int argc, char *argv[]); int sm_looptest_start(p_fm_config_conx_hdlt hdl, fm_mgr_type_t mgr, int argc, char *argv[]); int sm_looptest_fast_mode_start(p_fm_config_conx_hdlt hdl, fm_mgr_type_t mgr, int argc, char *argv[]); int sm_looptest_stop(p_fm_config_conx_hdlt hdl, fm_mgr_type_t mgr, int argc, char *argv[]); int sm_looptest_inject_packets(p_fm_config_conx_hdlt hdl, fm_mgr_type_t mgr, int argc, char *argv[]); int sm_looptest_inject_at_node(p_fm_config_conx_hdlt hdl, fm_mgr_type_t mgr, int argc, char *argv[]); int sm_looptest_inject_packets_each_sweep(p_fm_config_conx_hdlt hdl, fm_mgr_type_t mgr, int argc, char *argv[]); int sm_looptest_path_length(p_fm_config_conx_hdlt hdl, fm_mgr_type_t mgr, int argc, char *argv[]); int sm_looptest_show_loop_paths(p_fm_config_conx_hdlt hdl, fm_mgr_type_t mgr, int argc, char *argv[]); int sm_looptest_min_isl_redundancy(p_fm_config_conx_hdlt hdl, fm_mgr_type_t mgr, int argc, char *argv[]); int sm_looptest_show_switch_lfts(p_fm_config_conx_hdlt hdl, fm_mgr_type_t mgr, int argc, char *argv[]); int sm_looptest_show_loop_topology(p_fm_config_conx_hdlt hdl, fm_mgr_type_t mgr, int argc, char *argv[]); int sm_looptest_show_config(p_fm_config_conx_hdlt hdl, fm_mgr_type_t mgr, int argc, char *argv[]); int sm_looptest_fast(p_fm_config_conx_hdlt hdl, fm_mgr_type_t mgr, int argc, char *argv[]); int sm_force_attribute_rewrite(p_fm_config_conx_hdlt hdl, fm_mgr_type_t mgr, int argc, char *argv[]); int sm_skip_attr_write(p_fm_config_conx_hdlt hdl, fm_mgr_type_t mgr, int argc, char *argv[]); int sm_pause_sweeps(p_fm_config_conx_hdlt hdl, fm_mgr_type_t mgr, int argc, char *argv[]); int sm_resume_sweeps(p_fm_config_conx_hdlt hdl, fm_mgr_type_t mgr, int argc, char *argv[]); int optind = 1; /* index into parent argv vector */ char *optarg; /* argument associated with option */ static command_t commandList[] = { {"smForceSweep", mgr_force_sweep, FM_MGR_SM, "Make the SM sweep now"}, {"smRestorePriority", sm_restore_priority, FM_MGR_SM, "Restore the normal priority of the SM (if it is\n currently elevated)"}, {"smShowCounters", sm_get_counters, FM_MGR_SM, "Get statistics and performance counters from the SM"}, {"smResetCounters",sm_reset_counters, FM_MGR_SM, "Reset SM statistics and performace counters"}, {"smStateDump",sm_state_dump, FM_MGR_SM, "Dump Internal SM state into directory specified"}, {"smLogLevel", mgr_log_level, FM_MGR_SM, "Set the SM logging level (1=WARN+, 2=INFINI_INFO+,\n 3=INFO+, 4=VERBOSE+, 5=DEBUG2+, 6=DEBUG3+, 7=TRACE+)"}, {"smLogMode", mgr_log_mode, FM_MGR_SM, "Set the SM log mode flags (0/1 1=downgrade\n non-actionable, 0/2 2=logfile only)"}, {"smLogMask", mgr_log_mask, FM_MGR_SM, "Set the SM log mask for a specific subsystem to the\n value given see /etc/sysconfig/opafm.xml-sample\n for a list of subsystems and mask bit meanings"}, {"smPerfDebug", sm_perf_debug_toggle, FM_MGR_SM, "Toggle performance debug output for SM"}, {"saPerfDebug", sa_perf_debug_toggle, FM_MGR_SM, "Toggle performance debug output for SA"}, {"saRmppDebug", mgr_rmpp_debug_toggle, FM_MGR_SM, "Toggle Rmpp debug output for SA"}, {"pmRestorePriority", pm_restore_priority, FM_MGR_PM, "No longer supported, use smRestorePriority"}, 
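// pmLogLevel/pmLogMode/pmLogMask are deprecated shims: when invoked for the PM,
// their handlers only print a notice pointing at the equivalent sm* commands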
{"pmLogLevel", mgr_log_level, FM_MGR_PM, "No longer supported, use smLogLevel"}, {"pmLogMode", mgr_log_mode, FM_MGR_PM, "No longer supported, use smLogMode"}, {"pmLogMask", mgr_log_mask, FM_MGR_PM, "No longer supported, use smLogMask"}, // these commands can be issued direct to PM without issue {"pmShowCounters", pm_get_counters, FM_MGR_PM, "Get statistics and performance counters about the PM"}, {"pmResetCounters",pm_reset_counters, FM_MGR_PM, "Reset statistics and performace counters about the PM"}, {"pmDebug", mgr_debug_toggle, FM_MGR_PM, "Toggle debug output for PM"}, {"pmRmppDebug", mgr_rmpp_debug_toggle, FM_MGR_PM, "Toggle Rmpp debug output for PM"}, {"feLogLevel", mgr_log_level, FM_MGR_FE, "Set the FE logging level (1=WARN+, 2=INFINI_INFO+,\n 3=INFO+, 4=VERBOSE+, 5=DEBUG2+, 6=DEBUG3+, 7=TRACE+)"}, {"feLogMode", mgr_log_mode, FM_MGR_FE, "Set the FE log mode flags (0/1 1=downgrade\n non-actionable, 0/2 2=logfile only)"}, {"feLogMask", mgr_log_mask, FM_MGR_FE, "Set the FE log mask for a specific subsystem to the\n value given see /etc/sysconfig/opafm.xml-sample\n for a list of subsystems and mask bit meanings"}, {"feDebug", mgr_debug_toggle, FM_MGR_FE, "Toggle debug output for FE"}, {"feRmppDebug", mgr_rmpp_debug_toggle, FM_MGR_FE, "Toggle Rmpp debug output for FE"}, {"smLooptestStart", sm_looptest_start, FM_MGR_SM, "START loop test in normal mode - specify the number of 256 byte packets\n (default=0)"}, {"smLooptestFastModeStart", sm_looptest_fast_mode_start, FM_MGR_SM, "START loop test in fast mode - specify the number of 256 byte packets\n (default=4)"}, {"smLooptestStop", sm_looptest_stop, FM_MGR_SM, "STOP the loop test (puts switch LFTs back to normal)"}, //{"smLooptestFastMode", sm_looptest_fast, FM_MGR_SM, "1 to turn loop test fast mode ON, 0 to turn OFF"}, {"smLooptestInjectPackets", sm_looptest_inject_packets, FM_MGR_SM, "Enter numPkts to send to all switch loops\n (default=1)"}, {"smLooptestInjectAtNode", sm_looptest_inject_at_node, FM_MGR_SM, "Enter the switch node index to inject loop packets\n (default=0)"}, {"smLooptestInjectEachSweep", sm_looptest_inject_packets_each_sweep, FM_MGR_SM, "1 to inject packets each sweep, 0 to stop injecting each sweep"}, {"smLooptestPathLength", sm_looptest_path_length, FM_MGR_SM, "Sets the loop path length 2-4\n (default=3)"}, {"smLooptestMinISLRedundancy", sm_looptest_min_isl_redundancy, FM_MGR_SM, "Sets the minimum number of loops in which to include each ISL\n (default=4)"}, {"smLooptestShowLoopPaths",sm_looptest_show_loop_paths, FM_MGR_SM, "Displays the loop paths given node index or all loop paths\n (default=all)"}, {"smLooptestShowSwitchLft",sm_looptest_show_switch_lfts, FM_MGR_SM, "Displays a switch LFT given node index or all switches LFTs\n (default=all)"}, {"smLooptestShowTopology",sm_looptest_show_loop_topology, FM_MGR_SM, "Displays the topology for the SM Loop Test"}, {"smLooptestShowConfig",sm_looptest_show_config, FM_MGR_SM, "Displays the current active loop configuration"}, {"smForceRebalance", sm_force_rebalance_toggle, FM_MGR_SM, "Toggle Force Rebalance setting for SM"}, {"smAdaptiveRouting", sm_adaptive_routing, FM_MGR_SM, "Displays or modifies Adaptive Routing setting for SM. Display (no arg), Disable=0, Enable=1"}, {"smForceAttributeRewrite", sm_force_attribute_rewrite, FM_MGR_SM, "Set rewriting of all attributes upon resweeping. 
Disable=0, Enable=1"}, {"smSkipAttrWrite", sm_skip_attr_write, FM_MGR_SM, "Bitmask of attributes to be skipped(not written) during sweeps (-help for list)"}, {"smPauseSweeps", sm_pause_sweeps, FM_MGR_SM, "Pause SM sweeps"}, {"smResumeSweeps", sm_resume_sweeps, FM_MGR_SM, "Resume SM sweeps"}, // JPW - may implement in future as part of the maint mode feature // {"smBroadcastConfig", sm_broadcast_xml_config, FM_MGR_SM, "Broadcast the XML configuration file to STANDBY and INACTIVE SM's"}, }; static int commandListLen = (sizeof(commandList)/sizeof(commandList[0])); void usage(char *cmd) { int i; fprintf(stderr, "USAGE: %s", cmd); fprintf(stderr, " [OPTIONS] COMMAND [COMMAND ARGS]\n\n"); fprintf(stderr, "OPTIONS:\n"); fprintf(stderr, " -i <VAL>\t\tinstance to connect to (0 - default)\n"); //fprintf(stderr, " -d <VAL>\t\tdestination ip address or hostname. (Use to connect to a remote instance)\n"); fprintf(stderr, "COMMANDS:\n"); for(i=0;i<commandListLen;i++){ fprintf(stderr, " %-21s %s\n",commandList[i].name,commandList[i].desc); } fflush(stderr); } int mgr_force_sweep(p_fm_config_conx_hdlt hdl, fm_mgr_type_t mgr, int argc, char *argv[]) { fm_mgr_config_errno_t res; fm_msg_ret_code_t ret_code; if((res = fm_mgr_simple_query(hdl, FM_ACT_GET, FM_DT_FORCE_SWEEP, mgr, 0, NULL, &ret_code)) != FM_CONF_OK) { fprintf(stderr, "mgr_force_sweep: Failed to retrieve data: \n" "\tError:(%d) %s \n\tRet code:(%d) %s\n", res, fm_mgr_get_error_str(res),ret_code, fm_mgr_get_resp_error_str(ret_code)); } else { printf("mgr_force_sweep: Successfully sent Force Sweep control to local mgr instance\n"); } return 0; } int sm_broadcast_xml_config(p_fm_config_conx_hdlt hdl, fm_mgr_type_t mgr, int argc, char *argv[]) { fm_mgr_config_errno_t res; fm_msg_ret_code_t ret_code; if((res = fm_mgr_simple_query(hdl, FM_ACT_GET, FM_DT_SM_BROADCAST_XML_CONFIG, mgr, 0, NULL, &ret_code)) != FM_CONF_OK) { fprintf(stderr, "sm_broadcast_xml_config: Failed to retrieve data: \n" "\tError:(%d) %s \n\tRet code:(%d) %s\n", res, fm_mgr_get_error_str(res),ret_code, fm_mgr_get_resp_error_str(ret_code)); } else { printf("sm_broadcast_xml_config: Successfully sent XML broadcast config command to local mgr instance\n"); } return 0; } int sm_restore_priority(p_fm_config_conx_hdlt hdl, fm_mgr_type_t mgr, int argc, char *argv[]) { fm_mgr_config_errno_t res; fm_msg_ret_code_t ret_code; if((res = fm_mgr_simple_query(hdl, FM_ACT_GET, FM_DT_SM_RESTORE_PRIORITY, mgr, 0, NULL, &ret_code)) != FM_CONF_OK) { fprintf(stderr, "sm_restore_priority: Failed to retrieve data: \n" "\tError:(%d) %s \n\tRet code:(%d) %s\n", res, fm_mgr_get_error_str(res),ret_code, fm_mgr_get_resp_error_str(ret_code)); } else { printf("sm_restore_priority: Successfully sent Relinquish Master control to local mgr instance\n"); } return 0; } #define BUF_SZ 16384 int sm_get_counters(p_fm_config_conx_hdlt hdl, fm_mgr_type_t mgr, int argc, char *argv[]) { fm_mgr_config_errno_t res; fm_msg_ret_code_t ret_code; uint8_t data[BUF_SZ]; time_t timeNow; if((res = fm_mgr_simple_query(hdl, FM_ACT_GET, FM_DT_SM_GET_COUNTERS, mgr, BUF_SZ, data, &ret_code)) != FM_CONF_OK) { fprintf(stderr, "sm_get_counters: Failed to retrieve data: \n" "\tError:(%d) %s \n\tRet code:(%d) %s\n", res, fm_mgr_get_error_str(res),ret_code, fm_mgr_get_resp_error_str(ret_code)); } else { time(&timeNow); data[BUF_SZ-1]=0; printf("%35s: %s%s", "SM Counters as of", ctime(&timeNow), (char*) data); } return 0; } int pm_get_counters(p_fm_config_conx_hdlt hdl, fm_mgr_type_t mgr, int argc, char *argv[]) { fm_mgr_config_errno_t res; 
fm_msg_ret_code_t ret_code; uint8_t data[BUF_SZ]; time_t timeNow; if((res = fm_mgr_simple_query(hdl, FM_ACT_GET, FM_DT_PM_GET_COUNTERS, mgr, BUF_SZ, data, &ret_code)) != FM_CONF_OK) { fprintf(stderr, "pm_get_counters: Failed to retrieve data: \n" "\tError:(%d) %s \n\tRet code:(%d) %s\n", res, fm_mgr_get_error_str(res),ret_code, fm_mgr_get_resp_error_str(ret_code)); } else { time(&timeNow); data[BUF_SZ-1]=0; printf("PM Counters as of %s%s", ctime(&timeNow), (char*) data); } return 0; } int sm_reset_counters(p_fm_config_conx_hdlt hdl, fm_mgr_type_t mgr, int argc, char *argv[]) { fm_mgr_config_errno_t res; fm_msg_ret_code_t ret_code; if((res = fm_mgr_simple_query(hdl, FM_ACT_GET, FM_DT_SM_RESET_COUNTERS, mgr, 0, NULL, &ret_code)) != FM_CONF_OK) { fprintf(stderr, "sm_reset_counters: Failed to retrieve data: \n" "\tError:(%d) %s \n\tRet code:(%d) %s\n", res, fm_mgr_get_error_str(res),ret_code, fm_mgr_get_resp_error_str(ret_code)); } else { printf("sm_reset_counters: Successfully sent reset command " "to the SM\n"); } return 0; } int pm_reset_counters(p_fm_config_conx_hdlt hdl, fm_mgr_type_t mgr, int argc, char *argv[]) { fm_mgr_config_errno_t res; fm_msg_ret_code_t ret_code; if((res = fm_mgr_simple_query(hdl, FM_ACT_GET, FM_DT_PM_RESET_COUNTERS, mgr, 0, NULL, &ret_code)) != FM_CONF_OK) { fprintf(stderr, "pm_reset_counters: Failed to retrieve data: \n" "\tError:(%d) %s \n\tRet code:(%d) %s\n", res, fm_mgr_get_error_str(res),ret_code, fm_mgr_get_resp_error_str(ret_code)); } else { printf("pm_reset_counters: Successfully sent reset command " "to the PM\n"); } return 0; } int sm_state_dump(p_fm_config_conx_hdlt hdl, fm_mgr_type_t mgr, int argc, char *argv[]) { fm_mgr_config_errno_t res; fm_msg_ret_code_t ret_code; char dirName[256]; if (argc == 1 && strlen(argv[0]) < 256) { strncpy(dirName, argv[0], sizeof(dirName)); dirName[sizeof(dirName)-1]=0; } else { sprintf(dirName, "/tmp"); } printf("Sending command to dump the SM state into the directory %s\n", dirName); if((res = fm_mgr_simple_query(hdl, FM_ACT_GET, FM_DT_SM_DUMP_STATE, mgr, strlen(dirName) + 1, dirName, &ret_code)) != FM_CONF_OK) { fprintf(stderr, "sm_state_dump: Failed to retrieve data: \n" "\tError:(%d) %s \n\tRet code:(%d) %s\n", res, fm_mgr_get_error_str(res),ret_code, fm_mgr_get_resp_error_str(ret_code)); } else { printf("Successfully sent state dump command to local SM instance\n"); } return 0; } int pm_restore_priority(p_fm_config_conx_hdlt hdl, fm_mgr_type_t mgr, int argc, char *argv[]) { fprintf(stderr, "pmRestorePriority:\n"); fprintf(stderr, "\tThis command is not supported any more. The priority of the\n"); fprintf(stderr, "\tPerformance Manager(PM) is now based on the priority of the\n"); fprintf(stderr, "\tSubnet manager(SM). Use the smRestorePriority command\n"); fprintf(stderr, "\tfor restoring the priority of the SM and PM.\n"); return 0; } int mgr_log_level(p_fm_config_conx_hdlt hdl, fm_mgr_type_t mgr, int argc, char *argv[]) { fm_mgr_config_errno_t res; fm_msg_ret_code_t ret_code; uint32_t loglevel=0; if (mgr == FM_MGR_PM) { fprintf(stderr, "pmLogLevel:\n"); fprintf(stderr, "\tThis command is not supported any more. The logging of the\n"); fprintf(stderr, "\tPerformance Manager(PM) is now\n"); fprintf(stderr, "\tbased on the logging of the Subnet manager(SM). 
Use the\n"); fprintf(stderr, "\tsmLogLevel command for changing the logging level of the\n"); fprintf(stderr, "\tSM and PM\n"); } else if (argc == 1) { loglevel = atol(argv[0]); if((res = fm_mgr_simple_query(hdl, FM_ACT_GET, FM_DT_LOG_LEVEL, mgr, sizeof(loglevel), (void *)&loglevel, &ret_code)) != FM_CONF_OK) { fprintf(stderr, "mgr_log_level: Failed to retrieve data: \n" "\tError:(%d) %s \n\tRet code:(%d) %s\n", res, fm_mgr_get_error_str(res),ret_code, fm_mgr_get_resp_error_str(ret_code)); } else { printf("mgr_log_level: Successfully sent Log Level control to local mgr instance\n"); } } else { fprintf(stderr, "mgr_log_level: must specify the log level parameter (1 > 5): \n"); } return 0; } int mgr_log_mode(p_fm_config_conx_hdlt hdl, fm_mgr_type_t mgr, int argc, char *argv[]) { fm_mgr_config_errno_t res; fm_msg_ret_code_t ret_code; uint32_t logmode=0; if (mgr == FM_MGR_PM) { fprintf(stderr, "pmLogMode:\n"); fprintf(stderr, "\tThis command is not supported any more. The logging of the\n"); fprintf(stderr, "\tPerformance Manager(PM) is now\n"); fprintf(stderr, "\tbased on the logging of the Subnet manager(SM). Use the\n"); fprintf(stderr, "\tsmLogMode command for changing the logging level of the\n"); fprintf(stderr, "\tSM and PM\n"); } else if (argc == 1) { logmode = atol(argv[0]); if((res = fm_mgr_simple_query(hdl, FM_ACT_GET, FM_DT_LOG_MODE, mgr, sizeof(logmode), (void *)&logmode, &ret_code)) != FM_CONF_OK) { fprintf(stderr, "mgr_log_mode: Failed to retrieve data: \n" "\tError:(%d) %s \n\tRet code:(%d) %s\n", res, fm_mgr_get_error_str(res),ret_code, fm_mgr_get_resp_error_str(ret_code)); } else { printf("mgr_log_mode: Successfully sent Log Mode control to local mgr instance\n"); } } else { fprintf(stderr, "mgr_log_mode: must specify the log mode parameter (1 > 5): \n"); } return 0; } int mgr_log_mask(p_fm_config_conx_hdlt hdl, fm_mgr_type_t mgr, int argc, char *argv[]) { fm_mgr_config_errno_t res; fm_msg_ret_code_t ret_code; uint32_t mask=0; char buf[32]; // 32 bit mask followed by subsystem name if (mgr == FM_MGR_PM) { fprintf(stderr, "pmLogMask:\n"); fprintf(stderr, "\tThis command is not supported any more. The logging of the\n"); fprintf(stderr, "\tPerformance Manager(PM) is now\n"); fprintf(stderr, "\tbased on the logging of the Subnet manager(SM). 
Use the\n"); fprintf(stderr, "\tsmLogMask command for changing the logging level of the\n"); fprintf(stderr, "\tSM and PM\n"); } else if (argc == 2) { mask = strtoul(argv[1], NULL, 0); //mask = hton32(mask); // TBD - endian issues seem to be ignored here memcpy(buf, &mask, sizeof(mask)); strncpy(buf+sizeof(mask), argv[0], sizeof(buf)-sizeof(mask)); buf[sizeof(buf)-1] = '\0'; if((res = fm_mgr_simple_query(hdl, FM_ACT_GET, FM_DT_LOG_MASK, mgr, sizeof(buf), (void *)&buf[0], &ret_code)) != FM_CONF_OK) { fprintf(stderr, "mgr_log_mask: Failed to retrieve data: \n" "\tError:(%d) %s \n\tRet code:(%d) %s\n", res, fm_mgr_get_error_str(res),ret_code, fm_mgr_get_resp_error_str(ret_code)); } else { printf("mgr_log_mask: Successfully sent Log Mask control to local mgr instance\n"); } } else { fprintf(stderr, "mgr_log_mask: must specify the subsystem and mask\n"); } return 0; } int sm_perf_debug_toggle(p_fm_config_conx_hdlt hdl, fm_mgr_type_t mgr, int argc, char *argv[]) { fm_mgr_config_errno_t res; fm_msg_ret_code_t ret_code; if((res = fm_mgr_simple_query(hdl, FM_ACT_GET, FM_DT_SM_PERF_DEBUG_TOGGLE, mgr, 0, NULL, &ret_code)) != FM_CONF_OK) { fprintf(stderr, "sm_perf_debug_toggle: Failed to retrieve data: \n" "\tError:(%d) %s \n\tRet code:(%d) %s\n", res, fm_mgr_get_error_str(res),ret_code, fm_mgr_get_resp_error_str(ret_code)); } else { printf("Successfully sent SM Perf Debug output control to local SM instance\n"); } return 0; } int sa_perf_debug_toggle(p_fm_config_conx_hdlt hdl, fm_mgr_type_t mgr, int argc, char *argv[]) { fm_mgr_config_errno_t res; fm_msg_ret_code_t ret_code; if((res = fm_mgr_simple_query(hdl, FM_ACT_GET, FM_DT_SA_PERF_DEBUG_TOGGLE, mgr, 0, NULL, &ret_code)) != FM_CONF_OK) { fprintf(stderr, "sa_perf_debug_toggle: Failed to retrieve data: \n" "\tError:(%d) %s \n\tRet code:(%d) %s\n", res, fm_mgr_get_error_str(res),ret_code, fm_mgr_get_resp_error_str(ret_code)); } else { printf("Successfully sent SA Perf Debug output control to local SM instance\n"); } return 0; } int mgr_rmpp_debug_toggle(p_fm_config_conx_hdlt hdl, fm_mgr_type_t mgr, int argc, char *argv[]) { fm_mgr_config_errno_t res; fm_msg_ret_code_t ret_code; if((res = fm_mgr_simple_query(hdl, FM_ACT_GET, FM_DT_RMPP_DEBUG_TOGGLE, mgr, 0, NULL, &ret_code)) != FM_CONF_OK) { fprintf(stderr, "sa_rmpp_debug_toggle: Failed to retrieve data: \n" "\tError:(%d) %s \n\tRet code:(%d) %s\n", res, fm_mgr_get_error_str(res),ret_code, fm_mgr_get_resp_error_str(ret_code)); } else { printf("Successfully sent Rmpp Debug output control to local Manager instance\n"); } return 0; } int mgr_debug_toggle(p_fm_config_conx_hdlt hdl, fm_mgr_type_t mgr, int argc, char *argv[]) { fm_mgr_config_errno_t res; fm_msg_ret_code_t ret_code; if((res = fm_mgr_simple_query(hdl, FM_ACT_GET, FM_DT_DEBUG_TOGGLE, mgr, 0, NULL, &ret_code)) != FM_CONF_OK) { fprintf(stderr, "mgr_debug_toggle: Failed to retrieve data: \n" "\tError:(%d) %s \n\tRet code:(%d) %s\n", res, fm_mgr_get_error_str(res),ret_code, fm_mgr_get_resp_error_str(ret_code)); } else { printf("Successfully sent Debug output control to local Manager instance\n"); } return 0; } int sm_force_rebalance_toggle(p_fm_config_conx_hdlt hdl, fm_mgr_type_t mgr, int argc, char *argv[]) { fm_mgr_config_errno_t res; fm_msg_ret_code_t ret_code; if((res = fm_mgr_simple_query(hdl, FM_ACT_GET, FM_DT_SM_FORCE_REBALANCE_TOGGLE, mgr, 0, NULL, &ret_code)) != FM_CONF_OK) { fprintf(stderr, "sm_force_rebalance_toggle: Failed to retrieve data: \n" "\tError:(%d) %s \n\tRet code:(%d) %s\n", res, fm_mgr_get_error_str(res),ret_code, 
fm_mgr_get_resp_error_str(ret_code)); } else { printf("Successfully sent SM Force Rebalance control to local SM instance\n"); } return 0; } int sm_adaptive_routing(p_fm_config_conx_hdlt hdl, fm_mgr_type_t mgr, int argc, char *argv[]) { fm_mgr_config_errno_t res; fm_msg_ret_code_t ret_code; uint32_t enable=0; if (argc == 1) { enable = atol(argv[0]); if((res = fm_mgr_simple_query(hdl, FM_ACT_GET, FM_DT_SM_SET_ADAPTIVE_ROUTING, mgr, sizeof(enable), (void*)&enable, &ret_code)) != FM_CONF_OK) { fprintf(stderr, "sm_adaptive_routing: Failed to retrieve data: \n" "\tError:(%d) %s \n\tRet code:(%d) %s\n", res, fm_mgr_get_error_str(res),ret_code, fm_mgr_get_resp_error_str(ret_code)); } else { printf("Successfully sent SM Adaptive Routing control to local SM instance\n"); } } else if (argc == 0) { if((res = fm_mgr_simple_query(hdl, FM_ACT_GET, FM_DT_SM_GET_ADAPTIVE_ROUTING, mgr, sizeof(enable), (void*)&enable, &ret_code)) != FM_CONF_OK) { fprintf(stderr, "sm_adaptive_routing: Failed to retrieve data: \n" "\tError:(%d) %s \n\tRet code:(%d) %s\n", res, fm_mgr_get_error_str(res),ret_code, fm_mgr_get_resp_error_str(ret_code)); } else { printf("SM Adaptive Routing is %s\n", enable ? "enabled" : "disabled"); } } return 0; } int sm_pause_sweeps(p_fm_config_conx_hdlt hdl, fm_mgr_type_t mgr, int argc, char *argv[]) { fm_mgr_config_errno_t res; fm_msg_ret_code_t ret_code; if((res = fm_mgr_simple_query(hdl, FM_ACT_GET, FM_DT_PAUSE_SWEEPS, mgr, 0, NULL, &ret_code)) != FM_CONF_OK) { fprintf(stderr, "sm_pause_sweeps: Failed to retrieve data: \n" "\tError:(%d) %s \n\tRet code:(%d) %s\n", res, fm_mgr_get_error_str(res),ret_code, fm_mgr_get_resp_error_str(ret_code)); } else { printf("sm_pause_sweeps: Successfully sent Pause SM Sweeps command to local mgr instance\n"); } return 0; } int sm_resume_sweeps(p_fm_config_conx_hdlt hdl, fm_mgr_type_t mgr, int argc, char *argv[]) { fm_mgr_config_errno_t res; fm_msg_ret_code_t ret_code; if((res = fm_mgr_simple_query(hdl, FM_ACT_GET, FM_DT_RESUME_SWEEPS, mgr, 0, NULL, &ret_code)) != FM_CONF_OK) { fprintf(stderr, "sm_pause_sweeps: Failed to retrieve data: \n" "\tError:(%d) %s \n\tRet code:(%d) %s\n", res, fm_mgr_get_error_str(res),ret_code, fm_mgr_get_resp_error_str(ret_code)); } else { printf("sm_resume_sweeps: Successfully sent Resume SM Sweeps command to local mgr instance\n"); } return 0; } int sm_looptest_start(p_fm_config_conx_hdlt hdl, fm_mgr_type_t mgr, int argc, char *argv[]) { fm_mgr_config_errno_t res; fm_msg_ret_code_t ret_code; int numpkts=0; uint8_t data[BUF_SZ]; if (argc > 1) { printf("Error: only 1 argument expected\n"); return 0; } if (argc == 1) { numpkts = atol(argv[0]); if (numpkts < 0 || numpkts > 10) { printf("Error: number of packets must be from 0 to 10\n"); return 0; } } *(int*)data = numpkts; if((res = fm_mgr_simple_query(hdl, FM_ACT_GET, FM_DT_SM_LOOP_TEST_START, mgr, BUF_SZ, data, &ret_code)) != FM_CONF_OK) { fprintf(stderr, "sm_looptest_start: Failed to retrieve data: \n" "\tError:(%d) %s \n\tRet code:(%d) %s\n", res, fm_mgr_get_error_str(res),ret_code, fm_mgr_get_resp_error_str(ret_code)); } else { printf("Successfully sent Loop Test START control (%d inject packets) to local SM instance\n", numpkts); data[BUF_SZ-1]=0; printf("%s", (char*) data); } return 0; } int sm_looptest_fast_mode_start(p_fm_config_conx_hdlt hdl, fm_mgr_type_t mgr, int argc, char *argv[]) { fm_mgr_config_errno_t res; fm_msg_ret_code_t ret_code; int numpkts=4; uint8_t data[BUF_SZ]; if (argc > 1) { printf("Error: only 1 argument expected\n"); return 0; } if (argc == 1) { numpkts 
= atol(argv[0]); if (numpkts < 0 || numpkts > 10) { printf("Error: number of packets must be from 0 to 10\n"); return 0; } } *(int*)data = numpkts; if((res = fm_mgr_simple_query(hdl, FM_ACT_GET, FM_DT_SM_LOOP_TEST_FAST_MODE_START, mgr, BUF_SZ, data, &ret_code)) != FM_CONF_OK) { fprintf(stderr, "sm_looptest_fast_mode_start: Failed to retrieve data: \n" "\tError:(%d) %s \n\tRet code:(%d) %s\n", res, fm_mgr_get_error_str(res),ret_code, fm_mgr_get_resp_error_str(ret_code)); } else { printf("Successfully sent Loop Test Fast Mode START control (%d inject packets) to local SM instance\n", numpkts); data[BUF_SZ-1]=0; printf("%s", (char*) data); } return 0; } int sm_looptest_stop(p_fm_config_conx_hdlt hdl, fm_mgr_type_t mgr, int argc, char *argv[]) { fm_mgr_config_errno_t res; fm_msg_ret_code_t ret_code; uint8_t data[BUF_SZ]; if((res = fm_mgr_simple_query(hdl, FM_ACT_GET, FM_DT_SM_LOOP_TEST_STOP, mgr, BUF_SZ, data, &ret_code)) != FM_CONF_OK) { fprintf(stderr, "sm_looptest_stop: Failed to retrieve data: \n" "\tError:(%d) %s \n\tRet code:(%d) %s\n", res, fm_mgr_get_error_str(res),ret_code, fm_mgr_get_resp_error_str(ret_code)); } else { printf("Successfully sent Loop Test STOP control to local SM instance\n"); data[BUF_SZ-1]=0; printf("%s", (char*) data); } return 0; } int sm_looptest_inject_packets(p_fm_config_conx_hdlt hdl, fm_mgr_type_t mgr, int argc, char *argv[]) { fm_mgr_config_errno_t res; fm_msg_ret_code_t ret_code; int numpkts=1; uint8_t data[BUF_SZ]; if (argc > 1) { printf("Error: only 1 argument expected\n"); return 0; } if (argc == 1) { numpkts = atol(argv[0]); if (numpkts < 1 || numpkts > 10) { printf("Error: number of packets must be from 1 to 10\n"); return 0; } } *(int*)data = numpkts; if((res = fm_mgr_simple_query(hdl, FM_ACT_GET, FM_DT_SM_LOOP_TEST_INJECT_PACKETS, mgr, BUF_SZ, data, &ret_code)) != FM_CONF_OK) { fprintf(stderr, "sm_looptest_inject_packets: Failed to retrieve data: \n" "\tError:(%d) %s \n\tRet code:(%d) %s\n", res, fm_mgr_get_error_str(res),ret_code, fm_mgr_get_resp_error_str(ret_code)); } else { printf("Successfully sent Loop Test Inject %d Packets control to local SM instance\n", numpkts); data[BUF_SZ-1]=0; printf("%s", (char*) data); } return 0; } int sm_looptest_inject_at_node(p_fm_config_conx_hdlt hdl, fm_mgr_type_t mgr, int argc, char *argv[]) { fm_mgr_config_errno_t res; fm_msg_ret_code_t ret_code; int nodeidx=0; uint8_t data[BUF_SZ]; if (argc > 1) { printf("Error: only 1 argument expected\n"); return 0; } if (argc == 1) { nodeidx = atol(argv[0]); } *(int*)data = nodeidx; if((res = fm_mgr_simple_query(hdl, FM_ACT_GET, FM_DT_SM_LOOP_TEST_INJECT_ATNODE, mgr, BUF_SZ, data, &ret_code)) != FM_CONF_OK) { fprintf(stderr, "sm_looptest_inject_at_node: Failed to retrieve data: \n" "\tError:(%d) %s \n\tRet code:(%d) %s\n", res, fm_mgr_get_error_str(res),ret_code, fm_mgr_get_resp_error_str(ret_code)); } else { printf("Successfully sent Loop Test Inject Packets at Node index %d control to local SM instance\n", nodeidx); data[BUF_SZ-1]=0; printf("%s", (char*) data); } return 0; } int sm_looptest_inject_packets_each_sweep(p_fm_config_conx_hdlt hdl, fm_mgr_type_t mgr, int argc, char *argv[]) { fm_mgr_config_errno_t res; fm_msg_ret_code_t ret_code; int inject=1; uint8_t data[BUF_SZ]; if (argc > 1) { printf("Error: only 1 argument expected\n"); return 0; } if (argc == 1) { inject = atol(argv[0]); } *(int*)data = inject; if((res = fm_mgr_simple_query(hdl, FM_ACT_GET, FM_DT_SM_LOOP_TEST_INJECT_EACH_SWEEP, mgr, BUF_SZ, data, &ret_code)) != FM_CONF_OK) { fprintf(stderr, 
"sm_looptest_inject_packets_each_sweep: Failed to retrieve data: \n" "\tError:(%d) %s \n\tRet code:(%d) %s\n", res, fm_mgr_get_error_str(res),ret_code, fm_mgr_get_resp_error_str(ret_code)); } else { printf("Successfully sent Loop Test Inject Packet Each Sweep %d control to local SM instance\n", inject); data[BUF_SZ-1]=0; printf("%s", (char*) data); } return 0; } int sm_looptest_path_length(p_fm_config_conx_hdlt hdl, fm_mgr_type_t mgr, int argc, char *argv[]) { fm_mgr_config_errno_t res; fm_msg_ret_code_t ret_code; int plen=3; uint8_t data[BUF_SZ]; if (argc > 1) { printf("Error: only 1 argument expected\n"); return 0; } if (argc == 1) { plen = atol(argv[0]); if (plen < 2 || plen > 4) { printf("Error: length must be 2-4\n"); return 0; } } *(int*)data = plen; if((res = fm_mgr_simple_query(hdl, FM_ACT_GET, FM_DT_SM_LOOP_TEST_PATH_LEN, mgr, BUF_SZ, data, &ret_code)) != FM_CONF_OK) { fprintf(stderr, "sm_looptest_path_length: Failed to retrieve data: \n" "\tError:(%d) %s \n\tRet code:(%d) %s\n", res, fm_mgr_get_error_str(res),ret_code, fm_mgr_get_resp_error_str(ret_code)); } else { printf("Successfully sent Loop Test Path Length set to %d control to local SM instance\n", plen); data[BUF_SZ-1]=0; printf("%s", (char*) data); } return 0; } int sm_looptest_show_loop_paths(p_fm_config_conx_hdlt hdl, fm_mgr_type_t mgr, int argc, char *argv[]) { fm_mgr_config_errno_t res; fm_msg_ret_code_t ret_code; fm_config_interation_data_t interationData; uint8_t data[BUF_SZ]; int index = -1; int start; if (argc > 1) { printf("Error: only 1 argument expected\n"); return 0; } if (argc == 1) { index = atol(argv[0]); } memset(&interationData, 0, sizeof(fm_config_interation_data_t)); interationData.start = start = 1; interationData.index = index; while (!interationData.done) { memcpy(data, &interationData, sizeof(fm_config_interation_data_t)); if((res = fm_mgr_simple_query(hdl, FM_ACT_GET, FM_DT_SM_LOOP_TEST_SHOW_PATHS, mgr, BUF_SZ, data, &ret_code)) != FM_CONF_OK) { fprintf(stderr, "sm_looptest_show_loop_paths: Failed to retrieve data: \n" "\tError:(%d) %s \n\tRet code:(%d) %s\n", res, fm_mgr_get_error_str(res),ret_code, fm_mgr_get_resp_error_str(ret_code)); return 0; } if (start) { if (index == -1) printf("Successfully sent Loop Test Path show for node index (all) to local SM instance\n"); else printf("Successfully sent Loop Test Path show for node index %d to local SM instance\n", index); start = 0; } memcpy(&interationData, data, sizeof(fm_config_interation_data_t)); printf("%s", interationData.intermediateBuffer); } return 0; } int sm_looptest_min_isl_redundancy(p_fm_config_conx_hdlt hdl, fm_mgr_type_t mgr, int argc, char *argv[]) { fm_mgr_config_errno_t res; fm_msg_ret_code_t ret_code; int plen=1; uint8_t data[BUF_SZ]; if (argc > 1) { printf("Error: only 1 argument expected\n"); return 0; } if (argc == 1) { plen = atol(argv[0]); } *(int*)data = plen; if((res = fm_mgr_simple_query(hdl, FM_ACT_GET, FM_DT_SM_LOOP_TEST_MIN_ISL_REDUNDANCY, mgr, BUF_SZ, data, &ret_code)) != FM_CONF_OK) { fprintf(stderr, "sm_looptest_path_min_isl_redundancy: Failed to retrieve data: \n" "\tError:(%d) %s \n\tRet code:(%d) %s\n", res, fm_mgr_get_error_str(res),ret_code, fm_mgr_get_resp_error_str(ret_code)); } else { printf("Successfully sent Loop Test Min ISL redundancy set to %d control to local SM instance\n", plen); data[BUF_SZ-1]=0; printf("%s", (char*) data); } return 0; } int sm_looptest_fast(p_fm_config_conx_hdlt hdl, fm_mgr_type_t mgr, int argc, char *argv[]) { fm_mgr_config_errno_t res; fm_msg_ret_code_t ret_code; int plen=1; if 
(argc > 1) { printf("Error: only 1 argument expected\n"); return 0; } if (argc == 1) { plen = atol(argv[0]); } if((res = fm_mgr_simple_query(hdl, FM_ACT_GET, FM_DT_SM_LOOP_TEST_FAST, mgr, sizeof(plen), (void *)&plen, &ret_code)) != FM_CONF_OK) { fprintf(stderr, "sm_looptest_fast: Failed to retrieve data: \n" "\tError:(%d) %s \n\tRet code:(%d) %s\n", res, fm_mgr_get_error_str(res),ret_code, fm_mgr_get_resp_error_str(ret_code)); } else { printf("Successfully sent Loop Test Fast Mode %d control to local SM instance\n", plen); } return 0; } int sm_looptest_show_switch_lfts(p_fm_config_conx_hdlt hdl, fm_mgr_type_t mgr, int argc, char *argv[]) { fm_mgr_config_errno_t res; fm_msg_ret_code_t ret_code; uint8_t data[BUF_SZ]; int index=-1; fm_config_interation_data_t interationData; int start; if (argc > 1) { printf("Error: only 1 argument expected\n"); return 0; } if (argc == 1) { index = atol(argv[0]); } memset(&interationData, 0, sizeof(fm_config_interation_data_t)); interationData.start = start = 1; interationData.index = index; while (!interationData.done) { memcpy(data, &interationData, sizeof(fm_config_interation_data_t)); if((res = fm_mgr_simple_query(hdl, FM_ACT_GET, FM_DT_SM_LOOP_TEST_SHOW_LFTS, mgr, BUF_SZ, data, &ret_code)) != FM_CONF_OK) { fprintf(stderr, "sm_looptest_show_switch_lfts: Failed to retrieve data: \n" "\tError:(%d) %s \n\tRet code:(%d) %s\n", res, fm_mgr_get_error_str(res),ret_code, fm_mgr_get_resp_error_str(ret_code)); return 0; } if (start) { start = 0; if (index == -1) printf("Successfully sent Loop Test LFT show for node index (all) to local SM instance\n"); else printf("Successfully sent Loop Test LFT show for node index %d to local SM instance\n", index); } memcpy(&interationData, data, sizeof(fm_config_interation_data_t)); printf("%s", interationData.intermediateBuffer); } return 0; } int sm_looptest_show_loop_topology(p_fm_config_conx_hdlt hdl, fm_mgr_type_t mgr, int argc, char *argv[]) { fm_mgr_config_errno_t res; fm_msg_ret_code_t ret_code; uint8_t data[BUF_SZ]; fm_config_interation_data_t interationData; int start; memset(&interationData, 0, sizeof(fm_config_interation_data_t)); interationData.start = start = 1; interationData.index = 0; while (!interationData.done) { memcpy(data, &interationData, sizeof(fm_config_interation_data_t)); if((res = fm_mgr_simple_query(hdl, FM_ACT_GET, FM_DT_SM_LOOP_TEST_SHOW_TOPO, mgr, BUF_SZ, data, &ret_code)) != FM_CONF_OK) { fprintf(stderr, "sm_looptest_show_loop_topology: Failed to retrieve data: \n" "\tError:(%d) %s \n\tRet code:(%d) %s\n", res, fm_mgr_get_error_str(res),ret_code, fm_mgr_get_resp_error_str(ret_code)); return 0; } if (start) { start = 0; printf("Successfully sent Loop Test topology show to local SM instance\n"); } memcpy(&interationData, data, sizeof(fm_config_interation_data_t)); printf("%s", interationData.intermediateBuffer); } return 0; } int sm_looptest_show_config(p_fm_config_conx_hdlt hdl, fm_mgr_type_t mgr, int argc, char *argv[]) { fm_mgr_config_errno_t res; fm_msg_ret_code_t ret_code; uint8_t data[BUF_SZ]; if((res = fm_mgr_simple_query(hdl, FM_ACT_GET, FM_DT_SM_LOOP_TEST_SHOW_CONFIG, mgr, BUF_SZ, data, &ret_code)) != FM_CONF_OK) { fprintf(stderr, "sm_looptest_show_config: Failed to retrieve data: \n" "\tError:(%d) %s \n\tRet code:(%d) %s\n", res, fm_mgr_get_error_str(res),ret_code, fm_mgr_get_resp_error_str(ret_code)); } else { printf("Successfully sent Loop Test configuration show to local SM instance\n"); data[BUF_SZ-1]=0; printf("%s", (char*) data); } return 0; } int 
sm_force_attribute_rewrite(p_fm_config_conx_hdlt hdl, fm_mgr_type_t mgr, int argc, char *argv[]) { fm_mgr_config_errno_t res; fm_msg_ret_code_t ret_code; int attrRewrite = 0; if (argc > 1) { printf("Error: only 1 argument expected\n"); return 0; } if (argc == 1) { attrRewrite = atol(argv[0]); if (attrRewrite < 0 || attrRewrite > 1) { printf("Error: attrRewrite must be either 0 (disable) or 1 (enable)\n"); return 0; } } if((res = fm_mgr_simple_query(hdl, FM_ACT_GET, FM_DT_SM_FORCE_ATTRIBUTE_REWRITE, mgr, sizeof(attrRewrite), (void*) &attrRewrite, &ret_code)) != FM_CONF_OK) { fprintf(stderr, "sm_force_attribute_rewrite: Failed to retrieve data: \n" "\tError:(%d) %s \n\tRet code:(%d) %s\n", res, fm_mgr_get_error_str(res),ret_code, fm_mgr_get_resp_error_str(ret_code)); } else { printf("Successfully sent set to %d of force attribute rewriting to local SM instance\n", attrRewrite); } return 0; } int sm_skip_attr_write(p_fm_config_conx_hdlt hdl, fm_mgr_type_t mgr, int argc, char *argv[]) { fm_mgr_config_errno_t res; fm_msg_ret_code_t ret_code; unsigned int attrSkip = 0; if (argc > 1) { printf("Error: only 1 argument or less expected\n"); return 0; } if ((argc==0) || ((argc==1) && (strcmp(argv[0],"-help")==0)) ) { printf(" SM SKIP WRITE BITMASKS...\n"); printf(" SM_SKIP_WRITE_PORTINFO 0x00000001 (Includes Port Info)\n"); printf(" SM_SKIP_WRITE_SMINFO 0x00000002 (Includes Sm Info)\n"); printf(" SM_SKIP_WRITE_GUID 0x00000004 (Includes GUID Info\n"); printf(" SM_SKIP_WRITE_SWITCHINFO 0x00000008 (Includes Switch Info\n"); printf(" SM_SKIP_WRITE_SWITCHLTV 0x00000010 (Includes Switch LTV)\n"); printf(" SM_SKIP_WRITE_VLARB 0x00000020 (Includes VLArb Tables/Preempt Tables)\n"); printf(" SM_SKIP_WRITE_MAPS 0x00000040 (Includes SL::SC, SC::SL, SC::VL)\n"); printf(" SM_SKIP_WRITE_LFT 0x00000080 (Includes LFT, MFT)\n"); printf(" SM_SKIP_WRITE_AR 0x00000100 (Includes PG table, PG FDB)\n"); printf(" SM_SKIP_WRITE_PKEY 0x00000200\n"); printf(" SM_SKIP_WRITE_CONG 0x00000400 (Includes HFI / Switch congestion)\n"); printf(" SM_SKIP_WRITE_BFRCTRL 0x00000800\n"); printf(" SM_SKIP_WRITE_NOTICE 0x00001000\n"); return 0; } attrSkip = strtol(argv[0],NULL,0); if((res = fm_mgr_simple_query(hdl, FM_ACT_GET, FM_DT_SM_SKIP_ATTRIBUTE_WRITE, mgr, sizeof(attrSkip), (void*) &attrSkip, &ret_code)) != FM_CONF_OK) { fprintf(stderr, "sm_skip_attr_write: Failed to retrieve data: \n" "\tError:(%d) %s \n\tRet code:(%d) %s\n", res, fm_mgr_get_error_str(res),ret_code, fm_mgr_get_resp_error_str(ret_code)); } else { printf("Successfully sent set to 0x%x of skip write to local SM instance\n", attrSkip); } return 0; } int main(int argc, char *argv[]) { p_fm_config_conx_hdlt hdl; int instance = 0; fm_mgr_config_errno_t res; char *rem_addr = NULL; char *community = "public"; char Opts[256]; int arg; char *command; int i; /* Get options at the command line (overide default values) */ strcpy(Opts, "i:d:h-"); while ((arg = getopt(argc, argv, Opts)) != EOF) { switch (arg) { case 'h': case '-': usage(argv[0]); return(0); case 'i': instance = atol(optarg); break; case 'd': rem_addr = optarg; break; default: usage(argv[0]); return(-1); } } if(optind >= argc){ fprintf(stderr, "Command required\n"); usage(argv[0]); return -1; } command = argv[optind++]; printf("Connecting to %s FM instance %d\n", (rem_addr==NULL) ? 
"LOCAL":rem_addr, instance); if((res = fm_mgr_config_init(&hdl,instance, rem_addr, community)) != FM_CONF_OK) { fprintf(stderr, "Failed to initialize the client handle: %d\n", res); goto die_clean; } if((res = fm_mgr_config_connect(hdl)) != FM_CONF_OK) { fprintf(stderr, "Failed to connect: (%d) %s\n",res,fm_mgr_get_error_str(res)); goto die_clean; } for(i=0;i<commandListLen;i++){ if(strcmp(command,commandList[i].name) == 0){ return commandList[i].cmdPtr(hdl, commandList[i].mgr, (argc - optind), &argv[optind]); } } fprintf(stderr, "Command (%s) is not valid\n",command); usage(argv[0]); res = -1; die_clean: if (hdl) free(hdl); return res; }
./CrossVul/dataset_final_sorted/CWE-362/c/bad_1664_1
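The sm_looptest_* commands in the file above all repeat the same boilerplate: stuff an int argument into a BUF_SZ byte buffer, call fm_mgr_simple_query() with FM_ACT_GET, print the standard two-line error message on failure, otherwise NUL-terminate the buffer and print the reply text. The sketch below shows how that pattern could be factored into one helper. It is a hypothetical refactoring, not code from the file: the helper name, the plain int used for the FM_DT_* code, and the success-message parameter are inventions for illustration, and it assumes the project headers that declare p_fm_config_conx_hdlt, fm_mgr_type_t, fm_msg_ret_code_t, fm_mgr_simple_query(), fm_mgr_get_error_str(), fm_mgr_get_resp_error_str(), BUF_SZ and FM_CONF_OK.

/*
 * Hypothetical helper, not taken from the file above. It relies on the
 * project headers for the fm_* types and functions; the plain int for the
 * FM_DT_* code is a guess, since the real enum type is not visible here.
 */
#include <stdio.h>
#include <stdint.h>

static int send_simple_query(p_fm_config_conx_hdlt hdl, fm_mgr_type_t mgr,
                             int dtype, int arg, const char *who,
                             const char *success_msg)
{
    fm_mgr_config_errno_t res;
    fm_msg_ret_code_t     ret_code;
    uint8_t               data[BUF_SZ];

    *(int *)data = arg;   /* same int-in-buffer convention as the commands above */

    res = fm_mgr_simple_query(hdl, FM_ACT_GET, dtype, mgr, BUF_SZ, data, &ret_code);
    if (res != FM_CONF_OK) {
        fprintf(stderr, "%s: Failed to retrieve data: \n"
                "\tError:(%d) %s \n\tRet code:(%d) %s\n",
                who, res, fm_mgr_get_error_str(res),
                ret_code, fm_mgr_get_resp_error_str(ret_code));
        return -1;
    }
    printf("%s\n", success_msg);
    data[BUF_SZ - 1] = 0;   /* NUL-terminate before printing the reply text */
    printf("%s", (char *)data);
    return 0;
}

Each command would then shrink to its argument parsing plus one call, for example send_simple_query(hdl, mgr, FM_DT_SM_LOOP_TEST_STOP, 0, "sm_looptest_stop", "Successfully sent Loop Test STOP control to local SM instance").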
crossvul-cpp_data_bad_2970_0
/* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * RAW - implementation of IP "raw" sockets. * * Authors: Ross Biro * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> * * Fixes: * Alan Cox : verify_area() fixed up * Alan Cox : ICMP error handling * Alan Cox : EMSGSIZE if you send too big a packet * Alan Cox : Now uses generic datagrams and shared * skbuff library. No more peek crashes, * no more backlogs * Alan Cox : Checks sk->broadcast. * Alan Cox : Uses skb_free_datagram/skb_copy_datagram * Alan Cox : Raw passes ip options too * Alan Cox : Setsocketopt added * Alan Cox : Fixed error return for broadcasts * Alan Cox : Removed wake_up calls * Alan Cox : Use ttl/tos * Alan Cox : Cleaned up old debugging * Alan Cox : Use new kernel side addresses * Arnt Gulbrandsen : Fixed MSG_DONTROUTE in raw sockets. * Alan Cox : BSD style RAW socket demultiplexing. * Alan Cox : Beginnings of mrouted support. * Alan Cox : Added IP_HDRINCL option. * Alan Cox : Skip broadcast check if BSDism set. * David S. Miller : New socket lookup architecture. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/types.h> #include <linux/atomic.h> #include <asm/byteorder.h> #include <asm/current.h> #include <linux/uaccess.h> #include <asm/ioctls.h> #include <linux/stddef.h> #include <linux/slab.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/export.h> #include <linux/spinlock.h> #include <linux/sockios.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/mroute.h> #include <linux/netdevice.h> #include <linux/in_route.h> #include <linux/route.h> #include <linux/skbuff.h> #include <linux/igmp.h> #include <net/net_namespace.h> #include <net/dst.h> #include <net/sock.h> #include <linux/ip.h> #include <linux/net.h> #include <net/ip.h> #include <net/icmp.h> #include <net/udp.h> #include <net/raw.h> #include <net/snmp.h> #include <net/tcp_states.h> #include <net/inet_common.h> #include <net/checksum.h> #include <net/xfrm.h> #include <linux/rtnetlink.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/netfilter.h> #include <linux/netfilter_ipv4.h> #include <linux/compat.h> #include <linux/uio.h> struct raw_frag_vec { struct msghdr *msg; union { struct icmphdr icmph; char c[1]; } hdr; int hlen; }; struct raw_hashinfo raw_v4_hashinfo = { .lock = __RW_LOCK_UNLOCKED(raw_v4_hashinfo.lock), }; EXPORT_SYMBOL_GPL(raw_v4_hashinfo); int raw_hash_sk(struct sock *sk) { struct raw_hashinfo *h = sk->sk_prot->h.raw_hash; struct hlist_head *head; head = &h->ht[inet_sk(sk)->inet_num & (RAW_HTABLE_SIZE - 1)]; write_lock_bh(&h->lock); sk_add_node(sk, head); sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); write_unlock_bh(&h->lock); return 0; } EXPORT_SYMBOL_GPL(raw_hash_sk); void raw_unhash_sk(struct sock *sk) { struct raw_hashinfo *h = sk->sk_prot->h.raw_hash; write_lock_bh(&h->lock); if (sk_del_node_init(sk)) sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); write_unlock_bh(&h->lock); } EXPORT_SYMBOL_GPL(raw_unhash_sk); struct sock *__raw_v4_lookup(struct net *net, struct sock *sk, unsigned short num, __be32 raddr, __be32 laddr, int dif, int sdif) { sk_for_each_from(sk) { struct inet_sock *inet = inet_sk(sk); if 
(net_eq(sock_net(sk), net) && inet->inet_num == num && !(inet->inet_daddr && inet->inet_daddr != raddr) && !(inet->inet_rcv_saddr && inet->inet_rcv_saddr != laddr) && !(sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif && sk->sk_bound_dev_if != sdif)) goto found; /* gotcha */ } sk = NULL; found: return sk; } EXPORT_SYMBOL_GPL(__raw_v4_lookup); /* * 0 - deliver * 1 - block */ static int icmp_filter(const struct sock *sk, const struct sk_buff *skb) { struct icmphdr _hdr; const struct icmphdr *hdr; hdr = skb_header_pointer(skb, skb_transport_offset(skb), sizeof(_hdr), &_hdr); if (!hdr) return 1; if (hdr->type < 32) { __u32 data = raw_sk(sk)->filter.data; return ((1U << hdr->type) & data) != 0; } /* Do not block unknown ICMP types */ return 0; } /* IP input processing comes here for RAW socket delivery. * Caller owns SKB, so we must make clones. * * RFC 1122: SHOULD pass TOS value up to the transport layer. * -> It does. And not only TOS, but all IP header. */ static int raw_v4_input(struct sk_buff *skb, const struct iphdr *iph, int hash) { int sdif = inet_sdif(skb); struct sock *sk; struct hlist_head *head; int delivered = 0; struct net *net; read_lock(&raw_v4_hashinfo.lock); head = &raw_v4_hashinfo.ht[hash]; if (hlist_empty(head)) goto out; net = dev_net(skb->dev); sk = __raw_v4_lookup(net, __sk_head(head), iph->protocol, iph->saddr, iph->daddr, skb->dev->ifindex, sdif); while (sk) { delivered = 1; if ((iph->protocol != IPPROTO_ICMP || !icmp_filter(sk, skb)) && ip_mc_sf_allow(sk, iph->daddr, iph->saddr, skb->dev->ifindex, sdif)) { struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC); /* Not releasing hash table! */ if (clone) raw_rcv(sk, clone); } sk = __raw_v4_lookup(net, sk_next(sk), iph->protocol, iph->saddr, iph->daddr, skb->dev->ifindex, sdif); } out: read_unlock(&raw_v4_hashinfo.lock); return delivered; } int raw_local_deliver(struct sk_buff *skb, int protocol) { int hash; struct sock *raw_sk; hash = protocol & (RAW_HTABLE_SIZE - 1); raw_sk = sk_head(&raw_v4_hashinfo.ht[hash]); /* If there maybe a raw socket we must check - if not we * don't care less */ if (raw_sk && !raw_v4_input(skb, ip_hdr(skb), hash)) raw_sk = NULL; return raw_sk != NULL; } static void raw_err(struct sock *sk, struct sk_buff *skb, u32 info) { struct inet_sock *inet = inet_sk(sk); const int type = icmp_hdr(skb)->type; const int code = icmp_hdr(skb)->code; int err = 0; int harderr = 0; if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) ipv4_sk_update_pmtu(skb, sk, info); else if (type == ICMP_REDIRECT) { ipv4_sk_redirect(skb, sk); return; } /* Report error on raw socket, if: 1. User requested ip_recverr. 2. Socket is connected (otherwise the error indication is useless without ip_recverr and error is hard. 
*/ if (!inet->recverr && sk->sk_state != TCP_ESTABLISHED) return; switch (type) { default: case ICMP_TIME_EXCEEDED: err = EHOSTUNREACH; break; case ICMP_SOURCE_QUENCH: return; case ICMP_PARAMETERPROB: err = EPROTO; harderr = 1; break; case ICMP_DEST_UNREACH: err = EHOSTUNREACH; if (code > NR_ICMP_UNREACH) break; err = icmp_err_convert[code].errno; harderr = icmp_err_convert[code].fatal; if (code == ICMP_FRAG_NEEDED) { harderr = inet->pmtudisc != IP_PMTUDISC_DONT; err = EMSGSIZE; } } if (inet->recverr) { const struct iphdr *iph = (const struct iphdr *)skb->data; u8 *payload = skb->data + (iph->ihl << 2); if (inet->hdrincl) payload = skb->data; ip_icmp_error(sk, skb, err, 0, info, payload); } if (inet->recverr || harderr) { sk->sk_err = err; sk->sk_error_report(sk); } } void raw_icmp_error(struct sk_buff *skb, int protocol, u32 info) { int hash; struct sock *raw_sk; const struct iphdr *iph; struct net *net; hash = protocol & (RAW_HTABLE_SIZE - 1); read_lock(&raw_v4_hashinfo.lock); raw_sk = sk_head(&raw_v4_hashinfo.ht[hash]); if (raw_sk) { int dif = skb->dev->ifindex; int sdif = inet_sdif(skb); iph = (const struct iphdr *)skb->data; net = dev_net(skb->dev); while ((raw_sk = __raw_v4_lookup(net, raw_sk, protocol, iph->daddr, iph->saddr, dif, sdif)) != NULL) { raw_err(raw_sk, skb, info); raw_sk = sk_next(raw_sk); iph = (const struct iphdr *)skb->data; } } read_unlock(&raw_v4_hashinfo.lock); } static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb) { /* Charge it to the socket. */ ipv4_pktinfo_prepare(sk, skb); if (sock_queue_rcv_skb(sk, skb) < 0) { kfree_skb(skb); return NET_RX_DROP; } return NET_RX_SUCCESS; } int raw_rcv(struct sock *sk, struct sk_buff *skb) { if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) { atomic_inc(&sk->sk_drops); kfree_skb(skb); return NET_RX_DROP; } nf_reset(skb); skb_push(skb, skb->data - skb_network_header(skb)); raw_rcv_skb(sk, skb); return 0; } static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4, struct msghdr *msg, size_t length, struct rtable **rtp, unsigned int flags, const struct sockcm_cookie *sockc) { struct inet_sock *inet = inet_sk(sk); struct net *net = sock_net(sk); struct iphdr *iph; struct sk_buff *skb; unsigned int iphlen; int err; struct rtable *rt = *rtp; int hlen, tlen; if (length > rt->dst.dev->mtu) { ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport, rt->dst.dev->mtu); return -EMSGSIZE; } if (length < sizeof(struct iphdr)) return -EINVAL; if (flags&MSG_PROBE) goto out; hlen = LL_RESERVED_SPACE(rt->dst.dev); tlen = rt->dst.dev->needed_tailroom; skb = sock_alloc_send_skb(sk, length + hlen + tlen + 15, flags & MSG_DONTWAIT, &err); if (!skb) goto error; skb_reserve(skb, hlen); skb->priority = sk->sk_priority; skb->mark = sk->sk_mark; skb_dst_set(skb, &rt->dst); *rtp = NULL; skb_reset_network_header(skb); iph = ip_hdr(skb); skb_put(skb, length); skb->ip_summed = CHECKSUM_NONE; sock_tx_timestamp(sk, sockc->tsflags, &skb_shinfo(skb)->tx_flags); if (flags & MSG_CONFIRM) skb_set_dst_pending_confirm(skb, 1); skb->transport_header = skb->network_header; err = -EFAULT; if (memcpy_from_msg(iph, msg, length)) goto error_free; iphlen = iph->ihl * 4; /* * We don't want to modify the ip header, but we do need to * be sure that it won't cause problems later along the network * stack. Specifically we want to make sure that iph->ihl is a * sane value. 
If ihl points beyond the length of the buffer passed * in, reject the frame as invalid */ err = -EINVAL; if (iphlen > length) goto error_free; if (iphlen >= sizeof(*iph)) { if (!iph->saddr) iph->saddr = fl4->saddr; iph->check = 0; iph->tot_len = htons(length); if (!iph->id) ip_select_ident(net, skb, NULL); iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl); skb->transport_header += iphlen; if (iph->protocol == IPPROTO_ICMP && length >= iphlen + sizeof(struct icmphdr)) icmp_out_count(net, ((struct icmphdr *) skb_transport_header(skb))->type); } err = NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk, skb, NULL, rt->dst.dev, dst_output); if (err > 0) err = net_xmit_errno(err); if (err) goto error; out: return 0; error_free: kfree_skb(skb); error: IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS); if (err == -ENOBUFS && !inet->recverr) err = 0; return err; } static int raw_probe_proto_opt(struct raw_frag_vec *rfv, struct flowi4 *fl4) { int err; if (fl4->flowi4_proto != IPPROTO_ICMP) return 0; /* We only need the first two bytes. */ rfv->hlen = 2; err = memcpy_from_msg(rfv->hdr.c, rfv->msg, rfv->hlen); if (err) return err; fl4->fl4_icmp_type = rfv->hdr.icmph.type; fl4->fl4_icmp_code = rfv->hdr.icmph.code; return 0; } static int raw_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb) { struct raw_frag_vec *rfv = from; if (offset < rfv->hlen) { int copy = min(rfv->hlen - offset, len); if (skb->ip_summed == CHECKSUM_PARTIAL) memcpy(to, rfv->hdr.c + offset, copy); else skb->csum = csum_block_add( skb->csum, csum_partial_copy_nocheck(rfv->hdr.c + offset, to, copy, 0), odd); odd = 0; offset += copy; to += copy; len -= copy; if (!len) return 0; } offset -= rfv->hlen; return ip_generic_getfrag(rfv->msg, to, offset, len, odd, skb); } static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) { struct inet_sock *inet = inet_sk(sk); struct net *net = sock_net(sk); struct ipcm_cookie ipc; struct rtable *rt = NULL; struct flowi4 fl4; int free = 0; __be32 daddr; __be32 saddr; u8 tos; int err; struct ip_options_data opt_copy; struct raw_frag_vec rfv; err = -EMSGSIZE; if (len > 0xFFFF) goto out; /* * Check the flags. */ err = -EOPNOTSUPP; if (msg->msg_flags & MSG_OOB) /* Mirror BSD error message */ goto out; /* compatibility */ /* * Get and verify the address. */ if (msg->msg_namelen) { DECLARE_SOCKADDR(struct sockaddr_in *, usin, msg->msg_name); err = -EINVAL; if (msg->msg_namelen < sizeof(*usin)) goto out; if (usin->sin_family != AF_INET) { pr_info_once("%s: %s forgot to set AF_INET. Fix it!\n", __func__, current->comm); err = -EAFNOSUPPORT; if (usin->sin_family) goto out; } daddr = usin->sin_addr.s_addr; /* ANK: I did not forget to get protocol from port field. * I just do not know, who uses this weirdness. * IP_HDRINCL is much more convenient. 
*/ } else { err = -EDESTADDRREQ; if (sk->sk_state != TCP_ESTABLISHED) goto out; daddr = inet->inet_daddr; } ipc.sockc.tsflags = sk->sk_tsflags; ipc.addr = inet->inet_saddr; ipc.opt = NULL; ipc.tx_flags = 0; ipc.ttl = 0; ipc.tos = -1; ipc.oif = sk->sk_bound_dev_if; if (msg->msg_controllen) { err = ip_cmsg_send(sk, msg, &ipc, false); if (unlikely(err)) { kfree(ipc.opt); goto out; } if (ipc.opt) free = 1; } saddr = ipc.addr; ipc.addr = daddr; if (!ipc.opt) { struct ip_options_rcu *inet_opt; rcu_read_lock(); inet_opt = rcu_dereference(inet->inet_opt); if (inet_opt) { memcpy(&opt_copy, inet_opt, sizeof(*inet_opt) + inet_opt->opt.optlen); ipc.opt = &opt_copy.opt; } rcu_read_unlock(); } if (ipc.opt) { err = -EINVAL; /* Linux does not mangle headers on raw sockets, * so that IP options + IP_HDRINCL is non-sense. */ if (inet->hdrincl) goto done; if (ipc.opt->opt.srr) { if (!daddr) goto done; daddr = ipc.opt->opt.faddr; } } tos = get_rtconn_flags(&ipc, sk); if (msg->msg_flags & MSG_DONTROUTE) tos |= RTO_ONLINK; if (ipv4_is_multicast(daddr)) { if (!ipc.oif) ipc.oif = inet->mc_index; if (!saddr) saddr = inet->mc_addr; } else if (!ipc.oif) ipc.oif = inet->uc_index; flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos, RT_SCOPE_UNIVERSE, inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol, inet_sk_flowi_flags(sk) | (inet->hdrincl ? FLOWI_FLAG_KNOWN_NH : 0), daddr, saddr, 0, 0, sk->sk_uid); if (!inet->hdrincl) { rfv.msg = msg; rfv.hlen = 0; err = raw_probe_proto_opt(&rfv, &fl4); if (err) goto done; } security_sk_classify_flow(sk, flowi4_to_flowi(&fl4)); rt = ip_route_output_flow(net, &fl4, sk); if (IS_ERR(rt)) { err = PTR_ERR(rt); rt = NULL; goto done; } err = -EACCES; if (rt->rt_flags & RTCF_BROADCAST && !sock_flag(sk, SOCK_BROADCAST)) goto done; if (msg->msg_flags & MSG_CONFIRM) goto do_confirm; back_from_confirm: if (inet->hdrincl) err = raw_send_hdrinc(sk, &fl4, msg, len, &rt, msg->msg_flags, &ipc.sockc); else { sock_tx_timestamp(sk, ipc.sockc.tsflags, &ipc.tx_flags); if (!ipc.addr) ipc.addr = fl4.daddr; lock_sock(sk); err = ip_append_data(sk, &fl4, raw_getfrag, &rfv, len, 0, &ipc, &rt, msg->msg_flags); if (err) ip_flush_pending_frames(sk); else if (!(msg->msg_flags & MSG_MORE)) { err = ip_push_pending_frames(sk, &fl4); if (err == -ENOBUFS && !inet->recverr) err = 0; } release_sock(sk); } done: if (free) kfree(ipc.opt); ip_rt_put(rt); out: if (err < 0) return err; return len; do_confirm: if (msg->msg_flags & MSG_PROBE) dst_confirm_neigh(&rt->dst, &fl4.daddr); if (!(msg->msg_flags & MSG_PROBE) || len) goto back_from_confirm; err = 0; goto done; } static void raw_close(struct sock *sk, long timeout) { /* * Raw sockets may have direct kernel references. Kill them. */ rtnl_lock(); ip_ra_control(sk, 0, NULL); rtnl_unlock(); sk_common_release(sk); } static void raw_destroy(struct sock *sk) { lock_sock(sk); ip_flush_pending_frames(sk); release_sock(sk); } /* This gets rid of all the nasties in af_inet. -DaveM */ static int raw_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) { struct inet_sock *inet = inet_sk(sk); struct sockaddr_in *addr = (struct sockaddr_in *) uaddr; u32 tb_id = RT_TABLE_LOCAL; int ret = -EINVAL; int chk_addr_ret; if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_in)) goto out; if (sk->sk_bound_dev_if) tb_id = l3mdev_fib_table_by_index(sock_net(sk), sk->sk_bound_dev_if) ? 
: tb_id; chk_addr_ret = inet_addr_type_table(sock_net(sk), addr->sin_addr.s_addr, tb_id); ret = -EADDRNOTAVAIL; if (addr->sin_addr.s_addr && chk_addr_ret != RTN_LOCAL && chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST) goto out; inet->inet_rcv_saddr = inet->inet_saddr = addr->sin_addr.s_addr; if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST) inet->inet_saddr = 0; /* Use device */ sk_dst_reset(sk); ret = 0; out: return ret; } /* * This should be easy, if there is something there * we return it, otherwise we block. */ static int raw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock, int flags, int *addr_len) { struct inet_sock *inet = inet_sk(sk); size_t copied = 0; int err = -EOPNOTSUPP; DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name); struct sk_buff *skb; if (flags & MSG_OOB) goto out; if (flags & MSG_ERRQUEUE) { err = ip_recv_error(sk, msg, len, addr_len); goto out; } skb = skb_recv_datagram(sk, flags, noblock, &err); if (!skb) goto out; copied = skb->len; if (len < copied) { msg->msg_flags |= MSG_TRUNC; copied = len; } err = skb_copy_datagram_msg(skb, 0, msg, copied); if (err) goto done; sock_recv_ts_and_drops(msg, sk, skb); /* Copy the address. */ if (sin) { sin->sin_family = AF_INET; sin->sin_addr.s_addr = ip_hdr(skb)->saddr; sin->sin_port = 0; memset(&sin->sin_zero, 0, sizeof(sin->sin_zero)); *addr_len = sizeof(*sin); } if (inet->cmsg_flags) ip_cmsg_recv(msg, skb); if (flags & MSG_TRUNC) copied = skb->len; done: skb_free_datagram(sk, skb); out: if (err) return err; return copied; } static int raw_init(struct sock *sk) { struct raw_sock *rp = raw_sk(sk); if (inet_sk(sk)->inet_num == IPPROTO_ICMP) memset(&rp->filter, 0, sizeof(rp->filter)); return 0; } static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen) { if (optlen > sizeof(struct icmp_filter)) optlen = sizeof(struct icmp_filter); if (copy_from_user(&raw_sk(sk)->filter, optval, optlen)) return -EFAULT; return 0; } static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen) { int len, ret = -EFAULT; if (get_user(len, optlen)) goto out; ret = -EINVAL; if (len < 0) goto out; if (len > sizeof(struct icmp_filter)) len = sizeof(struct icmp_filter); ret = -EFAULT; if (put_user(len, optlen) || copy_to_user(optval, &raw_sk(sk)->filter, len)) goto out; ret = 0; out: return ret; } static int do_raw_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen) { if (optname == ICMP_FILTER) { if (inet_sk(sk)->inet_num != IPPROTO_ICMP) return -EOPNOTSUPP; else return raw_seticmpfilter(sk, optval, optlen); } return -ENOPROTOOPT; } static int raw_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen) { if (level != SOL_RAW) return ip_setsockopt(sk, level, optname, optval, optlen); return do_raw_setsockopt(sk, level, optname, optval, optlen); } #ifdef CONFIG_COMPAT static int compat_raw_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen) { if (level != SOL_RAW) return compat_ip_setsockopt(sk, level, optname, optval, optlen); return do_raw_setsockopt(sk, level, optname, optval, optlen); } #endif static int do_raw_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen) { if (optname == ICMP_FILTER) { if (inet_sk(sk)->inet_num != IPPROTO_ICMP) return -EOPNOTSUPP; else return raw_geticmpfilter(sk, optval, optlen); } return -ENOPROTOOPT; } static int raw_getsockopt(struct sock *sk, int level, 
int optname, char __user *optval, int __user *optlen) { if (level != SOL_RAW) return ip_getsockopt(sk, level, optname, optval, optlen); return do_raw_getsockopt(sk, level, optname, optval, optlen); } #ifdef CONFIG_COMPAT static int compat_raw_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen) { if (level != SOL_RAW) return compat_ip_getsockopt(sk, level, optname, optval, optlen); return do_raw_getsockopt(sk, level, optname, optval, optlen); } #endif static int raw_ioctl(struct sock *sk, int cmd, unsigned long arg) { switch (cmd) { case SIOCOUTQ: { int amount = sk_wmem_alloc_get(sk); return put_user(amount, (int __user *)arg); } case SIOCINQ: { struct sk_buff *skb; int amount = 0; spin_lock_bh(&sk->sk_receive_queue.lock); skb = skb_peek(&sk->sk_receive_queue); if (skb) amount = skb->len; spin_unlock_bh(&sk->sk_receive_queue.lock); return put_user(amount, (int __user *)arg); } default: #ifdef CONFIG_IP_MROUTE return ipmr_ioctl(sk, cmd, (void __user *)arg); #else return -ENOIOCTLCMD; #endif } } #ifdef CONFIG_COMPAT static int compat_raw_ioctl(struct sock *sk, unsigned int cmd, unsigned long arg) { switch (cmd) { case SIOCOUTQ: case SIOCINQ: return -ENOIOCTLCMD; default: #ifdef CONFIG_IP_MROUTE return ipmr_compat_ioctl(sk, cmd, compat_ptr(arg)); #else return -ENOIOCTLCMD; #endif } } #endif int raw_abort(struct sock *sk, int err) { lock_sock(sk); sk->sk_err = err; sk->sk_error_report(sk); __udp_disconnect(sk, 0); release_sock(sk); return 0; } EXPORT_SYMBOL_GPL(raw_abort); struct proto raw_prot = { .name = "RAW", .owner = THIS_MODULE, .close = raw_close, .destroy = raw_destroy, .connect = ip4_datagram_connect, .disconnect = __udp_disconnect, .ioctl = raw_ioctl, .init = raw_init, .setsockopt = raw_setsockopt, .getsockopt = raw_getsockopt, .sendmsg = raw_sendmsg, .recvmsg = raw_recvmsg, .bind = raw_bind, .backlog_rcv = raw_rcv_skb, .release_cb = ip4_datagram_release_cb, .hash = raw_hash_sk, .unhash = raw_unhash_sk, .obj_size = sizeof(struct raw_sock), .h.raw_hash = &raw_v4_hashinfo, #ifdef CONFIG_COMPAT .compat_setsockopt = compat_raw_setsockopt, .compat_getsockopt = compat_raw_getsockopt, .compat_ioctl = compat_raw_ioctl, #endif .diag_destroy = raw_abort, }; #ifdef CONFIG_PROC_FS static struct sock *raw_get_first(struct seq_file *seq) { struct sock *sk; struct raw_iter_state *state = raw_seq_private(seq); for (state->bucket = 0; state->bucket < RAW_HTABLE_SIZE; ++state->bucket) { sk_for_each(sk, &state->h->ht[state->bucket]) if (sock_net(sk) == seq_file_net(seq)) goto found; } sk = NULL; found: return sk; } static struct sock *raw_get_next(struct seq_file *seq, struct sock *sk) { struct raw_iter_state *state = raw_seq_private(seq); do { sk = sk_next(sk); try_again: ; } while (sk && sock_net(sk) != seq_file_net(seq)); if (!sk && ++state->bucket < RAW_HTABLE_SIZE) { sk = sk_head(&state->h->ht[state->bucket]); goto try_again; } return sk; } static struct sock *raw_get_idx(struct seq_file *seq, loff_t pos) { struct sock *sk = raw_get_first(seq); if (sk) while (pos && (sk = raw_get_next(seq, sk)) != NULL) --pos; return pos ? NULL : sk; } void *raw_seq_start(struct seq_file *seq, loff_t *pos) { struct raw_iter_state *state = raw_seq_private(seq); read_lock(&state->h->lock); return *pos ? 
raw_get_idx(seq, *pos - 1) : SEQ_START_TOKEN; } EXPORT_SYMBOL_GPL(raw_seq_start); void *raw_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct sock *sk; if (v == SEQ_START_TOKEN) sk = raw_get_first(seq); else sk = raw_get_next(seq, v); ++*pos; return sk; } EXPORT_SYMBOL_GPL(raw_seq_next); void raw_seq_stop(struct seq_file *seq, void *v) { struct raw_iter_state *state = raw_seq_private(seq); read_unlock(&state->h->lock); } EXPORT_SYMBOL_GPL(raw_seq_stop); static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i) { struct inet_sock *inet = inet_sk(sp); __be32 dest = inet->inet_daddr, src = inet->inet_rcv_saddr; __u16 destp = 0, srcp = inet->inet_num; seq_printf(seq, "%4d: %08X:%04X %08X:%04X" " %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d\n", i, src, srcp, dest, destp, sp->sk_state, sk_wmem_alloc_get(sp), sk_rmem_alloc_get(sp), 0, 0L, 0, from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)), 0, sock_i_ino(sp), refcount_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops)); } static int raw_seq_show(struct seq_file *seq, void *v) { if (v == SEQ_START_TOKEN) seq_printf(seq, " sl local_address rem_address st tx_queue " "rx_queue tr tm->when retrnsmt uid timeout " "inode ref pointer drops\n"); else raw_sock_seq_show(seq, v, raw_seq_private(seq)->bucket); return 0; } static const struct seq_operations raw_seq_ops = { .start = raw_seq_start, .next = raw_seq_next, .stop = raw_seq_stop, .show = raw_seq_show, }; int raw_seq_open(struct inode *ino, struct file *file, struct raw_hashinfo *h, const struct seq_operations *ops) { int err; struct raw_iter_state *i; err = seq_open_net(ino, file, ops, sizeof(struct raw_iter_state)); if (err < 0) return err; i = raw_seq_private((struct seq_file *)file->private_data); i->h = h; return 0; } EXPORT_SYMBOL_GPL(raw_seq_open); static int raw_v4_seq_open(struct inode *inode, struct file *file) { return raw_seq_open(inode, file, &raw_v4_hashinfo, &raw_seq_ops); } static const struct file_operations raw_seq_fops = { .owner = THIS_MODULE, .open = raw_v4_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_net, }; static __net_init int raw_init_net(struct net *net) { if (!proc_create("raw", S_IRUGO, net->proc_net, &raw_seq_fops)) return -ENOMEM; return 0; } static __net_exit void raw_exit_net(struct net *net) { remove_proc_entry("raw", net->proc_net); } static __net_initdata struct pernet_operations raw_net_ops = { .init = raw_init_net, .exit = raw_exit_net, }; int __init raw_proc_init(void) { return register_pernet_subsys(&raw_net_ops); } void __init raw_proc_exit(void) { unregister_pernet_subsys(&raw_net_ops); } #endif /* CONFIG_PROC_FS */
./CrossVul/dataset_final_sorted/CWE-362/c/bad_2970_0
crossvul-cpp_data_good_2970_0
/* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * RAW - implementation of IP "raw" sockets. * * Authors: Ross Biro * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> * * Fixes: * Alan Cox : verify_area() fixed up * Alan Cox : ICMP error handling * Alan Cox : EMSGSIZE if you send too big a packet * Alan Cox : Now uses generic datagrams and shared * skbuff library. No more peek crashes, * no more backlogs * Alan Cox : Checks sk->broadcast. * Alan Cox : Uses skb_free_datagram/skb_copy_datagram * Alan Cox : Raw passes ip options too * Alan Cox : Setsocketopt added * Alan Cox : Fixed error return for broadcasts * Alan Cox : Removed wake_up calls * Alan Cox : Use ttl/tos * Alan Cox : Cleaned up old debugging * Alan Cox : Use new kernel side addresses * Arnt Gulbrandsen : Fixed MSG_DONTROUTE in raw sockets. * Alan Cox : BSD style RAW socket demultiplexing. * Alan Cox : Beginnings of mrouted support. * Alan Cox : Added IP_HDRINCL option. * Alan Cox : Skip broadcast check if BSDism set. * David S. Miller : New socket lookup architecture. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/types.h> #include <linux/atomic.h> #include <asm/byteorder.h> #include <asm/current.h> #include <linux/uaccess.h> #include <asm/ioctls.h> #include <linux/stddef.h> #include <linux/slab.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/export.h> #include <linux/spinlock.h> #include <linux/sockios.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/mroute.h> #include <linux/netdevice.h> #include <linux/in_route.h> #include <linux/route.h> #include <linux/skbuff.h> #include <linux/igmp.h> #include <net/net_namespace.h> #include <net/dst.h> #include <net/sock.h> #include <linux/ip.h> #include <linux/net.h> #include <net/ip.h> #include <net/icmp.h> #include <net/udp.h> #include <net/raw.h> #include <net/snmp.h> #include <net/tcp_states.h> #include <net/inet_common.h> #include <net/checksum.h> #include <net/xfrm.h> #include <linux/rtnetlink.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/netfilter.h> #include <linux/netfilter_ipv4.h> #include <linux/compat.h> #include <linux/uio.h> struct raw_frag_vec { struct msghdr *msg; union { struct icmphdr icmph; char c[1]; } hdr; int hlen; }; struct raw_hashinfo raw_v4_hashinfo = { .lock = __RW_LOCK_UNLOCKED(raw_v4_hashinfo.lock), }; EXPORT_SYMBOL_GPL(raw_v4_hashinfo); int raw_hash_sk(struct sock *sk) { struct raw_hashinfo *h = sk->sk_prot->h.raw_hash; struct hlist_head *head; head = &h->ht[inet_sk(sk)->inet_num & (RAW_HTABLE_SIZE - 1)]; write_lock_bh(&h->lock); sk_add_node(sk, head); sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); write_unlock_bh(&h->lock); return 0; } EXPORT_SYMBOL_GPL(raw_hash_sk); void raw_unhash_sk(struct sock *sk) { struct raw_hashinfo *h = sk->sk_prot->h.raw_hash; write_lock_bh(&h->lock); if (sk_del_node_init(sk)) sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); write_unlock_bh(&h->lock); } EXPORT_SYMBOL_GPL(raw_unhash_sk); struct sock *__raw_v4_lookup(struct net *net, struct sock *sk, unsigned short num, __be32 raddr, __be32 laddr, int dif, int sdif) { sk_for_each_from(sk) { struct inet_sock *inet = inet_sk(sk); if 
(net_eq(sock_net(sk), net) && inet->inet_num == num && !(inet->inet_daddr && inet->inet_daddr != raddr) && !(inet->inet_rcv_saddr && inet->inet_rcv_saddr != laddr) && !(sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif && sk->sk_bound_dev_if != sdif)) goto found; /* gotcha */ } sk = NULL; found: return sk; } EXPORT_SYMBOL_GPL(__raw_v4_lookup); /* * 0 - deliver * 1 - block */ static int icmp_filter(const struct sock *sk, const struct sk_buff *skb) { struct icmphdr _hdr; const struct icmphdr *hdr; hdr = skb_header_pointer(skb, skb_transport_offset(skb), sizeof(_hdr), &_hdr); if (!hdr) return 1; if (hdr->type < 32) { __u32 data = raw_sk(sk)->filter.data; return ((1U << hdr->type) & data) != 0; } /* Do not block unknown ICMP types */ return 0; } /* IP input processing comes here for RAW socket delivery. * Caller owns SKB, so we must make clones. * * RFC 1122: SHOULD pass TOS value up to the transport layer. * -> It does. And not only TOS, but all IP header. */ static int raw_v4_input(struct sk_buff *skb, const struct iphdr *iph, int hash) { int sdif = inet_sdif(skb); struct sock *sk; struct hlist_head *head; int delivered = 0; struct net *net; read_lock(&raw_v4_hashinfo.lock); head = &raw_v4_hashinfo.ht[hash]; if (hlist_empty(head)) goto out; net = dev_net(skb->dev); sk = __raw_v4_lookup(net, __sk_head(head), iph->protocol, iph->saddr, iph->daddr, skb->dev->ifindex, sdif); while (sk) { delivered = 1; if ((iph->protocol != IPPROTO_ICMP || !icmp_filter(sk, skb)) && ip_mc_sf_allow(sk, iph->daddr, iph->saddr, skb->dev->ifindex, sdif)) { struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC); /* Not releasing hash table! */ if (clone) raw_rcv(sk, clone); } sk = __raw_v4_lookup(net, sk_next(sk), iph->protocol, iph->saddr, iph->daddr, skb->dev->ifindex, sdif); } out: read_unlock(&raw_v4_hashinfo.lock); return delivered; } int raw_local_deliver(struct sk_buff *skb, int protocol) { int hash; struct sock *raw_sk; hash = protocol & (RAW_HTABLE_SIZE - 1); raw_sk = sk_head(&raw_v4_hashinfo.ht[hash]); /* If there maybe a raw socket we must check - if not we * don't care less */ if (raw_sk && !raw_v4_input(skb, ip_hdr(skb), hash)) raw_sk = NULL; return raw_sk != NULL; } static void raw_err(struct sock *sk, struct sk_buff *skb, u32 info) { struct inet_sock *inet = inet_sk(sk); const int type = icmp_hdr(skb)->type; const int code = icmp_hdr(skb)->code; int err = 0; int harderr = 0; if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) ipv4_sk_update_pmtu(skb, sk, info); else if (type == ICMP_REDIRECT) { ipv4_sk_redirect(skb, sk); return; } /* Report error on raw socket, if: 1. User requested ip_recverr. 2. Socket is connected (otherwise the error indication is useless without ip_recverr and error is hard. 
*/ if (!inet->recverr && sk->sk_state != TCP_ESTABLISHED) return; switch (type) { default: case ICMP_TIME_EXCEEDED: err = EHOSTUNREACH; break; case ICMP_SOURCE_QUENCH: return; case ICMP_PARAMETERPROB: err = EPROTO; harderr = 1; break; case ICMP_DEST_UNREACH: err = EHOSTUNREACH; if (code > NR_ICMP_UNREACH) break; err = icmp_err_convert[code].errno; harderr = icmp_err_convert[code].fatal; if (code == ICMP_FRAG_NEEDED) { harderr = inet->pmtudisc != IP_PMTUDISC_DONT; err = EMSGSIZE; } } if (inet->recverr) { const struct iphdr *iph = (const struct iphdr *)skb->data; u8 *payload = skb->data + (iph->ihl << 2); if (inet->hdrincl) payload = skb->data; ip_icmp_error(sk, skb, err, 0, info, payload); } if (inet->recverr || harderr) { sk->sk_err = err; sk->sk_error_report(sk); } } void raw_icmp_error(struct sk_buff *skb, int protocol, u32 info) { int hash; struct sock *raw_sk; const struct iphdr *iph; struct net *net; hash = protocol & (RAW_HTABLE_SIZE - 1); read_lock(&raw_v4_hashinfo.lock); raw_sk = sk_head(&raw_v4_hashinfo.ht[hash]); if (raw_sk) { int dif = skb->dev->ifindex; int sdif = inet_sdif(skb); iph = (const struct iphdr *)skb->data; net = dev_net(skb->dev); while ((raw_sk = __raw_v4_lookup(net, raw_sk, protocol, iph->daddr, iph->saddr, dif, sdif)) != NULL) { raw_err(raw_sk, skb, info); raw_sk = sk_next(raw_sk); iph = (const struct iphdr *)skb->data; } } read_unlock(&raw_v4_hashinfo.lock); } static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb) { /* Charge it to the socket. */ ipv4_pktinfo_prepare(sk, skb); if (sock_queue_rcv_skb(sk, skb) < 0) { kfree_skb(skb); return NET_RX_DROP; } return NET_RX_SUCCESS; } int raw_rcv(struct sock *sk, struct sk_buff *skb) { if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) { atomic_inc(&sk->sk_drops); kfree_skb(skb); return NET_RX_DROP; } nf_reset(skb); skb_push(skb, skb->data - skb_network_header(skb)); raw_rcv_skb(sk, skb); return 0; } static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4, struct msghdr *msg, size_t length, struct rtable **rtp, unsigned int flags, const struct sockcm_cookie *sockc) { struct inet_sock *inet = inet_sk(sk); struct net *net = sock_net(sk); struct iphdr *iph; struct sk_buff *skb; unsigned int iphlen; int err; struct rtable *rt = *rtp; int hlen, tlen; if (length > rt->dst.dev->mtu) { ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport, rt->dst.dev->mtu); return -EMSGSIZE; } if (length < sizeof(struct iphdr)) return -EINVAL; if (flags&MSG_PROBE) goto out; hlen = LL_RESERVED_SPACE(rt->dst.dev); tlen = rt->dst.dev->needed_tailroom; skb = sock_alloc_send_skb(sk, length + hlen + tlen + 15, flags & MSG_DONTWAIT, &err); if (!skb) goto error; skb_reserve(skb, hlen); skb->priority = sk->sk_priority; skb->mark = sk->sk_mark; skb_dst_set(skb, &rt->dst); *rtp = NULL; skb_reset_network_header(skb); iph = ip_hdr(skb); skb_put(skb, length); skb->ip_summed = CHECKSUM_NONE; sock_tx_timestamp(sk, sockc->tsflags, &skb_shinfo(skb)->tx_flags); if (flags & MSG_CONFIRM) skb_set_dst_pending_confirm(skb, 1); skb->transport_header = skb->network_header; err = -EFAULT; if (memcpy_from_msg(iph, msg, length)) goto error_free; iphlen = iph->ihl * 4; /* * We don't want to modify the ip header, but we do need to * be sure that it won't cause problems later along the network * stack. Specifically we want to make sure that iph->ihl is a * sane value. 
If ihl points beyond the length of the buffer passed * in, reject the frame as invalid */ err = -EINVAL; if (iphlen > length) goto error_free; if (iphlen >= sizeof(*iph)) { if (!iph->saddr) iph->saddr = fl4->saddr; iph->check = 0; iph->tot_len = htons(length); if (!iph->id) ip_select_ident(net, skb, NULL); iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl); skb->transport_header += iphlen; if (iph->protocol == IPPROTO_ICMP && length >= iphlen + sizeof(struct icmphdr)) icmp_out_count(net, ((struct icmphdr *) skb_transport_header(skb))->type); } err = NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk, skb, NULL, rt->dst.dev, dst_output); if (err > 0) err = net_xmit_errno(err); if (err) goto error; out: return 0; error_free: kfree_skb(skb); error: IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS); if (err == -ENOBUFS && !inet->recverr) err = 0; return err; } static int raw_probe_proto_opt(struct raw_frag_vec *rfv, struct flowi4 *fl4) { int err; if (fl4->flowi4_proto != IPPROTO_ICMP) return 0; /* We only need the first two bytes. */ rfv->hlen = 2; err = memcpy_from_msg(rfv->hdr.c, rfv->msg, rfv->hlen); if (err) return err; fl4->fl4_icmp_type = rfv->hdr.icmph.type; fl4->fl4_icmp_code = rfv->hdr.icmph.code; return 0; } static int raw_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb) { struct raw_frag_vec *rfv = from; if (offset < rfv->hlen) { int copy = min(rfv->hlen - offset, len); if (skb->ip_summed == CHECKSUM_PARTIAL) memcpy(to, rfv->hdr.c + offset, copy); else skb->csum = csum_block_add( skb->csum, csum_partial_copy_nocheck(rfv->hdr.c + offset, to, copy, 0), odd); odd = 0; offset += copy; to += copy; len -= copy; if (!len) return 0; } offset -= rfv->hlen; return ip_generic_getfrag(rfv->msg, to, offset, len, odd, skb); } static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) { struct inet_sock *inet = inet_sk(sk); struct net *net = sock_net(sk); struct ipcm_cookie ipc; struct rtable *rt = NULL; struct flowi4 fl4; int free = 0; __be32 daddr; __be32 saddr; u8 tos; int err; struct ip_options_data opt_copy; struct raw_frag_vec rfv; int hdrincl; err = -EMSGSIZE; if (len > 0xFFFF) goto out; /* hdrincl should be READ_ONCE(inet->hdrincl) * but READ_ONCE() doesn't work with bit fields */ hdrincl = inet->hdrincl; /* * Check the flags. */ err = -EOPNOTSUPP; if (msg->msg_flags & MSG_OOB) /* Mirror BSD error message */ goto out; /* compatibility */ /* * Get and verify the address. */ if (msg->msg_namelen) { DECLARE_SOCKADDR(struct sockaddr_in *, usin, msg->msg_name); err = -EINVAL; if (msg->msg_namelen < sizeof(*usin)) goto out; if (usin->sin_family != AF_INET) { pr_info_once("%s: %s forgot to set AF_INET. Fix it!\n", __func__, current->comm); err = -EAFNOSUPPORT; if (usin->sin_family) goto out; } daddr = usin->sin_addr.s_addr; /* ANK: I did not forget to get protocol from port field. * I just do not know, who uses this weirdness. * IP_HDRINCL is much more convenient. 
*/ } else { err = -EDESTADDRREQ; if (sk->sk_state != TCP_ESTABLISHED) goto out; daddr = inet->inet_daddr; } ipc.sockc.tsflags = sk->sk_tsflags; ipc.addr = inet->inet_saddr; ipc.opt = NULL; ipc.tx_flags = 0; ipc.ttl = 0; ipc.tos = -1; ipc.oif = sk->sk_bound_dev_if; if (msg->msg_controllen) { err = ip_cmsg_send(sk, msg, &ipc, false); if (unlikely(err)) { kfree(ipc.opt); goto out; } if (ipc.opt) free = 1; } saddr = ipc.addr; ipc.addr = daddr; if (!ipc.opt) { struct ip_options_rcu *inet_opt; rcu_read_lock(); inet_opt = rcu_dereference(inet->inet_opt); if (inet_opt) { memcpy(&opt_copy, inet_opt, sizeof(*inet_opt) + inet_opt->opt.optlen); ipc.opt = &opt_copy.opt; } rcu_read_unlock(); } if (ipc.opt) { err = -EINVAL; /* Linux does not mangle headers on raw sockets, * so that IP options + IP_HDRINCL is non-sense. */ if (hdrincl) goto done; if (ipc.opt->opt.srr) { if (!daddr) goto done; daddr = ipc.opt->opt.faddr; } } tos = get_rtconn_flags(&ipc, sk); if (msg->msg_flags & MSG_DONTROUTE) tos |= RTO_ONLINK; if (ipv4_is_multicast(daddr)) { if (!ipc.oif) ipc.oif = inet->mc_index; if (!saddr) saddr = inet->mc_addr; } else if (!ipc.oif) ipc.oif = inet->uc_index; flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos, RT_SCOPE_UNIVERSE, hdrincl ? IPPROTO_RAW : sk->sk_protocol, inet_sk_flowi_flags(sk) | (hdrincl ? FLOWI_FLAG_KNOWN_NH : 0), daddr, saddr, 0, 0, sk->sk_uid); if (!hdrincl) { rfv.msg = msg; rfv.hlen = 0; err = raw_probe_proto_opt(&rfv, &fl4); if (err) goto done; } security_sk_classify_flow(sk, flowi4_to_flowi(&fl4)); rt = ip_route_output_flow(net, &fl4, sk); if (IS_ERR(rt)) { err = PTR_ERR(rt); rt = NULL; goto done; } err = -EACCES; if (rt->rt_flags & RTCF_BROADCAST && !sock_flag(sk, SOCK_BROADCAST)) goto done; if (msg->msg_flags & MSG_CONFIRM) goto do_confirm; back_from_confirm: if (hdrincl) err = raw_send_hdrinc(sk, &fl4, msg, len, &rt, msg->msg_flags, &ipc.sockc); else { sock_tx_timestamp(sk, ipc.sockc.tsflags, &ipc.tx_flags); if (!ipc.addr) ipc.addr = fl4.daddr; lock_sock(sk); err = ip_append_data(sk, &fl4, raw_getfrag, &rfv, len, 0, &ipc, &rt, msg->msg_flags); if (err) ip_flush_pending_frames(sk); else if (!(msg->msg_flags & MSG_MORE)) { err = ip_push_pending_frames(sk, &fl4); if (err == -ENOBUFS && !inet->recverr) err = 0; } release_sock(sk); } done: if (free) kfree(ipc.opt); ip_rt_put(rt); out: if (err < 0) return err; return len; do_confirm: if (msg->msg_flags & MSG_PROBE) dst_confirm_neigh(&rt->dst, &fl4.daddr); if (!(msg->msg_flags & MSG_PROBE) || len) goto back_from_confirm; err = 0; goto done; } static void raw_close(struct sock *sk, long timeout) { /* * Raw sockets may have direct kernel references. Kill them. */ rtnl_lock(); ip_ra_control(sk, 0, NULL); rtnl_unlock(); sk_common_release(sk); } static void raw_destroy(struct sock *sk) { lock_sock(sk); ip_flush_pending_frames(sk); release_sock(sk); } /* This gets rid of all the nasties in af_inet. -DaveM */ static int raw_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) { struct inet_sock *inet = inet_sk(sk); struct sockaddr_in *addr = (struct sockaddr_in *) uaddr; u32 tb_id = RT_TABLE_LOCAL; int ret = -EINVAL; int chk_addr_ret; if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_in)) goto out; if (sk->sk_bound_dev_if) tb_id = l3mdev_fib_table_by_index(sock_net(sk), sk->sk_bound_dev_if) ? 
: tb_id; chk_addr_ret = inet_addr_type_table(sock_net(sk), addr->sin_addr.s_addr, tb_id); ret = -EADDRNOTAVAIL; if (addr->sin_addr.s_addr && chk_addr_ret != RTN_LOCAL && chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST) goto out; inet->inet_rcv_saddr = inet->inet_saddr = addr->sin_addr.s_addr; if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST) inet->inet_saddr = 0; /* Use device */ sk_dst_reset(sk); ret = 0; out: return ret; } /* * This should be easy, if there is something there * we return it, otherwise we block. */ static int raw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock, int flags, int *addr_len) { struct inet_sock *inet = inet_sk(sk); size_t copied = 0; int err = -EOPNOTSUPP; DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name); struct sk_buff *skb; if (flags & MSG_OOB) goto out; if (flags & MSG_ERRQUEUE) { err = ip_recv_error(sk, msg, len, addr_len); goto out; } skb = skb_recv_datagram(sk, flags, noblock, &err); if (!skb) goto out; copied = skb->len; if (len < copied) { msg->msg_flags |= MSG_TRUNC; copied = len; } err = skb_copy_datagram_msg(skb, 0, msg, copied); if (err) goto done; sock_recv_ts_and_drops(msg, sk, skb); /* Copy the address. */ if (sin) { sin->sin_family = AF_INET; sin->sin_addr.s_addr = ip_hdr(skb)->saddr; sin->sin_port = 0; memset(&sin->sin_zero, 0, sizeof(sin->sin_zero)); *addr_len = sizeof(*sin); } if (inet->cmsg_flags) ip_cmsg_recv(msg, skb); if (flags & MSG_TRUNC) copied = skb->len; done: skb_free_datagram(sk, skb); out: if (err) return err; return copied; } static int raw_init(struct sock *sk) { struct raw_sock *rp = raw_sk(sk); if (inet_sk(sk)->inet_num == IPPROTO_ICMP) memset(&rp->filter, 0, sizeof(rp->filter)); return 0; } static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen) { if (optlen > sizeof(struct icmp_filter)) optlen = sizeof(struct icmp_filter); if (copy_from_user(&raw_sk(sk)->filter, optval, optlen)) return -EFAULT; return 0; } static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen) { int len, ret = -EFAULT; if (get_user(len, optlen)) goto out; ret = -EINVAL; if (len < 0) goto out; if (len > sizeof(struct icmp_filter)) len = sizeof(struct icmp_filter); ret = -EFAULT; if (put_user(len, optlen) || copy_to_user(optval, &raw_sk(sk)->filter, len)) goto out; ret = 0; out: return ret; } static int do_raw_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen) { if (optname == ICMP_FILTER) { if (inet_sk(sk)->inet_num != IPPROTO_ICMP) return -EOPNOTSUPP; else return raw_seticmpfilter(sk, optval, optlen); } return -ENOPROTOOPT; } static int raw_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen) { if (level != SOL_RAW) return ip_setsockopt(sk, level, optname, optval, optlen); return do_raw_setsockopt(sk, level, optname, optval, optlen); } #ifdef CONFIG_COMPAT static int compat_raw_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen) { if (level != SOL_RAW) return compat_ip_setsockopt(sk, level, optname, optval, optlen); return do_raw_setsockopt(sk, level, optname, optval, optlen); } #endif static int do_raw_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen) { if (optname == ICMP_FILTER) { if (inet_sk(sk)->inet_num != IPPROTO_ICMP) return -EOPNOTSUPP; else return raw_geticmpfilter(sk, optval, optlen); } return -ENOPROTOOPT; } static int raw_getsockopt(struct sock *sk, int level, 
int optname, char __user *optval, int __user *optlen) { if (level != SOL_RAW) return ip_getsockopt(sk, level, optname, optval, optlen); return do_raw_getsockopt(sk, level, optname, optval, optlen); } #ifdef CONFIG_COMPAT static int compat_raw_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen) { if (level != SOL_RAW) return compat_ip_getsockopt(sk, level, optname, optval, optlen); return do_raw_getsockopt(sk, level, optname, optval, optlen); } #endif static int raw_ioctl(struct sock *sk, int cmd, unsigned long arg) { switch (cmd) { case SIOCOUTQ: { int amount = sk_wmem_alloc_get(sk); return put_user(amount, (int __user *)arg); } case SIOCINQ: { struct sk_buff *skb; int amount = 0; spin_lock_bh(&sk->sk_receive_queue.lock); skb = skb_peek(&sk->sk_receive_queue); if (skb) amount = skb->len; spin_unlock_bh(&sk->sk_receive_queue.lock); return put_user(amount, (int __user *)arg); } default: #ifdef CONFIG_IP_MROUTE return ipmr_ioctl(sk, cmd, (void __user *)arg); #else return -ENOIOCTLCMD; #endif } } #ifdef CONFIG_COMPAT static int compat_raw_ioctl(struct sock *sk, unsigned int cmd, unsigned long arg) { switch (cmd) { case SIOCOUTQ: case SIOCINQ: return -ENOIOCTLCMD; default: #ifdef CONFIG_IP_MROUTE return ipmr_compat_ioctl(sk, cmd, compat_ptr(arg)); #else return -ENOIOCTLCMD; #endif } } #endif int raw_abort(struct sock *sk, int err) { lock_sock(sk); sk->sk_err = err; sk->sk_error_report(sk); __udp_disconnect(sk, 0); release_sock(sk); return 0; } EXPORT_SYMBOL_GPL(raw_abort); struct proto raw_prot = { .name = "RAW", .owner = THIS_MODULE, .close = raw_close, .destroy = raw_destroy, .connect = ip4_datagram_connect, .disconnect = __udp_disconnect, .ioctl = raw_ioctl, .init = raw_init, .setsockopt = raw_setsockopt, .getsockopt = raw_getsockopt, .sendmsg = raw_sendmsg, .recvmsg = raw_recvmsg, .bind = raw_bind, .backlog_rcv = raw_rcv_skb, .release_cb = ip4_datagram_release_cb, .hash = raw_hash_sk, .unhash = raw_unhash_sk, .obj_size = sizeof(struct raw_sock), .h.raw_hash = &raw_v4_hashinfo, #ifdef CONFIG_COMPAT .compat_setsockopt = compat_raw_setsockopt, .compat_getsockopt = compat_raw_getsockopt, .compat_ioctl = compat_raw_ioctl, #endif .diag_destroy = raw_abort, }; #ifdef CONFIG_PROC_FS static struct sock *raw_get_first(struct seq_file *seq) { struct sock *sk; struct raw_iter_state *state = raw_seq_private(seq); for (state->bucket = 0; state->bucket < RAW_HTABLE_SIZE; ++state->bucket) { sk_for_each(sk, &state->h->ht[state->bucket]) if (sock_net(sk) == seq_file_net(seq)) goto found; } sk = NULL; found: return sk; } static struct sock *raw_get_next(struct seq_file *seq, struct sock *sk) { struct raw_iter_state *state = raw_seq_private(seq); do { sk = sk_next(sk); try_again: ; } while (sk && sock_net(sk) != seq_file_net(seq)); if (!sk && ++state->bucket < RAW_HTABLE_SIZE) { sk = sk_head(&state->h->ht[state->bucket]); goto try_again; } return sk; } static struct sock *raw_get_idx(struct seq_file *seq, loff_t pos) { struct sock *sk = raw_get_first(seq); if (sk) while (pos && (sk = raw_get_next(seq, sk)) != NULL) --pos; return pos ? NULL : sk; } void *raw_seq_start(struct seq_file *seq, loff_t *pos) { struct raw_iter_state *state = raw_seq_private(seq); read_lock(&state->h->lock); return *pos ? 
raw_get_idx(seq, *pos - 1) : SEQ_START_TOKEN; } EXPORT_SYMBOL_GPL(raw_seq_start); void *raw_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct sock *sk; if (v == SEQ_START_TOKEN) sk = raw_get_first(seq); else sk = raw_get_next(seq, v); ++*pos; return sk; } EXPORT_SYMBOL_GPL(raw_seq_next); void raw_seq_stop(struct seq_file *seq, void *v) { struct raw_iter_state *state = raw_seq_private(seq); read_unlock(&state->h->lock); } EXPORT_SYMBOL_GPL(raw_seq_stop); static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i) { struct inet_sock *inet = inet_sk(sp); __be32 dest = inet->inet_daddr, src = inet->inet_rcv_saddr; __u16 destp = 0, srcp = inet->inet_num; seq_printf(seq, "%4d: %08X:%04X %08X:%04X" " %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d\n", i, src, srcp, dest, destp, sp->sk_state, sk_wmem_alloc_get(sp), sk_rmem_alloc_get(sp), 0, 0L, 0, from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)), 0, sock_i_ino(sp), refcount_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops)); } static int raw_seq_show(struct seq_file *seq, void *v) { if (v == SEQ_START_TOKEN) seq_printf(seq, " sl local_address rem_address st tx_queue " "rx_queue tr tm->when retrnsmt uid timeout " "inode ref pointer drops\n"); else raw_sock_seq_show(seq, v, raw_seq_private(seq)->bucket); return 0; } static const struct seq_operations raw_seq_ops = { .start = raw_seq_start, .next = raw_seq_next, .stop = raw_seq_stop, .show = raw_seq_show, }; int raw_seq_open(struct inode *ino, struct file *file, struct raw_hashinfo *h, const struct seq_operations *ops) { int err; struct raw_iter_state *i; err = seq_open_net(ino, file, ops, sizeof(struct raw_iter_state)); if (err < 0) return err; i = raw_seq_private((struct seq_file *)file->private_data); i->h = h; return 0; } EXPORT_SYMBOL_GPL(raw_seq_open); static int raw_v4_seq_open(struct inode *inode, struct file *file) { return raw_seq_open(inode, file, &raw_v4_hashinfo, &raw_seq_ops); } static const struct file_operations raw_seq_fops = { .owner = THIS_MODULE, .open = raw_v4_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_net, }; static __net_init int raw_init_net(struct net *net) { if (!proc_create("raw", S_IRUGO, net->proc_net, &raw_seq_fops)) return -ENOMEM; return 0; } static __net_exit void raw_exit_net(struct net *net) { remove_proc_entry("raw", net->proc_net); } static __net_initdata struct pernet_operations raw_net_ops = { .init = raw_init_net, .exit = raw_exit_net, }; int __init raw_proc_init(void) { return register_pernet_subsys(&raw_net_ops); } void __init raw_proc_exit(void) { unregister_pernet_subsys(&raw_net_ops); } #endif /* CONFIG_PROC_FS */
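/*
 * Illustrative user-space sketch (an addition, not part of the kernel file
 * above): it drives the ICMP_FILTER handling implemented by
 * raw_seticmpfilter()/raw_geticmpfilter() and dispatched through
 * do_raw_setsockopt()/do_raw_getsockopt() at SOL_RAW, as documented in
 * raw(7).  Assumes a Linux host and CAP_NET_RAW (e.g. run as root); the
 * constants and struct icmp_filter come from the regular UAPI headers.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/icmp.h>		/* struct icmp_filter, ICMP_FILTER, ICMP_ECHOREPLY */

int main(void)
{
	struct icmp_filter filt;
	socklen_t len = sizeof(filt);
	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_ICMP);

	if (fd < 0) {
		perror("socket(AF_INET, SOCK_RAW, IPPROTO_ICMP)");	/* needs CAP_NET_RAW */
		return 1;
	}

	/* A set bit means "filter this ICMP type out"; keep only echo replies. */
	filt.data = ~(1U << ICMP_ECHOREPLY);
	if (setsockopt(fd, SOL_RAW, ICMP_FILTER, &filt, sizeof(filt)) < 0)
		perror("setsockopt(ICMP_FILTER)");

	/* Read the mask back; this path goes through raw_geticmpfilter() above. */
	filt.data = 0;
	if (getsockopt(fd, SOL_RAW, ICMP_FILTER, &filt, &len) == 0)
		printf("ICMP_FILTER mask: 0x%08x\n", (unsigned int)filt.data);

	close(fd);
	return 0;
}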
./CrossVul/dataset_final_sorted/CWE-362/c/good_2970_0
crossvul-cpp_data_good_1578_2
/* SCTP kernel implementation * (C) Copyright IBM Corp. 2001, 2004 * Copyright (c) 1999-2000 Cisco, Inc. * Copyright (c) 1999-2001 Motorola, Inc. * Copyright (c) 2001-2003 Intel Corp. * Copyright (c) 2001-2002 Nokia, Inc. * Copyright (c) 2001 La Monte H.P. Yarroll * * This file is part of the SCTP kernel implementation * * These functions interface with the sockets layer to implement the * SCTP Extensions for the Sockets API. * * Note that the descriptions from the specification are USER level * functions--this file is the functions which populate the struct proto * for SCTP which is the BOTTOM of the sockets interface. * * This SCTP implementation is free software; * you can redistribute it and/or modify it under the terms of * the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This SCTP implementation is distributed in the hope that it * will be useful, but WITHOUT ANY WARRANTY; without even the implied * ************************ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * See the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with GNU CC; see the file COPYING. If not, see * <http://www.gnu.org/licenses/>. * * Please send any bug reports or fixes you make to the * email address(es): * lksctp developers <linux-sctp@vger.kernel.org> * * Written or modified by: * La Monte H.P. Yarroll <piggy@acm.org> * Narasimha Budihal <narsi@refcode.org> * Karl Knutson <karl@athena.chicago.il.us> * Jon Grimm <jgrimm@us.ibm.com> * Xingang Guo <xingang.guo@intel.com> * Daisy Chang <daisyc@us.ibm.com> * Sridhar Samudrala <samudrala@us.ibm.com> * Inaky Perez-Gonzalez <inaky.gonzalez@intel.com> * Ardelle Fan <ardelle.fan@intel.com> * Ryan Layer <rmlayer@us.ibm.com> * Anup Pemmaiah <pemmaiah@cc.usu.edu> * Kevin Gao <kevin.gao@intel.com> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/types.h> #include <linux/kernel.h> #include <linux/wait.h> #include <linux/time.h> #include <linux/ip.h> #include <linux/capability.h> #include <linux/fcntl.h> #include <linux/poll.h> #include <linux/init.h> #include <linux/crypto.h> #include <linux/slab.h> #include <linux/file.h> #include <linux/compat.h> #include <net/ip.h> #include <net/icmp.h> #include <net/route.h> #include <net/ipv6.h> #include <net/inet_common.h> #include <net/busy_poll.h> #include <linux/socket.h> /* for sa_family_t */ #include <linux/export.h> #include <net/sock.h> #include <net/sctp/sctp.h> #include <net/sctp/sm.h> /* Forward declarations for internal helper functions. 
*/ static int sctp_writeable(struct sock *sk); static void sctp_wfree(struct sk_buff *skb); static int sctp_wait_for_sndbuf(struct sctp_association *, long *timeo_p, size_t msg_len); static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p); static int sctp_wait_for_connect(struct sctp_association *, long *timeo_p); static int sctp_wait_for_accept(struct sock *sk, long timeo); static void sctp_wait_for_close(struct sock *sk, long timeo); static void sctp_destruct_sock(struct sock *sk); static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt, union sctp_addr *addr, int len); static int sctp_bindx_add(struct sock *, struct sockaddr *, int); static int sctp_bindx_rem(struct sock *, struct sockaddr *, int); static int sctp_send_asconf_add_ip(struct sock *, struct sockaddr *, int); static int sctp_send_asconf_del_ip(struct sock *, struct sockaddr *, int); static int sctp_send_asconf(struct sctp_association *asoc, struct sctp_chunk *chunk); static int sctp_do_bind(struct sock *, union sctp_addr *, int); static int sctp_autobind(struct sock *sk); static void sctp_sock_migrate(struct sock *, struct sock *, struct sctp_association *, sctp_socket_type_t); static int sctp_memory_pressure; static atomic_long_t sctp_memory_allocated; struct percpu_counter sctp_sockets_allocated; static void sctp_enter_memory_pressure(struct sock *sk) { sctp_memory_pressure = 1; } /* Get the sndbuf space available at the time on the association. */ static inline int sctp_wspace(struct sctp_association *asoc) { int amt; if (asoc->ep->sndbuf_policy) amt = asoc->sndbuf_used; else amt = sk_wmem_alloc_get(asoc->base.sk); if (amt >= asoc->base.sk->sk_sndbuf) { if (asoc->base.sk->sk_userlocks & SOCK_SNDBUF_LOCK) amt = 0; else { amt = sk_stream_wspace(asoc->base.sk); if (amt < 0) amt = 0; } } else { amt = asoc->base.sk->sk_sndbuf - amt; } return amt; } /* Increment the used sndbuf space count of the corresponding association by * the size of the outgoing data chunk. * Also, set the skb destructor for sndbuf accounting later. * * Since it is always 1-1 between chunk and skb, and also a new skb is always * allocated for chunk bundling in sctp_packet_transmit(), we can use the * destructor in the data chunk skb for the purpose of the sndbuf space * tracking. */ static inline void sctp_set_owner_w(struct sctp_chunk *chunk) { struct sctp_association *asoc = chunk->asoc; struct sock *sk = asoc->base.sk; /* The sndbuf space is tracked per association. */ sctp_association_hold(asoc); skb_set_owner_w(chunk->skb, sk); chunk->skb->destructor = sctp_wfree; /* Save the chunk pointer in skb for sctp_wfree to use later. */ skb_shinfo(chunk->skb)->destructor_arg = chunk; asoc->sndbuf_used += SCTP_DATA_SNDSIZE(chunk) + sizeof(struct sk_buff) + sizeof(struct sctp_chunk); atomic_add(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc); sk->sk_wmem_queued += chunk->skb->truesize; sk_mem_charge(sk, chunk->skb->truesize); } /* Verify that this is a valid address. */ static inline int sctp_verify_addr(struct sock *sk, union sctp_addr *addr, int len) { struct sctp_af *af; /* Verify basic sockaddr. */ af = sctp_sockaddr_af(sctp_sk(sk), addr, len); if (!af) return -EINVAL; /* Is this a valid SCTP address? */ if (!af->addr_valid(addr, sctp_sk(sk), NULL)) return -EINVAL; if (!sctp_sk(sk)->pf->send_verify(sctp_sk(sk), (addr))) return -EINVAL; return 0; } /* Look up the association by its id. If this is not a UDP-style * socket, the ID field is always ignored. 
*/ struct sctp_association *sctp_id2assoc(struct sock *sk, sctp_assoc_t id) { struct sctp_association *asoc = NULL; /* If this is not a UDP-style socket, assoc id should be ignored. */ if (!sctp_style(sk, UDP)) { /* Return NULL if the socket state is not ESTABLISHED. It * could be a TCP-style listening socket or a socket which * hasn't yet called connect() to establish an association. */ if (!sctp_sstate(sk, ESTABLISHED)) return NULL; /* Get the first and the only association from the list. */ if (!list_empty(&sctp_sk(sk)->ep->asocs)) asoc = list_entry(sctp_sk(sk)->ep->asocs.next, struct sctp_association, asocs); return asoc; } /* Otherwise this is a UDP-style socket. */ if (!id || (id == (sctp_assoc_t)-1)) return NULL; spin_lock_bh(&sctp_assocs_id_lock); asoc = (struct sctp_association *)idr_find(&sctp_assocs_id, (int)id); spin_unlock_bh(&sctp_assocs_id_lock); if (!asoc || (asoc->base.sk != sk) || asoc->base.dead) return NULL; return asoc; } /* Look up the transport from an address and an assoc id. If both address and * id are specified, the associations matching the address and the id should be * the same. */ static struct sctp_transport *sctp_addr_id2transport(struct sock *sk, struct sockaddr_storage *addr, sctp_assoc_t id) { struct sctp_association *addr_asoc = NULL, *id_asoc = NULL; struct sctp_transport *transport; union sctp_addr *laddr = (union sctp_addr *)addr; addr_asoc = sctp_endpoint_lookup_assoc(sctp_sk(sk)->ep, laddr, &transport); if (!addr_asoc) return NULL; id_asoc = sctp_id2assoc(sk, id); if (id_asoc && (id_asoc != addr_asoc)) return NULL; sctp_get_pf_specific(sk->sk_family)->addr_to_user(sctp_sk(sk), (union sctp_addr *)addr); return transport; } /* API 3.1.2 bind() - UDP Style Syntax * The syntax of bind() is, * * ret = bind(int sd, struct sockaddr *addr, int addrlen); * * sd - the socket descriptor returned by socket(). * addr - the address structure (struct sockaddr_in or struct * sockaddr_in6 [RFC 2553]), * addr_len - the size of the address structure. */ static int sctp_bind(struct sock *sk, struct sockaddr *addr, int addr_len) { int retval = 0; lock_sock(sk); pr_debug("%s: sk:%p, addr:%p, addr_len:%d\n", __func__, sk, addr, addr_len); /* Disallow binding twice. */ if (!sctp_sk(sk)->ep->base.bind_addr.port) retval = sctp_do_bind(sk, (union sctp_addr *)addr, addr_len); else retval = -EINVAL; release_sock(sk); return retval; } static long sctp_get_port_local(struct sock *, union sctp_addr *); /* Verify this is a valid sockaddr. */ static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt, union sctp_addr *addr, int len) { struct sctp_af *af; /* Check minimum size. */ if (len < sizeof (struct sockaddr)) return NULL; /* V4 mapped address are really of AF_INET family */ if (addr->sa.sa_family == AF_INET6 && ipv6_addr_v4mapped(&addr->v6.sin6_addr)) { if (!opt->pf->af_supported(AF_INET, opt)) return NULL; } else { /* Does this PF support this AF? */ if (!opt->pf->af_supported(addr->sa.sa_family, opt)) return NULL; } /* If we get this far, af is valid. */ af = sctp_get_af_specific(addr->sa.sa_family); if (len < af->sockaddr_len) return NULL; return af; } /* Bind a local address either to an endpoint or to an association. */ static int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len) { struct net *net = sock_net(sk); struct sctp_sock *sp = sctp_sk(sk); struct sctp_endpoint *ep = sp->ep; struct sctp_bind_addr *bp = &ep->base.bind_addr; struct sctp_af *af; unsigned short snum; int ret = 0; /* Common sockaddr verification. 
*/ af = sctp_sockaddr_af(sp, addr, len); if (!af) { pr_debug("%s: sk:%p, newaddr:%p, len:%d EINVAL\n", __func__, sk, addr, len); return -EINVAL; } snum = ntohs(addr->v4.sin_port); pr_debug("%s: sk:%p, new addr:%pISc, port:%d, new port:%d, len:%d\n", __func__, sk, &addr->sa, bp->port, snum, len); /* PF specific bind() address verification. */ if (!sp->pf->bind_verify(sp, addr)) return -EADDRNOTAVAIL; /* We must either be unbound, or bind to the same port. * It's OK to allow 0 ports if we are already bound. * We'll just inhert an already bound port in this case */ if (bp->port) { if (!snum) snum = bp->port; else if (snum != bp->port) { pr_debug("%s: new port %d doesn't match existing port " "%d\n", __func__, snum, bp->port); return -EINVAL; } } if (snum && snum < PROT_SOCK && !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) return -EACCES; /* See if the address matches any of the addresses we may have * already bound before checking against other endpoints. */ if (sctp_bind_addr_match(bp, addr, sp)) return -EINVAL; /* Make sure we are allowed to bind here. * The function sctp_get_port_local() does duplicate address * detection. */ addr->v4.sin_port = htons(snum); if ((ret = sctp_get_port_local(sk, addr))) { return -EADDRINUSE; } /* Refresh ephemeral port. */ if (!bp->port) bp->port = inet_sk(sk)->inet_num; /* Add the address to the bind address list. * Use GFP_ATOMIC since BHs will be disabled. */ ret = sctp_add_bind_addr(bp, addr, SCTP_ADDR_SRC, GFP_ATOMIC); /* Copy back into socket for getsockname() use. */ if (!ret) { inet_sk(sk)->inet_sport = htons(inet_sk(sk)->inet_num); sp->pf->to_sk_saddr(addr, sk); } return ret; } /* ADDIP Section 4.1.1 Congestion Control of ASCONF Chunks * * R1) One and only one ASCONF Chunk MAY be in transit and unacknowledged * at any one time. If a sender, after sending an ASCONF chunk, decides * it needs to transfer another ASCONF Chunk, it MUST wait until the * ASCONF-ACK Chunk returns from the previous ASCONF Chunk before sending a * subsequent ASCONF. Note this restriction binds each side, so at any * time two ASCONF may be in-transit on any given association (one sent * from each endpoint). */ static int sctp_send_asconf(struct sctp_association *asoc, struct sctp_chunk *chunk) { struct net *net = sock_net(asoc->base.sk); int retval = 0; /* If there is an outstanding ASCONF chunk, queue it for later * transmission. */ if (asoc->addip_last_asconf) { list_add_tail(&chunk->list, &asoc->addip_chunk_list); goto out; } /* Hold the chunk until an ASCONF_ACK is received. */ sctp_chunk_hold(chunk); retval = sctp_primitive_ASCONF(net, asoc, chunk); if (retval) sctp_chunk_free(chunk); else asoc->addip_last_asconf = chunk; out: return retval; } /* Add a list of addresses as bind addresses to local endpoint or * association. * * Basically run through each address specified in the addrs/addrcnt * array/length pair, determine if it is IPv6 or IPv4 and call * sctp_do_bind() on it. * * If any of them fails, then the operation will be reversed and the * ones that were added will be removed. * * Only sctp_setsockopt_bindx() is supposed to call this function. */ static int sctp_bindx_add(struct sock *sk, struct sockaddr *addrs, int addrcnt) { int cnt; int retval = 0; void *addr_buf; struct sockaddr *sa_addr; struct sctp_af *af; pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n", __func__, sk, addrs, addrcnt); addr_buf = addrs; for (cnt = 0; cnt < addrcnt; cnt++) { /* The list may contain either IPv4 or IPv6 address; * determine the address length for walking thru the list. 
*/ sa_addr = addr_buf; af = sctp_get_af_specific(sa_addr->sa_family); if (!af) { retval = -EINVAL; goto err_bindx_add; } retval = sctp_do_bind(sk, (union sctp_addr *)sa_addr, af->sockaddr_len); addr_buf += af->sockaddr_len; err_bindx_add: if (retval < 0) { /* Failed. Cleanup the ones that have been added */ if (cnt > 0) sctp_bindx_rem(sk, addrs, cnt); return retval; } } return retval; } /* Send an ASCONF chunk with Add IP address parameters to all the peers of the * associations that are part of the endpoint indicating that a list of local * addresses are added to the endpoint. * * If any of the addresses is already in the bind address list of the * association, we do not send the chunk for that association. But it will not * affect other associations. * * Only sctp_setsockopt_bindx() is supposed to call this function. */ static int sctp_send_asconf_add_ip(struct sock *sk, struct sockaddr *addrs, int addrcnt) { struct net *net = sock_net(sk); struct sctp_sock *sp; struct sctp_endpoint *ep; struct sctp_association *asoc; struct sctp_bind_addr *bp; struct sctp_chunk *chunk; struct sctp_sockaddr_entry *laddr; union sctp_addr *addr; union sctp_addr saveaddr; void *addr_buf; struct sctp_af *af; struct list_head *p; int i; int retval = 0; if (!net->sctp.addip_enable) return retval; sp = sctp_sk(sk); ep = sp->ep; pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n", __func__, sk, addrs, addrcnt); list_for_each_entry(asoc, &ep->asocs, asocs) { if (!asoc->peer.asconf_capable) continue; if (asoc->peer.addip_disabled_mask & SCTP_PARAM_ADD_IP) continue; if (!sctp_state(asoc, ESTABLISHED)) continue; /* Check if any address in the packed array of addresses is * in the bind address list of the association. If so, * do not send the asconf chunk to its peer, but continue with * other associations. */ addr_buf = addrs; for (i = 0; i < addrcnt; i++) { addr = addr_buf; af = sctp_get_af_specific(addr->v4.sin_family); if (!af) { retval = -EINVAL; goto out; } if (sctp_assoc_lookup_laddr(asoc, addr)) break; addr_buf += af->sockaddr_len; } if (i < addrcnt) continue; /* Use the first valid address in bind addr list of * association as Address Parameter of ASCONF CHUNK. */ bp = &asoc->base.bind_addr; p = bp->address_list.next; laddr = list_entry(p, struct sctp_sockaddr_entry, list); chunk = sctp_make_asconf_update_ip(asoc, &laddr->a, addrs, addrcnt, SCTP_PARAM_ADD_IP); if (!chunk) { retval = -ENOMEM; goto out; } /* Add the new addresses to the bind address list with * use_as_src set to 0. */ addr_buf = addrs; for (i = 0; i < addrcnt; i++) { addr = addr_buf; af = sctp_get_af_specific(addr->v4.sin_family); memcpy(&saveaddr, addr, af->sockaddr_len); retval = sctp_add_bind_addr(bp, &saveaddr, SCTP_ADDR_NEW, GFP_ATOMIC); addr_buf += af->sockaddr_len; } if (asoc->src_out_of_asoc_ok) { struct sctp_transport *trans; list_for_each_entry(trans, &asoc->peer.transport_addr_list, transports) { /* Clear the source and route cache */ dst_release(trans->dst); trans->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380)); trans->ssthresh = asoc->peer.i.a_rwnd; trans->rto = asoc->rto_initial; sctp_max_rto(asoc, trans); trans->rtt = trans->srtt = trans->rttvar = 0; sctp_transport_route(trans, NULL, sctp_sk(asoc->base.sk)); } } retval = sctp_send_asconf(asoc, chunk); } out: return retval; } /* Remove a list of addresses from bind addresses list. Do not remove the * last address. * * Basically run through each address specified in the addrs/addrcnt * array/length pair, determine if it is IPv6 or IPv4 and call * sctp_del_bind() on it. 
* * If any of them fails, then the operation will be reversed and the * ones that were removed will be added back. * * At least one address has to be left; if only one address is * available, the operation will return -EBUSY. * * Only sctp_setsockopt_bindx() is supposed to call this function. */ static int sctp_bindx_rem(struct sock *sk, struct sockaddr *addrs, int addrcnt) { struct sctp_sock *sp = sctp_sk(sk); struct sctp_endpoint *ep = sp->ep; int cnt; struct sctp_bind_addr *bp = &ep->base.bind_addr; int retval = 0; void *addr_buf; union sctp_addr *sa_addr; struct sctp_af *af; pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n", __func__, sk, addrs, addrcnt); addr_buf = addrs; for (cnt = 0; cnt < addrcnt; cnt++) { /* If the bind address list is empty or if there is only one * bind address, there is nothing more to be removed (we need * at least one address here). */ if (list_empty(&bp->address_list) || (sctp_list_single_entry(&bp->address_list))) { retval = -EBUSY; goto err_bindx_rem; } sa_addr = addr_buf; af = sctp_get_af_specific(sa_addr->sa.sa_family); if (!af) { retval = -EINVAL; goto err_bindx_rem; } if (!af->addr_valid(sa_addr, sp, NULL)) { retval = -EADDRNOTAVAIL; goto err_bindx_rem; } if (sa_addr->v4.sin_port && sa_addr->v4.sin_port != htons(bp->port)) { retval = -EINVAL; goto err_bindx_rem; } if (!sa_addr->v4.sin_port) sa_addr->v4.sin_port = htons(bp->port); /* FIXME - There is probably a need to check if sk->sk_saddr and * sk->sk_rcv_addr are currently set to one of the addresses to * be removed. This is something which needs to be looked into * when we are fixing the outstanding issues with multi-homing * socket routing and failover schemes. Refer to comments in * sctp_do_bind(). -daisy */ retval = sctp_del_bind_addr(bp, sa_addr); addr_buf += af->sockaddr_len; err_bindx_rem: if (retval < 0) { /* Failed. Add the ones that has been removed back */ if (cnt > 0) sctp_bindx_add(sk, addrs, cnt); return retval; } } return retval; } /* Send an ASCONF chunk with Delete IP address parameters to all the peers of * the associations that are part of the endpoint indicating that a list of * local addresses are removed from the endpoint. * * If any of the addresses is already in the bind address list of the * association, we do not send the chunk for that association. But it will not * affect other associations. * * Only sctp_setsockopt_bindx() is supposed to call this function. */ static int sctp_send_asconf_del_ip(struct sock *sk, struct sockaddr *addrs, int addrcnt) { struct net *net = sock_net(sk); struct sctp_sock *sp; struct sctp_endpoint *ep; struct sctp_association *asoc; struct sctp_transport *transport; struct sctp_bind_addr *bp; struct sctp_chunk *chunk; union sctp_addr *laddr; void *addr_buf; struct sctp_af *af; struct sctp_sockaddr_entry *saddr; int i; int retval = 0; int stored = 0; chunk = NULL; if (!net->sctp.addip_enable) return retval; sp = sctp_sk(sk); ep = sp->ep; pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n", __func__, sk, addrs, addrcnt); list_for_each_entry(asoc, &ep->asocs, asocs) { if (!asoc->peer.asconf_capable) continue; if (asoc->peer.addip_disabled_mask & SCTP_PARAM_DEL_IP) continue; if (!sctp_state(asoc, ESTABLISHED)) continue; /* Check if any address in the packed array of addresses is * not present in the bind address list of the association. * If so, do not send the asconf chunk to its peer, but * continue with other associations. 
*/ addr_buf = addrs; for (i = 0; i < addrcnt; i++) { laddr = addr_buf; af = sctp_get_af_specific(laddr->v4.sin_family); if (!af) { retval = -EINVAL; goto out; } if (!sctp_assoc_lookup_laddr(asoc, laddr)) break; addr_buf += af->sockaddr_len; } if (i < addrcnt) continue; /* Find one address in the association's bind address list * that is not in the packed array of addresses. This is to * make sure that we do not delete all the addresses in the * association. */ bp = &asoc->base.bind_addr; laddr = sctp_find_unmatch_addr(bp, (union sctp_addr *)addrs, addrcnt, sp); if ((laddr == NULL) && (addrcnt == 1)) { if (asoc->asconf_addr_del_pending) continue; asoc->asconf_addr_del_pending = kzalloc(sizeof(union sctp_addr), GFP_ATOMIC); if (asoc->asconf_addr_del_pending == NULL) { retval = -ENOMEM; goto out; } asoc->asconf_addr_del_pending->sa.sa_family = addrs->sa_family; asoc->asconf_addr_del_pending->v4.sin_port = htons(bp->port); if (addrs->sa_family == AF_INET) { struct sockaddr_in *sin; sin = (struct sockaddr_in *)addrs; asoc->asconf_addr_del_pending->v4.sin_addr.s_addr = sin->sin_addr.s_addr; } else if (addrs->sa_family == AF_INET6) { struct sockaddr_in6 *sin6; sin6 = (struct sockaddr_in6 *)addrs; asoc->asconf_addr_del_pending->v6.sin6_addr = sin6->sin6_addr; } pr_debug("%s: keep the last address asoc:%p %pISc at %p\n", __func__, asoc, &asoc->asconf_addr_del_pending->sa, asoc->asconf_addr_del_pending); asoc->src_out_of_asoc_ok = 1; stored = 1; goto skip_mkasconf; } if (laddr == NULL) return -EINVAL; /* We do not need RCU protection throughout this loop * because this is done under a socket lock from the * setsockopt call. */ chunk = sctp_make_asconf_update_ip(asoc, laddr, addrs, addrcnt, SCTP_PARAM_DEL_IP); if (!chunk) { retval = -ENOMEM; goto out; } skip_mkasconf: /* Reset use_as_src flag for the addresses in the bind address * list that are to be deleted. */ addr_buf = addrs; for (i = 0; i < addrcnt; i++) { laddr = addr_buf; af = sctp_get_af_specific(laddr->v4.sin_family); list_for_each_entry(saddr, &bp->address_list, list) { if (sctp_cmp_addr_exact(&saddr->a, laddr)) saddr->state = SCTP_ADDR_DEL; } addr_buf += af->sockaddr_len; } /* Update the route and saddr entries for all the transports * as some of the addresses in the bind address list are * about to be deleted and cannot be used as source addresses. */ list_for_each_entry(transport, &asoc->peer.transport_addr_list, transports) { dst_release(transport->dst); sctp_transport_route(transport, NULL, sctp_sk(asoc->base.sk)); } if (stored) /* We don't need to transmit ASCONF */ continue; retval = sctp_send_asconf(asoc, chunk); } out: return retval; } /* set addr events to assocs in the endpoint. ep and addr_wq must be locked */ int sctp_asconf_mgmt(struct sctp_sock *sp, struct sctp_sockaddr_entry *addrw) { struct sock *sk = sctp_opt2sk(sp); union sctp_addr *addr; struct sctp_af *af; /* It is safe to write port space in caller. */ addr = &addrw->a; addr->v4.sin_port = htons(sp->ep->base.bind_addr.port); af = sctp_get_af_specific(addr->sa.sa_family); if (!af) return -EINVAL; if (sctp_verify_addr(sk, addr, af->sockaddr_len)) return -EINVAL; if (addrw->state == SCTP_ADDR_NEW) return sctp_send_asconf_add_ip(sk, (struct sockaddr *)addr, 1); else return sctp_send_asconf_del_ip(sk, (struct sockaddr *)addr, 1); } /* Helper for tunneling sctp_bindx() requests through sctp_setsockopt() * * API 8.1 * int sctp_bindx(int sd, struct sockaddr *addrs, int addrcnt, * int flags); * * If sd is an IPv4 socket, the addresses passed must be IPv4 addresses. 
* If the sd is an IPv6 socket, the addresses passed can either be IPv4 * or IPv6 addresses. * * A single address may be specified as INADDR_ANY or IN6ADDR_ANY, see * Section 3.1.2 for this usage. * * addrs is a pointer to an array of one or more socket addresses. Each * address is contained in its appropriate structure (i.e. struct * sockaddr_in or struct sockaddr_in6) the family of the address type * must be used to distinguish the address length (note that this * representation is termed a "packed array" of addresses). The caller * specifies the number of addresses in the array with addrcnt. * * On success, sctp_bindx() returns 0. On failure, sctp_bindx() returns * -1, and sets errno to the appropriate error code. * * For SCTP, the port given in each socket address must be the same, or * sctp_bindx() will fail, setting errno to EINVAL. * * The flags parameter is formed from the bitwise OR of zero or more of * the following currently defined flags: * * SCTP_BINDX_ADD_ADDR * * SCTP_BINDX_REM_ADDR * * SCTP_BINDX_ADD_ADDR directs SCTP to add the given addresses to the * association, and SCTP_BINDX_REM_ADDR directs SCTP to remove the given * addresses from the association. The two flags are mutually exclusive; * if both are given, sctp_bindx() will fail with EINVAL. A caller may * not remove all addresses from an association; sctp_bindx() will * reject such an attempt with EINVAL. * * An application can use sctp_bindx(SCTP_BINDX_ADD_ADDR) to associate * additional addresses with an endpoint after calling bind(). Or use * sctp_bindx(SCTP_BINDX_REM_ADDR) to remove some addresses a listening * socket is associated with so that no new association accepted will be * associated with those addresses. If the endpoint supports dynamic * address a SCTP_BINDX_REM_ADDR or SCTP_BINDX_ADD_ADDR may cause a * endpoint to send the appropriate message to the peer to change the * peers address lists. * * Adding and removing addresses from a connected association is * optional functionality. Implementations that do not support this * functionality should return EOPNOTSUPP. * * Basically do nothing but copying the addresses from user to kernel * land and invoking either sctp_bindx_add() or sctp_bindx_rem() on the sk. * This is used for tunneling the sctp_bindx() request through sctp_setsockopt() * from userspace. * * We don't use copy_from_user() for optimization: we first do the * sanity checks (buffer size -fast- and access check-healthy * pointer); if all of those succeed, then we can alloc the memory * (expensive operation) needed to copy the data to kernel. Then we do * the copying without checking the user space area * (__copy_from_user()). * * On exit there is no need to do sockfd_put(), sys_setsockopt() does * it. * * sk The sk of the socket * addrs The pointer to the addresses in user land * addrssize Size of the addrs buffer * op Operation to perform (add or remove, see the flags of * sctp_bindx) * * Returns 0 if ok, <0 errno code on error. */ static int sctp_setsockopt_bindx(struct sock *sk, struct sockaddr __user *addrs, int addrs_size, int op) { struct sockaddr *kaddrs; int err; int addrcnt = 0; int walk_size = 0; struct sockaddr *sa_addr; void *addr_buf; struct sctp_af *af; pr_debug("%s: sk:%p addrs:%p addrs_size:%d opt:%d\n", __func__, sk, addrs, addrs_size, op); if (unlikely(addrs_size <= 0)) return -EINVAL; /* Check the user passed a healthy pointer. */ if (unlikely(!access_ok(VERIFY_READ, addrs, addrs_size))) return -EFAULT; /* Alloc space for the address array in kernel memory. 
*/ kaddrs = kmalloc(addrs_size, GFP_KERNEL); if (unlikely(!kaddrs)) return -ENOMEM; if (__copy_from_user(kaddrs, addrs, addrs_size)) { kfree(kaddrs); return -EFAULT; } /* Walk through the addrs buffer and count the number of addresses. */ addr_buf = kaddrs; while (walk_size < addrs_size) { if (walk_size + sizeof(sa_family_t) > addrs_size) { kfree(kaddrs); return -EINVAL; } sa_addr = addr_buf; af = sctp_get_af_specific(sa_addr->sa_family); /* If the address family is not supported or if this address * causes the address buffer to overflow return EINVAL. */ if (!af || (walk_size + af->sockaddr_len) > addrs_size) { kfree(kaddrs); return -EINVAL; } addrcnt++; addr_buf += af->sockaddr_len; walk_size += af->sockaddr_len; } /* Do the work. */ switch (op) { case SCTP_BINDX_ADD_ADDR: err = sctp_bindx_add(sk, kaddrs, addrcnt); if (err) goto out; err = sctp_send_asconf_add_ip(sk, kaddrs, addrcnt); break; case SCTP_BINDX_REM_ADDR: err = sctp_bindx_rem(sk, kaddrs, addrcnt); if (err) goto out; err = sctp_send_asconf_del_ip(sk, kaddrs, addrcnt); break; default: err = -EINVAL; break; } out: kfree(kaddrs); return err; } /* __sctp_connect(struct sock* sk, struct sockaddr *kaddrs, int addrs_size) * * Common routine for handling connect() and sctp_connectx(). * Connect will come in with just a single address. */ static int __sctp_connect(struct sock *sk, struct sockaddr *kaddrs, int addrs_size, sctp_assoc_t *assoc_id) { struct net *net = sock_net(sk); struct sctp_sock *sp; struct sctp_endpoint *ep; struct sctp_association *asoc = NULL; struct sctp_association *asoc2; struct sctp_transport *transport; union sctp_addr to; sctp_scope_t scope; long timeo; int err = 0; int addrcnt = 0; int walk_size = 0; union sctp_addr *sa_addr = NULL; void *addr_buf; unsigned short port; unsigned int f_flags = 0; sp = sctp_sk(sk); ep = sp->ep; /* connect() cannot be done on a socket that is already in ESTABLISHED * state - UDP-style peeled off socket or a TCP-style socket that * is already connected. * It cannot be done even on a TCP-style listening socket. */ if (sctp_sstate(sk, ESTABLISHED) || (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))) { err = -EISCONN; goto out_free; } /* Walk through the addrs buffer and count the number of addresses. */ addr_buf = kaddrs; while (walk_size < addrs_size) { struct sctp_af *af; if (walk_size + sizeof(sa_family_t) > addrs_size) { err = -EINVAL; goto out_free; } sa_addr = addr_buf; af = sctp_get_af_specific(sa_addr->sa.sa_family); /* If the address family is not supported or if this address * causes the address buffer to overflow return EINVAL. */ if (!af || (walk_size + af->sockaddr_len) > addrs_size) { err = -EINVAL; goto out_free; } port = ntohs(sa_addr->v4.sin_port); /* Save current address so we can work with it */ memcpy(&to, sa_addr, af->sockaddr_len); err = sctp_verify_addr(sk, &to, af->sockaddr_len); if (err) goto out_free; /* Make sure the destination port is correctly set * in all addresses. */ if (asoc && asoc->peer.port && asoc->peer.port != port) { err = -EINVAL; goto out_free; } /* Check if there already is a matching association on the * endpoint (other than the one created here). */ asoc2 = sctp_endpoint_lookup_assoc(ep, &to, &transport); if (asoc2 && asoc2 != asoc) { if (asoc2->state >= SCTP_STATE_ESTABLISHED) err = -EISCONN; else err = -EALREADY; goto out_free; } /* If we could not find a matching association on the endpoint, * make sure that there is no peeled-off association matching * the peer address even on another socket. 
*/ if (sctp_endpoint_is_peeled_off(ep, &to)) { err = -EADDRNOTAVAIL; goto out_free; } if (!asoc) { /* If a bind() or sctp_bindx() is not called prior to * an sctp_connectx() call, the system picks an * ephemeral port and will choose an address set * equivalent to binding with a wildcard address. */ if (!ep->base.bind_addr.port) { if (sctp_autobind(sk)) { err = -EAGAIN; goto out_free; } } else { /* * If an unprivileged user inherits a 1-many * style socket with open associations on a * privileged port, it MAY be permitted to * accept new associations, but it SHOULD NOT * be permitted to open new associations. */ if (ep->base.bind_addr.port < PROT_SOCK && !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) { err = -EACCES; goto out_free; } } scope = sctp_scope(&to); asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL); if (!asoc) { err = -ENOMEM; goto out_free; } err = sctp_assoc_set_bind_addr_from_ep(asoc, scope, GFP_KERNEL); if (err < 0) { goto out_free; } } /* Prime the peer's transport structures. */ transport = sctp_assoc_add_peer(asoc, &to, GFP_KERNEL, SCTP_UNKNOWN); if (!transport) { err = -ENOMEM; goto out_free; } addrcnt++; addr_buf += af->sockaddr_len; walk_size += af->sockaddr_len; } /* In case the user of sctp_connectx() wants an association * id back, assign one now. */ if (assoc_id) { err = sctp_assoc_set_id(asoc, GFP_KERNEL); if (err < 0) goto out_free; } err = sctp_primitive_ASSOCIATE(net, asoc, NULL); if (err < 0) { goto out_free; } /* Initialize sk's dport and daddr for getpeername() */ inet_sk(sk)->inet_dport = htons(asoc->peer.port); sp->pf->to_sk_daddr(sa_addr, sk); sk->sk_err = 0; /* in-kernel sockets don't generally have a file allocated to them * if all they do is call sock_create_kern(). */ if (sk->sk_socket->file) f_flags = sk->sk_socket->file->f_flags; timeo = sock_sndtimeo(sk, f_flags & O_NONBLOCK); err = sctp_wait_for_connect(asoc, &timeo); if ((err == 0 || err == -EINPROGRESS) && assoc_id) *assoc_id = asoc->assoc_id; /* Don't free association on exit. */ asoc = NULL; out_free: pr_debug("%s: took out_free path with asoc:%p kaddrs:%p err:%d\n", __func__, asoc, kaddrs, err); if (asoc) { /* sctp_primitive_ASSOCIATE may have added this association * To the hash table, try to unhash it, just in case, its a noop * if it wasn't hashed so we're safe */ sctp_unhash_established(asoc); sctp_association_free(asoc); } return err; } /* Helper for tunneling sctp_connectx() requests through sctp_setsockopt() * * API 8.9 * int sctp_connectx(int sd, struct sockaddr *addrs, int addrcnt, * sctp_assoc_t *asoc); * * If sd is an IPv4 socket, the addresses passed must be IPv4 addresses. * If the sd is an IPv6 socket, the addresses passed can either be IPv4 * or IPv6 addresses. * * A single address may be specified as INADDR_ANY or IN6ADDR_ANY, see * Section 3.1.2 for this usage. * * addrs is a pointer to an array of one or more socket addresses. Each * address is contained in its appropriate structure (i.e. struct * sockaddr_in or struct sockaddr_in6) the family of the address type * must be used to distengish the address length (note that this * representation is termed a "packed array" of addresses). The caller * specifies the number of addresses in the array with addrcnt. * * On success, sctp_connectx() returns 0. It also sets the assoc_id to * the association id of the new association. On failure, sctp_connectx() * returns -1, and sets errno to the appropriate error code. The assoc_id * is not touched by the kernel. 
* * For SCTP, the port given in each socket address must be the same, or * sctp_connectx() will fail, setting errno to EINVAL. * * An application can use sctp_connectx to initiate an association with * an endpoint that is multi-homed. Much like sctp_bindx() this call * allows a caller to specify multiple addresses at which a peer can be * reached. The way the SCTP stack uses the list of addresses to set up * the association is implementation dependent. This function only * specifies that the stack will try to make use of all the addresses in * the list when needed. * * Note that the list of addresses passed in is only used for setting up * the association. It does not necessarily equal the set of addresses * the peer uses for the resulting association. If the caller wants to * find out the set of peer addresses, it must use sctp_getpaddrs() to * retrieve them after the association has been set up. * * Basically do nothing but copying the addresses from user to kernel * land and invoking either sctp_connectx(). This is used for tunneling * the sctp_connectx() request through sctp_setsockopt() from userspace. * * We don't use copy_from_user() for optimization: we first do the * sanity checks (buffer size -fast- and access check-healthy * pointer); if all of those succeed, then we can alloc the memory * (expensive operation) needed to copy the data to kernel. Then we do * the copying without checking the user space area * (__copy_from_user()). * * On exit there is no need to do sockfd_put(), sys_setsockopt() does * it. * * sk The sk of the socket * addrs The pointer to the addresses in user land * addrssize Size of the addrs buffer * * Returns >=0 if ok, <0 errno code on error. */ static int __sctp_setsockopt_connectx(struct sock *sk, struct sockaddr __user *addrs, int addrs_size, sctp_assoc_t *assoc_id) { int err = 0; struct sockaddr *kaddrs; pr_debug("%s: sk:%p addrs:%p addrs_size:%d\n", __func__, sk, addrs, addrs_size); if (unlikely(addrs_size <= 0)) return -EINVAL; /* Check the user passed a healthy pointer. */ if (unlikely(!access_ok(VERIFY_READ, addrs, addrs_size))) return -EFAULT; /* Alloc space for the address array in kernel memory. */ kaddrs = kmalloc(addrs_size, GFP_KERNEL); if (unlikely(!kaddrs)) return -ENOMEM; if (__copy_from_user(kaddrs, addrs, addrs_size)) { err = -EFAULT; } else { err = __sctp_connect(sk, kaddrs, addrs_size, assoc_id); } kfree(kaddrs); return err; } /* * This is an older interface. It's kept for backward compatibility * to the option that doesn't provide association id. */ static int sctp_setsockopt_connectx_old(struct sock *sk, struct sockaddr __user *addrs, int addrs_size) { return __sctp_setsockopt_connectx(sk, addrs, addrs_size, NULL); } /* * New interface for the API. The since the API is done with a socket * option, to make it simple we feed back the association id is as a return * indication to the call. Error is always negative and association id is * always positive. */ static int sctp_setsockopt_connectx(struct sock *sk, struct sockaddr __user *addrs, int addrs_size) { sctp_assoc_t assoc_id = 0; int err = 0; err = __sctp_setsockopt_connectx(sk, addrs, addrs_size, &assoc_id); if (err) return err; else return assoc_id; } /* * New (hopefully final) interface for the API. * We use the sctp_getaddrs_old structure so that use-space library * can avoid any unnecessary allocations. The only different part * is that we store the actual length of the address buffer into the * addrs_num structure member. That way we can re-use the existing * code. 
*/ #ifdef CONFIG_COMPAT struct compat_sctp_getaddrs_old { sctp_assoc_t assoc_id; s32 addr_num; compat_uptr_t addrs; /* struct sockaddr * */ }; #endif static int sctp_getsockopt_connectx3(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_getaddrs_old param; sctp_assoc_t assoc_id = 0; int err = 0; #ifdef CONFIG_COMPAT if (is_compat_task()) { struct compat_sctp_getaddrs_old param32; if (len < sizeof(param32)) return -EINVAL; if (copy_from_user(&param32, optval, sizeof(param32))) return -EFAULT; param.assoc_id = param32.assoc_id; param.addr_num = param32.addr_num; param.addrs = compat_ptr(param32.addrs); } else #endif { if (len < sizeof(param)) return -EINVAL; if (copy_from_user(&param, optval, sizeof(param))) return -EFAULT; } err = __sctp_setsockopt_connectx(sk, (struct sockaddr __user *) param.addrs, param.addr_num, &assoc_id); if (err == 0 || err == -EINPROGRESS) { if (copy_to_user(optval, &assoc_id, sizeof(assoc_id))) return -EFAULT; if (put_user(sizeof(assoc_id), optlen)) return -EFAULT; } return err; } /* API 3.1.4 close() - UDP Style Syntax * Applications use close() to perform graceful shutdown (as described in * Section 10.1 of [SCTP]) on ALL the associations currently represented * by a UDP-style socket. * * The syntax is * * ret = close(int sd); * * sd - the socket descriptor of the associations to be closed. * * To gracefully shutdown a specific association represented by the * UDP-style socket, an application should use the sendmsg() call, * passing no user data, but including the appropriate flag in the * ancillary data (see Section xxxx). * * If sd in the close() call is a branched-off socket representing only * one association, the shutdown is performed on that association only. * * 4.1.6 close() - TCP Style Syntax * * Applications use close() to gracefully close down an association. * * The syntax is: * * int close(int sd); * * sd - the socket descriptor of the association to be closed. * * After an application calls close() on a socket descriptor, no further * socket operations will succeed on that descriptor. * * API 7.1.4 SO_LINGER * * An application using the TCP-style socket can use this option to * perform the SCTP ABORT primitive. The linger option structure is: * * struct linger { * int l_onoff; // option on/off * int l_linger; // linger time * }; * * To enable the option, set l_onoff to 1. If the l_linger value is set * to 0, calling close() is the same as the ABORT primitive. If the * value is set to a negative value, the setsockopt() call will return * an error. If the value is set to a positive value linger_time, the * close() can be blocked for at most linger_time ms. If the graceful * shutdown phase does not finish during this period, close() will * return but the graceful shutdown phase continues in the system. */ static void sctp_close(struct sock *sk, long timeout) { struct net *net = sock_net(sk); struct sctp_endpoint *ep; struct sctp_association *asoc; struct list_head *pos, *temp; unsigned int data_was_unread; pr_debug("%s: sk:%p, timeout:%ld\n", __func__, sk, timeout); lock_sock(sk); sk->sk_shutdown = SHUTDOWN_MASK; sk->sk_state = SCTP_SS_CLOSING; ep = sctp_sk(sk)->ep; /* Clean up any skbs sitting on the receive queue. */ data_was_unread = sctp_queue_purge_ulpevents(&sk->sk_receive_queue); data_was_unread += sctp_queue_purge_ulpevents(&sctp_sk(sk)->pd_lobby); /* Walk all associations on an endpoint. 
*/ list_for_each_safe(pos, temp, &ep->asocs) { asoc = list_entry(pos, struct sctp_association, asocs); if (sctp_style(sk, TCP)) { /* A closed association can still be in the list if * it belongs to a TCP-style listening socket that is * not yet accepted. If so, free it. If not, send an * ABORT or SHUTDOWN based on the linger options. */ if (sctp_state(asoc, CLOSED)) { sctp_unhash_established(asoc); sctp_association_free(asoc); continue; } } if (data_was_unread || !skb_queue_empty(&asoc->ulpq.lobby) || !skb_queue_empty(&asoc->ulpq.reasm) || (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime)) { struct sctp_chunk *chunk; chunk = sctp_make_abort_user(asoc, NULL, 0); if (chunk) sctp_primitive_ABORT(net, asoc, chunk); } else sctp_primitive_SHUTDOWN(net, asoc, NULL); } /* On a TCP-style socket, block for at most linger_time if set. */ if (sctp_style(sk, TCP) && timeout) sctp_wait_for_close(sk, timeout); /* This will run the backlog queue. */ release_sock(sk); /* Supposedly, no process has access to the socket, but * the net layers still may. * Also, sctp_destroy_sock() needs to be called with addr_wq_lock * held and that should be grabbed before socket lock. */ spin_lock_bh(&net->sctp.addr_wq_lock); bh_lock_sock(sk); /* Hold the sock, since sk_common_release() will put sock_put() * and we have just a little more cleanup. */ sock_hold(sk); sk_common_release(sk); bh_unlock_sock(sk); spin_unlock_bh(&net->sctp.addr_wq_lock); sock_put(sk); SCTP_DBG_OBJCNT_DEC(sock); } /* Handle EPIPE error. */ static int sctp_error(struct sock *sk, int flags, int err) { if (err == -EPIPE) err = sock_error(sk) ? : -EPIPE; if (err == -EPIPE && !(flags & MSG_NOSIGNAL)) send_sig(SIGPIPE, current, 0); return err; } /* API 3.1.3 sendmsg() - UDP Style Syntax * * An application uses sendmsg() and recvmsg() calls to transmit data to * and receive data from its peer. * * ssize_t sendmsg(int socket, const struct msghdr *message, * int flags); * * socket - the socket descriptor of the endpoint. * message - pointer to the msghdr structure which contains a single * user message and possibly some ancillary data. * * See Section 5 for complete description of the data * structures. * * flags - flags sent or received with the user message, see Section * 5 for complete description of the flags. * * Note: This function could use a rewrite especially when explicit * connect support comes in. */ /* BUG: We do not implement the equivalent of sk_stream_wait_memory(). */ static int sctp_msghdr_parse(const struct msghdr *, sctp_cmsgs_t *); static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len) { struct net *net = sock_net(sk); struct sctp_sock *sp; struct sctp_endpoint *ep; struct sctp_association *new_asoc = NULL, *asoc = NULL; struct sctp_transport *transport, *chunk_tp; struct sctp_chunk *chunk; union sctp_addr to; struct sockaddr *msg_name = NULL; struct sctp_sndrcvinfo default_sinfo; struct sctp_sndrcvinfo *sinfo; struct sctp_initmsg *sinit; sctp_assoc_t associd = 0; sctp_cmsgs_t cmsgs = { NULL }; sctp_scope_t scope; bool fill_sinfo_ttl = false, wait_connect = false; struct sctp_datamsg *datamsg; int msg_flags = msg->msg_flags; __u16 sinfo_flags = 0; long timeo; int err; err = 0; sp = sctp_sk(sk); ep = sp->ep; pr_debug("%s: sk:%p, msg:%p, msg_len:%zu ep:%p\n", __func__, sk, msg, msg_len, ep); /* We cannot send a message over a TCP-style listening socket. */ if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)) { err = -EPIPE; goto out_nounlock; } /* Parse out the SCTP CMSGs. 
*/ err = sctp_msghdr_parse(msg, &cmsgs); if (err) { pr_debug("%s: msghdr parse err:%x\n", __func__, err); goto out_nounlock; } /* Fetch the destination address for this packet. This * address only selects the association--it is not necessarily * the address we will send to. * For a peeled-off socket, msg_name is ignored. */ if (!sctp_style(sk, UDP_HIGH_BANDWIDTH) && msg->msg_name) { int msg_namelen = msg->msg_namelen; err = sctp_verify_addr(sk, (union sctp_addr *)msg->msg_name, msg_namelen); if (err) return err; if (msg_namelen > sizeof(to)) msg_namelen = sizeof(to); memcpy(&to, msg->msg_name, msg_namelen); msg_name = msg->msg_name; } sinit = cmsgs.init; if (cmsgs.sinfo != NULL) { memset(&default_sinfo, 0, sizeof(default_sinfo)); default_sinfo.sinfo_stream = cmsgs.sinfo->snd_sid; default_sinfo.sinfo_flags = cmsgs.sinfo->snd_flags; default_sinfo.sinfo_ppid = cmsgs.sinfo->snd_ppid; default_sinfo.sinfo_context = cmsgs.sinfo->snd_context; default_sinfo.sinfo_assoc_id = cmsgs.sinfo->snd_assoc_id; sinfo = &default_sinfo; fill_sinfo_ttl = true; } else { sinfo = cmsgs.srinfo; } /* Did the user specify SNDINFO/SNDRCVINFO? */ if (sinfo) { sinfo_flags = sinfo->sinfo_flags; associd = sinfo->sinfo_assoc_id; } pr_debug("%s: msg_len:%zu, sinfo_flags:0x%x\n", __func__, msg_len, sinfo_flags); /* SCTP_EOF or SCTP_ABORT cannot be set on a TCP-style socket. */ if (sctp_style(sk, TCP) && (sinfo_flags & (SCTP_EOF | SCTP_ABORT))) { err = -EINVAL; goto out_nounlock; } /* If SCTP_EOF is set, no data can be sent. Disallow sending zero * length messages when SCTP_EOF|SCTP_ABORT is not set. * If SCTP_ABORT is set, the message length could be non zero with * the msg_iov set to the user abort reason. */ if (((sinfo_flags & SCTP_EOF) && (msg_len > 0)) || (!(sinfo_flags & (SCTP_EOF|SCTP_ABORT)) && (msg_len == 0))) { err = -EINVAL; goto out_nounlock; } /* If SCTP_ADDR_OVER is set, there must be an address * specified in msg_name. */ if ((sinfo_flags & SCTP_ADDR_OVER) && (!msg->msg_name)) { err = -EINVAL; goto out_nounlock; } transport = NULL; pr_debug("%s: about to look up association\n", __func__); lock_sock(sk); /* If a msg_name has been specified, assume this is to be used. */ if (msg_name) { /* Look for a matching association on the endpoint. */ asoc = sctp_endpoint_lookup_assoc(ep, &to, &transport); if (!asoc) { /* If we could not find a matching association on the * endpoint, make sure that it is not a TCP-style * socket that already has an association or there is * no peeled-off association on another socket. */ if ((sctp_style(sk, TCP) && sctp_sstate(sk, ESTABLISHED)) || sctp_endpoint_is_peeled_off(ep, &to)) { err = -EADDRNOTAVAIL; goto out_unlock; } } } else { asoc = sctp_id2assoc(sk, associd); if (!asoc) { err = -EPIPE; goto out_unlock; } } if (asoc) { pr_debug("%s: just looked up association:%p\n", __func__, asoc); /* We cannot send a message on a TCP-style SCTP_SS_ESTABLISHED * socket that has an association in CLOSED state. This can * happen when an accepted socket has an association that is * already CLOSED. 
*/ if (sctp_state(asoc, CLOSED) && sctp_style(sk, TCP)) { err = -EPIPE; goto out_unlock; } if (sinfo_flags & SCTP_EOF) { pr_debug("%s: shutting down association:%p\n", __func__, asoc); sctp_primitive_SHUTDOWN(net, asoc, NULL); err = 0; goto out_unlock; } if (sinfo_flags & SCTP_ABORT) { chunk = sctp_make_abort_user(asoc, msg, msg_len); if (!chunk) { err = -ENOMEM; goto out_unlock; } pr_debug("%s: aborting association:%p\n", __func__, asoc); sctp_primitive_ABORT(net, asoc, chunk); err = 0; goto out_unlock; } } /* Do we need to create the association? */ if (!asoc) { pr_debug("%s: there is no association yet\n", __func__); if (sinfo_flags & (SCTP_EOF | SCTP_ABORT)) { err = -EINVAL; goto out_unlock; } /* Check for invalid stream against the stream counts, * either the default or the user specified stream counts. */ if (sinfo) { if (!sinit || !sinit->sinit_num_ostreams) { /* Check against the defaults. */ if (sinfo->sinfo_stream >= sp->initmsg.sinit_num_ostreams) { err = -EINVAL; goto out_unlock; } } else { /* Check against the requested. */ if (sinfo->sinfo_stream >= sinit->sinit_num_ostreams) { err = -EINVAL; goto out_unlock; } } } /* * API 3.1.2 bind() - UDP Style Syntax * If a bind() or sctp_bindx() is not called prior to a * sendmsg() call that initiates a new association, the * system picks an ephemeral port and will choose an address * set equivalent to binding with a wildcard address. */ if (!ep->base.bind_addr.port) { if (sctp_autobind(sk)) { err = -EAGAIN; goto out_unlock; } } else { /* * If an unprivileged user inherits a one-to-many * style socket with open associations on a privileged * port, it MAY be permitted to accept new associations, * but it SHOULD NOT be permitted to open new * associations. */ if (ep->base.bind_addr.port < PROT_SOCK && !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) { err = -EACCES; goto out_unlock; } } scope = sctp_scope(&to); new_asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL); if (!new_asoc) { err = -ENOMEM; goto out_unlock; } asoc = new_asoc; err = sctp_assoc_set_bind_addr_from_ep(asoc, scope, GFP_KERNEL); if (err < 0) { err = -ENOMEM; goto out_free; } /* If the SCTP_INIT ancillary data is specified, set all * the association init values accordingly. */ if (sinit) { if (sinit->sinit_num_ostreams) { asoc->c.sinit_num_ostreams = sinit->sinit_num_ostreams; } if (sinit->sinit_max_instreams) { asoc->c.sinit_max_instreams = sinit->sinit_max_instreams; } if (sinit->sinit_max_attempts) { asoc->max_init_attempts = sinit->sinit_max_attempts; } if (sinit->sinit_max_init_timeo) { asoc->max_init_timeo = msecs_to_jiffies(sinit->sinit_max_init_timeo); } } /* Prime the peer's transport structures. */ transport = sctp_assoc_add_peer(asoc, &to, GFP_KERNEL, SCTP_UNKNOWN); if (!transport) { err = -ENOMEM; goto out_free; } } /* ASSERT: we have a valid association at this point. */ pr_debug("%s: we have a valid association\n", __func__); if (!sinfo) { /* If the user didn't specify SNDINFO/SNDRCVINFO, make up * one with some defaults. */ memset(&default_sinfo, 0, sizeof(default_sinfo)); default_sinfo.sinfo_stream = asoc->default_stream; default_sinfo.sinfo_flags = asoc->default_flags; default_sinfo.sinfo_ppid = asoc->default_ppid; default_sinfo.sinfo_context = asoc->default_context; default_sinfo.sinfo_timetolive = asoc->default_timetolive; default_sinfo.sinfo_assoc_id = sctp_assoc2id(asoc); sinfo = &default_sinfo; } else if (fill_sinfo_ttl) { /* In case SNDINFO was specified, we still need to fill * it with a default ttl from the assoc here. 
*/ sinfo->sinfo_timetolive = asoc->default_timetolive; } /* API 7.1.7, the sndbuf size per association bounds the * maximum size of data that can be sent in a single send call. */ if (msg_len > sk->sk_sndbuf) { err = -EMSGSIZE; goto out_free; } if (asoc->pmtu_pending) sctp_assoc_pending_pmtu(sk, asoc); /* If fragmentation is disabled and the message length exceeds the * association fragmentation point, return EMSGSIZE. The I-D * does not specify what this error is, but this looks like * a great fit. */ if (sctp_sk(sk)->disable_fragments && (msg_len > asoc->frag_point)) { err = -EMSGSIZE; goto out_free; } /* Check for invalid stream. */ if (sinfo->sinfo_stream >= asoc->c.sinit_num_ostreams) { err = -EINVAL; goto out_free; } timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); if (!sctp_wspace(asoc)) { err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len); if (err) goto out_free; } /* If an address is passed with the sendto/sendmsg call, it is used * to override the primary destination address in the TCP model, or * when SCTP_ADDR_OVER flag is set in the UDP model. */ if ((sctp_style(sk, TCP) && msg_name) || (sinfo_flags & SCTP_ADDR_OVER)) { chunk_tp = sctp_assoc_lookup_paddr(asoc, &to); if (!chunk_tp) { err = -EINVAL; goto out_free; } } else chunk_tp = NULL; /* Auto-connect, if we aren't connected already. */ if (sctp_state(asoc, CLOSED)) { err = sctp_primitive_ASSOCIATE(net, asoc, NULL); if (err < 0) goto out_free; wait_connect = true; pr_debug("%s: we associated primitively\n", __func__); } /* Break the message into multiple chunks of maximum size. */ datamsg = sctp_datamsg_from_user(asoc, sinfo, &msg->msg_iter); if (IS_ERR(datamsg)) { err = PTR_ERR(datamsg); goto out_free; } /* Now send the (possibly) fragmented message. */ list_for_each_entry(chunk, &datamsg->chunks, frag_list) { sctp_chunk_hold(chunk); /* Do accounting for the write space. */ sctp_set_owner_w(chunk); chunk->transport = chunk_tp; } /* Send it to the lower layers. Note: all chunks * must either fail or succeed. The lower layer * works that way today. Keep it that way or this * breaks. */ err = sctp_primitive_SEND(net, asoc, datamsg); /* Did the lower layer accept the chunk? */ if (err) { sctp_datamsg_free(datamsg); goto out_free; } pr_debug("%s: we sent primitively\n", __func__); sctp_datamsg_put(datamsg); err = msg_len; if (unlikely(wait_connect)) { timeo = sock_sndtimeo(sk, msg_flags & MSG_DONTWAIT); sctp_wait_for_connect(asoc, &timeo); } /* If we are already past ASSOCIATE, the lower * layers are responsible for association cleanup. */ goto out_unlock; out_free: if (new_asoc) { sctp_unhash_established(asoc); sctp_association_free(asoc); } out_unlock: release_sock(sk); out_nounlock: return sctp_error(sk, msg_flags, err); #if 0 do_sock_err: if (msg_len) err = msg_len; else err = sock_error(sk); goto out; do_interrupted: if (msg_len) err = msg_len; goto out; #endif /* 0 */ } /* This is an extended version of skb_pull() that removes the data from the * start of a skb even when data is spread across the list of skb's in the * frag_list. len specifies the total amount of data that needs to be removed. * when 'len' bytes could be removed from the skb, it returns 0. * If 'len' exceeds the total skb length, it returns the no. of bytes that * could not be removed. 
*/ static int sctp_skb_pull(struct sk_buff *skb, int len) { struct sk_buff *list; int skb_len = skb_headlen(skb); int rlen; if (len <= skb_len) { __skb_pull(skb, len); return 0; } len -= skb_len; __skb_pull(skb, skb_len); skb_walk_frags(skb, list) { rlen = sctp_skb_pull(list, len); skb->len -= (len-rlen); skb->data_len -= (len-rlen); if (!rlen) return 0; len = rlen; } return len; } /* API 3.1.3 recvmsg() - UDP Style Syntax * * ssize_t recvmsg(int socket, struct msghdr *message, * int flags); * * socket - the socket descriptor of the endpoint. * message - pointer to the msghdr structure which contains a single * user message and possibly some ancillary data. * * See Section 5 for complete description of the data * structures. * * flags - flags sent or received with the user message, see Section * 5 for complete description of the flags. */ static int sctp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock, int flags, int *addr_len) { struct sctp_ulpevent *event = NULL; struct sctp_sock *sp = sctp_sk(sk); struct sk_buff *skb; int copied; int err = 0; int skb_len; pr_debug("%s: sk:%p, msghdr:%p, len:%zd, noblock:%d, flags:0x%x, " "addr_len:%p)\n", __func__, sk, msg, len, noblock, flags, addr_len); lock_sock(sk); if (sctp_style(sk, TCP) && !sctp_sstate(sk, ESTABLISHED)) { err = -ENOTCONN; goto out; } skb = sctp_skb_recv_datagram(sk, flags, noblock, &err); if (!skb) goto out; /* Get the total length of the skb including any skb's in the * frag_list. */ skb_len = skb->len; copied = skb_len; if (copied > len) copied = len; err = skb_copy_datagram_msg(skb, 0, msg, copied); event = sctp_skb2event(skb); if (err) goto out_free; sock_recv_ts_and_drops(msg, sk, skb); if (sctp_ulpevent_is_notification(event)) { msg->msg_flags |= MSG_NOTIFICATION; sp->pf->event_msgname(event, msg->msg_name, addr_len); } else { sp->pf->skb_msgname(skb, msg->msg_name, addr_len); } /* Check if we allow SCTP_NXTINFO. */ if (sp->recvnxtinfo) sctp_ulpevent_read_nxtinfo(event, msg, sk); /* Check if we allow SCTP_RCVINFO. */ if (sp->recvrcvinfo) sctp_ulpevent_read_rcvinfo(event, msg); /* Check if we allow SCTP_SNDRCVINFO. */ if (sp->subscribe.sctp_data_io_event) sctp_ulpevent_read_sndrcvinfo(event, msg); #if 0 /* FIXME: we should be calling IP/IPv6 layers. */ if (sk->sk_protinfo.af_inet.cmsg_flags) ip_cmsg_recv(msg, skb); #endif err = copied; /* If skb's length exceeds the user's buffer, update the skb and * push it back to the receive_queue so that the next call to * recvmsg() will return the remaining data. Don't set MSG_EOR. */ if (skb_len > copied) { msg->msg_flags &= ~MSG_EOR; if (flags & MSG_PEEK) goto out_free; sctp_skb_pull(skb, copied); skb_queue_head(&sk->sk_receive_queue, skb); /* When only partial message is copied to the user, increase * rwnd by that amount. If all the data in the skb is read, * rwnd is updated when the event is freed. */ if (!sctp_ulpevent_is_notification(event)) sctp_assoc_rwnd_increase(event->asoc, copied); goto out; } else if ((event->msg_flags & MSG_NOTIFICATION) || (event->msg_flags & MSG_EOR)) msg->msg_flags |= MSG_EOR; else msg->msg_flags &= ~MSG_EOR; out_free: if (flags & MSG_PEEK) { /* Release the skb reference acquired after peeking the skb in * sctp_skb_recv_datagram(). */ kfree_skb(skb); } else { /* Free the event which includes releasing the reference to * the owner of the skb, freeing the skb and updating the * rwnd. 
*/ sctp_ulpevent_free(event); } out: release_sock(sk); return err; } /* 7.1.12 Enable/Disable message fragmentation (SCTP_DISABLE_FRAGMENTS) * * This option is a on/off flag. If enabled no SCTP message * fragmentation will be performed. Instead if a message being sent * exceeds the current PMTU size, the message will NOT be sent and * instead a error will be indicated to the user. */ static int sctp_setsockopt_disable_fragments(struct sock *sk, char __user *optval, unsigned int optlen) { int val; if (optlen < sizeof(int)) return -EINVAL; if (get_user(val, (int __user *)optval)) return -EFAULT; sctp_sk(sk)->disable_fragments = (val == 0) ? 0 : 1; return 0; } static int sctp_setsockopt_events(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_association *asoc; struct sctp_ulpevent *event; if (optlen > sizeof(struct sctp_event_subscribe)) return -EINVAL; if (copy_from_user(&sctp_sk(sk)->subscribe, optval, optlen)) return -EFAULT; if (sctp_sk(sk)->subscribe.sctp_data_io_event) pr_warn_ratelimited(DEPRECATED "%s (pid %d) " "Requested SCTP_SNDRCVINFO event.\n" "Use SCTP_RCVINFO through SCTP_RECVRCVINFO option instead.\n", current->comm, task_pid_nr(current)); /* At the time when a user app subscribes to SCTP_SENDER_DRY_EVENT, * if there is no data to be sent or retransmit, the stack will * immediately send up this notification. */ if (sctp_ulpevent_type_enabled(SCTP_SENDER_DRY_EVENT, &sctp_sk(sk)->subscribe)) { asoc = sctp_id2assoc(sk, 0); if (asoc && sctp_outq_is_empty(&asoc->outqueue)) { event = sctp_ulpevent_make_sender_dry_event(asoc, GFP_ATOMIC); if (!event) return -ENOMEM; sctp_ulpq_tail_event(&asoc->ulpq, event); } } return 0; } /* 7.1.8 Automatic Close of associations (SCTP_AUTOCLOSE) * * This socket option is applicable to the UDP-style socket only. When * set it will cause associations that are idle for more than the * specified number of seconds to automatically close. An association * being idle is defined an association that has NOT sent or received * user data. The special value of '0' indicates that no automatic * close of any associations should be performed. The option expects an * integer defining the number of seconds of idle time before an * association is closed. */ static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_sock *sp = sctp_sk(sk); struct net *net = sock_net(sk); /* Applicable to UDP-style socket only */ if (sctp_style(sk, TCP)) return -EOPNOTSUPP; if (optlen != sizeof(int)) return -EINVAL; if (copy_from_user(&sp->autoclose, optval, optlen)) return -EFAULT; if (sp->autoclose > net->sctp.max_autoclose) sp->autoclose = net->sctp.max_autoclose; return 0; } /* 7.1.13 Peer Address Parameters (SCTP_PEER_ADDR_PARAMS) * * Applications can enable or disable heartbeats for any peer address of * an association, modify an address's heartbeat interval, force a * heartbeat to be sent immediately, and adjust the address's maximum * number of retransmissions sent before an address is considered * unreachable. The following structure is used to access and modify an * address's parameters: * * struct sctp_paddrparams { * sctp_assoc_t spp_assoc_id; * struct sockaddr_storage spp_address; * uint32_t spp_hbinterval; * uint16_t spp_pathmaxrxt; * uint32_t spp_pathmtu; * uint32_t spp_sackdelay; * uint32_t spp_flags; * }; * * spp_assoc_id - (one-to-many style socket) This is filled in the * application, and identifies the association for * this query. * spp_address - This specifies which address is of interest. 
* spp_hbinterval - This contains the value of the heartbeat interval, * in milliseconds. If a value of zero * is present in this field then no changes are to * be made to this parameter. * spp_pathmaxrxt - This contains the maximum number of * retransmissions before this address shall be * considered unreachable. If a value of zero * is present in this field then no changes are to * be made to this parameter. * spp_pathmtu - When Path MTU discovery is disabled the value * specified here will be the "fixed" path mtu. * Note that if the spp_address field is empty * then all associations on this address will * have this fixed path mtu set upon them. * * spp_sackdelay - When delayed sack is enabled, this value specifies * the number of milliseconds that sacks will be delayed * for. This value will apply to all addresses of an * association if the spp_address field is empty. Note * also, that if delayed sack is enabled and this * value is set to 0, no change is made to the last * recorded delayed sack timer value. * * spp_flags - These flags are used to control various features * on an association. The flag field may contain * zero or more of the following options. * * SPP_HB_ENABLE - Enable heartbeats on the * specified address. Note that if the address * field is empty all addresses for the association * have heartbeats enabled upon them. * * SPP_HB_DISABLE - Disable heartbeats on the * speicifed address. Note that if the address * field is empty all addresses for the association * will have their heartbeats disabled. Note also * that SPP_HB_ENABLE and SPP_HB_DISABLE are * mutually exclusive, only one of these two should * be specified. Enabling both fields will have * undetermined results. * * SPP_HB_DEMAND - Request a user initiated heartbeat * to be made immediately. * * SPP_HB_TIME_IS_ZERO - Specify's that the time for * heartbeat delayis to be set to the value of 0 * milliseconds. * * SPP_PMTUD_ENABLE - This field will enable PMTU * discovery upon the specified address. Note that * if the address feild is empty then all addresses * on the association are effected. * * SPP_PMTUD_DISABLE - This field will disable PMTU * discovery upon the specified address. Note that * if the address feild is empty then all addresses * on the association are effected. Not also that * SPP_PMTUD_ENABLE and SPP_PMTUD_DISABLE are mutually * exclusive. Enabling both will have undetermined * results. * * SPP_SACKDELAY_ENABLE - Setting this flag turns * on delayed sack. The time specified in spp_sackdelay * is used to specify the sack delay for this address. Note * that if spp_address is empty then all addresses will * enable delayed sack and take on the sack delay * value specified in spp_sackdelay. * SPP_SACKDELAY_DISABLE - Setting this flag turns * off delayed sack. If the spp_address field is blank then * delayed sack is disabled for the entire association. Note * also that this field is mutually exclusive to * SPP_SACKDELAY_ENABLE, setting both will have undefined * results. */ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params, struct sctp_transport *trans, struct sctp_association *asoc, struct sctp_sock *sp, int hb_change, int pmtud_change, int sackdelay_change) { int error; if (params->spp_flags & SPP_HB_DEMAND && trans) { struct net *net = sock_net(trans->asoc->base.sk); error = sctp_primitive_REQUESTHEARTBEAT(net, trans->asoc, trans); if (error) return error; } /* Note that unless the spp_flag is set to SPP_HB_ENABLE the value of * this field is ignored. 
Note also that a value of zero indicates * the current setting should be left unchanged. */ if (params->spp_flags & SPP_HB_ENABLE) { /* Re-zero the interval if the SPP_HB_TIME_IS_ZERO is * set. This lets us use 0 value when this flag * is set. */ if (params->spp_flags & SPP_HB_TIME_IS_ZERO) params->spp_hbinterval = 0; if (params->spp_hbinterval || (params->spp_flags & SPP_HB_TIME_IS_ZERO)) { if (trans) { trans->hbinterval = msecs_to_jiffies(params->spp_hbinterval); } else if (asoc) { asoc->hbinterval = msecs_to_jiffies(params->spp_hbinterval); } else { sp->hbinterval = params->spp_hbinterval; } } } if (hb_change) { if (trans) { trans->param_flags = (trans->param_flags & ~SPP_HB) | hb_change; } else if (asoc) { asoc->param_flags = (asoc->param_flags & ~SPP_HB) | hb_change; } else { sp->param_flags = (sp->param_flags & ~SPP_HB) | hb_change; } } /* When Path MTU discovery is disabled the value specified here will * be the "fixed" path mtu (i.e. the value of the spp_flags field must * include the flag SPP_PMTUD_DISABLE for this field to have any * effect). */ if ((params->spp_flags & SPP_PMTUD_DISABLE) && params->spp_pathmtu) { if (trans) { trans->pathmtu = params->spp_pathmtu; sctp_assoc_sync_pmtu(sctp_opt2sk(sp), asoc); } else if (asoc) { asoc->pathmtu = params->spp_pathmtu; sctp_frag_point(asoc, params->spp_pathmtu); } else { sp->pathmtu = params->spp_pathmtu; } } if (pmtud_change) { if (trans) { int update = (trans->param_flags & SPP_PMTUD_DISABLE) && (params->spp_flags & SPP_PMTUD_ENABLE); trans->param_flags = (trans->param_flags & ~SPP_PMTUD) | pmtud_change; if (update) { sctp_transport_pmtu(trans, sctp_opt2sk(sp)); sctp_assoc_sync_pmtu(sctp_opt2sk(sp), asoc); } } else if (asoc) { asoc->param_flags = (asoc->param_flags & ~SPP_PMTUD) | pmtud_change; } else { sp->param_flags = (sp->param_flags & ~SPP_PMTUD) | pmtud_change; } } /* Note that unless the spp_flag is set to SPP_SACKDELAY_ENABLE the * value of this field is ignored. Note also that a value of zero * indicates the current setting should be left unchanged. */ if ((params->spp_flags & SPP_SACKDELAY_ENABLE) && params->spp_sackdelay) { if (trans) { trans->sackdelay = msecs_to_jiffies(params->spp_sackdelay); } else if (asoc) { asoc->sackdelay = msecs_to_jiffies(params->spp_sackdelay); } else { sp->sackdelay = params->spp_sackdelay; } } if (sackdelay_change) { if (trans) { trans->param_flags = (trans->param_flags & ~SPP_SACKDELAY) | sackdelay_change; } else if (asoc) { asoc->param_flags = (asoc->param_flags & ~SPP_SACKDELAY) | sackdelay_change; } else { sp->param_flags = (sp->param_flags & ~SPP_SACKDELAY) | sackdelay_change; } } /* Note that a value of zero indicates the current setting should be left unchanged. */ if (params->spp_pathmaxrxt) { if (trans) { trans->pathmaxrxt = params->spp_pathmaxrxt; } else if (asoc) { asoc->pathmaxrxt = params->spp_pathmaxrxt; } else { sp->pathmaxrxt = params->spp_pathmaxrxt; } } return 0; } static int sctp_setsockopt_peer_addr_params(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_paddrparams params; struct sctp_transport *trans = NULL; struct sctp_association *asoc = NULL; struct sctp_sock *sp = sctp_sk(sk); int error; int hb_change, pmtud_change, sackdelay_change; if (optlen != sizeof(struct sctp_paddrparams)) return -EINVAL; if (copy_from_user(&params, optval, optlen)) return -EFAULT; /* Validate flags and value parameters. 
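 *
 * Illustrative user-space sketch of a request that passes these checks
 * (one flag from each mutually exclusive pair, spp_sackdelay within
 * 500ms, spp_pathmtu left at 0; 'fd' and 'assoc_id' are assumed):
 *
 *	struct sctp_paddrparams pp = { 0 };
 *
 *	pp.spp_assoc_id = assoc_id;
 *	pp.spp_hbinterval = 5000;
 *	pp.spp_pathmaxrxt = 5;
 *	pp.spp_flags = SPP_HB_ENABLE | SPP_PMTUD_ENABLE;
 *	setsockopt(fd, IPPROTO_SCTP, SCTP_PEER_ADDR_PARAMS, &pp, sizeof(pp));
 *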
*/ hb_change = params.spp_flags & SPP_HB; pmtud_change = params.spp_flags & SPP_PMTUD; sackdelay_change = params.spp_flags & SPP_SACKDELAY; if (hb_change == SPP_HB || pmtud_change == SPP_PMTUD || sackdelay_change == SPP_SACKDELAY || params.spp_sackdelay > 500 || (params.spp_pathmtu && params.spp_pathmtu < SCTP_DEFAULT_MINSEGMENT)) return -EINVAL; /* If an address other than INADDR_ANY is specified, and * no transport is found, then the request is invalid. */ if (!sctp_is_any(sk, (union sctp_addr *)&params.spp_address)) { trans = sctp_addr_id2transport(sk, &params.spp_address, params.spp_assoc_id); if (!trans) return -EINVAL; } /* Get association, if assoc_id != 0 and the socket is a one * to many style socket, and an association was not found, then * the id was invalid. */ asoc = sctp_id2assoc(sk, params.spp_assoc_id); if (!asoc && params.spp_assoc_id && sctp_style(sk, UDP)) return -EINVAL; /* Heartbeat demand can only be sent on a transport or * association, but not a socket. */ if (params.spp_flags & SPP_HB_DEMAND && !trans && !asoc) return -EINVAL; /* Process parameters. */ error = sctp_apply_peer_addr_params(&params, trans, asoc, sp, hb_change, pmtud_change, sackdelay_change); if (error) return error; /* If changes are for association, also apply parameters to each * transport. */ if (!trans && asoc) { list_for_each_entry(trans, &asoc->peer.transport_addr_list, transports) { sctp_apply_peer_addr_params(&params, trans, asoc, sp, hb_change, pmtud_change, sackdelay_change); } } return 0; } static inline __u32 sctp_spp_sackdelay_enable(__u32 param_flags) { return (param_flags & ~SPP_SACKDELAY) | SPP_SACKDELAY_ENABLE; } static inline __u32 sctp_spp_sackdelay_disable(__u32 param_flags) { return (param_flags & ~SPP_SACKDELAY) | SPP_SACKDELAY_DISABLE; } /* * 7.1.23. Get or set delayed ack timer (SCTP_DELAYED_SACK) * * This option will effect the way delayed acks are performed. This * option allows you to get or set the delayed ack time, in * milliseconds. It also allows changing the delayed ack frequency. * Changing the frequency to 1 disables the delayed sack algorithm. If * the assoc_id is 0, then this sets or gets the endpoints default * values. If the assoc_id field is non-zero, then the set or get * effects the specified association for the one to many model (the * assoc_id field is ignored by the one to one model). Note that if * sack_delay or sack_freq are 0 when setting this option, then the * current values will remain unchanged. * * struct sctp_sack_info { * sctp_assoc_t sack_assoc_id; * uint32_t sack_delay; * uint32_t sack_freq; * }; * * sack_assoc_id - This parameter, indicates which association the user * is performing an action upon. Note that if this field's value is * zero then the endpoints default value is changed (effecting future * associations only). * * sack_delay - This parameter contains the number of milliseconds that * the user is requesting the delayed ACK timer be set to. Note that * this value is defined in the standard to be between 200 and 500 * milliseconds. * * sack_freq - This parameter contains the number of packets that must * be received before a sack is sent without waiting for the delay * timer to expire. The default value for this is 2, setting this * value to 1 will disable the delayed sack algorithm. 
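 *
 * Illustrative user-space sketch (assumes an SCTP socket 'fd'): set a
 * 200ms delayed ack and keep the default frequency of two packets for
 * the endpoint as a whole (sack_assoc_id of 0):
 *
 *	struct sctp_sack_info si = { 0 };
 *
 *	si.sack_assoc_id = 0;
 *	si.sack_delay = 200;
 *	si.sack_freq = 2;
 *	setsockopt(fd, IPPROTO_SCTP, SCTP_DELAYED_SACK, &si, sizeof(si));
 *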
*/ static int sctp_setsockopt_delayed_ack(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_sack_info params; struct sctp_transport *trans = NULL; struct sctp_association *asoc = NULL; struct sctp_sock *sp = sctp_sk(sk); if (optlen == sizeof(struct sctp_sack_info)) { if (copy_from_user(&params, optval, optlen)) return -EFAULT; if (params.sack_delay == 0 && params.sack_freq == 0) return 0; } else if (optlen == sizeof(struct sctp_assoc_value)) { pr_warn_ratelimited(DEPRECATED "%s (pid %d) " "Use of struct sctp_assoc_value in delayed_ack socket option.\n" "Use struct sctp_sack_info instead\n", current->comm, task_pid_nr(current)); if (copy_from_user(&params, optval, optlen)) return -EFAULT; if (params.sack_delay == 0) params.sack_freq = 1; else params.sack_freq = 0; } else return -EINVAL; /* Validate value parameter. */ if (params.sack_delay > 500) return -EINVAL; /* Get association, if sack_assoc_id != 0 and the socket is a one * to many style socket, and an association was not found, then * the id was invalid. */ asoc = sctp_id2assoc(sk, params.sack_assoc_id); if (!asoc && params.sack_assoc_id && sctp_style(sk, UDP)) return -EINVAL; if (params.sack_delay) { if (asoc) { asoc->sackdelay = msecs_to_jiffies(params.sack_delay); asoc->param_flags = sctp_spp_sackdelay_enable(asoc->param_flags); } else { sp->sackdelay = params.sack_delay; sp->param_flags = sctp_spp_sackdelay_enable(sp->param_flags); } } if (params.sack_freq == 1) { if (asoc) { asoc->param_flags = sctp_spp_sackdelay_disable(asoc->param_flags); } else { sp->param_flags = sctp_spp_sackdelay_disable(sp->param_flags); } } else if (params.sack_freq > 1) { if (asoc) { asoc->sackfreq = params.sack_freq; asoc->param_flags = sctp_spp_sackdelay_enable(asoc->param_flags); } else { sp->sackfreq = params.sack_freq; sp->param_flags = sctp_spp_sackdelay_enable(sp->param_flags); } } /* If change is for association, also apply to each transport. */ if (asoc) { list_for_each_entry(trans, &asoc->peer.transport_addr_list, transports) { if (params.sack_delay) { trans->sackdelay = msecs_to_jiffies(params.sack_delay); trans->param_flags = sctp_spp_sackdelay_enable(trans->param_flags); } if (params.sack_freq == 1) { trans->param_flags = sctp_spp_sackdelay_disable(trans->param_flags); } else if (params.sack_freq > 1) { trans->sackfreq = params.sack_freq; trans->param_flags = sctp_spp_sackdelay_enable(trans->param_flags); } } } return 0; } /* 7.1.3 Initialization Parameters (SCTP_INITMSG) * * Applications can specify protocol parameters for the default association * initialization. The option name argument to setsockopt() and getsockopt() * is SCTP_INITMSG. * * Setting initialization parameters is effective only on an unconnected * socket (for UDP-style sockets only future associations are effected * by the change). With TCP-style sockets, this option is inherited by * sockets derived from a listener socket. 
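 *
 * Illustrative user-space sketch (assumes an unconnected SCTP socket
 * 'fd'): request ten outbound streams and allow ten inbound streams for
 * future associations:
 *
 *	struct sctp_initmsg im = { 0 };
 *
 *	im.sinit_num_ostreams = 10;
 *	im.sinit_max_instreams = 10;
 *	im.sinit_max_attempts = 4;
 *	setsockopt(fd, IPPROTO_SCTP, SCTP_INITMSG, &im, sizeof(im));
 *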
*/ static int sctp_setsockopt_initmsg(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_initmsg sinit; struct sctp_sock *sp = sctp_sk(sk); if (optlen != sizeof(struct sctp_initmsg)) return -EINVAL; if (copy_from_user(&sinit, optval, optlen)) return -EFAULT; if (sinit.sinit_num_ostreams) sp->initmsg.sinit_num_ostreams = sinit.sinit_num_ostreams; if (sinit.sinit_max_instreams) sp->initmsg.sinit_max_instreams = sinit.sinit_max_instreams; if (sinit.sinit_max_attempts) sp->initmsg.sinit_max_attempts = sinit.sinit_max_attempts; if (sinit.sinit_max_init_timeo) sp->initmsg.sinit_max_init_timeo = sinit.sinit_max_init_timeo; return 0; } /* * 7.1.14 Set default send parameters (SCTP_DEFAULT_SEND_PARAM) * * Applications that wish to use the sendto() system call may wish to * specify a default set of parameters that would normally be supplied * through the inclusion of ancillary data. This socket option allows * such an application to set the default sctp_sndrcvinfo structure. * The application that wishes to use this socket option simply passes * in to this call the sctp_sndrcvinfo structure defined in Section * 5.2.2) The input parameters accepted by this call include * sinfo_stream, sinfo_flags, sinfo_ppid, sinfo_context, * sinfo_timetolive. The user must provide the sinfo_assoc_id field in * to this call if the caller is using the UDP model. */ static int sctp_setsockopt_default_send_param(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_sock *sp = sctp_sk(sk); struct sctp_association *asoc; struct sctp_sndrcvinfo info; if (optlen != sizeof(info)) return -EINVAL; if (copy_from_user(&info, optval, optlen)) return -EFAULT; if (info.sinfo_flags & ~(SCTP_UNORDERED | SCTP_ADDR_OVER | SCTP_ABORT | SCTP_EOF)) return -EINVAL; asoc = sctp_id2assoc(sk, info.sinfo_assoc_id); if (!asoc && info.sinfo_assoc_id && sctp_style(sk, UDP)) return -EINVAL; if (asoc) { asoc->default_stream = info.sinfo_stream; asoc->default_flags = info.sinfo_flags; asoc->default_ppid = info.sinfo_ppid; asoc->default_context = info.sinfo_context; asoc->default_timetolive = info.sinfo_timetolive; } else { sp->default_stream = info.sinfo_stream; sp->default_flags = info.sinfo_flags; sp->default_ppid = info.sinfo_ppid; sp->default_context = info.sinfo_context; sp->default_timetolive = info.sinfo_timetolive; } return 0; } /* RFC6458, Section 8.1.31. Set/get Default Send Parameters * (SCTP_DEFAULT_SNDINFO) */ static int sctp_setsockopt_default_sndinfo(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_sock *sp = sctp_sk(sk); struct sctp_association *asoc; struct sctp_sndinfo info; if (optlen != sizeof(info)) return -EINVAL; if (copy_from_user(&info, optval, optlen)) return -EFAULT; if (info.snd_flags & ~(SCTP_UNORDERED | SCTP_ADDR_OVER | SCTP_ABORT | SCTP_EOF)) return -EINVAL; asoc = sctp_id2assoc(sk, info.snd_assoc_id); if (!asoc && info.snd_assoc_id && sctp_style(sk, UDP)) return -EINVAL; if (asoc) { asoc->default_stream = info.snd_sid; asoc->default_flags = info.snd_flags; asoc->default_ppid = info.snd_ppid; asoc->default_context = info.snd_context; } else { sp->default_stream = info.snd_sid; sp->default_flags = info.snd_flags; sp->default_ppid = info.snd_ppid; sp->default_context = info.snd_context; } return 0; } /* 7.1.10 Set Primary Address (SCTP_PRIMARY_ADDR) * * Requests that the local SCTP stack use the enclosed peer address as * the association primary. The enclosed address must be one of the * association peer's addresses. 
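 *
 * Illustrative user-space sketch ('fd', 'assoc_id' and 'peer', one of the
 * peer's addresses as a struct sockaddr_in, are assumed):
 *
 *	struct sctp_prim prim = { 0 };
 *
 *	prim.ssp_assoc_id = assoc_id;
 *	memcpy(&prim.ssp_addr, &peer, sizeof(peer));
 *	setsockopt(fd, IPPROTO_SCTP, SCTP_PRIMARY_ADDR, &prim, sizeof(prim));
 *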
*/ static int sctp_setsockopt_primary_addr(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_prim prim; struct sctp_transport *trans; if (optlen != sizeof(struct sctp_prim)) return -EINVAL; if (copy_from_user(&prim, optval, sizeof(struct sctp_prim))) return -EFAULT; trans = sctp_addr_id2transport(sk, &prim.ssp_addr, prim.ssp_assoc_id); if (!trans) return -EINVAL; sctp_assoc_set_primary(trans->asoc, trans); return 0; } /* * 7.1.5 SCTP_NODELAY * * Turn on/off any Nagle-like algorithm. This means that packets are * generally sent as soon as possible and no unnecessary delays are * introduced, at the cost of more packets in the network. Expects an * integer boolean flag. */ static int sctp_setsockopt_nodelay(struct sock *sk, char __user *optval, unsigned int optlen) { int val; if (optlen < sizeof(int)) return -EINVAL; if (get_user(val, (int __user *)optval)) return -EFAULT; sctp_sk(sk)->nodelay = (val == 0) ? 0 : 1; return 0; } /* * * 7.1.1 SCTP_RTOINFO * * The protocol parameters used to initialize and bound retransmission * timeout (RTO) are tunable. sctp_rtoinfo structure is used to access * and modify these parameters. * All parameters are time values, in milliseconds. A value of 0, when * modifying the parameters, indicates that the current value should not * be changed. * */ static int sctp_setsockopt_rtoinfo(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_rtoinfo rtoinfo; struct sctp_association *asoc; unsigned long rto_min, rto_max; struct sctp_sock *sp = sctp_sk(sk); if (optlen != sizeof (struct sctp_rtoinfo)) return -EINVAL; if (copy_from_user(&rtoinfo, optval, optlen)) return -EFAULT; asoc = sctp_id2assoc(sk, rtoinfo.srto_assoc_id); /* Set the values to the specific association */ if (!asoc && rtoinfo.srto_assoc_id && sctp_style(sk, UDP)) return -EINVAL; rto_max = rtoinfo.srto_max; rto_min = rtoinfo.srto_min; if (rto_max) rto_max = asoc ? msecs_to_jiffies(rto_max) : rto_max; else rto_max = asoc ? asoc->rto_max : sp->rtoinfo.srto_max; if (rto_min) rto_min = asoc ? msecs_to_jiffies(rto_min) : rto_min; else rto_min = asoc ? asoc->rto_min : sp->rtoinfo.srto_min; if (rto_min > rto_max) return -EINVAL; if (asoc) { if (rtoinfo.srto_initial != 0) asoc->rto_initial = msecs_to_jiffies(rtoinfo.srto_initial); asoc->rto_max = rto_max; asoc->rto_min = rto_min; } else { /* If there is no association or the association-id = 0 * set the values to the endpoint. */ if (rtoinfo.srto_initial != 0) sp->rtoinfo.srto_initial = rtoinfo.srto_initial; sp->rtoinfo.srto_max = rto_max; sp->rtoinfo.srto_min = rto_min; } return 0; } /* * * 7.1.2 SCTP_ASSOCINFO * * This option is used to tune the maximum retransmission attempts * of the association. * Returns an error if the new association retransmission value is * greater than the sum of the retransmission value of the peer. * See [SCTP] for more information. 
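 *
 * Illustrative user-space sketch ('fd' and 'assoc_id' are assumed): raise
 * the association retransmission limit and the cookie life, in
 * milliseconds, for one association:
 *
 *	struct sctp_assocparams ap = { 0 };
 *
 *	ap.sasoc_assoc_id = assoc_id;
 *	ap.sasoc_asocmaxrxt = 8;
 *	ap.sasoc_cookie_life = 90000;
 *	setsockopt(fd, IPPROTO_SCTP, SCTP_ASSOCINFO, &ap, sizeof(ap));
 *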
* */ static int sctp_setsockopt_associnfo(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_assocparams assocparams; struct sctp_association *asoc; if (optlen != sizeof(struct sctp_assocparams)) return -EINVAL; if (copy_from_user(&assocparams, optval, optlen)) return -EFAULT; asoc = sctp_id2assoc(sk, assocparams.sasoc_assoc_id); if (!asoc && assocparams.sasoc_assoc_id && sctp_style(sk, UDP)) return -EINVAL; /* Set the values to the specific association */ if (asoc) { if (assocparams.sasoc_asocmaxrxt != 0) { __u32 path_sum = 0; int paths = 0; struct sctp_transport *peer_addr; list_for_each_entry(peer_addr, &asoc->peer.transport_addr_list, transports) { path_sum += peer_addr->pathmaxrxt; paths++; } /* Only validate asocmaxrxt if we have more than * one path/transport. We do this because path * retransmissions are only counted when we have more * then one path. */ if (paths > 1 && assocparams.sasoc_asocmaxrxt > path_sum) return -EINVAL; asoc->max_retrans = assocparams.sasoc_asocmaxrxt; } if (assocparams.sasoc_cookie_life != 0) asoc->cookie_life = ms_to_ktime(assocparams.sasoc_cookie_life); } else { /* Set the values to the endpoint */ struct sctp_sock *sp = sctp_sk(sk); if (assocparams.sasoc_asocmaxrxt != 0) sp->assocparams.sasoc_asocmaxrxt = assocparams.sasoc_asocmaxrxt; if (assocparams.sasoc_cookie_life != 0) sp->assocparams.sasoc_cookie_life = assocparams.sasoc_cookie_life; } return 0; } /* * 7.1.16 Set/clear IPv4 mapped addresses (SCTP_I_WANT_MAPPED_V4_ADDR) * * This socket option is a boolean flag which turns on or off mapped V4 * addresses. If this option is turned on and the socket is type * PF_INET6, then IPv4 addresses will be mapped to V6 representation. * If this option is turned off, then no mapping will be done of V4 * addresses and a user will receive both PF_INET6 and PF_INET type * addresses on the socket. */ static int sctp_setsockopt_mappedv4(struct sock *sk, char __user *optval, unsigned int optlen) { int val; struct sctp_sock *sp = sctp_sk(sk); if (optlen < sizeof(int)) return -EINVAL; if (get_user(val, (int __user *)optval)) return -EFAULT; if (val) sp->v4mapped = 1; else sp->v4mapped = 0; return 0; } /* * 8.1.16. Get or Set the Maximum Fragmentation Size (SCTP_MAXSEG) * This option will get or set the maximum size to put in any outgoing * SCTP DATA chunk. If a message is larger than this size it will be * fragmented by SCTP into the specified size. Note that the underlying * SCTP implementation may fragment into smaller sized chunks when the * PMTU of the underlying association is smaller than the value set by * the user. The default value for this option is '0' which indicates * the user is NOT limiting fragmentation and only the PMTU will effect * SCTP's choice of DATA chunk size. Note also that values set larger * than the maximum size of an IP datagram will effectively let SCTP * control fragmentation (i.e. the same as setting this option to 0). * * The following structure is used to access and modify this parameter: * * struct sctp_assoc_value { * sctp_assoc_t assoc_id; * uint32_t assoc_value; * }; * * assoc_id: This parameter is ignored for one-to-one style sockets. * For one-to-many style sockets this parameter indicates which * association the user is performing an action upon. Note that if * this field's value is zero then the endpoints default value is * changed (effecting future associations only). * assoc_value: This parameter specifies the maximum size in bytes. 
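 *
 * Illustrative user-space sketch ('fd' and 'assoc_id' are assumed): cap
 * outgoing DATA chunks at 1200 bytes; a value of 0 would fall back to
 * letting the PMTU dictate the fragmentation point:
 *
 *	struct sctp_assoc_value av = { 0 };
 *
 *	av.assoc_id = assoc_id;
 *	av.assoc_value = 1200;
 *	setsockopt(fd, IPPROTO_SCTP, SCTP_MAXSEG, &av, sizeof(av));
 *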
*/ static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_assoc_value params; struct sctp_association *asoc; struct sctp_sock *sp = sctp_sk(sk); int val; if (optlen == sizeof(int)) { pr_warn_ratelimited(DEPRECATED "%s (pid %d) " "Use of int in maxseg socket option.\n" "Use struct sctp_assoc_value instead\n", current->comm, task_pid_nr(current)); if (copy_from_user(&val, optval, optlen)) return -EFAULT; params.assoc_id = 0; } else if (optlen == sizeof(struct sctp_assoc_value)) { if (copy_from_user(&params, optval, optlen)) return -EFAULT; val = params.assoc_value; } else return -EINVAL; if ((val != 0) && ((val < 8) || (val > SCTP_MAX_CHUNK_LEN))) return -EINVAL; asoc = sctp_id2assoc(sk, params.assoc_id); if (!asoc && params.assoc_id && sctp_style(sk, UDP)) return -EINVAL; if (asoc) { if (val == 0) { val = asoc->pathmtu; val -= sp->pf->af->net_header_len; val -= sizeof(struct sctphdr) + sizeof(struct sctp_data_chunk); } asoc->user_frag = val; asoc->frag_point = sctp_frag_point(asoc, asoc->pathmtu); } else { sp->user_frag = val; } return 0; } /* * 7.1.9 Set Peer Primary Address (SCTP_SET_PEER_PRIMARY_ADDR) * * Requests that the peer mark the enclosed address as the association * primary. The enclosed address must be one of the association's * locally bound addresses. The following structure is used to make a * set primary request: */ static int sctp_setsockopt_peer_primary_addr(struct sock *sk, char __user *optval, unsigned int optlen) { struct net *net = sock_net(sk); struct sctp_sock *sp; struct sctp_association *asoc = NULL; struct sctp_setpeerprim prim; struct sctp_chunk *chunk; struct sctp_af *af; int err; sp = sctp_sk(sk); if (!net->sctp.addip_enable) return -EPERM; if (optlen != sizeof(struct sctp_setpeerprim)) return -EINVAL; if (copy_from_user(&prim, optval, optlen)) return -EFAULT; asoc = sctp_id2assoc(sk, prim.sspp_assoc_id); if (!asoc) return -EINVAL; if (!asoc->peer.asconf_capable) return -EPERM; if (asoc->peer.addip_disabled_mask & SCTP_PARAM_SET_PRIMARY) return -EPERM; if (!sctp_state(asoc, ESTABLISHED)) return -ENOTCONN; af = sctp_get_af_specific(prim.sspp_addr.ss_family); if (!af) return -EINVAL; if (!af->addr_valid((union sctp_addr *)&prim.sspp_addr, sp, NULL)) return -EADDRNOTAVAIL; if (!sctp_assoc_lookup_laddr(asoc, (union sctp_addr *)&prim.sspp_addr)) return -EADDRNOTAVAIL; /* Create an ASCONF chunk with SET_PRIMARY parameter */ chunk = sctp_make_asconf_set_prim(asoc, (union sctp_addr *)&prim.sspp_addr); if (!chunk) return -ENOMEM; err = sctp_send_asconf(asoc, chunk); pr_debug("%s: we set peer primary addr primitively\n", __func__); return err; } static int sctp_setsockopt_adaptation_layer(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_setadaptation adaptation; if (optlen != sizeof(struct sctp_setadaptation)) return -EINVAL; if (copy_from_user(&adaptation, optval, optlen)) return -EFAULT; sctp_sk(sk)->adaptation_ind = adaptation.ssb_adaptation_ind; return 0; } /* * 7.1.29. Set or Get the default context (SCTP_CONTEXT) * * The context field in the sctp_sndrcvinfo structure is normally only * used when a failed message is retrieved holding the value that was * sent down on the actual send call. This option allows the setting of * a default context on an association basis that will be received on * reading messages from the peer. 
This is especially helpful in the * one-2-many model for an application to keep some reference to an * internal state machine that is processing messages on the * association. Note that the setting of this value only effects * received messages from the peer and does not effect the value that is * saved with outbound messages. */ static int sctp_setsockopt_context(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_assoc_value params; struct sctp_sock *sp; struct sctp_association *asoc; if (optlen != sizeof(struct sctp_assoc_value)) return -EINVAL; if (copy_from_user(&params, optval, optlen)) return -EFAULT; sp = sctp_sk(sk); if (params.assoc_id != 0) { asoc = sctp_id2assoc(sk, params.assoc_id); if (!asoc) return -EINVAL; asoc->default_rcv_context = params.assoc_value; } else { sp->default_rcv_context = params.assoc_value; } return 0; } /* * 7.1.24. Get or set fragmented interleave (SCTP_FRAGMENT_INTERLEAVE) * * This options will at a minimum specify if the implementation is doing * fragmented interleave. Fragmented interleave, for a one to many * socket, is when subsequent calls to receive a message may return * parts of messages from different associations. Some implementations * may allow you to turn this value on or off. If so, when turned off, * no fragment interleave will occur (which will cause a head of line * blocking amongst multiple associations sharing the same one to many * socket). When this option is turned on, then each receive call may * come from a different association (thus the user must receive data * with the extended calls (e.g. sctp_recvmsg) to keep track of which * association each receive belongs to. * * This option takes a boolean value. A non-zero value indicates that * fragmented interleave is on. A value of zero indicates that * fragmented interleave is off. * * Note that it is important that an implementation that allows this * option to be turned on, have it off by default. Otherwise an unaware * application using the one to many model may become confused and act * incorrectly. */ static int sctp_setsockopt_fragment_interleave(struct sock *sk, char __user *optval, unsigned int optlen) { int val; if (optlen != sizeof(int)) return -EINVAL; if (get_user(val, (int __user *)optval)) return -EFAULT; sctp_sk(sk)->frag_interleave = (val == 0) ? 0 : 1; return 0; } /* * 8.1.21. Set or Get the SCTP Partial Delivery Point * (SCTP_PARTIAL_DELIVERY_POINT) * * This option will set or get the SCTP partial delivery point. This * point is the size of a message where the partial delivery API will be * invoked to help free up rwnd space for the peer. Setting this to a * lower value will cause partial deliveries to happen more often. The * calls argument is an integer that sets or gets the partial delivery * point. Note also that the call will fail if the user attempts to set * this value larger than the socket receive buffer size. * * Note that any single message having a length smaller than or equal to * the SCTP partial delivery point will be delivered in one single read * call as long as the user provided buffer is large enough to hold the * message. */ static int sctp_setsockopt_partial_delivery_point(struct sock *sk, char __user *optval, unsigned int optlen) { u32 val; if (optlen != sizeof(u32)) return -EINVAL; if (get_user(val, (int __user *)optval)) return -EFAULT; /* Note: We double the receive buffer from what the user sets * it to be, also initial rwnd is based on rcvbuf/2. 
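 *
 * Illustrative user-space sketch: because of the check below, the largest
 * accepted value is half of what getsockopt(SO_RCVBUF) reports, so a
 * caller (with an SCTP socket 'fd' assumed) might size the request off
 * the receive buffer:
 *
 *	int rcvbuf = 0;
 *	socklen_t len = sizeof(rcvbuf);
 *	uint32_t pd_point;
 *
 *	getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &rcvbuf, &len);
 *	pd_point = rcvbuf / 4;
 *	setsockopt(fd, IPPROTO_SCTP, SCTP_PARTIAL_DELIVERY_POINT,
 *		   &pd_point, sizeof(pd_point));
 *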
*/ if (val > (sk->sk_rcvbuf >> 1)) return -EINVAL; sctp_sk(sk)->pd_point = val; return 0; /* is this the right error code? */ } /* * 7.1.28. Set or Get the maximum burst (SCTP_MAX_BURST) * * This option will allow a user to change the maximum burst of packets * that can be emitted by this association. Note that the default value * is 4, and some implementations may restrict this setting so that it * can only be lowered. * * NOTE: This text doesn't seem right. Do this on a socket basis with * future associations inheriting the socket value. */ static int sctp_setsockopt_maxburst(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_assoc_value params; struct sctp_sock *sp; struct sctp_association *asoc; int val; int assoc_id = 0; if (optlen == sizeof(int)) { pr_warn_ratelimited(DEPRECATED "%s (pid %d) " "Use of int in max_burst socket option deprecated.\n" "Use struct sctp_assoc_value instead\n", current->comm, task_pid_nr(current)); if (copy_from_user(&val, optval, optlen)) return -EFAULT; } else if (optlen == sizeof(struct sctp_assoc_value)) { if (copy_from_user(&params, optval, optlen)) return -EFAULT; val = params.assoc_value; assoc_id = params.assoc_id; } else return -EINVAL; sp = sctp_sk(sk); if (assoc_id != 0) { asoc = sctp_id2assoc(sk, assoc_id); if (!asoc) return -EINVAL; asoc->max_burst = val; } else sp->max_burst = val; return 0; } /* * 7.1.18. Add a chunk that must be authenticated (SCTP_AUTH_CHUNK) * * This set option adds a chunk type that the user is requesting to be * received only in an authenticated way. Changes to the list of chunks * will only effect future associations on the socket. */ static int sctp_setsockopt_auth_chunk(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_endpoint *ep = sctp_sk(sk)->ep; struct sctp_authchunk val; if (!ep->auth_enable) return -EACCES; if (optlen != sizeof(struct sctp_authchunk)) return -EINVAL; if (copy_from_user(&val, optval, optlen)) return -EFAULT; switch (val.sauth_chunk) { case SCTP_CID_INIT: case SCTP_CID_INIT_ACK: case SCTP_CID_SHUTDOWN_COMPLETE: case SCTP_CID_AUTH: return -EINVAL; } /* add this chunk id to the endpoint */ return sctp_auth_ep_add_chunkid(ep, val.sauth_chunk); } /* * 7.1.19. Get or set the list of supported HMAC Identifiers (SCTP_HMAC_IDENT) * * This option gets or sets the list of HMAC algorithms that the local * endpoint requires the peer to use. */ static int sctp_setsockopt_hmac_ident(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_endpoint *ep = sctp_sk(sk)->ep; struct sctp_hmacalgo *hmacs; u32 idents; int err; if (!ep->auth_enable) return -EACCES; if (optlen < sizeof(struct sctp_hmacalgo)) return -EINVAL; hmacs = memdup_user(optval, optlen); if (IS_ERR(hmacs)) return PTR_ERR(hmacs); idents = hmacs->shmac_num_idents; if (idents == 0 || idents > SCTP_AUTH_NUM_HMACS || (idents * sizeof(u16)) > (optlen - sizeof(struct sctp_hmacalgo))) { err = -EINVAL; goto out; } err = sctp_auth_ep_set_hmacs(ep, hmacs); out: kfree(hmacs); return err; } /* * 7.1.20. Set a shared key (SCTP_AUTH_KEY) * * This option will set a shared secret key which is used to build an * association shared key. 
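 *
 * Illustrative user-space sketch ('fd', 'assoc_id' and a 16-byte 'secret'
 * buffer are assumed); the option length must cover both the header and
 * the key bytes, matching the checks below:
 *
 *	size_t keylen = 16;
 *	struct sctp_authkey *ak = calloc(1, sizeof(*ak) + keylen);
 *
 *	ak->sca_assoc_id = assoc_id;
 *	ak->sca_keynumber = 1;
 *	ak->sca_keylength = keylen;
 *	memcpy(ak->sca_key, secret, keylen);
 *	setsockopt(fd, IPPROTO_SCTP, SCTP_AUTH_KEY, ak, sizeof(*ak) + keylen);
 *	free(ak);
 *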
*/ static int sctp_setsockopt_auth_key(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_endpoint *ep = sctp_sk(sk)->ep; struct sctp_authkey *authkey; struct sctp_association *asoc; int ret; if (!ep->auth_enable) return -EACCES; if (optlen <= sizeof(struct sctp_authkey)) return -EINVAL; authkey = memdup_user(optval, optlen); if (IS_ERR(authkey)) return PTR_ERR(authkey); if (authkey->sca_keylength > optlen - sizeof(struct sctp_authkey)) { ret = -EINVAL; goto out; } asoc = sctp_id2assoc(sk, authkey->sca_assoc_id); if (!asoc && authkey->sca_assoc_id && sctp_style(sk, UDP)) { ret = -EINVAL; goto out; } ret = sctp_auth_set_key(ep, asoc, authkey); out: kzfree(authkey); return ret; } /* * 7.1.21. Get or set the active shared key (SCTP_AUTH_ACTIVE_KEY) * * This option will get or set the active shared key to be used to build * the association shared key. */ static int sctp_setsockopt_active_key(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_endpoint *ep = sctp_sk(sk)->ep; struct sctp_authkeyid val; struct sctp_association *asoc; if (!ep->auth_enable) return -EACCES; if (optlen != sizeof(struct sctp_authkeyid)) return -EINVAL; if (copy_from_user(&val, optval, optlen)) return -EFAULT; asoc = sctp_id2assoc(sk, val.scact_assoc_id); if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP)) return -EINVAL; return sctp_auth_set_active_key(ep, asoc, val.scact_keynumber); } /* * 7.1.22. Delete a shared key (SCTP_AUTH_DELETE_KEY) * * This set option will delete a shared secret key from use. */ static int sctp_setsockopt_del_key(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_endpoint *ep = sctp_sk(sk)->ep; struct sctp_authkeyid val; struct sctp_association *asoc; if (!ep->auth_enable) return -EACCES; if (optlen != sizeof(struct sctp_authkeyid)) return -EINVAL; if (copy_from_user(&val, optval, optlen)) return -EFAULT; asoc = sctp_id2assoc(sk, val.scact_assoc_id); if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP)) return -EINVAL; return sctp_auth_del_key_id(ep, asoc, val.scact_keynumber); } /* * 8.1.23 SCTP_AUTO_ASCONF * * This option will enable or disable the use of the automatic generation of * ASCONF chunks to add and delete addresses to an existing association. Note * that this option has two caveats namely: a) it only affects sockets that * are bound to all addresses available to the SCTP stack, and b) the system * administrator may have an overriding control that turns the ASCONF feature * off no matter what setting the socket option may have. * This option expects an integer boolean flag, where a non-zero value turns on * the option, and a zero value turns off the option. * Note. 
In this implementation, socket operation overrides default parameter * being set by sysctl as well as FreeBSD implementation */ static int sctp_setsockopt_auto_asconf(struct sock *sk, char __user *optval, unsigned int optlen) { int val; struct sctp_sock *sp = sctp_sk(sk); if (optlen < sizeof(int)) return -EINVAL; if (get_user(val, (int __user *)optval)) return -EFAULT; if (!sctp_is_ep_boundall(sk) && val) return -EINVAL; if ((val && sp->do_auto_asconf) || (!val && !sp->do_auto_asconf)) return 0; spin_lock_bh(&sock_net(sk)->sctp.addr_wq_lock); if (val == 0 && sp->do_auto_asconf) { list_del(&sp->auto_asconf_list); sp->do_auto_asconf = 0; } else if (val && !sp->do_auto_asconf) { list_add_tail(&sp->auto_asconf_list, &sock_net(sk)->sctp.auto_asconf_splist); sp->do_auto_asconf = 1; } spin_unlock_bh(&sock_net(sk)->sctp.addr_wq_lock); return 0; } /* * SCTP_PEER_ADDR_THLDS * * This option allows us to alter the partially failed threshold for one or all * transports in an association. See Section 6.1 of: * http://www.ietf.org/id/draft-nishida-tsvwg-sctp-failover-05.txt */ static int sctp_setsockopt_paddr_thresholds(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_paddrthlds val; struct sctp_transport *trans; struct sctp_association *asoc; if (optlen < sizeof(struct sctp_paddrthlds)) return -EINVAL; if (copy_from_user(&val, (struct sctp_paddrthlds __user *)optval, sizeof(struct sctp_paddrthlds))) return -EFAULT; if (sctp_is_any(sk, (const union sctp_addr *)&val.spt_address)) { asoc = sctp_id2assoc(sk, val.spt_assoc_id); if (!asoc) return -ENOENT; list_for_each_entry(trans, &asoc->peer.transport_addr_list, transports) { if (val.spt_pathmaxrxt) trans->pathmaxrxt = val.spt_pathmaxrxt; trans->pf_retrans = val.spt_pathpfthld; } if (val.spt_pathmaxrxt) asoc->pathmaxrxt = val.spt_pathmaxrxt; asoc->pf_retrans = val.spt_pathpfthld; } else { trans = sctp_addr_id2transport(sk, &val.spt_address, val.spt_assoc_id); if (!trans) return -ENOENT; if (val.spt_pathmaxrxt) trans->pathmaxrxt = val.spt_pathmaxrxt; trans->pf_retrans = val.spt_pathpfthld; } return 0; } static int sctp_setsockopt_recvrcvinfo(struct sock *sk, char __user *optval, unsigned int optlen) { int val; if (optlen < sizeof(int)) return -EINVAL; if (get_user(val, (int __user *) optval)) return -EFAULT; sctp_sk(sk)->recvrcvinfo = (val == 0) ? 0 : 1; return 0; } static int sctp_setsockopt_recvnxtinfo(struct sock *sk, char __user *optval, unsigned int optlen) { int val; if (optlen < sizeof(int)) return -EINVAL; if (get_user(val, (int __user *) optval)) return -EFAULT; sctp_sk(sk)->recvnxtinfo = (val == 0) ? 0 : 1; return 0; } /* API 6.2 setsockopt(), getsockopt() * * Applications use setsockopt() and getsockopt() to set or retrieve * socket options. Socket options are used to change the default * behavior of sockets calls. They are described in Section 7. * * The syntax is: * * ret = getsockopt(int sd, int level, int optname, void __user *optval, * int __user *optlen); * ret = setsockopt(int sd, int level, int optname, const void __user *optval, * int optlen); * * sd - the socket descript. * level - set to IPPROTO_SCTP for all SCTP options. * optname - the option name. * optval - the buffer to store the value of the option. * optlen - the size of the buffer. */ static int sctp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen) { int retval = 0; pr_debug("%s: sk:%p, optname:%d\n", __func__, sk, optname); /* I can hardly begin to describe how wrong this is. 
This is * so broken as to be worse than useless. The API draft * REALLY is NOT helpful here... I am not convinced that the * semantics of setsockopt() with a level OTHER THAN SOL_SCTP * are at all well-founded. */ if (level != SOL_SCTP) { struct sctp_af *af = sctp_sk(sk)->pf->af; retval = af->setsockopt(sk, level, optname, optval, optlen); goto out_nounlock; } lock_sock(sk); switch (optname) { case SCTP_SOCKOPT_BINDX_ADD: /* 'optlen' is the size of the addresses buffer. */ retval = sctp_setsockopt_bindx(sk, (struct sockaddr __user *)optval, optlen, SCTP_BINDX_ADD_ADDR); break; case SCTP_SOCKOPT_BINDX_REM: /* 'optlen' is the size of the addresses buffer. */ retval = sctp_setsockopt_bindx(sk, (struct sockaddr __user *)optval, optlen, SCTP_BINDX_REM_ADDR); break; case SCTP_SOCKOPT_CONNECTX_OLD: /* 'optlen' is the size of the addresses buffer. */ retval = sctp_setsockopt_connectx_old(sk, (struct sockaddr __user *)optval, optlen); break; case SCTP_SOCKOPT_CONNECTX: /* 'optlen' is the size of the addresses buffer. */ retval = sctp_setsockopt_connectx(sk, (struct sockaddr __user *)optval, optlen); break; case SCTP_DISABLE_FRAGMENTS: retval = sctp_setsockopt_disable_fragments(sk, optval, optlen); break; case SCTP_EVENTS: retval = sctp_setsockopt_events(sk, optval, optlen); break; case SCTP_AUTOCLOSE: retval = sctp_setsockopt_autoclose(sk, optval, optlen); break; case SCTP_PEER_ADDR_PARAMS: retval = sctp_setsockopt_peer_addr_params(sk, optval, optlen); break; case SCTP_DELAYED_SACK: retval = sctp_setsockopt_delayed_ack(sk, optval, optlen); break; case SCTP_PARTIAL_DELIVERY_POINT: retval = sctp_setsockopt_partial_delivery_point(sk, optval, optlen); break; case SCTP_INITMSG: retval = sctp_setsockopt_initmsg(sk, optval, optlen); break; case SCTP_DEFAULT_SEND_PARAM: retval = sctp_setsockopt_default_send_param(sk, optval, optlen); break; case SCTP_DEFAULT_SNDINFO: retval = sctp_setsockopt_default_sndinfo(sk, optval, optlen); break; case SCTP_PRIMARY_ADDR: retval = sctp_setsockopt_primary_addr(sk, optval, optlen); break; case SCTP_SET_PEER_PRIMARY_ADDR: retval = sctp_setsockopt_peer_primary_addr(sk, optval, optlen); break; case SCTP_NODELAY: retval = sctp_setsockopt_nodelay(sk, optval, optlen); break; case SCTP_RTOINFO: retval = sctp_setsockopt_rtoinfo(sk, optval, optlen); break; case SCTP_ASSOCINFO: retval = sctp_setsockopt_associnfo(sk, optval, optlen); break; case SCTP_I_WANT_MAPPED_V4_ADDR: retval = sctp_setsockopt_mappedv4(sk, optval, optlen); break; case SCTP_MAXSEG: retval = sctp_setsockopt_maxseg(sk, optval, optlen); break; case SCTP_ADAPTATION_LAYER: retval = sctp_setsockopt_adaptation_layer(sk, optval, optlen); break; case SCTP_CONTEXT: retval = sctp_setsockopt_context(sk, optval, optlen); break; case SCTP_FRAGMENT_INTERLEAVE: retval = sctp_setsockopt_fragment_interleave(sk, optval, optlen); break; case SCTP_MAX_BURST: retval = sctp_setsockopt_maxburst(sk, optval, optlen); break; case SCTP_AUTH_CHUNK: retval = sctp_setsockopt_auth_chunk(sk, optval, optlen); break; case SCTP_HMAC_IDENT: retval = sctp_setsockopt_hmac_ident(sk, optval, optlen); break; case SCTP_AUTH_KEY: retval = sctp_setsockopt_auth_key(sk, optval, optlen); break; case SCTP_AUTH_ACTIVE_KEY: retval = sctp_setsockopt_active_key(sk, optval, optlen); break; case SCTP_AUTH_DELETE_KEY: retval = sctp_setsockopt_del_key(sk, optval, optlen); break; case SCTP_AUTO_ASCONF: retval = sctp_setsockopt_auto_asconf(sk, optval, optlen); break; case SCTP_PEER_ADDR_THLDS: retval = sctp_setsockopt_paddr_thresholds(sk, optval, optlen); break; case 
SCTP_RECVRCVINFO: retval = sctp_setsockopt_recvrcvinfo(sk, optval, optlen); break; case SCTP_RECVNXTINFO: retval = sctp_setsockopt_recvnxtinfo(sk, optval, optlen); break; default: retval = -ENOPROTOOPT; break; } release_sock(sk); out_nounlock: return retval; } /* API 3.1.6 connect() - UDP Style Syntax * * An application may use the connect() call in the UDP model to initiate an * association without sending data. * * The syntax is: * * ret = connect(int sd, const struct sockaddr *nam, socklen_t len); * * sd: the socket descriptor to have a new association added to. * * nam: the address structure (either struct sockaddr_in or struct * sockaddr_in6 defined in RFC2553 [7]). * * len: the size of the address. */ static int sctp_connect(struct sock *sk, struct sockaddr *addr, int addr_len) { int err = 0; struct sctp_af *af; lock_sock(sk); pr_debug("%s: sk:%p, sockaddr:%p, addr_len:%d\n", __func__, sk, addr, addr_len); /* Validate addr_len before calling common connect/connectx routine. */ af = sctp_get_af_specific(addr->sa_family); if (!af || addr_len < af->sockaddr_len) { err = -EINVAL; } else { /* Pass correct addr len to common routine (so it knows there * is only one address being passed. */ err = __sctp_connect(sk, addr, af->sockaddr_len, NULL); } release_sock(sk); return err; } /* FIXME: Write comments. */ static int sctp_disconnect(struct sock *sk, int flags) { return -EOPNOTSUPP; /* STUB */ } /* 4.1.4 accept() - TCP Style Syntax * * Applications use accept() call to remove an established SCTP * association from the accept queue of the endpoint. A new socket * descriptor will be returned from accept() to represent the newly * formed association. */ static struct sock *sctp_accept(struct sock *sk, int flags, int *err) { struct sctp_sock *sp; struct sctp_endpoint *ep; struct sock *newsk = NULL; struct sctp_association *asoc; long timeo; int error = 0; lock_sock(sk); sp = sctp_sk(sk); ep = sp->ep; if (!sctp_style(sk, TCP)) { error = -EOPNOTSUPP; goto out; } if (!sctp_sstate(sk, LISTENING)) { error = -EINVAL; goto out; } timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); error = sctp_wait_for_accept(sk, timeo); if (error) goto out; /* We treat the list of associations on the endpoint as the accept * queue and pick the first association on the list. */ asoc = list_entry(ep->asocs.next, struct sctp_association, asocs); newsk = sp->pf->create_accept_sk(sk, asoc); if (!newsk) { error = -ENOMEM; goto out; } /* Populate the fields of the newsk from the oldsk and migrate the * asoc to the newsk. */ sctp_sock_migrate(sk, newsk, asoc, SCTP_SOCKET_TCP); out: release_sock(sk); *err = error; return newsk; } /* The SCTP ioctl handler. */ static int sctp_ioctl(struct sock *sk, int cmd, unsigned long arg) { int rc = -ENOTCONN; lock_sock(sk); /* * SEQPACKET-style sockets in LISTENING state are valid, for * SCTP, so only discard TCP-style sockets in LISTENING state. */ if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)) goto out; switch (cmd) { case SIOCINQ: { struct sk_buff *skb; unsigned int amount = 0; skb = skb_peek(&sk->sk_receive_queue); if (skb != NULL) { /* * We will only return the amount of this packet since * that is all that will be read. */ amount = skb->len; } rc = put_user(amount, (int __user *)arg); break; } default: rc = -ENOIOCTLCMD; break; } out: release_sock(sk); return rc; } /* This is the function which gets called during socket creation to * initialized the SCTP-specific portion of the sock. * The sock structure should already be zero-filled memory. 
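 *
 * Illustrative user-space sketch: the socket type chosen at creation time
 * is what selects the style set up below - SOCK_SEQPACKET yields a
 * one-to-many (UDP-style) socket, SOCK_STREAM a one-to-one (TCP-style)
 * socket:
 *
 *	int udp_style = socket(AF_INET, SOCK_SEQPACKET, IPPROTO_SCTP);
 *	int tcp_style = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);
 *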
*/ static int sctp_init_sock(struct sock *sk) { struct net *net = sock_net(sk); struct sctp_sock *sp; pr_debug("%s: sk:%p\n", __func__, sk); sp = sctp_sk(sk); /* Initialize the SCTP per socket area. */ switch (sk->sk_type) { case SOCK_SEQPACKET: sp->type = SCTP_SOCKET_UDP; break; case SOCK_STREAM: sp->type = SCTP_SOCKET_TCP; break; default: return -ESOCKTNOSUPPORT; } /* Initialize default send parameters. These parameters can be * modified with the SCTP_DEFAULT_SEND_PARAM socket option. */ sp->default_stream = 0; sp->default_ppid = 0; sp->default_flags = 0; sp->default_context = 0; sp->default_timetolive = 0; sp->default_rcv_context = 0; sp->max_burst = net->sctp.max_burst; sp->sctp_hmac_alg = net->sctp.sctp_hmac_alg; /* Initialize default setup parameters. These parameters * can be modified with the SCTP_INITMSG socket option or * overridden by the SCTP_INIT CMSG. */ sp->initmsg.sinit_num_ostreams = sctp_max_outstreams; sp->initmsg.sinit_max_instreams = sctp_max_instreams; sp->initmsg.sinit_max_attempts = net->sctp.max_retrans_init; sp->initmsg.sinit_max_init_timeo = net->sctp.rto_max; /* Initialize default RTO related parameters. These parameters can * be modified for with the SCTP_RTOINFO socket option. */ sp->rtoinfo.srto_initial = net->sctp.rto_initial; sp->rtoinfo.srto_max = net->sctp.rto_max; sp->rtoinfo.srto_min = net->sctp.rto_min; /* Initialize default association related parameters. These parameters * can be modified with the SCTP_ASSOCINFO socket option. */ sp->assocparams.sasoc_asocmaxrxt = net->sctp.max_retrans_association; sp->assocparams.sasoc_number_peer_destinations = 0; sp->assocparams.sasoc_peer_rwnd = 0; sp->assocparams.sasoc_local_rwnd = 0; sp->assocparams.sasoc_cookie_life = net->sctp.valid_cookie_life; /* Initialize default event subscriptions. By default, all the * options are off. */ memset(&sp->subscribe, 0, sizeof(struct sctp_event_subscribe)); /* Default Peer Address Parameters. These defaults can * be modified via SCTP_PEER_ADDR_PARAMS */ sp->hbinterval = net->sctp.hb_interval; sp->pathmaxrxt = net->sctp.max_retrans_path; sp->pathmtu = 0; /* allow default discovery */ sp->sackdelay = net->sctp.sack_timeout; sp->sackfreq = 2; sp->param_flags = SPP_HB_ENABLE | SPP_PMTUD_ENABLE | SPP_SACKDELAY_ENABLE; /* If enabled no SCTP message fragmentation will be performed. * Configure through SCTP_DISABLE_FRAGMENTS socket option. */ sp->disable_fragments = 0; /* Enable Nagle algorithm by default. */ sp->nodelay = 0; sp->recvrcvinfo = 0; sp->recvnxtinfo = 0; /* Enable by default. */ sp->v4mapped = 1; /* Auto-close idle associations after the configured * number of seconds. A value of 0 disables this * feature. Configure through the SCTP_AUTOCLOSE socket option, * for UDP-style sockets only. */ sp->autoclose = 0; /* User specified fragmentation limit. */ sp->user_frag = 0; sp->adaptation_ind = 0; sp->pf = sctp_get_pf_specific(sk->sk_family); /* Control variables for partial data delivery. */ atomic_set(&sp->pd_mode, 0); skb_queue_head_init(&sp->pd_lobby); sp->frag_interleave = 0; /* Create a per socket endpoint structure. Even if we * change the data structure relationships, this may still * be useful for storing pre-connect address information. 
*/ sp->ep = sctp_endpoint_new(sk, GFP_KERNEL); if (!sp->ep) return -ENOMEM; sp->hmac = NULL; sk->sk_destruct = sctp_destruct_sock; SCTP_DBG_OBJCNT_INC(sock); local_bh_disable(); percpu_counter_inc(&sctp_sockets_allocated); sock_prot_inuse_add(net, sk->sk_prot, 1); /* Nothing can fail after this block, otherwise * sctp_destroy_sock() will be called without addr_wq_lock held */ if (net->sctp.default_auto_asconf) { spin_lock(&sock_net(sk)->sctp.addr_wq_lock); list_add_tail(&sp->auto_asconf_list, &net->sctp.auto_asconf_splist); sp->do_auto_asconf = 1; spin_unlock(&sock_net(sk)->sctp.addr_wq_lock); } else { sp->do_auto_asconf = 0; } local_bh_enable(); return 0; } /* Cleanup any SCTP per socket resources. Must be called with * sock_net(sk)->sctp.addr_wq_lock held if sp->do_auto_asconf is true */ static void sctp_destroy_sock(struct sock *sk) { struct sctp_sock *sp; pr_debug("%s: sk:%p\n", __func__, sk); /* Release our hold on the endpoint. */ sp = sctp_sk(sk); /* This could happen during socket init, thus we bail out * early, since the rest of the below is not setup either. */ if (sp->ep == NULL) return; if (sp->do_auto_asconf) { sp->do_auto_asconf = 0; list_del(&sp->auto_asconf_list); } sctp_endpoint_free(sp->ep); local_bh_disable(); percpu_counter_dec(&sctp_sockets_allocated); sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); local_bh_enable(); } /* Triggered when there are no references on the socket anymore */ static void sctp_destruct_sock(struct sock *sk) { struct sctp_sock *sp = sctp_sk(sk); /* Free up the HMAC transform. */ crypto_free_hash(sp->hmac); inet_sock_destruct(sk); } /* API 4.1.7 shutdown() - TCP Style Syntax * int shutdown(int socket, int how); * * sd - the socket descriptor of the association to be closed. * how - Specifies the type of shutdown. The values are * as follows: * SHUT_RD * Disables further receive operations. No SCTP * protocol action is taken. * SHUT_WR * Disables further send operations, and initiates * the SCTP shutdown sequence. * SHUT_RDWR * Disables further send and receive operations * and initiates the SCTP shutdown sequence. */ static void sctp_shutdown(struct sock *sk, int how) { struct net *net = sock_net(sk); struct sctp_endpoint *ep; struct sctp_association *asoc; if (!sctp_style(sk, TCP)) return; if (how & SEND_SHUTDOWN) { ep = sctp_sk(sk)->ep; if (!list_empty(&ep->asocs)) { asoc = list_entry(ep->asocs.next, struct sctp_association, asocs); sctp_primitive_SHUTDOWN(net, asoc, NULL); } } } /* 7.2.1 Association Status (SCTP_STATUS) * Applications can retrieve current status information about an * association, including association state, peer receiver window size, * number of unacked data chunks, and number of data chunks pending * receipt. This information is read-only. 
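 *
 * Illustrative user-space sketch ('fd' and 'assoc_id' are assumed): the
 * association id is passed in through the option buffer and the rest of
 * the structure is filled in on return:
 *
 *	struct sctp_status st = { 0 };
 *	socklen_t len = sizeof(st);
 *
 *	st.sstat_assoc_id = assoc_id;
 *	if (getsockopt(fd, IPPROTO_SCTP, SCTP_STATUS, &st, &len) == 0)
 *		printf("state %d rwnd %u\n", st.sstat_state, st.sstat_rwnd);
 *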
*/ static int sctp_getsockopt_sctp_status(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_status status; struct sctp_association *asoc = NULL; struct sctp_transport *transport; sctp_assoc_t associd; int retval = 0; if (len < sizeof(status)) { retval = -EINVAL; goto out; } len = sizeof(status); if (copy_from_user(&status, optval, len)) { retval = -EFAULT; goto out; } associd = status.sstat_assoc_id; asoc = sctp_id2assoc(sk, associd); if (!asoc) { retval = -EINVAL; goto out; } transport = asoc->peer.primary_path; status.sstat_assoc_id = sctp_assoc2id(asoc); status.sstat_state = sctp_assoc_to_state(asoc); status.sstat_rwnd = asoc->peer.rwnd; status.sstat_unackdata = asoc->unack_data; status.sstat_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map); status.sstat_instrms = asoc->c.sinit_max_instreams; status.sstat_outstrms = asoc->c.sinit_num_ostreams; status.sstat_fragmentation_point = asoc->frag_point; status.sstat_primary.spinfo_assoc_id = sctp_assoc2id(transport->asoc); memcpy(&status.sstat_primary.spinfo_address, &transport->ipaddr, transport->af_specific->sockaddr_len); /* Map ipv4 address into v4-mapped-on-v6 address. */ sctp_get_pf_specific(sk->sk_family)->addr_to_user(sctp_sk(sk), (union sctp_addr *)&status.sstat_primary.spinfo_address); status.sstat_primary.spinfo_state = transport->state; status.sstat_primary.spinfo_cwnd = transport->cwnd; status.sstat_primary.spinfo_srtt = transport->srtt; status.sstat_primary.spinfo_rto = jiffies_to_msecs(transport->rto); status.sstat_primary.spinfo_mtu = transport->pathmtu; if (status.sstat_primary.spinfo_state == SCTP_UNKNOWN) status.sstat_primary.spinfo_state = SCTP_ACTIVE; if (put_user(len, optlen)) { retval = -EFAULT; goto out; } pr_debug("%s: len:%d, state:%d, rwnd:%d, assoc_id:%d\n", __func__, len, status.sstat_state, status.sstat_rwnd, status.sstat_assoc_id); if (copy_to_user(optval, &status, len)) { retval = -EFAULT; goto out; } out: return retval; } /* 7.2.2 Peer Address Information (SCTP_GET_PEER_ADDR_INFO) * * Applications can retrieve information about a specific peer address * of an association, including its reachability state, congestion * window, and retransmission timer values. This information is * read-only. */ static int sctp_getsockopt_peer_addr_info(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_paddrinfo pinfo; struct sctp_transport *transport; int retval = 0; if (len < sizeof(pinfo)) { retval = -EINVAL; goto out; } len = sizeof(pinfo); if (copy_from_user(&pinfo, optval, len)) { retval = -EFAULT; goto out; } transport = sctp_addr_id2transport(sk, &pinfo.spinfo_address, pinfo.spinfo_assoc_id); if (!transport) return -EINVAL; pinfo.spinfo_assoc_id = sctp_assoc2id(transport->asoc); pinfo.spinfo_state = transport->state; pinfo.spinfo_cwnd = transport->cwnd; pinfo.spinfo_srtt = transport->srtt; pinfo.spinfo_rto = jiffies_to_msecs(transport->rto); pinfo.spinfo_mtu = transport->pathmtu; if (pinfo.spinfo_state == SCTP_UNKNOWN) pinfo.spinfo_state = SCTP_ACTIVE; if (put_user(len, optlen)) { retval = -EFAULT; goto out; } if (copy_to_user(optval, &pinfo, len)) { retval = -EFAULT; goto out; } out: return retval; } /* 7.1.12 Enable/Disable message fragmentation (SCTP_DISABLE_FRAGMENTS) * * This option is a on/off flag. If enabled no SCTP message * fragmentation will be performed. Instead if a message being sent * exceeds the current PMTU size, the message will NOT be sent and * instead a error will be indicated to the user. 
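 *
 * For illustration only, a hypothetical userspace sketch of reading the
 * flag (the kernel reports it as a plain int, 0 or 1):
 *
 *   int val;
 *   socklen_t len = sizeof(val);
 *
 *   if (getsockopt(fd, IPPROTO_SCTP, SCTP_DISABLE_FRAGMENTS,
 *                  &val, &len) == 0 && val)
 *           puts("message fragmentation is disabled");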
*/ static int sctp_getsockopt_disable_fragments(struct sock *sk, int len, char __user *optval, int __user *optlen) { int val; if (len < sizeof(int)) return -EINVAL; len = sizeof(int); val = (sctp_sk(sk)->disable_fragments == 1); if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &val, len)) return -EFAULT; return 0; } /* 7.1.15 Set notification and ancillary events (SCTP_EVENTS) * * This socket option is used to specify various notifications and * ancillary data the user wishes to receive. */ static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval, int __user *optlen) { if (len <= 0) return -EINVAL; if (len > sizeof(struct sctp_event_subscribe)) len = sizeof(struct sctp_event_subscribe); if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &sctp_sk(sk)->subscribe, len)) return -EFAULT; return 0; } /* 7.1.8 Automatic Close of associations (SCTP_AUTOCLOSE) * * This socket option is applicable to the UDP-style socket only. When * set it will cause associations that are idle for more than the * specified number of seconds to automatically close. An association * being idle is defined an association that has NOT sent or received * user data. The special value of '0' indicates that no automatic * close of any associations should be performed. The option expects an * integer defining the number of seconds of idle time before an * association is closed. */ static int sctp_getsockopt_autoclose(struct sock *sk, int len, char __user *optval, int __user *optlen) { /* Applicable to UDP-style socket only */ if (sctp_style(sk, TCP)) return -EOPNOTSUPP; if (len < sizeof(int)) return -EINVAL; len = sizeof(int); if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &sctp_sk(sk)->autoclose, sizeof(int))) return -EFAULT; return 0; } /* Helper routine to branch off an association to a new socket. */ int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp) { struct sctp_association *asoc = sctp_id2assoc(sk, id); struct sctp_sock *sp = sctp_sk(sk); struct socket *sock; int err = 0; if (!asoc) return -EINVAL; /* An association cannot be branched off from an already peeled-off * socket, nor is this supported for tcp style sockets. */ if (!sctp_style(sk, UDP)) return -EINVAL; /* Create a new socket. */ err = sock_create(sk->sk_family, SOCK_SEQPACKET, IPPROTO_SCTP, &sock); if (err < 0) return err; sctp_copy_sock(sock->sk, sk, asoc); /* Make peeled-off sockets more like 1-1 accepted sockets. * Set the daddr and initialize id to something more random */ sp->pf->to_sk_daddr(&asoc->peer.primary_addr, sk); /* Populate the fields of the newsk from the oldsk and migrate the * asoc to the newsk. */ sctp_sock_migrate(sk, sock->sk, asoc, SCTP_SOCKET_UDP_HIGH_BANDWIDTH); *sockp = sock; return err; } EXPORT_SYMBOL(sctp_do_peeloff); static int sctp_getsockopt_peeloff(struct sock *sk, int len, char __user *optval, int __user *optlen) { sctp_peeloff_arg_t peeloff; struct socket *newsock; struct file *newfile; int retval = 0; if (len < sizeof(sctp_peeloff_arg_t)) return -EINVAL; len = sizeof(sctp_peeloff_arg_t); if (copy_from_user(&peeloff, optval, len)) return -EFAULT; retval = sctp_do_peeloff(sk, peeloff.associd, &newsock); if (retval < 0) goto out; /* Map the socket to an unused fd that can be returned to the user. 
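 * The descriptor is reserved first and only published to the process via
 * fd_install() once the peeloff result has been copied out successfully,
 * so any intermediate failure can still release both the file and the
 * reserved fd.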
*/ retval = get_unused_fd_flags(0); if (retval < 0) { sock_release(newsock); goto out; } newfile = sock_alloc_file(newsock, 0, NULL); if (unlikely(IS_ERR(newfile))) { put_unused_fd(retval); sock_release(newsock); return PTR_ERR(newfile); } pr_debug("%s: sk:%p, newsk:%p, sd:%d\n", __func__, sk, newsock->sk, retval); /* Return the fd mapped to the new socket. */ if (put_user(len, optlen)) { fput(newfile); put_unused_fd(retval); return -EFAULT; } peeloff.sd = retval; if (copy_to_user(optval, &peeloff, len)) { fput(newfile); put_unused_fd(retval); return -EFAULT; } fd_install(retval, newfile); out: return retval; } /* 7.1.13 Peer Address Parameters (SCTP_PEER_ADDR_PARAMS) * * Applications can enable or disable heartbeats for any peer address of * an association, modify an address's heartbeat interval, force a * heartbeat to be sent immediately, and adjust the address's maximum * number of retransmissions sent before an address is considered * unreachable. The following structure is used to access and modify an * address's parameters: * * struct sctp_paddrparams { * sctp_assoc_t spp_assoc_id; * struct sockaddr_storage spp_address; * uint32_t spp_hbinterval; * uint16_t spp_pathmaxrxt; * uint32_t spp_pathmtu; * uint32_t spp_sackdelay; * uint32_t spp_flags; * }; * * spp_assoc_id - (one-to-many style socket) This is filled in the * application, and identifies the association for * this query. * spp_address - This specifies which address is of interest. * spp_hbinterval - This contains the value of the heartbeat interval, * in milliseconds. If a value of zero * is present in this field then no changes are to * be made to this parameter. * spp_pathmaxrxt - This contains the maximum number of * retransmissions before this address shall be * considered unreachable. If a value of zero * is present in this field then no changes are to * be made to this parameter. * spp_pathmtu - When Path MTU discovery is disabled the value * specified here will be the "fixed" path mtu. * Note that if the spp_address field is empty * then all associations on this address will * have this fixed path mtu set upon them. * * spp_sackdelay - When delayed sack is enabled, this value specifies * the number of milliseconds that sacks will be delayed * for. This value will apply to all addresses of an * association if the spp_address field is empty. Note * also, that if delayed sack is enabled and this * value is set to 0, no change is made to the last * recorded delayed sack timer value. * * spp_flags - These flags are used to control various features * on an association. The flag field may contain * zero or more of the following options. * * SPP_HB_ENABLE - Enable heartbeats on the * specified address. Note that if the address * field is empty all addresses for the association * have heartbeats enabled upon them. * * SPP_HB_DISABLE - Disable heartbeats on the * speicifed address. Note that if the address * field is empty all addresses for the association * will have their heartbeats disabled. Note also * that SPP_HB_ENABLE and SPP_HB_DISABLE are * mutually exclusive, only one of these two should * be specified. Enabling both fields will have * undetermined results. * * SPP_HB_DEMAND - Request a user initiated heartbeat * to be made immediately. * * SPP_PMTUD_ENABLE - This field will enable PMTU * discovery upon the specified address. Note that * if the address feild is empty then all addresses * on the association are effected. * * SPP_PMTUD_DISABLE - This field will disable PMTU * discovery upon the specified address. 
Note that * if the address feild is empty then all addresses * on the association are effected. Not also that * SPP_PMTUD_ENABLE and SPP_PMTUD_DISABLE are mutually * exclusive. Enabling both will have undetermined * results. * * SPP_SACKDELAY_ENABLE - Setting this flag turns * on delayed sack. The time specified in spp_sackdelay * is used to specify the sack delay for this address. Note * that if spp_address is empty then all addresses will * enable delayed sack and take on the sack delay * value specified in spp_sackdelay. * SPP_SACKDELAY_DISABLE - Setting this flag turns * off delayed sack. If the spp_address field is blank then * delayed sack is disabled for the entire association. Note * also that this field is mutually exclusive to * SPP_SACKDELAY_ENABLE, setting both will have undefined * results. */ static int sctp_getsockopt_peer_addr_params(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_paddrparams params; struct sctp_transport *trans = NULL; struct sctp_association *asoc = NULL; struct sctp_sock *sp = sctp_sk(sk); if (len < sizeof(struct sctp_paddrparams)) return -EINVAL; len = sizeof(struct sctp_paddrparams); if (copy_from_user(&params, optval, len)) return -EFAULT; /* If an address other than INADDR_ANY is specified, and * no transport is found, then the request is invalid. */ if (!sctp_is_any(sk, (union sctp_addr *)&params.spp_address)) { trans = sctp_addr_id2transport(sk, &params.spp_address, params.spp_assoc_id); if (!trans) { pr_debug("%s: failed no transport\n", __func__); return -EINVAL; } } /* Get association, if assoc_id != 0 and the socket is a one * to many style socket, and an association was not found, then * the id was invalid. */ asoc = sctp_id2assoc(sk, params.spp_assoc_id); if (!asoc && params.spp_assoc_id && sctp_style(sk, UDP)) { pr_debug("%s: failed no association\n", __func__); return -EINVAL; } if (trans) { /* Fetch transport values. */ params.spp_hbinterval = jiffies_to_msecs(trans->hbinterval); params.spp_pathmtu = trans->pathmtu; params.spp_pathmaxrxt = trans->pathmaxrxt; params.spp_sackdelay = jiffies_to_msecs(trans->sackdelay); /*draft-11 doesn't say what to return in spp_flags*/ params.spp_flags = trans->param_flags; } else if (asoc) { /* Fetch association values. */ params.spp_hbinterval = jiffies_to_msecs(asoc->hbinterval); params.spp_pathmtu = asoc->pathmtu; params.spp_pathmaxrxt = asoc->pathmaxrxt; params.spp_sackdelay = jiffies_to_msecs(asoc->sackdelay); /*draft-11 doesn't say what to return in spp_flags*/ params.spp_flags = asoc->param_flags; } else { /* Fetch socket values. */ params.spp_hbinterval = sp->hbinterval; params.spp_pathmtu = sp->pathmtu; params.spp_sackdelay = sp->sackdelay; params.spp_pathmaxrxt = sp->pathmaxrxt; /*draft-11 doesn't say what to return in spp_flags*/ params.spp_flags = sp->param_flags; } if (copy_to_user(optval, &params, len)) return -EFAULT; if (put_user(len, optlen)) return -EFAULT; return 0; } /* * 7.1.23. Get or set delayed ack timer (SCTP_DELAYED_SACK) * * This option will effect the way delayed acks are performed. This * option allows you to get or set the delayed ack time, in * milliseconds. It also allows changing the delayed ack frequency. * Changing the frequency to 1 disables the delayed sack algorithm. If * the assoc_id is 0, then this sets or gets the endpoints default * values. If the assoc_id field is non-zero, then the set or get * effects the specified association for the one to many model (the * assoc_id field is ignored by the one to one model). 
Note that if * sack_delay or sack_freq are 0 when setting this option, then the * current values will remain unchanged. * * struct sctp_sack_info { * sctp_assoc_t sack_assoc_id; * uint32_t sack_delay; * uint32_t sack_freq; * }; * * sack_assoc_id - This parameter, indicates which association the user * is performing an action upon. Note that if this field's value is * zero then the endpoints default value is changed (effecting future * associations only). * * sack_delay - This parameter contains the number of milliseconds that * the user is requesting the delayed ACK timer be set to. Note that * this value is defined in the standard to be between 200 and 500 * milliseconds. * * sack_freq - This parameter contains the number of packets that must * be received before a sack is sent without waiting for the delay * timer to expire. The default value for this is 2, setting this * value to 1 will disable the delayed sack algorithm. */ static int sctp_getsockopt_delayed_ack(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_sack_info params; struct sctp_association *asoc = NULL; struct sctp_sock *sp = sctp_sk(sk); if (len >= sizeof(struct sctp_sack_info)) { len = sizeof(struct sctp_sack_info); if (copy_from_user(&params, optval, len)) return -EFAULT; } else if (len == sizeof(struct sctp_assoc_value)) { pr_warn_ratelimited(DEPRECATED "%s (pid %d) " "Use of struct sctp_assoc_value in delayed_ack socket option.\n" "Use struct sctp_sack_info instead\n", current->comm, task_pid_nr(current)); if (copy_from_user(&params, optval, len)) return -EFAULT; } else return -EINVAL; /* Get association, if sack_assoc_id != 0 and the socket is a one * to many style socket, and an association was not found, then * the id was invalid. */ asoc = sctp_id2assoc(sk, params.sack_assoc_id); if (!asoc && params.sack_assoc_id && sctp_style(sk, UDP)) return -EINVAL; if (asoc) { /* Fetch association values. */ if (asoc->param_flags & SPP_SACKDELAY_ENABLE) { params.sack_delay = jiffies_to_msecs( asoc->sackdelay); params.sack_freq = asoc->sackfreq; } else { params.sack_delay = 0; params.sack_freq = 1; } } else { /* Fetch socket values. */ if (sp->param_flags & SPP_SACKDELAY_ENABLE) { params.sack_delay = sp->sackdelay; params.sack_freq = sp->sackfreq; } else { params.sack_delay = 0; params.sack_freq = 1; } } if (copy_to_user(optval, &params, len)) return -EFAULT; if (put_user(len, optlen)) return -EFAULT; return 0; } /* 7.1.3 Initialization Parameters (SCTP_INITMSG) * * Applications can specify protocol parameters for the default association * initialization. The option name argument to setsockopt() and getsockopt() * is SCTP_INITMSG. * * Setting initialization parameters is effective only on an unconnected * socket (for UDP-style sockets only future associations are effected * by the change). With TCP-style sockets, this option is inherited by * sockets derived from a listener socket. 
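 *
 * For illustration only, a hypothetical userspace sketch of raising the
 * stream counts requested for future associations and reading the
 * current defaults back (field names as handled below):
 *
 *   struct sctp_initmsg init;
 *   socklen_t len = sizeof(init);
 *
 *   memset(&init, 0, sizeof(init));
 *   init.sinit_num_ostreams  = 10;
 *   init.sinit_max_instreams = 10;
 *   setsockopt(fd, IPPROTO_SCTP, SCTP_INITMSG, &init, sizeof(init));
 *   getsockopt(fd, IPPROTO_SCTP, SCTP_INITMSG, &init, &len);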
*/ static int sctp_getsockopt_initmsg(struct sock *sk, int len, char __user *optval, int __user *optlen) { if (len < sizeof(struct sctp_initmsg)) return -EINVAL; len = sizeof(struct sctp_initmsg); if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &sctp_sk(sk)->initmsg, len)) return -EFAULT; return 0; } static int sctp_getsockopt_peer_addrs(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_association *asoc; int cnt = 0; struct sctp_getaddrs getaddrs; struct sctp_transport *from; void __user *to; union sctp_addr temp; struct sctp_sock *sp = sctp_sk(sk); int addrlen; size_t space_left; int bytes_copied; if (len < sizeof(struct sctp_getaddrs)) return -EINVAL; if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs))) return -EFAULT; /* For UDP-style sockets, id specifies the association to query. */ asoc = sctp_id2assoc(sk, getaddrs.assoc_id); if (!asoc) return -EINVAL; to = optval + offsetof(struct sctp_getaddrs, addrs); space_left = len - offsetof(struct sctp_getaddrs, addrs); list_for_each_entry(from, &asoc->peer.transport_addr_list, transports) { memcpy(&temp, &from->ipaddr, sizeof(temp)); addrlen = sctp_get_pf_specific(sk->sk_family) ->addr_to_user(sp, &temp); if (space_left < addrlen) return -ENOMEM; if (copy_to_user(to, &temp, addrlen)) return -EFAULT; to += addrlen; cnt++; space_left -= addrlen; } if (put_user(cnt, &((struct sctp_getaddrs __user *)optval)->addr_num)) return -EFAULT; bytes_copied = ((char __user *)to) - optval; if (put_user(bytes_copied, optlen)) return -EFAULT; return 0; } static int sctp_copy_laddrs(struct sock *sk, __u16 port, void *to, size_t space_left, int *bytes_copied) { struct sctp_sockaddr_entry *addr; union sctp_addr temp; int cnt = 0; int addrlen; struct net *net = sock_net(sk); rcu_read_lock(); list_for_each_entry_rcu(addr, &net->sctp.local_addr_list, list) { if (!addr->valid) continue; if ((PF_INET == sk->sk_family) && (AF_INET6 == addr->a.sa.sa_family)) continue; if ((PF_INET6 == sk->sk_family) && inet_v6_ipv6only(sk) && (AF_INET == addr->a.sa.sa_family)) continue; memcpy(&temp, &addr->a, sizeof(temp)); if (!temp.v4.sin_port) temp.v4.sin_port = htons(port); addrlen = sctp_get_pf_specific(sk->sk_family) ->addr_to_user(sctp_sk(sk), &temp); if (space_left < addrlen) { cnt = -ENOMEM; break; } memcpy(to, &temp, addrlen); to += addrlen; cnt++; space_left -= addrlen; *bytes_copied += addrlen; } rcu_read_unlock(); return cnt; } static int sctp_getsockopt_local_addrs(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_bind_addr *bp; struct sctp_association *asoc; int cnt = 0; struct sctp_getaddrs getaddrs; struct sctp_sockaddr_entry *addr; void __user *to; union sctp_addr temp; struct sctp_sock *sp = sctp_sk(sk); int addrlen; int err = 0; size_t space_left; int bytes_copied = 0; void *addrs; void *buf; if (len < sizeof(struct sctp_getaddrs)) return -EINVAL; if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs))) return -EFAULT; /* * For UDP-style sockets, id specifies the association to query. * If the id field is set to the value '0' then the locally bound * addresses are returned without regard to any particular * association. 
*/ if (0 == getaddrs.assoc_id) { bp = &sctp_sk(sk)->ep->base.bind_addr; } else { asoc = sctp_id2assoc(sk, getaddrs.assoc_id); if (!asoc) return -EINVAL; bp = &asoc->base.bind_addr; } to = optval + offsetof(struct sctp_getaddrs, addrs); space_left = len - offsetof(struct sctp_getaddrs, addrs); addrs = kmalloc(space_left, GFP_KERNEL); if (!addrs) return -ENOMEM; /* If the endpoint is bound to 0.0.0.0 or ::0, get the valid * addresses from the global local address list. */ if (sctp_list_single_entry(&bp->address_list)) { addr = list_entry(bp->address_list.next, struct sctp_sockaddr_entry, list); if (sctp_is_any(sk, &addr->a)) { cnt = sctp_copy_laddrs(sk, bp->port, addrs, space_left, &bytes_copied); if (cnt < 0) { err = cnt; goto out; } goto copy_getaddrs; } } buf = addrs; /* Protection on the bound address list is not needed since * in the socket option context we hold a socket lock and * thus the bound address list can't change. */ list_for_each_entry(addr, &bp->address_list, list) { memcpy(&temp, &addr->a, sizeof(temp)); addrlen = sctp_get_pf_specific(sk->sk_family) ->addr_to_user(sp, &temp); if (space_left < addrlen) { err = -ENOMEM; /*fixme: right error?*/ goto out; } memcpy(buf, &temp, addrlen); buf += addrlen; bytes_copied += addrlen; cnt++; space_left -= addrlen; } copy_getaddrs: if (copy_to_user(to, addrs, bytes_copied)) { err = -EFAULT; goto out; } if (put_user(cnt, &((struct sctp_getaddrs __user *)optval)->addr_num)) { err = -EFAULT; goto out; } if (put_user(bytes_copied, optlen)) err = -EFAULT; out: kfree(addrs); return err; } /* 7.1.10 Set Primary Address (SCTP_PRIMARY_ADDR) * * Requests that the local SCTP stack use the enclosed peer address as * the association primary. The enclosed address must be one of the * association peer's addresses. */ static int sctp_getsockopt_primary_addr(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_prim prim; struct sctp_association *asoc; struct sctp_sock *sp = sctp_sk(sk); if (len < sizeof(struct sctp_prim)) return -EINVAL; len = sizeof(struct sctp_prim); if (copy_from_user(&prim, optval, len)) return -EFAULT; asoc = sctp_id2assoc(sk, prim.ssp_assoc_id); if (!asoc) return -EINVAL; if (!asoc->peer.primary_path) return -ENOTCONN; memcpy(&prim.ssp_addr, &asoc->peer.primary_path->ipaddr, asoc->peer.primary_path->af_specific->sockaddr_len); sctp_get_pf_specific(sk->sk_family)->addr_to_user(sp, (union sctp_addr *)&prim.ssp_addr); if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &prim, len)) return -EFAULT; return 0; } /* * 7.1.11 Set Adaptation Layer Indicator (SCTP_ADAPTATION_LAYER) * * Requests that the local endpoint set the specified Adaptation Layer * Indication parameter for all future INIT and INIT-ACK exchanges. */ static int sctp_getsockopt_adaptation_layer(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_setadaptation adaptation; if (len < sizeof(struct sctp_setadaptation)) return -EINVAL; len = sizeof(struct sctp_setadaptation); adaptation.ssb_adaptation_ind = sctp_sk(sk)->adaptation_ind; if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &adaptation, len)) return -EFAULT; return 0; } /* * * 7.1.14 Set default send parameters (SCTP_DEFAULT_SEND_PARAM) * * Applications that wish to use the sendto() system call may wish to * specify a default set of parameters that would normally be supplied * through the inclusion of ancillary data. This socket option allows * such an application to set the default sctp_sndrcvinfo structure. 
* The application that wishes to use this socket option simply passes * in to this call the sctp_sndrcvinfo structure defined in Section * 5.2.2) The input parameters accepted by this call include * sinfo_stream, sinfo_flags, sinfo_ppid, sinfo_context, * sinfo_timetolive. The user must provide the sinfo_assoc_id field in * to this call if the caller is using the UDP model. * * For getsockopt, it get the default sctp_sndrcvinfo structure. */ static int sctp_getsockopt_default_send_param(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_sock *sp = sctp_sk(sk); struct sctp_association *asoc; struct sctp_sndrcvinfo info; if (len < sizeof(info)) return -EINVAL; len = sizeof(info); if (copy_from_user(&info, optval, len)) return -EFAULT; asoc = sctp_id2assoc(sk, info.sinfo_assoc_id); if (!asoc && info.sinfo_assoc_id && sctp_style(sk, UDP)) return -EINVAL; if (asoc) { info.sinfo_stream = asoc->default_stream; info.sinfo_flags = asoc->default_flags; info.sinfo_ppid = asoc->default_ppid; info.sinfo_context = asoc->default_context; info.sinfo_timetolive = asoc->default_timetolive; } else { info.sinfo_stream = sp->default_stream; info.sinfo_flags = sp->default_flags; info.sinfo_ppid = sp->default_ppid; info.sinfo_context = sp->default_context; info.sinfo_timetolive = sp->default_timetolive; } if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &info, len)) return -EFAULT; return 0; } /* RFC6458, Section 8.1.31. Set/get Default Send Parameters * (SCTP_DEFAULT_SNDINFO) */ static int sctp_getsockopt_default_sndinfo(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_sock *sp = sctp_sk(sk); struct sctp_association *asoc; struct sctp_sndinfo info; if (len < sizeof(info)) return -EINVAL; len = sizeof(info); if (copy_from_user(&info, optval, len)) return -EFAULT; asoc = sctp_id2assoc(sk, info.snd_assoc_id); if (!asoc && info.snd_assoc_id && sctp_style(sk, UDP)) return -EINVAL; if (asoc) { info.snd_sid = asoc->default_stream; info.snd_flags = asoc->default_flags; info.snd_ppid = asoc->default_ppid; info.snd_context = asoc->default_context; } else { info.snd_sid = sp->default_stream; info.snd_flags = sp->default_flags; info.snd_ppid = sp->default_ppid; info.snd_context = sp->default_context; } if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &info, len)) return -EFAULT; return 0; } /* * * 7.1.5 SCTP_NODELAY * * Turn on/off any Nagle-like algorithm. This means that packets are * generally sent as soon as possible and no unnecessary delays are * introduced, at the cost of more packets in the network. Expects an * integer boolean flag. */ static int sctp_getsockopt_nodelay(struct sock *sk, int len, char __user *optval, int __user *optlen) { int val; if (len < sizeof(int)) return -EINVAL; len = sizeof(int); val = (sctp_sk(sk)->nodelay == 1); if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &val, len)) return -EFAULT; return 0; } /* * * 7.1.1 SCTP_RTOINFO * * The protocol parameters used to initialize and bound retransmission * timeout (RTO) are tunable. sctp_rtoinfo structure is used to access * and modify these parameters. * All parameters are time values, in milliseconds. A value of 0, when * modifying the parameters, indicates that the current value should not * be changed. 
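 *
 * For illustration only, a hypothetical userspace sketch of reading the
 * endpoint-wide RTO bounds (srto_assoc_id left at 0; values are in
 * milliseconds as stated above):
 *
 *   struct sctp_rtoinfo rto;
 *   socklen_t len = sizeof(rto);
 *
 *   memset(&rto, 0, sizeof(rto));
 *   if (getsockopt(fd, IPPROTO_SCTP, SCTP_RTOINFO, &rto, &len) == 0)
 *           printf("rto initial/min/max: %u/%u/%u ms\n",
 *                  rto.srto_initial, rto.srto_min, rto.srto_max);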
* */ static int sctp_getsockopt_rtoinfo(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_rtoinfo rtoinfo; struct sctp_association *asoc; if (len < sizeof (struct sctp_rtoinfo)) return -EINVAL; len = sizeof(struct sctp_rtoinfo); if (copy_from_user(&rtoinfo, optval, len)) return -EFAULT; asoc = sctp_id2assoc(sk, rtoinfo.srto_assoc_id); if (!asoc && rtoinfo.srto_assoc_id && sctp_style(sk, UDP)) return -EINVAL; /* Values corresponding to the specific association. */ if (asoc) { rtoinfo.srto_initial = jiffies_to_msecs(asoc->rto_initial); rtoinfo.srto_max = jiffies_to_msecs(asoc->rto_max); rtoinfo.srto_min = jiffies_to_msecs(asoc->rto_min); } else { /* Values corresponding to the endpoint. */ struct sctp_sock *sp = sctp_sk(sk); rtoinfo.srto_initial = sp->rtoinfo.srto_initial; rtoinfo.srto_max = sp->rtoinfo.srto_max; rtoinfo.srto_min = sp->rtoinfo.srto_min; } if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &rtoinfo, len)) return -EFAULT; return 0; } /* * * 7.1.2 SCTP_ASSOCINFO * * This option is used to tune the maximum retransmission attempts * of the association. * Returns an error if the new association retransmission value is * greater than the sum of the retransmission value of the peer. * See [SCTP] for more information. * */ static int sctp_getsockopt_associnfo(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_assocparams assocparams; struct sctp_association *asoc; struct list_head *pos; int cnt = 0; if (len < sizeof (struct sctp_assocparams)) return -EINVAL; len = sizeof(struct sctp_assocparams); if (copy_from_user(&assocparams, optval, len)) return -EFAULT; asoc = sctp_id2assoc(sk, assocparams.sasoc_assoc_id); if (!asoc && assocparams.sasoc_assoc_id && sctp_style(sk, UDP)) return -EINVAL; /* Values correspoinding to the specific association */ if (asoc) { assocparams.sasoc_asocmaxrxt = asoc->max_retrans; assocparams.sasoc_peer_rwnd = asoc->peer.rwnd; assocparams.sasoc_local_rwnd = asoc->a_rwnd; assocparams.sasoc_cookie_life = ktime_to_ms(asoc->cookie_life); list_for_each(pos, &asoc->peer.transport_addr_list) { cnt++; } assocparams.sasoc_number_peer_destinations = cnt; } else { /* Values corresponding to the endpoint */ struct sctp_sock *sp = sctp_sk(sk); assocparams.sasoc_asocmaxrxt = sp->assocparams.sasoc_asocmaxrxt; assocparams.sasoc_peer_rwnd = sp->assocparams.sasoc_peer_rwnd; assocparams.sasoc_local_rwnd = sp->assocparams.sasoc_local_rwnd; assocparams.sasoc_cookie_life = sp->assocparams.sasoc_cookie_life; assocparams.sasoc_number_peer_destinations = sp->assocparams. sasoc_number_peer_destinations; } if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &assocparams, len)) return -EFAULT; return 0; } /* * 7.1.16 Set/clear IPv4 mapped addresses (SCTP_I_WANT_MAPPED_V4_ADDR) * * This socket option is a boolean flag which turns on or off mapped V4 * addresses. If this option is turned on and the socket is type * PF_INET6, then IPv4 addresses will be mapped to V6 representation. * If this option is turned off, then no mapping will be done of V4 * addresses and a user will receive both PF_INET6 and PF_INET type * addresses on the socket. */ static int sctp_getsockopt_mappedv4(struct sock *sk, int len, char __user *optval, int __user *optlen) { int val; struct sctp_sock *sp = sctp_sk(sk); if (len < sizeof(int)) return -EINVAL; len = sizeof(int); val = sp->v4mapped; if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &val, len)) return -EFAULT; return 0; } /* * 7.1.29. 
Set or Get the default context (SCTP_CONTEXT) * (chapter and verse is quoted at sctp_setsockopt_context()) */ static int sctp_getsockopt_context(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_assoc_value params; struct sctp_sock *sp; struct sctp_association *asoc; if (len < sizeof(struct sctp_assoc_value)) return -EINVAL; len = sizeof(struct sctp_assoc_value); if (copy_from_user(&params, optval, len)) return -EFAULT; sp = sctp_sk(sk); if (params.assoc_id != 0) { asoc = sctp_id2assoc(sk, params.assoc_id); if (!asoc) return -EINVAL; params.assoc_value = asoc->default_rcv_context; } else { params.assoc_value = sp->default_rcv_context; } if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &params, len)) return -EFAULT; return 0; } /* * 8.1.16. Get or Set the Maximum Fragmentation Size (SCTP_MAXSEG) * This option will get or set the maximum size to put in any outgoing * SCTP DATA chunk. If a message is larger than this size it will be * fragmented by SCTP into the specified size. Note that the underlying * SCTP implementation may fragment into smaller sized chunks when the * PMTU of the underlying association is smaller than the value set by * the user. The default value for this option is '0' which indicates * the user is NOT limiting fragmentation and only the PMTU will effect * SCTP's choice of DATA chunk size. Note also that values set larger * than the maximum size of an IP datagram will effectively let SCTP * control fragmentation (i.e. the same as setting this option to 0). * * The following structure is used to access and modify this parameter: * * struct sctp_assoc_value { * sctp_assoc_t assoc_id; * uint32_t assoc_value; * }; * * assoc_id: This parameter is ignored for one-to-one style sockets. * For one-to-many style sockets this parameter indicates which * association the user is performing an action upon. Note that if * this field's value is zero then the endpoints default value is * changed (effecting future associations only). * assoc_value: This parameter specifies the maximum size in bytes. */ static int sctp_getsockopt_maxseg(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_assoc_value params; struct sctp_association *asoc; if (len == sizeof(int)) { pr_warn_ratelimited(DEPRECATED "%s (pid %d) " "Use of int in maxseg socket option.\n" "Use struct sctp_assoc_value instead\n", current->comm, task_pid_nr(current)); params.assoc_id = 0; } else if (len >= sizeof(struct sctp_assoc_value)) { len = sizeof(struct sctp_assoc_value); if (copy_from_user(&params, optval, sizeof(params))) return -EFAULT; } else return -EINVAL; asoc = sctp_id2assoc(sk, params.assoc_id); if (!asoc && params.assoc_id && sctp_style(sk, UDP)) return -EINVAL; if (asoc) params.assoc_value = asoc->frag_point; else params.assoc_value = sctp_sk(sk)->user_frag; if (put_user(len, optlen)) return -EFAULT; if (len == sizeof(int)) { if (copy_to_user(optval, &params.assoc_value, len)) return -EFAULT; } else { if (copy_to_user(optval, &params, len)) return -EFAULT; } return 0; } /* * 7.1.24. 
Get or set fragmented interleave (SCTP_FRAGMENT_INTERLEAVE) * (chapter and verse is quoted at sctp_setsockopt_fragment_interleave()) */ static int sctp_getsockopt_fragment_interleave(struct sock *sk, int len, char __user *optval, int __user *optlen) { int val; if (len < sizeof(int)) return -EINVAL; len = sizeof(int); val = sctp_sk(sk)->frag_interleave; if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &val, len)) return -EFAULT; return 0; } /* * 7.1.25. Set or Get the sctp partial delivery point * (chapter and verse is quoted at sctp_setsockopt_partial_delivery_point()) */ static int sctp_getsockopt_partial_delivery_point(struct sock *sk, int len, char __user *optval, int __user *optlen) { u32 val; if (len < sizeof(u32)) return -EINVAL; len = sizeof(u32); val = sctp_sk(sk)->pd_point; if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &val, len)) return -EFAULT; return 0; } /* * 7.1.28. Set or Get the maximum burst (SCTP_MAX_BURST) * (chapter and verse is quoted at sctp_setsockopt_maxburst()) */ static int sctp_getsockopt_maxburst(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_assoc_value params; struct sctp_sock *sp; struct sctp_association *asoc; if (len == sizeof(int)) { pr_warn_ratelimited(DEPRECATED "%s (pid %d) " "Use of int in max_burst socket option.\n" "Use struct sctp_assoc_value instead\n", current->comm, task_pid_nr(current)); params.assoc_id = 0; } else if (len >= sizeof(struct sctp_assoc_value)) { len = sizeof(struct sctp_assoc_value); if (copy_from_user(&params, optval, len)) return -EFAULT; } else return -EINVAL; sp = sctp_sk(sk); if (params.assoc_id != 0) { asoc = sctp_id2assoc(sk, params.assoc_id); if (!asoc) return -EINVAL; params.assoc_value = asoc->max_burst; } else params.assoc_value = sp->max_burst; if (len == sizeof(int)) { if (copy_to_user(optval, &params.assoc_value, len)) return -EFAULT; } else { if (copy_to_user(optval, &params, len)) return -EFAULT; } return 0; } static int sctp_getsockopt_hmac_ident(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_endpoint *ep = sctp_sk(sk)->ep; struct sctp_hmacalgo __user *p = (void __user *)optval; struct sctp_hmac_algo_param *hmacs; __u16 data_len = 0; u32 num_idents; if (!ep->auth_enable) return -EACCES; hmacs = ep->auth_hmacs_list; data_len = ntohs(hmacs->param_hdr.length) - sizeof(sctp_paramhdr_t); if (len < sizeof(struct sctp_hmacalgo) + data_len) return -EINVAL; len = sizeof(struct sctp_hmacalgo) + data_len; num_idents = data_len / sizeof(u16); if (put_user(len, optlen)) return -EFAULT; if (put_user(num_idents, &p->shmac_num_idents)) return -EFAULT; if (copy_to_user(p->shmac_idents, hmacs->hmac_ids, data_len)) return -EFAULT; return 0; } static int sctp_getsockopt_active_key(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_endpoint *ep = sctp_sk(sk)->ep; struct sctp_authkeyid val; struct sctp_association *asoc; if (!ep->auth_enable) return -EACCES; if (len < sizeof(struct sctp_authkeyid)) return -EINVAL; if (copy_from_user(&val, optval, sizeof(struct sctp_authkeyid))) return -EFAULT; asoc = sctp_id2assoc(sk, val.scact_assoc_id); if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP)) return -EINVAL; if (asoc) val.scact_keynumber = asoc->active_key_id; else val.scact_keynumber = ep->active_key_id; len = sizeof(struct sctp_authkeyid); if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &val, len)) return -EFAULT; return 0; } static int sctp_getsockopt_peer_auth_chunks(struct 
sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_endpoint *ep = sctp_sk(sk)->ep; struct sctp_authchunks __user *p = (void __user *)optval; struct sctp_authchunks val; struct sctp_association *asoc; struct sctp_chunks_param *ch; u32 num_chunks = 0; char __user *to; if (!ep->auth_enable) return -EACCES; if (len < sizeof(struct sctp_authchunks)) return -EINVAL; if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks))) return -EFAULT; to = p->gauth_chunks; asoc = sctp_id2assoc(sk, val.gauth_assoc_id); if (!asoc) return -EINVAL; ch = asoc->peer.peer_chunks; if (!ch) goto num; /* See if the user provided enough room for all the data */ num_chunks = ntohs(ch->param_hdr.length) - sizeof(sctp_paramhdr_t); if (len < num_chunks) return -EINVAL; if (copy_to_user(to, ch->chunks, num_chunks)) return -EFAULT; num: len = sizeof(struct sctp_authchunks) + num_chunks; if (put_user(len, optlen)) return -EFAULT; if (put_user(num_chunks, &p->gauth_number_of_chunks)) return -EFAULT; return 0; } static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_endpoint *ep = sctp_sk(sk)->ep; struct sctp_authchunks __user *p = (void __user *)optval; struct sctp_authchunks val; struct sctp_association *asoc; struct sctp_chunks_param *ch; u32 num_chunks = 0; char __user *to; if (!ep->auth_enable) return -EACCES; if (len < sizeof(struct sctp_authchunks)) return -EINVAL; if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks))) return -EFAULT; to = p->gauth_chunks; asoc = sctp_id2assoc(sk, val.gauth_assoc_id); if (!asoc && val.gauth_assoc_id && sctp_style(sk, UDP)) return -EINVAL; if (asoc) ch = (struct sctp_chunks_param *)asoc->c.auth_chunks; else ch = ep->auth_chunk_list; if (!ch) goto num; num_chunks = ntohs(ch->param_hdr.length) - sizeof(sctp_paramhdr_t); if (len < sizeof(struct sctp_authchunks) + num_chunks) return -EINVAL; if (copy_to_user(to, ch->chunks, num_chunks)) return -EFAULT; num: len = sizeof(struct sctp_authchunks) + num_chunks; if (put_user(len, optlen)) return -EFAULT; if (put_user(num_chunks, &p->gauth_number_of_chunks)) return -EFAULT; return 0; } /* * 8.2.5. Get the Current Number of Associations (SCTP_GET_ASSOC_NUMBER) * This option gets the current number of associations that are attached * to a one-to-many style socket. The option value is an uint32_t. */ static int sctp_getsockopt_assoc_number(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_sock *sp = sctp_sk(sk); struct sctp_association *asoc; u32 val = 0; if (sctp_style(sk, TCP)) return -EOPNOTSUPP; if (len < sizeof(u32)) return -EINVAL; len = sizeof(u32); list_for_each_entry(asoc, &(sp->ep->asocs), asocs) { val++; } if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &val, len)) return -EFAULT; return 0; } /* * 8.1.23 SCTP_AUTO_ASCONF * See the corresponding setsockopt entry as description */ static int sctp_getsockopt_auto_asconf(struct sock *sk, int len, char __user *optval, int __user *optlen) { int val = 0; if (len < sizeof(int)) return -EINVAL; len = sizeof(int); if (sctp_sk(sk)->do_auto_asconf && sctp_is_ep_boundall(sk)) val = 1; if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &val, len)) return -EFAULT; return 0; } /* * 8.2.6. Get the Current Identifiers of Associations * (SCTP_GET_ASSOC_ID_LIST) * * This option gets the current list of SCTP association identifiers of * the SCTP associations handled by a one-to-many style socket. 
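 *
 * For illustration only, a hypothetical userspace sketch: size the buffer
 * with SCTP_GET_ASSOC_NUMBER first, then fetch the identifiers (both
 * options are rejected on TCP-style sockets, as checked below, and the
 * count can of course change between the two calls):
 *
 *   uint32_t num;
 *   socklen_t len = sizeof(num);
 *   struct sctp_assoc_ids *ids;
 *
 *   getsockopt(fd, IPPROTO_SCTP, SCTP_GET_ASSOC_NUMBER, &num, &len);
 *   len = sizeof(*ids) + num * sizeof(sctp_assoc_t);
 *   ids = malloc(len);
 *   if (ids && getsockopt(fd, IPPROTO_SCTP, SCTP_GET_ASSOC_ID_LIST,
 *                         ids, &len) == 0)
 *           printf("%u associations\n", ids->gaids_number_of_ids);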
*/ static int sctp_getsockopt_assoc_ids(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_sock *sp = sctp_sk(sk); struct sctp_association *asoc; struct sctp_assoc_ids *ids; u32 num = 0; if (sctp_style(sk, TCP)) return -EOPNOTSUPP; if (len < sizeof(struct sctp_assoc_ids)) return -EINVAL; list_for_each_entry(asoc, &(sp->ep->asocs), asocs) { num++; } if (len < sizeof(struct sctp_assoc_ids) + sizeof(sctp_assoc_t) * num) return -EINVAL; len = sizeof(struct sctp_assoc_ids) + sizeof(sctp_assoc_t) * num; ids = kmalloc(len, GFP_KERNEL); if (unlikely(!ids)) return -ENOMEM; ids->gaids_number_of_ids = num; num = 0; list_for_each_entry(asoc, &(sp->ep->asocs), asocs) { ids->gaids_assoc_id[num++] = asoc->assoc_id; } if (put_user(len, optlen) || copy_to_user(optval, ids, len)) { kfree(ids); return -EFAULT; } kfree(ids); return 0; } /* * SCTP_PEER_ADDR_THLDS * * This option allows us to fetch the partially failed threshold for one or all * transports in an association. See Section 6.1 of: * http://www.ietf.org/id/draft-nishida-tsvwg-sctp-failover-05.txt */ static int sctp_getsockopt_paddr_thresholds(struct sock *sk, char __user *optval, int len, int __user *optlen) { struct sctp_paddrthlds val; struct sctp_transport *trans; struct sctp_association *asoc; if (len < sizeof(struct sctp_paddrthlds)) return -EINVAL; len = sizeof(struct sctp_paddrthlds); if (copy_from_user(&val, (struct sctp_paddrthlds __user *)optval, len)) return -EFAULT; if (sctp_is_any(sk, (const union sctp_addr *)&val.spt_address)) { asoc = sctp_id2assoc(sk, val.spt_assoc_id); if (!asoc) return -ENOENT; val.spt_pathpfthld = asoc->pf_retrans; val.spt_pathmaxrxt = asoc->pathmaxrxt; } else { trans = sctp_addr_id2transport(sk, &val.spt_address, val.spt_assoc_id); if (!trans) return -ENOENT; val.spt_pathmaxrxt = trans->pathmaxrxt; val.spt_pathpfthld = trans->pf_retrans; } if (put_user(len, optlen) || copy_to_user(optval, &val, len)) return -EFAULT; return 0; } /* * SCTP_GET_ASSOC_STATS * * This option retrieves local per endpoint statistics. It is modeled * after OpenSolaris' implementation */ static int sctp_getsockopt_assoc_stats(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_assoc_stats sas; struct sctp_association *asoc = NULL; /* User must provide at least the assoc id */ if (len < sizeof(sctp_assoc_t)) return -EINVAL; /* Allow the struct to grow and fill in as much as possible */ len = min_t(size_t, len, sizeof(sas)); if (copy_from_user(&sas, optval, len)) return -EFAULT; asoc = sctp_id2assoc(sk, sas.sas_assoc_id); if (!asoc) return -EINVAL; sas.sas_rtxchunks = asoc->stats.rtxchunks; sas.sas_gapcnt = asoc->stats.gapcnt; sas.sas_outofseqtsns = asoc->stats.outofseqtsns; sas.sas_osacks = asoc->stats.osacks; sas.sas_isacks = asoc->stats.isacks; sas.sas_octrlchunks = asoc->stats.octrlchunks; sas.sas_ictrlchunks = asoc->stats.ictrlchunks; sas.sas_oodchunks = asoc->stats.oodchunks; sas.sas_iodchunks = asoc->stats.iodchunks; sas.sas_ouodchunks = asoc->stats.ouodchunks; sas.sas_iuodchunks = asoc->stats.iuodchunks; sas.sas_idupchunks = asoc->stats.idupchunks; sas.sas_opackets = asoc->stats.opackets; sas.sas_ipackets = asoc->stats.ipackets; /* New high max rto observed, will return 0 if not a single * RTO update took place. 
obs_rto_ipaddr will be bogus * in such a case */ sas.sas_maxrto = asoc->stats.max_obs_rto; memcpy(&sas.sas_obs_rto_ipaddr, &asoc->stats.obs_rto_ipaddr, sizeof(struct sockaddr_storage)); /* Mark beginning of a new observation period */ asoc->stats.max_obs_rto = asoc->rto_min; if (put_user(len, optlen)) return -EFAULT; pr_debug("%s: len:%d, assoc_id:%d\n", __func__, len, sas.sas_assoc_id); if (copy_to_user(optval, &sas, len)) return -EFAULT; return 0; } static int sctp_getsockopt_recvrcvinfo(struct sock *sk, int len, char __user *optval, int __user *optlen) { int val = 0; if (len < sizeof(int)) return -EINVAL; len = sizeof(int); if (sctp_sk(sk)->recvrcvinfo) val = 1; if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &val, len)) return -EFAULT; return 0; } static int sctp_getsockopt_recvnxtinfo(struct sock *sk, int len, char __user *optval, int __user *optlen) { int val = 0; if (len < sizeof(int)) return -EINVAL; len = sizeof(int); if (sctp_sk(sk)->recvnxtinfo) val = 1; if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &val, len)) return -EFAULT; return 0; } static int sctp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen) { int retval = 0; int len; pr_debug("%s: sk:%p, optname:%d\n", __func__, sk, optname); /* I can hardly begin to describe how wrong this is. This is * so broken as to be worse than useless. The API draft * REALLY is NOT helpful here... I am not convinced that the * semantics of getsockopt() with a level OTHER THAN SOL_SCTP * are at all well-founded. */ if (level != SOL_SCTP) { struct sctp_af *af = sctp_sk(sk)->pf->af; retval = af->getsockopt(sk, level, optname, optval, optlen); return retval; } if (get_user(len, optlen)) return -EFAULT; lock_sock(sk); switch (optname) { case SCTP_STATUS: retval = sctp_getsockopt_sctp_status(sk, len, optval, optlen); break; case SCTP_DISABLE_FRAGMENTS: retval = sctp_getsockopt_disable_fragments(sk, len, optval, optlen); break; case SCTP_EVENTS: retval = sctp_getsockopt_events(sk, len, optval, optlen); break; case SCTP_AUTOCLOSE: retval = sctp_getsockopt_autoclose(sk, len, optval, optlen); break; case SCTP_SOCKOPT_PEELOFF: retval = sctp_getsockopt_peeloff(sk, len, optval, optlen); break; case SCTP_PEER_ADDR_PARAMS: retval = sctp_getsockopt_peer_addr_params(sk, len, optval, optlen); break; case SCTP_DELAYED_SACK: retval = sctp_getsockopt_delayed_ack(sk, len, optval, optlen); break; case SCTP_INITMSG: retval = sctp_getsockopt_initmsg(sk, len, optval, optlen); break; case SCTP_GET_PEER_ADDRS: retval = sctp_getsockopt_peer_addrs(sk, len, optval, optlen); break; case SCTP_GET_LOCAL_ADDRS: retval = sctp_getsockopt_local_addrs(sk, len, optval, optlen); break; case SCTP_SOCKOPT_CONNECTX3: retval = sctp_getsockopt_connectx3(sk, len, optval, optlen); break; case SCTP_DEFAULT_SEND_PARAM: retval = sctp_getsockopt_default_send_param(sk, len, optval, optlen); break; case SCTP_DEFAULT_SNDINFO: retval = sctp_getsockopt_default_sndinfo(sk, len, optval, optlen); break; case SCTP_PRIMARY_ADDR: retval = sctp_getsockopt_primary_addr(sk, len, optval, optlen); break; case SCTP_NODELAY: retval = sctp_getsockopt_nodelay(sk, len, optval, optlen); break; case SCTP_RTOINFO: retval = sctp_getsockopt_rtoinfo(sk, len, optval, optlen); break; case SCTP_ASSOCINFO: retval = sctp_getsockopt_associnfo(sk, len, optval, optlen); break; case SCTP_I_WANT_MAPPED_V4_ADDR: retval = sctp_getsockopt_mappedv4(sk, len, optval, optlen); break; case SCTP_MAXSEG: retval = sctp_getsockopt_maxseg(sk, len, optval, 
optlen); break; case SCTP_GET_PEER_ADDR_INFO: retval = sctp_getsockopt_peer_addr_info(sk, len, optval, optlen); break; case SCTP_ADAPTATION_LAYER: retval = sctp_getsockopt_adaptation_layer(sk, len, optval, optlen); break; case SCTP_CONTEXT: retval = sctp_getsockopt_context(sk, len, optval, optlen); break; case SCTP_FRAGMENT_INTERLEAVE: retval = sctp_getsockopt_fragment_interleave(sk, len, optval, optlen); break; case SCTP_PARTIAL_DELIVERY_POINT: retval = sctp_getsockopt_partial_delivery_point(sk, len, optval, optlen); break; case SCTP_MAX_BURST: retval = sctp_getsockopt_maxburst(sk, len, optval, optlen); break; case SCTP_AUTH_KEY: case SCTP_AUTH_CHUNK: case SCTP_AUTH_DELETE_KEY: retval = -EOPNOTSUPP; break; case SCTP_HMAC_IDENT: retval = sctp_getsockopt_hmac_ident(sk, len, optval, optlen); break; case SCTP_AUTH_ACTIVE_KEY: retval = sctp_getsockopt_active_key(sk, len, optval, optlen); break; case SCTP_PEER_AUTH_CHUNKS: retval = sctp_getsockopt_peer_auth_chunks(sk, len, optval, optlen); break; case SCTP_LOCAL_AUTH_CHUNKS: retval = sctp_getsockopt_local_auth_chunks(sk, len, optval, optlen); break; case SCTP_GET_ASSOC_NUMBER: retval = sctp_getsockopt_assoc_number(sk, len, optval, optlen); break; case SCTP_GET_ASSOC_ID_LIST: retval = sctp_getsockopt_assoc_ids(sk, len, optval, optlen); break; case SCTP_AUTO_ASCONF: retval = sctp_getsockopt_auto_asconf(sk, len, optval, optlen); break; case SCTP_PEER_ADDR_THLDS: retval = sctp_getsockopt_paddr_thresholds(sk, optval, len, optlen); break; case SCTP_GET_ASSOC_STATS: retval = sctp_getsockopt_assoc_stats(sk, len, optval, optlen); break; case SCTP_RECVRCVINFO: retval = sctp_getsockopt_recvrcvinfo(sk, len, optval, optlen); break; case SCTP_RECVNXTINFO: retval = sctp_getsockopt_recvnxtinfo(sk, len, optval, optlen); break; default: retval = -ENOPROTOOPT; break; } release_sock(sk); return retval; } static void sctp_hash(struct sock *sk) { /* STUB */ } static void sctp_unhash(struct sock *sk) { /* STUB */ } /* Check if port is acceptable. Possibly find first available port. * * The port hash table (contained in the 'global' SCTP protocol storage * returned by struct sctp_protocol *sctp_get_protocol()). The hash * table is an array of 4096 lists (sctp_bind_hashbucket). Each * list (the list number is the port number hashed out, so as you * would expect from a hash function, all the ports in a given list have * such a number that hashes out to the same list number; you were * expecting that, right?); so each list has a set of ports, with a * link to the socket (struct sock) that uses it, the port number and * a fastreuse flag (FIXME: NPI ipg). */ static struct sctp_bind_bucket *sctp_bucket_create( struct sctp_bind_hashbucket *head, struct net *, unsigned short snum); static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr) { struct sctp_bind_hashbucket *head; /* hash list */ struct sctp_bind_bucket *pp; unsigned short snum; int ret; snum = ntohs(addr->v4.sin_port); pr_debug("%s: begins, snum:%d\n", __func__, snum); local_bh_disable(); if (snum == 0) { /* Search for an available port. 
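 * Strategy: start from a random offset within the local port range and
 * probe candidate ports one by one, skipping administratively reserved
 * ports; a candidate is accepted as soon as no bind bucket for that port
 * exists in this network namespace's hash chain.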
*/ int low, high, remaining, index; unsigned int rover; struct net *net = sock_net(sk); inet_get_local_port_range(net, &low, &high); remaining = (high - low) + 1; rover = prandom_u32() % remaining + low; do { rover++; if ((rover < low) || (rover > high)) rover = low; if (inet_is_local_reserved_port(net, rover)) continue; index = sctp_phashfn(sock_net(sk), rover); head = &sctp_port_hashtable[index]; spin_lock(&head->lock); sctp_for_each_hentry(pp, &head->chain) if ((pp->port == rover) && net_eq(sock_net(sk), pp->net)) goto next; break; next: spin_unlock(&head->lock); } while (--remaining > 0); /* Exhausted local port range during search? */ ret = 1; if (remaining <= 0) goto fail; /* OK, here is the one we will use. HEAD (the port * hash table list entry) is non-NULL and we hold it's * mutex. */ snum = rover; } else { /* We are given an specific port number; we verify * that it is not being used. If it is used, we will * exahust the search in the hash list corresponding * to the port number (snum) - we detect that with the * port iterator, pp being NULL. */ head = &sctp_port_hashtable[sctp_phashfn(sock_net(sk), snum)]; spin_lock(&head->lock); sctp_for_each_hentry(pp, &head->chain) { if ((pp->port == snum) && net_eq(pp->net, sock_net(sk))) goto pp_found; } } pp = NULL; goto pp_not_found; pp_found: if (!hlist_empty(&pp->owner)) { /* We had a port hash table hit - there is an * available port (pp != NULL) and it is being * used by other socket (pp->owner not empty); that other * socket is going to be sk2. */ int reuse = sk->sk_reuse; struct sock *sk2; pr_debug("%s: found a possible match\n", __func__); if (pp->fastreuse && sk->sk_reuse && sk->sk_state != SCTP_SS_LISTENING) goto success; /* Run through the list of sockets bound to the port * (pp->port) [via the pointers bind_next and * bind_pprev in the struct sock *sk2 (pp->sk)]. On each one, * we get the endpoint they describe and run through * the endpoint's list of IP (v4 or v6) addresses, * comparing each of the addresses with the address of * the socket sk. If we find a match, then that means * that this port/socket (sk) combination are already * in an endpoint. */ sk_for_each_bound(sk2, &pp->owner) { struct sctp_endpoint *ep2; ep2 = sctp_sk(sk2)->ep; if (sk == sk2 || (reuse && sk2->sk_reuse && sk2->sk_state != SCTP_SS_LISTENING)) continue; if (sctp_bind_addr_conflict(&ep2->base.bind_addr, addr, sctp_sk(sk2), sctp_sk(sk))) { ret = (long)sk2; goto fail_unlock; } } pr_debug("%s: found a match\n", __func__); } pp_not_found: /* If there was a hash table miss, create a new port. */ ret = 1; if (!pp && !(pp = sctp_bucket_create(head, sock_net(sk), snum))) goto fail_unlock; /* In either case (hit or miss), make sure fastreuse is 1 only * if sk->sk_reuse is too (that is, if the caller requested * SO_REUSEADDR on this socket -sk-). */ if (hlist_empty(&pp->owner)) { if (sk->sk_reuse && sk->sk_state != SCTP_SS_LISTENING) pp->fastreuse = 1; else pp->fastreuse = 0; } else if (pp->fastreuse && (!sk->sk_reuse || sk->sk_state == SCTP_SS_LISTENING)) pp->fastreuse = 0; /* We are set, so fill up all the data in the hash table * entry, tie the socket list information with the rest of the * sockets FIXME: Blurry, NPI (ipg). */ success: if (!sctp_sk(sk)->bind_hash) { inet_sk(sk)->inet_num = snum; sk_add_bind_node(sk, &pp->owner); sctp_sk(sk)->bind_hash = pp; } ret = 0; fail_unlock: spin_unlock(&head->lock); fail: local_bh_enable(); return ret; } /* Assign a 'snum' port to the socket. If snum == 0, an ephemeral * port is requested. 
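 * Returns zero on success and a non-zero value when the port could not
 * be obtained; callers such as sctp_listen_start() map that failure to
 * -EADDRINUSE.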
*/ static int sctp_get_port(struct sock *sk, unsigned short snum) { union sctp_addr addr; struct sctp_af *af = sctp_sk(sk)->pf->af; /* Set up a dummy address struct from the sk. */ af->from_sk(&addr, sk); addr.v4.sin_port = htons(snum); /* Note: sk->sk_num gets filled in if ephemeral port request. */ return !!sctp_get_port_local(sk, &addr); } /* * Move a socket to LISTENING state. */ static int sctp_listen_start(struct sock *sk, int backlog) { struct sctp_sock *sp = sctp_sk(sk); struct sctp_endpoint *ep = sp->ep; struct crypto_hash *tfm = NULL; char alg[32]; /* Allocate HMAC for generating cookie. */ if (!sp->hmac && sp->sctp_hmac_alg) { sprintf(alg, "hmac(%s)", sp->sctp_hmac_alg); tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC); if (IS_ERR(tfm)) { net_info_ratelimited("failed to load transform for %s: %ld\n", sp->sctp_hmac_alg, PTR_ERR(tfm)); return -ENOSYS; } sctp_sk(sk)->hmac = tfm; } /* * If a bind() or sctp_bindx() is not called prior to a listen() * call that allows new associations to be accepted, the system * picks an ephemeral port and will choose an address set equivalent * to binding with a wildcard address. * * This is not currently spelled out in the SCTP sockets * extensions draft, but follows the practice as seen in TCP * sockets. * */ sk->sk_state = SCTP_SS_LISTENING; if (!ep->base.bind_addr.port) { if (sctp_autobind(sk)) return -EAGAIN; } else { if (sctp_get_port(sk, inet_sk(sk)->inet_num)) { sk->sk_state = SCTP_SS_CLOSED; return -EADDRINUSE; } } sk->sk_max_ack_backlog = backlog; sctp_hash_endpoint(ep); return 0; } /* * 4.1.3 / 5.1.3 listen() * * By default, new associations are not accepted for UDP style sockets. * An application uses listen() to mark a socket as being able to * accept new associations. * * On TCP style sockets, applications use listen() to ready the SCTP * endpoint for accepting inbound associations. * * On both types of endpoints a backlog of '0' disables listening. * * Move a socket to LISTENING state. */ int sctp_inet_listen(struct socket *sock, int backlog) { struct sock *sk = sock->sk; struct sctp_endpoint *ep = sctp_sk(sk)->ep; int err = -EINVAL; if (unlikely(backlog < 0)) return err; lock_sock(sk); /* Peeled-off sockets are not allowed to listen(). */ if (sctp_style(sk, UDP_HIGH_BANDWIDTH)) goto out; if (sock->state != SS_UNCONNECTED) goto out; /* If backlog is zero, disable listening. */ if (!backlog) { if (sctp_sstate(sk, CLOSED)) goto out; err = 0; sctp_unhash_endpoint(ep); sk->sk_state = SCTP_SS_CLOSED; if (sk->sk_reuse) sctp_sk(sk)->bind_hash->fastreuse = 1; goto out; } /* If we are already listening, just update the backlog */ if (sctp_sstate(sk, LISTENING)) sk->sk_max_ack_backlog = backlog; else { err = sctp_listen_start(sk, backlog); if (err) goto out; } err = 0; out: release_sock(sk); return err; } /* * This function is done by modeling the current datagram_poll() and the * tcp_poll(). Note that, based on these implementations, we don't * lock the socket in this function, even though it seems that, * ideally, locking or some other mechanisms can be used to ensure * the integrity of the counters (sndbuf and wmem_alloc) used * in this place. We assume that we don't need locks either until proven * otherwise. * * Another thing to note is that we include the Async I/O support * here, again, by modeling the current TCP/UDP code. We don't have * a good way to test with it yet. 
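 *
 * For illustration only, a hypothetical userspace counterpart waiting for
 * readability with poll(2) ("handle_readable" stands in for whatever the
 * application does with a readable socket):
 *
 *   struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *   if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *           handle_readable(fd);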
*/ unsigned int sctp_poll(struct file *file, struct socket *sock, poll_table *wait) { struct sock *sk = sock->sk; struct sctp_sock *sp = sctp_sk(sk); unsigned int mask; poll_wait(file, sk_sleep(sk), wait); /* A TCP-style listening socket becomes readable when the accept queue * is not empty. */ if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)) return (!list_empty(&sp->ep->asocs)) ? (POLLIN | POLLRDNORM) : 0; mask = 0; /* Is there any exceptional events? */ if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) mask |= POLLERR | (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0); if (sk->sk_shutdown & RCV_SHUTDOWN) mask |= POLLRDHUP | POLLIN | POLLRDNORM; if (sk->sk_shutdown == SHUTDOWN_MASK) mask |= POLLHUP; /* Is it readable? Reconsider this code with TCP-style support. */ if (!skb_queue_empty(&sk->sk_receive_queue)) mask |= POLLIN | POLLRDNORM; /* The association is either gone or not ready. */ if (!sctp_style(sk, UDP) && sctp_sstate(sk, CLOSED)) return mask; /* Is it writable? */ if (sctp_writeable(sk)) { mask |= POLLOUT | POLLWRNORM; } else { set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); /* * Since the socket is not locked, the buffer * might be made available after the writeable check and * before the bit is set. This could cause a lost I/O * signal. tcp_poll() has a race breaker for this race * condition. Based on their implementation, we put * in the following code to cover it as well. */ if (sctp_writeable(sk)) mask |= POLLOUT | POLLWRNORM; } return mask; } /******************************************************************** * 2nd Level Abstractions ********************************************************************/ static struct sctp_bind_bucket *sctp_bucket_create( struct sctp_bind_hashbucket *head, struct net *net, unsigned short snum) { struct sctp_bind_bucket *pp; pp = kmem_cache_alloc(sctp_bucket_cachep, GFP_ATOMIC); if (pp) { SCTP_DBG_OBJCNT_INC(bind_bucket); pp->port = snum; pp->fastreuse = 0; INIT_HLIST_HEAD(&pp->owner); pp->net = net; hlist_add_head(&pp->node, &head->chain); } return pp; } /* Caller must hold hashbucket lock for this tb with local BH disabled */ static void sctp_bucket_destroy(struct sctp_bind_bucket *pp) { if (pp && hlist_empty(&pp->owner)) { __hlist_del(&pp->node); kmem_cache_free(sctp_bucket_cachep, pp); SCTP_DBG_OBJCNT_DEC(bind_bucket); } } /* Release this socket's reference to a local port. */ static inline void __sctp_put_port(struct sock *sk) { struct sctp_bind_hashbucket *head = &sctp_port_hashtable[sctp_phashfn(sock_net(sk), inet_sk(sk)->inet_num)]; struct sctp_bind_bucket *pp; spin_lock(&head->lock); pp = sctp_sk(sk)->bind_hash; __sk_del_bind_node(sk); sctp_sk(sk)->bind_hash = NULL; inet_sk(sk)->inet_num = 0; sctp_bucket_destroy(pp); spin_unlock(&head->lock); } void sctp_put_port(struct sock *sk) { local_bh_disable(); __sctp_put_port(sk); local_bh_enable(); } /* * The system picks an ephemeral port and choose an address set equivalent * to binding with a wildcard address. * One of those addresses will be the primary address for the association. * This automatically enables the multihoming capability of SCTP. */ static int sctp_autobind(struct sock *sk) { union sctp_addr autoaddr; struct sctp_af *af; __be16 port; /* Initialize a local sockaddr structure to INADDR_ANY. */ af = sctp_sk(sk)->pf->af; port = htons(inet_sk(sk)->inet_num); af->inaddr_any(&autoaddr, port); return sctp_do_bind(sk, &autoaddr, af->sockaddr_len); } /* Parse out IPPROTO_SCTP CMSG headers. Perform only minimal validation. 
* * From RFC 2292 * 4.2 The cmsghdr Structure * * * When ancillary data is sent or received, any number of ancillary data * objects can be specified by the msg_control and msg_controllen members of * the msghdr structure, because each object is preceded by * a cmsghdr structure defining the object's length (the cmsg_len member). * Historically Berkeley-derived implementations have passed only one object * at a time, but this API allows multiple objects to be * passed in a single call to sendmsg() or recvmsg(). The following example * shows two ancillary data objects in a control buffer. * * |<--------------------------- msg_controllen -------------------------->| * | | * * |<----- ancillary data object ----->|<----- ancillary data object ----->| * * |<---------- CMSG_SPACE() --------->|<---------- CMSG_SPACE() --------->| * | | | * * |<---------- cmsg_len ---------->| |<--------- cmsg_len ----------->| | * * |<--------- CMSG_LEN() --------->| |<-------- CMSG_LEN() ---------->| | * | | | | | * * +-----+-----+-----+--+-----------+--+-----+-----+-----+--+-----------+--+ * |cmsg_|cmsg_|cmsg_|XX| |XX|cmsg_|cmsg_|cmsg_|XX| |XX| * * |len |level|type |XX|cmsg_data[]|XX|len |level|type |XX|cmsg_data[]|XX| * * +-----+-----+-----+--+-----------+--+-----+-----+-----+--+-----------+--+ * ^ * | * * msg_control * points here */ static int sctp_msghdr_parse(const struct msghdr *msg, sctp_cmsgs_t *cmsgs) { struct cmsghdr *cmsg; struct msghdr *my_msg = (struct msghdr *)msg; for_each_cmsghdr(cmsg, my_msg) { if (!CMSG_OK(my_msg, cmsg)) return -EINVAL; /* Should we parse this header or ignore? */ if (cmsg->cmsg_level != IPPROTO_SCTP) continue; /* Strictly check lengths following example in SCM code. */ switch (cmsg->cmsg_type) { case SCTP_INIT: /* SCTP Socket API Extension * 5.3.1 SCTP Initiation Structure (SCTP_INIT) * * This cmsghdr structure provides information for * initializing new SCTP associations with sendmsg(). * The SCTP_INITMSG socket option uses this same data * structure. This structure is not used for * recvmsg(). * * cmsg_level cmsg_type cmsg_data[] * ------------ ------------ ---------------------- * IPPROTO_SCTP SCTP_INIT struct sctp_initmsg */ if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct sctp_initmsg))) return -EINVAL; cmsgs->init = CMSG_DATA(cmsg); break; case SCTP_SNDRCV: /* SCTP Socket API Extension * 5.3.2 SCTP Header Information Structure(SCTP_SNDRCV) * * This cmsghdr structure specifies SCTP options for * sendmsg() and describes SCTP header information * about a received message through recvmsg(). * * cmsg_level cmsg_type cmsg_data[] * ------------ ------------ ---------------------- * IPPROTO_SCTP SCTP_SNDRCV struct sctp_sndrcvinfo */ if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct sctp_sndrcvinfo))) return -EINVAL; cmsgs->srinfo = CMSG_DATA(cmsg); if (cmsgs->srinfo->sinfo_flags & ~(SCTP_UNORDERED | SCTP_ADDR_OVER | SCTP_ABORT | SCTP_EOF)) return -EINVAL; break; case SCTP_SNDINFO: /* SCTP Socket API Extension * 5.3.4 SCTP Send Information Structure (SCTP_SNDINFO) * * This cmsghdr structure specifies SCTP options for * sendmsg(). This structure and SCTP_RCVINFO replaces * SCTP_SNDRCV which has been deprecated. 
* * cmsg_level cmsg_type cmsg_data[] * ------------ ------------ --------------------- * IPPROTO_SCTP SCTP_SNDINFO struct sctp_sndinfo */ if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct sctp_sndinfo))) return -EINVAL; cmsgs->sinfo = CMSG_DATA(cmsg); if (cmsgs->sinfo->snd_flags & ~(SCTP_UNORDERED | SCTP_ADDR_OVER | SCTP_ABORT | SCTP_EOF)) return -EINVAL; break; default: return -EINVAL; } } return 0; } /* * Wait for a packet.. * Note: This function is the same function as in core/datagram.c * with a few modifications to make lksctp work. */ static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p) { int error; DEFINE_WAIT(wait); prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); /* Socket errors? */ error = sock_error(sk); if (error) goto out; if (!skb_queue_empty(&sk->sk_receive_queue)) goto ready; /* Socket shut down? */ if (sk->sk_shutdown & RCV_SHUTDOWN) goto out; /* Sequenced packets can come disconnected. If so we report the * problem. */ error = -ENOTCONN; /* Is there a good reason to think that we may receive some data? */ if (list_empty(&sctp_sk(sk)->ep->asocs) && !sctp_sstate(sk, LISTENING)) goto out; /* Handle signals. */ if (signal_pending(current)) goto interrupted; /* Let another process have a go. Since we are going to sleep * anyway. Note: This may cause odd behaviors if the message * does not fit in the user's buffer, but this seems to be the * only way to honor MSG_DONTWAIT realistically. */ release_sock(sk); *timeo_p = schedule_timeout(*timeo_p); lock_sock(sk); ready: finish_wait(sk_sleep(sk), &wait); return 0; interrupted: error = sock_intr_errno(*timeo_p); out: finish_wait(sk_sleep(sk), &wait); *err = error; return error; } /* Receive a datagram. * Note: This is pretty much the same routine as in core/datagram.c * with a few changes to make lksctp work. */ struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags, int noblock, int *err) { int error; struct sk_buff *skb; long timeo; timeo = sock_rcvtimeo(sk, noblock); pr_debug("%s: timeo:%ld, max:%ld\n", __func__, timeo, MAX_SCHEDULE_TIMEOUT); do { /* Again only user level code calls this function, * so nothing interrupt level * will suddenly eat the receive_queue. * * Look at current nfs client by the way... * However, this function was correct in any case. 8) */ if (flags & MSG_PEEK) { spin_lock_bh(&sk->sk_receive_queue.lock); skb = skb_peek(&sk->sk_receive_queue); if (skb) atomic_inc(&skb->users); spin_unlock_bh(&sk->sk_receive_queue.lock); } else { skb = skb_dequeue(&sk->sk_receive_queue); } if (skb) return skb; /* Caller is allowed not to check sk->sk_err before calling. */ error = sock_error(sk); if (error) goto no_packet; if (sk->sk_shutdown & RCV_SHUTDOWN) break; if (sk_can_busy_loop(sk) && sk_busy_loop(sk, noblock)) continue; /* User doesn't want to wait. */ error = -EAGAIN; if (!timeo) goto no_packet; } while (sctp_wait_for_packet(sk, err, &timeo) == 0); return NULL; no_packet: *err = error; return NULL; } /* If sndbuf has changed, wake up per association sndbuf waiters. */ static void __sctp_write_space(struct sctp_association *asoc) { struct sock *sk = asoc->base.sk; struct socket *sock = sk->sk_socket; if ((sctp_wspace(asoc) > 0) && sock) { if (waitqueue_active(&asoc->wait)) wake_up_interruptible(&asoc->wait); if (sctp_writeable(sk)) { wait_queue_head_t *wq = sk_sleep(sk); if (wq && waitqueue_active(wq)) wake_up_interruptible(wq); /* Note that we try to include the Async I/O support * here by modeling from the current TCP/UDP code. * We have not tested with it yet. 
*/ if (!(sk->sk_shutdown & SEND_SHUTDOWN)) sock_wake_async(sock, SOCK_WAKE_SPACE, POLL_OUT); } } } static void sctp_wake_up_waiters(struct sock *sk, struct sctp_association *asoc) { struct sctp_association *tmp = asoc; /* We do accounting for the sndbuf space per association, * so we only need to wake our own association. */ if (asoc->ep->sndbuf_policy) return __sctp_write_space(asoc); /* If association goes down and is just flushing its * outq, then just normally notify others. */ if (asoc->base.dead) return sctp_write_space(sk); /* Accounting for the sndbuf space is per socket, so we * need to wake up others, try to be fair and in case of * other associations, let them have a go first instead * of just doing a sctp_write_space() call. * * Note that we reach sctp_wake_up_waiters() only when * associations free up queued chunks, thus we are under * lock and the list of associations on a socket is * guaranteed not to change. */ for (tmp = list_next_entry(tmp, asocs); 1; tmp = list_next_entry(tmp, asocs)) { /* Manually skip the head element. */ if (&tmp->asocs == &((sctp_sk(sk))->ep->asocs)) continue; /* Wake up association. */ __sctp_write_space(tmp); /* We've reached the end. */ if (tmp == asoc) break; } } /* Do accounting for the sndbuf space. * Decrement the used sndbuf space of the corresponding association by the * data size which was just transmitted(freed). */ static void sctp_wfree(struct sk_buff *skb) { struct sctp_chunk *chunk = skb_shinfo(skb)->destructor_arg; struct sctp_association *asoc = chunk->asoc; struct sock *sk = asoc->base.sk; asoc->sndbuf_used -= SCTP_DATA_SNDSIZE(chunk) + sizeof(struct sk_buff) + sizeof(struct sctp_chunk); atomic_sub(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc); /* * This undoes what is done via sctp_set_owner_w and sk_mem_charge */ sk->sk_wmem_queued -= skb->truesize; sk_mem_uncharge(sk, skb->truesize); sock_wfree(skb); sctp_wake_up_waiters(sk, asoc); sctp_association_put(asoc); } /* Do accounting for the receive space on the socket. * Accounting for the association is done in ulpevent.c * We set this as a destructor for the cloned data skbs so that * accounting is done at the correct time. */ void sctp_sock_rfree(struct sk_buff *skb) { struct sock *sk = skb->sk; struct sctp_ulpevent *event = sctp_skb2event(skb); atomic_sub(event->rmem_len, &sk->sk_rmem_alloc); /* * Mimic the behavior of sock_rfree */ sk_mem_uncharge(sk, event->rmem_len); } /* Helper function to wait for space in the sndbuf. */ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p, size_t msg_len) { struct sock *sk = asoc->base.sk; int err = 0; long current_timeo = *timeo_p; DEFINE_WAIT(wait); pr_debug("%s: asoc:%p, timeo:%ld, msg_len:%zu\n", __func__, asoc, *timeo_p, msg_len); /* Increment the association's refcnt. */ sctp_association_hold(asoc); /* Wait on the association specific sndbuf space. */ for (;;) { prepare_to_wait_exclusive(&asoc->wait, &wait, TASK_INTERRUPTIBLE); if (!*timeo_p) goto do_nonblock; if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING || asoc->base.dead) goto do_error; if (signal_pending(current)) goto do_interrupted; if (msg_len <= sctp_wspace(asoc)) break; /* Let another process have a go. Since we are going * to sleep anyway. */ release_sock(sk); current_timeo = schedule_timeout(current_timeo); BUG_ON(sk != asoc->base.sk); lock_sock(sk); *timeo_p = current_timeo; } out: finish_wait(&asoc->wait, &wait); /* Release the association's refcnt. 
*/ sctp_association_put(asoc); return err; do_error: err = -EPIPE; goto out; do_interrupted: err = sock_intr_errno(*timeo_p); goto out; do_nonblock: err = -EAGAIN; goto out; } void sctp_data_ready(struct sock *sk) { struct socket_wq *wq; rcu_read_lock(); wq = rcu_dereference(sk->sk_wq); if (wq_has_sleeper(wq)) wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLRDNORM | POLLRDBAND); sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); rcu_read_unlock(); } /* If socket sndbuf has changed, wake up all per association waiters. */ void sctp_write_space(struct sock *sk) { struct sctp_association *asoc; /* Wake up the tasks in each wait queue. */ list_for_each_entry(asoc, &((sctp_sk(sk))->ep->asocs), asocs) { __sctp_write_space(asoc); } } /* Is there any sndbuf space available on the socket? * * Note that sk_wmem_alloc is the sum of the send buffers on all of the * associations on the same socket. For a UDP-style socket with * multiple associations, it is possible for it to be "unwriteable" * prematurely. I assume that this is acceptable because * a premature "unwriteable" is better than an accidental "writeable" which * would cause an unwanted block under certain circumstances. For the 1-1 * UDP-style sockets or TCP-style sockets, this code should work. * - Daisy */ static int sctp_writeable(struct sock *sk) { int amt = 0; amt = sk->sk_sndbuf - sk_wmem_alloc_get(sk); if (amt < 0) amt = 0; return amt; } /* Wait for an association to go into ESTABLISHED state. If timeout is 0, * returns immediately with EINPROGRESS. */ static int sctp_wait_for_connect(struct sctp_association *asoc, long *timeo_p) { struct sock *sk = asoc->base.sk; int err = 0; long current_timeo = *timeo_p; DEFINE_WAIT(wait); pr_debug("%s: asoc:%p, timeo:%ld\n", __func__, asoc, *timeo_p); /* Increment the association's refcnt. */ sctp_association_hold(asoc); for (;;) { prepare_to_wait_exclusive(&asoc->wait, &wait, TASK_INTERRUPTIBLE); if (!*timeo_p) goto do_nonblock; if (sk->sk_shutdown & RCV_SHUTDOWN) break; if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING || asoc->base.dead) goto do_error; if (signal_pending(current)) goto do_interrupted; if (sctp_state(asoc, ESTABLISHED)) break; /* Let another process have a go. Since we are going * to sleep anyway. */ release_sock(sk); current_timeo = schedule_timeout(current_timeo); lock_sock(sk); *timeo_p = current_timeo; } out: finish_wait(&asoc->wait, &wait); /* Release the association's refcnt. 
*/ sctp_association_put(asoc); return err; do_error: if (asoc->init_err_counter + 1 > asoc->max_init_attempts) err = -ETIMEDOUT; else err = -ECONNREFUSED; goto out; do_interrupted: err = sock_intr_errno(*timeo_p); goto out; do_nonblock: err = -EINPROGRESS; goto out; } static int sctp_wait_for_accept(struct sock *sk, long timeo) { struct sctp_endpoint *ep; int err = 0; DEFINE_WAIT(wait); ep = sctp_sk(sk)->ep; for (;;) { prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); if (list_empty(&ep->asocs)) { release_sock(sk); timeo = schedule_timeout(timeo); lock_sock(sk); } err = -EINVAL; if (!sctp_sstate(sk, LISTENING)) break; err = 0; if (!list_empty(&ep->asocs)) break; err = sock_intr_errno(timeo); if (signal_pending(current)) break; err = -EAGAIN; if (!timeo) break; } finish_wait(sk_sleep(sk), &wait); return err; } static void sctp_wait_for_close(struct sock *sk, long timeout) { DEFINE_WAIT(wait); do { prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); if (list_empty(&sctp_sk(sk)->ep->asocs)) break; release_sock(sk); timeout = schedule_timeout(timeout); lock_sock(sk); } while (!signal_pending(current) && timeout); finish_wait(sk_sleep(sk), &wait); } static void sctp_skb_set_owner_r_frag(struct sk_buff *skb, struct sock *sk) { struct sk_buff *frag; if (!skb->data_len) goto done; /* Don't forget the fragments. */ skb_walk_frags(skb, frag) sctp_skb_set_owner_r_frag(frag, sk); done: sctp_skb_set_owner_r(skb, sk); } void sctp_copy_sock(struct sock *newsk, struct sock *sk, struct sctp_association *asoc) { struct inet_sock *inet = inet_sk(sk); struct inet_sock *newinet; newsk->sk_type = sk->sk_type; newsk->sk_bound_dev_if = sk->sk_bound_dev_if; newsk->sk_flags = sk->sk_flags; newsk->sk_no_check_tx = sk->sk_no_check_tx; newsk->sk_no_check_rx = sk->sk_no_check_rx; newsk->sk_reuse = sk->sk_reuse; newsk->sk_shutdown = sk->sk_shutdown; newsk->sk_destruct = sctp_destruct_sock; newsk->sk_family = sk->sk_family; newsk->sk_protocol = IPPROTO_SCTP; newsk->sk_backlog_rcv = sk->sk_prot->backlog_rcv; newsk->sk_sndbuf = sk->sk_sndbuf; newsk->sk_rcvbuf = sk->sk_rcvbuf; newsk->sk_lingertime = sk->sk_lingertime; newsk->sk_rcvtimeo = sk->sk_rcvtimeo; newsk->sk_sndtimeo = sk->sk_sndtimeo; newinet = inet_sk(newsk); /* Initialize sk's sport, dport, rcv_saddr and daddr for * getsockname() and getpeername() */ newinet->inet_sport = inet->inet_sport; newinet->inet_saddr = inet->inet_saddr; newinet->inet_rcv_saddr = inet->inet_rcv_saddr; newinet->inet_dport = htons(asoc->peer.port); newinet->pmtudisc = inet->pmtudisc; newinet->inet_id = asoc->next_tsn ^ jiffies; newinet->uc_ttl = inet->uc_ttl; newinet->mc_loop = 1; newinet->mc_ttl = 1; newinet->mc_index = 0; newinet->mc_list = NULL; } static inline void sctp_copy_descendant(struct sock *sk_to, const struct sock *sk_from) { int ancestor_size = sizeof(struct inet_sock) + sizeof(struct sctp_sock) - offsetof(struct sctp_sock, auto_asconf_list); if (sk_from->sk_family == PF_INET6) ancestor_size += sizeof(struct ipv6_pinfo); __inet_sk_copy_descendant(sk_to, sk_from, ancestor_size); } /* Populate the fields of the newsk from the oldsk and migrate the assoc * and its messages to the newsk. 
*/ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk, struct sctp_association *assoc, sctp_socket_type_t type) { struct sctp_sock *oldsp = sctp_sk(oldsk); struct sctp_sock *newsp = sctp_sk(newsk); struct sctp_bind_bucket *pp; /* hash list port iterator */ struct sctp_endpoint *newep = newsp->ep; struct sk_buff *skb, *tmp; struct sctp_ulpevent *event; struct sctp_bind_hashbucket *head; /* Migrate socket buffer sizes and all the socket level options to the * new socket. */ newsk->sk_sndbuf = oldsk->sk_sndbuf; newsk->sk_rcvbuf = oldsk->sk_rcvbuf; /* Brute force copy old sctp opt. */ sctp_copy_descendant(newsk, oldsk); /* Restore the ep value that was overwritten with the above structure * copy. */ newsp->ep = newep; newsp->hmac = NULL; /* Hook this new socket in to the bind_hash list. */ head = &sctp_port_hashtable[sctp_phashfn(sock_net(oldsk), inet_sk(oldsk)->inet_num)]; local_bh_disable(); spin_lock(&head->lock); pp = sctp_sk(oldsk)->bind_hash; sk_add_bind_node(newsk, &pp->owner); sctp_sk(newsk)->bind_hash = pp; inet_sk(newsk)->inet_num = inet_sk(oldsk)->inet_num; spin_unlock(&head->lock); local_bh_enable(); /* Copy the bind_addr list from the original endpoint to the new * endpoint so that we can handle restarts properly */ sctp_bind_addr_dup(&newsp->ep->base.bind_addr, &oldsp->ep->base.bind_addr, GFP_KERNEL); /* Move any messages in the old socket's receive queue that are for the * peeled off association to the new socket's receive queue. */ sctp_skb_for_each(skb, &oldsk->sk_receive_queue, tmp) { event = sctp_skb2event(skb); if (event->asoc == assoc) { __skb_unlink(skb, &oldsk->sk_receive_queue); __skb_queue_tail(&newsk->sk_receive_queue, skb); sctp_skb_set_owner_r_frag(skb, newsk); } } /* Clean up any messages pending delivery due to partial * delivery. Three cases: * 1) No partial deliver; no work. * 2) Peeling off partial delivery; keep pd_lobby in new pd_lobby. * 3) Peeling off non-partial delivery; move pd_lobby to receive_queue. */ skb_queue_head_init(&newsp->pd_lobby); atomic_set(&sctp_sk(newsk)->pd_mode, assoc->ulpq.pd_mode); if (atomic_read(&sctp_sk(oldsk)->pd_mode)) { struct sk_buff_head *queue; /* Decide which queue to move pd_lobby skbs to. */ if (assoc->ulpq.pd_mode) { queue = &newsp->pd_lobby; } else queue = &newsk->sk_receive_queue; /* Walk through the pd_lobby, looking for skbs that * need moved to the new socket. */ sctp_skb_for_each(skb, &oldsp->pd_lobby, tmp) { event = sctp_skb2event(skb); if (event->asoc == assoc) { __skb_unlink(skb, &oldsp->pd_lobby); __skb_queue_tail(queue, skb); sctp_skb_set_owner_r_frag(skb, newsk); } } /* Clear up any skbs waiting for the partial * delivery to finish. */ if (assoc->ulpq.pd_mode) sctp_clear_pd(oldsk, NULL); } sctp_skb_for_each(skb, &assoc->ulpq.reasm, tmp) sctp_skb_set_owner_r_frag(skb, newsk); sctp_skb_for_each(skb, &assoc->ulpq.lobby, tmp) sctp_skb_set_owner_r_frag(skb, newsk); /* Set the type of socket to indicate that it is peeled off from the * original UDP-style socket or created with the accept() call on a * TCP-style socket.. */ newsp->type = type; /* Mark the new socket "in-use" by the user so that any packets * that may arrive on the association after we've moved it are * queued to the backlog. This prevents a potential race between * backlog processing on the old socket and new-packet processing * on the new socket. * * The caller has just allocated newsk so we can guarantee that other * paths won't try to lock it and then oldsk. 
*/ lock_sock_nested(newsk, SINGLE_DEPTH_NESTING); sctp_assoc_migrate(assoc, newsk); /* If the association on the newsk is already closed before accept() * is called, set RCV_SHUTDOWN flag. */ if (sctp_state(assoc, CLOSED) && sctp_style(newsk, TCP)) newsk->sk_shutdown |= RCV_SHUTDOWN; newsk->sk_state = SCTP_SS_ESTABLISHED; release_sock(newsk); } /* This proto struct describes the ULP interface for SCTP. */ struct proto sctp_prot = { .name = "SCTP", .owner = THIS_MODULE, .close = sctp_close, .connect = sctp_connect, .disconnect = sctp_disconnect, .accept = sctp_accept, .ioctl = sctp_ioctl, .init = sctp_init_sock, .destroy = sctp_destroy_sock, .shutdown = sctp_shutdown, .setsockopt = sctp_setsockopt, .getsockopt = sctp_getsockopt, .sendmsg = sctp_sendmsg, .recvmsg = sctp_recvmsg, .bind = sctp_bind, .backlog_rcv = sctp_backlog_rcv, .hash = sctp_hash, .unhash = sctp_unhash, .get_port = sctp_get_port, .obj_size = sizeof(struct sctp_sock), .sysctl_mem = sysctl_sctp_mem, .sysctl_rmem = sysctl_sctp_rmem, .sysctl_wmem = sysctl_sctp_wmem, .memory_pressure = &sctp_memory_pressure, .enter_memory_pressure = sctp_enter_memory_pressure, .memory_allocated = &sctp_memory_allocated, .sockets_allocated = &sctp_sockets_allocated, }; #if IS_ENABLED(CONFIG_IPV6) struct proto sctpv6_prot = { .name = "SCTPv6", .owner = THIS_MODULE, .close = sctp_close, .connect = sctp_connect, .disconnect = sctp_disconnect, .accept = sctp_accept, .ioctl = sctp_ioctl, .init = sctp_init_sock, .destroy = sctp_destroy_sock, .shutdown = sctp_shutdown, .setsockopt = sctp_setsockopt, .getsockopt = sctp_getsockopt, .sendmsg = sctp_sendmsg, .recvmsg = sctp_recvmsg, .bind = sctp_bind, .backlog_rcv = sctp_backlog_rcv, .hash = sctp_hash, .unhash = sctp_unhash, .get_port = sctp_get_port, .obj_size = sizeof(struct sctp6_sock), .sysctl_mem = sysctl_sctp_mem, .sysctl_rmem = sysctl_sctp_rmem, .sysctl_wmem = sysctl_sctp_wmem, .memory_pressure = &sctp_memory_pressure, .enter_memory_pressure = sctp_enter_memory_pressure, .memory_allocated = &sctp_memory_allocated, .sockets_allocated = &sctp_sockets_allocated, }; #endif /* IS_ENABLED(CONFIG_IPV6) */
./CrossVul/dataset_final_sorted/CWE-362/c/good_1578_2
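The SCTP socket code in the record above implements the kernel side of bind/listen: sctp_get_port_local() performs the ephemeral-port search when port 0 is requested, and sctp_inet_listen() documents that a backlog of 0 disables listening. The short userspace sketch below is illustrative only (it is not part of the kernel sources in this record); it assumes a Linux host whose kernel was built with SCTP support, and all identifiers in it are mine.

/* Minimal, hedged userspace sketch: bind a one-to-one (TCP-style) SCTP socket
 * to port 0 so the kernel's ephemeral-port selection runs, start listening,
 * read back the chosen port, then disable listening again with backlog 0 as
 * described in the sctp_inet_listen() comment above. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>

int main(void)
{
	struct sockaddr_in addr;
	socklen_t len = sizeof(addr);
	int fd;

	/* One-to-one (TCP-style) SCTP socket. */
	fd = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);
	if (fd < 0) {
		perror("socket");	/* likely no SCTP support in this kernel */
		return 1;
	}

	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
	addr.sin_port = 0;	/* port 0 -> kernel runs its ephemeral-port search */

	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0 ||
	    listen(fd, 8) < 0) {	/* non-zero backlog: enable listening */
		perror("bind/listen");
		close(fd);
		return 1;
	}

	if (getsockname(fd, (struct sockaddr *)&addr, &len) == 0)
		printf("listening on ephemeral SCTP port %u\n",
		       ntohs(addr.sin_port));

	/* Per the kernel comment, a backlog of 0 disables listening again. */
	listen(fd, 0);

	close(fd);
	return 0;
}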
crossvul-cpp_data_bad_3459_3
/* * Copyright (C)2003,2004 USAGI/WIDE Project * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Authors Mitsuru KANDA <mk@linux-ipv6.org> * YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org> * * Based on net/ipv4/xfrm4_tunnel.c * */ #include <linux/module.h> #include <linux/xfrm.h> #include <linux/rculist.h> #include <net/ip.h> #include <net/xfrm.h> #include <net/ipv6.h> #include <linux/ipv6.h> #include <linux/icmpv6.h> #include <linux/mutex.h> #include <net/netns/generic.h> #define XFRM6_TUNNEL_SPI_BYADDR_HSIZE 256 #define XFRM6_TUNNEL_SPI_BYSPI_HSIZE 256 #define XFRM6_TUNNEL_SPI_MIN 1 #define XFRM6_TUNNEL_SPI_MAX 0xffffffff struct xfrm6_tunnel_net { struct hlist_head spi_byaddr[XFRM6_TUNNEL_SPI_BYADDR_HSIZE]; struct hlist_head spi_byspi[XFRM6_TUNNEL_SPI_BYSPI_HSIZE]; u32 spi; }; static int xfrm6_tunnel_net_id __read_mostly; static inline struct xfrm6_tunnel_net *xfrm6_tunnel_pernet(struct net *net) { return net_generic(net, xfrm6_tunnel_net_id); } /* * xfrm_tunnel_spi things are for allocating unique id ("spi") * per xfrm_address_t. */ struct xfrm6_tunnel_spi { struct hlist_node list_byaddr; struct hlist_node list_byspi; xfrm_address_t addr; u32 spi; atomic_t refcnt; struct rcu_head rcu_head; }; static DEFINE_SPINLOCK(xfrm6_tunnel_spi_lock); static struct kmem_cache *xfrm6_tunnel_spi_kmem __read_mostly; static inline unsigned xfrm6_tunnel_spi_hash_byaddr(xfrm_address_t *addr) { unsigned h; h = (__force u32)(addr->a6[0] ^ addr->a6[1] ^ addr->a6[2] ^ addr->a6[3]); h ^= h >> 16; h ^= h >> 8; h &= XFRM6_TUNNEL_SPI_BYADDR_HSIZE - 1; return h; } static inline unsigned xfrm6_tunnel_spi_hash_byspi(u32 spi) { return spi % XFRM6_TUNNEL_SPI_BYSPI_HSIZE; } static int __init xfrm6_tunnel_spi_init(void) { xfrm6_tunnel_spi_kmem = kmem_cache_create("xfrm6_tunnel_spi", sizeof(struct xfrm6_tunnel_spi), 0, SLAB_HWCACHE_ALIGN, NULL); if (!xfrm6_tunnel_spi_kmem) return -ENOMEM; return 0; } static void xfrm6_tunnel_spi_fini(void) { kmem_cache_destroy(xfrm6_tunnel_spi_kmem); } static struct xfrm6_tunnel_spi *__xfrm6_tunnel_spi_lookup(struct net *net, xfrm_address_t *saddr) { struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net); struct xfrm6_tunnel_spi *x6spi; struct hlist_node *pos; hlist_for_each_entry_rcu(x6spi, pos, &xfrm6_tn->spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)], list_byaddr) { if (memcmp(&x6spi->addr, saddr, sizeof(x6spi->addr)) == 0) return x6spi; } return NULL; } __be32 xfrm6_tunnel_spi_lookup(struct net *net, xfrm_address_t *saddr) { struct xfrm6_tunnel_spi *x6spi; u32 spi; rcu_read_lock_bh(); x6spi = __xfrm6_tunnel_spi_lookup(net, saddr); spi = x6spi ? 
x6spi->spi : 0; rcu_read_unlock_bh(); return htonl(spi); } EXPORT_SYMBOL(xfrm6_tunnel_spi_lookup); static int __xfrm6_tunnel_spi_check(struct net *net, u32 spi) { struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net); struct xfrm6_tunnel_spi *x6spi; int index = xfrm6_tunnel_spi_hash_byspi(spi); struct hlist_node *pos; hlist_for_each_entry(x6spi, pos, &xfrm6_tn->spi_byspi[index], list_byspi) { if (x6spi->spi == spi) return -1; } return index; } static u32 __xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr) { struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net); u32 spi; struct xfrm6_tunnel_spi *x6spi; int index; if (xfrm6_tn->spi < XFRM6_TUNNEL_SPI_MIN || xfrm6_tn->spi >= XFRM6_TUNNEL_SPI_MAX) xfrm6_tn->spi = XFRM6_TUNNEL_SPI_MIN; else xfrm6_tn->spi++; for (spi = xfrm6_tn->spi; spi <= XFRM6_TUNNEL_SPI_MAX; spi++) { index = __xfrm6_tunnel_spi_check(net, spi); if (index >= 0) goto alloc_spi; } for (spi = XFRM6_TUNNEL_SPI_MIN; spi < xfrm6_tn->spi; spi++) { index = __xfrm6_tunnel_spi_check(net, spi); if (index >= 0) goto alloc_spi; } spi = 0; goto out; alloc_spi: xfrm6_tn->spi = spi; x6spi = kmem_cache_alloc(xfrm6_tunnel_spi_kmem, GFP_ATOMIC); if (!x6spi) goto out; INIT_RCU_HEAD(&x6spi->rcu_head); memcpy(&x6spi->addr, saddr, sizeof(x6spi->addr)); x6spi->spi = spi; atomic_set(&x6spi->refcnt, 1); hlist_add_head_rcu(&x6spi->list_byspi, &xfrm6_tn->spi_byspi[index]); index = xfrm6_tunnel_spi_hash_byaddr(saddr); hlist_add_head_rcu(&x6spi->list_byaddr, &xfrm6_tn->spi_byaddr[index]); out: return spi; } __be32 xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr) { struct xfrm6_tunnel_spi *x6spi; u32 spi; spin_lock_bh(&xfrm6_tunnel_spi_lock); x6spi = __xfrm6_tunnel_spi_lookup(net, saddr); if (x6spi) { atomic_inc(&x6spi->refcnt); spi = x6spi->spi; } else spi = __xfrm6_tunnel_alloc_spi(net, saddr); spin_unlock_bh(&xfrm6_tunnel_spi_lock); return htonl(spi); } EXPORT_SYMBOL(xfrm6_tunnel_alloc_spi); static void x6spi_destroy_rcu(struct rcu_head *head) { kmem_cache_free(xfrm6_tunnel_spi_kmem, container_of(head, struct xfrm6_tunnel_spi, rcu_head)); } void xfrm6_tunnel_free_spi(struct net *net, xfrm_address_t *saddr) { struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net); struct xfrm6_tunnel_spi *x6spi; struct hlist_node *pos, *n; spin_lock_bh(&xfrm6_tunnel_spi_lock); hlist_for_each_entry_safe(x6spi, pos, n, &xfrm6_tn->spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)], list_byaddr) { if (memcmp(&x6spi->addr, saddr, sizeof(x6spi->addr)) == 0) { if (atomic_dec_and_test(&x6spi->refcnt)) { hlist_del_rcu(&x6spi->list_byaddr); hlist_del_rcu(&x6spi->list_byspi); call_rcu(&x6spi->rcu_head, x6spi_destroy_rcu); break; } } } spin_unlock_bh(&xfrm6_tunnel_spi_lock); } EXPORT_SYMBOL(xfrm6_tunnel_free_spi); static int xfrm6_tunnel_output(struct xfrm_state *x, struct sk_buff *skb) { skb_push(skb, -skb_network_offset(skb)); return 0; } static int xfrm6_tunnel_input(struct xfrm_state *x, struct sk_buff *skb) { return skb_network_header(skb)[IP6CB(skb)->nhoff]; } static int xfrm6_tunnel_rcv(struct sk_buff *skb) { struct net *net = dev_net(skb->dev); struct ipv6hdr *iph = ipv6_hdr(skb); __be32 spi; spi = xfrm6_tunnel_spi_lookup(net, (xfrm_address_t *)&iph->saddr); return xfrm6_rcv_spi(skb, IPPROTO_IPV6, spi) > 0 ? 
: 0; } static int xfrm6_tunnel_err(struct sk_buff *skb, struct inet6_skb_parm *opt, u8 type, u8 code, int offset, __be32 info) { /* xfrm6_tunnel native err handling */ switch (type) { case ICMPV6_DEST_UNREACH: switch (code) { case ICMPV6_NOROUTE: case ICMPV6_ADM_PROHIBITED: case ICMPV6_NOT_NEIGHBOUR: case ICMPV6_ADDR_UNREACH: case ICMPV6_PORT_UNREACH: default: break; } break; case ICMPV6_PKT_TOOBIG: break; case ICMPV6_TIME_EXCEED: switch (code) { case ICMPV6_EXC_HOPLIMIT: break; case ICMPV6_EXC_FRAGTIME: default: break; } break; case ICMPV6_PARAMPROB: switch (code) { case ICMPV6_HDR_FIELD: break; case ICMPV6_UNK_NEXTHDR: break; case ICMPV6_UNK_OPTION: break; } break; default: break; } return 0; } static int xfrm6_tunnel_init_state(struct xfrm_state *x) { if (x->props.mode != XFRM_MODE_TUNNEL) return -EINVAL; if (x->encap) return -EINVAL; x->props.header_len = sizeof(struct ipv6hdr); return 0; } static void xfrm6_tunnel_destroy(struct xfrm_state *x) { struct net *net = xs_net(x); xfrm6_tunnel_free_spi(net, (xfrm_address_t *)&x->props.saddr); } static const struct xfrm_type xfrm6_tunnel_type = { .description = "IP6IP6", .owner = THIS_MODULE, .proto = IPPROTO_IPV6, .init_state = xfrm6_tunnel_init_state, .destructor = xfrm6_tunnel_destroy, .input = xfrm6_tunnel_input, .output = xfrm6_tunnel_output, }; static struct xfrm6_tunnel xfrm6_tunnel_handler = { .handler = xfrm6_tunnel_rcv, .err_handler = xfrm6_tunnel_err, .priority = 2, }; static struct xfrm6_tunnel xfrm46_tunnel_handler = { .handler = xfrm6_tunnel_rcv, .err_handler = xfrm6_tunnel_err, .priority = 2, }; static int __net_init xfrm6_tunnel_net_init(struct net *net) { struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net); unsigned int i; for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++) INIT_HLIST_HEAD(&xfrm6_tn->spi_byaddr[i]); for (i = 0; i < XFRM6_TUNNEL_SPI_BYSPI_HSIZE; i++) INIT_HLIST_HEAD(&xfrm6_tn->spi_byspi[i]); xfrm6_tn->spi = 0; return 0; } static void __net_exit xfrm6_tunnel_net_exit(struct net *net) { } static struct pernet_operations xfrm6_tunnel_net_ops = { .init = xfrm6_tunnel_net_init, .exit = xfrm6_tunnel_net_exit, .id = &xfrm6_tunnel_net_id, .size = sizeof(struct xfrm6_tunnel_net), }; static int __init xfrm6_tunnel_init(void) { int rv; rv = xfrm_register_type(&xfrm6_tunnel_type, AF_INET6); if (rv < 0) goto err; rv = xfrm6_tunnel_register(&xfrm6_tunnel_handler, AF_INET6); if (rv < 0) goto unreg; rv = xfrm6_tunnel_register(&xfrm46_tunnel_handler, AF_INET); if (rv < 0) goto dereg6; rv = xfrm6_tunnel_spi_init(); if (rv < 0) goto dereg46; rv = register_pernet_subsys(&xfrm6_tunnel_net_ops); if (rv < 0) goto deregspi; return 0; deregspi: xfrm6_tunnel_spi_fini(); dereg46: xfrm6_tunnel_deregister(&xfrm46_tunnel_handler, AF_INET); dereg6: xfrm6_tunnel_deregister(&xfrm6_tunnel_handler, AF_INET6); unreg: xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6); err: return rv; } static void __exit xfrm6_tunnel_fini(void) { unregister_pernet_subsys(&xfrm6_tunnel_net_ops); xfrm6_tunnel_spi_fini(); xfrm6_tunnel_deregister(&xfrm46_tunnel_handler, AF_INET); xfrm6_tunnel_deregister(&xfrm6_tunnel_handler, AF_INET6); xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6); } module_init(xfrm6_tunnel_init); module_exit(xfrm6_tunnel_fini); MODULE_LICENSE("GPL"); MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_IPV6);
./CrossVul/dataset_final_sorted/CWE-362/c/bad_3459_3
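The xfrm6_tunnel record above allocates one SPI per IPv6 source address and files each entry into one of 256 by-address hash buckets via xfrm6_tunnel_spi_hash_byaddr(). The standalone sketch below re-implements just that hash to show how a given saddr maps to a bucket; it is my own illustrative code, not taken from the kernel tree, and it deliberately leaves out the kernel's real tables, locking, and refcounting.

/* Re-implementation sketch of the by-address SPI hash: XOR the four 32-bit
 * words of the IPv6 address, fold the result down by 16 and 8 bits, and mask
 * to one of the 256 buckets (XFRM6_TUNNEL_SPI_BYADDR_HSIZE in the kernel). */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <netinet/in.h>
#include <arpa/inet.h>

#define BYADDR_HSIZE 256	/* mirrors XFRM6_TUNNEL_SPI_BYADDR_HSIZE */

static unsigned int spi_hash_byaddr(const struct in6_addr *addr)
{
	uint32_t w[4];
	unsigned int h;

	/* Copy out the raw 32-bit words of the address (same raw bits the
	 * kernel XORs); memcpy avoids any alignment assumptions here. */
	memcpy(w, addr, sizeof(w));
	h = w[0] ^ w[1] ^ w[2] ^ w[3];
	h ^= h >> 16;
	h ^= h >> 8;
	return h & (BYADDR_HSIZE - 1);
}

int main(void)
{
	struct in6_addr a;

	if (inet_pton(AF_INET6, "2001:db8::1", &a) != 1)
		return 1;

	printf("2001:db8::1 falls into by-address bucket %u\n",
	       spi_hash_byaddr(&a));
	return 0;
}

A single bucket index is all the lookup path needs; uniqueness of the SPI itself is handled separately by the by-SPI table and the __xfrm6_tunnel_alloc_spi() search shown in the record above.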
crossvul-cpp_data_bad_3765_0
/* * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com * Written by Alex Tomas <alex@clusterfs.com> * * Architecture independence: * Copyright (c) 2005, Bull S.A. * Written by Pierre Peiffer <pierre.peiffer@bull.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public Licens * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111- */ /* * Extents support for EXT4 * * TODO: * - ext4*_error() should be used in some situations * - analyze all BUG()/BUG_ON(), use -EIO where appropriate * - smart tree reduction */ #include <linux/fs.h> #include <linux/time.h> #include <linux/jbd2.h> #include <linux/highuid.h> #include <linux/pagemap.h> #include <linux/quotaops.h> #include <linux/string.h> #include <linux/slab.h> #include <linux/falloc.h> #include <asm/uaccess.h> #include <linux/fiemap.h> #include "ext4_jbd2.h" #include <trace/events/ext4.h> /* * used by extent splitting. */ #define EXT4_EXT_MAY_ZEROOUT 0x1 /* safe to zeroout if split fails \ due to ENOSPC */ #define EXT4_EXT_MARK_UNINIT1 0x2 /* mark first half uninitialized */ #define EXT4_EXT_MARK_UNINIT2 0x4 /* mark second half uninitialized */ static __le32 ext4_extent_block_csum(struct inode *inode, struct ext4_extent_header *eh) { struct ext4_inode_info *ei = EXT4_I(inode); struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); __u32 csum; csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)eh, EXT4_EXTENT_TAIL_OFFSET(eh)); return cpu_to_le32(csum); } static int ext4_extent_block_csum_verify(struct inode *inode, struct ext4_extent_header *eh) { struct ext4_extent_tail *et; if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) return 1; et = find_ext4_extent_tail(eh); if (et->et_checksum != ext4_extent_block_csum(inode, eh)) return 0; return 1; } static void ext4_extent_block_csum_set(struct inode *inode, struct ext4_extent_header *eh) { struct ext4_extent_tail *et; if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) return; et = find_ext4_extent_tail(eh); et->et_checksum = ext4_extent_block_csum(inode, eh); } static int ext4_split_extent(handle_t *handle, struct inode *inode, struct ext4_ext_path *path, struct ext4_map_blocks *map, int split_flag, int flags); static int ext4_split_extent_at(handle_t *handle, struct inode *inode, struct ext4_ext_path *path, ext4_lblk_t split, int split_flag, int flags); static int ext4_ext_truncate_extend_restart(handle_t *handle, struct inode *inode, int needed) { int err; if (!ext4_handle_valid(handle)) return 0; if (handle->h_buffer_credits > needed) return 0; err = ext4_journal_extend(handle, needed); if (err <= 0) return err; err = ext4_truncate_restart_trans(handle, inode, needed); if (err == 0) err = -EAGAIN; return err; } /* * could return: * - EROFS * - ENOMEM */ static int ext4_ext_get_access(handle_t *handle, struct inode *inode, struct ext4_ext_path *path) { if (path->p_bh) { /* path points to block */ return ext4_journal_get_write_access(handle, path->p_bh); } /* path points to leaf/index in inode body */ /* we use 
in-core data, no need to protect them */ return 0; } /* * could return: * - EROFS * - ENOMEM * - EIO */ #define ext4_ext_dirty(handle, inode, path) \ __ext4_ext_dirty(__func__, __LINE__, (handle), (inode), (path)) static int __ext4_ext_dirty(const char *where, unsigned int line, handle_t *handle, struct inode *inode, struct ext4_ext_path *path) { int err; if (path->p_bh) { ext4_extent_block_csum_set(inode, ext_block_hdr(path->p_bh)); /* path points to block */ err = __ext4_handle_dirty_metadata(where, line, handle, inode, path->p_bh); } else { /* path points to leaf/index in inode body */ err = ext4_mark_inode_dirty(handle, inode); } return err; } static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode, struct ext4_ext_path *path, ext4_lblk_t block) { if (path) { int depth = path->p_depth; struct ext4_extent *ex; /* * Try to predict block placement assuming that we are * filling in a file which will eventually be * non-sparse --- i.e., in the case of libbfd writing * an ELF object sections out-of-order but in a way * the eventually results in a contiguous object or * executable file, or some database extending a table * space file. However, this is actually somewhat * non-ideal if we are writing a sparse file such as * qemu or KVM writing a raw image file that is going * to stay fairly sparse, since it will end up * fragmenting the file system's free space. Maybe we * should have some hueristics or some way to allow * userspace to pass a hint to file system, * especially if the latter case turns out to be * common. */ ex = path[depth].p_ext; if (ex) { ext4_fsblk_t ext_pblk = ext4_ext_pblock(ex); ext4_lblk_t ext_block = le32_to_cpu(ex->ee_block); if (block > ext_block) return ext_pblk + (block - ext_block); else return ext_pblk - (ext_block - block); } /* it looks like index is empty; * try to find starting block from index itself */ if (path[depth].p_bh) return path[depth].p_bh->b_blocknr; } /* OK. 
use inode's group */ return ext4_inode_to_goal_block(inode); } /* * Allocation for a meta data block */ static ext4_fsblk_t ext4_ext_new_meta_block(handle_t *handle, struct inode *inode, struct ext4_ext_path *path, struct ext4_extent *ex, int *err, unsigned int flags) { ext4_fsblk_t goal, newblock; goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block)); newblock = ext4_new_meta_blocks(handle, inode, goal, flags, NULL, err); return newblock; } static inline int ext4_ext_space_block(struct inode *inode, int check) { int size; size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header)) / sizeof(struct ext4_extent); #ifdef AGGRESSIVE_TEST if (!check && size > 6) size = 6; #endif return size; } static inline int ext4_ext_space_block_idx(struct inode *inode, int check) { int size; size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header)) / sizeof(struct ext4_extent_idx); #ifdef AGGRESSIVE_TEST if (!check && size > 5) size = 5; #endif return size; } static inline int ext4_ext_space_root(struct inode *inode, int check) { int size; size = sizeof(EXT4_I(inode)->i_data); size -= sizeof(struct ext4_extent_header); size /= sizeof(struct ext4_extent); #ifdef AGGRESSIVE_TEST if (!check && size > 3) size = 3; #endif return size; } static inline int ext4_ext_space_root_idx(struct inode *inode, int check) { int size; size = sizeof(EXT4_I(inode)->i_data); size -= sizeof(struct ext4_extent_header); size /= sizeof(struct ext4_extent_idx); #ifdef AGGRESSIVE_TEST if (!check && size > 4) size = 4; #endif return size; } /* * Calculate the number of metadata blocks needed * to allocate @blocks * Worse case is one block per extent */ int ext4_ext_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock) { struct ext4_inode_info *ei = EXT4_I(inode); int idxs; idxs = ((inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header)) / sizeof(struct ext4_extent_idx)); /* * If the new delayed allocation block is contiguous with the * previous da block, it can share index blocks with the * previous block, so we only need to allocate a new index * block every idxs leaf blocks. At ldxs**2 blocks, we need * an additional index block, and at ldxs**3 blocks, yet * another index blocks. */ if (ei->i_da_metadata_calc_len && ei->i_da_metadata_calc_last_lblock+1 == lblock) { int num = 0; if ((ei->i_da_metadata_calc_len % idxs) == 0) num++; if ((ei->i_da_metadata_calc_len % (idxs*idxs)) == 0) num++; if ((ei->i_da_metadata_calc_len % (idxs*idxs*idxs)) == 0) { num++; ei->i_da_metadata_calc_len = 0; } else ei->i_da_metadata_calc_len++; ei->i_da_metadata_calc_last_lblock++; return num; } /* * In the worst case we need a new set of index blocks at * every level of the inode's extent tree. 
*/ ei->i_da_metadata_calc_len = 1; ei->i_da_metadata_calc_last_lblock = lblock; return ext_depth(inode) + 1; } static int ext4_ext_max_entries(struct inode *inode, int depth) { int max; if (depth == ext_depth(inode)) { if (depth == 0) max = ext4_ext_space_root(inode, 1); else max = ext4_ext_space_root_idx(inode, 1); } else { if (depth == 0) max = ext4_ext_space_block(inode, 1); else max = ext4_ext_space_block_idx(inode, 1); } return max; } static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext) { ext4_fsblk_t block = ext4_ext_pblock(ext); int len = ext4_ext_get_actual_len(ext); if (len == 0) return 0; return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len); } static int ext4_valid_extent_idx(struct inode *inode, struct ext4_extent_idx *ext_idx) { ext4_fsblk_t block = ext4_idx_pblock(ext_idx); return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, 1); } static int ext4_valid_extent_entries(struct inode *inode, struct ext4_extent_header *eh, int depth) { unsigned short entries; if (eh->eh_entries == 0) return 1; entries = le16_to_cpu(eh->eh_entries); if (depth == 0) { /* leaf entries */ struct ext4_extent *ext = EXT_FIRST_EXTENT(eh); while (entries) { if (!ext4_valid_extent(inode, ext)) return 0; ext++; entries--; } } else { struct ext4_extent_idx *ext_idx = EXT_FIRST_INDEX(eh); while (entries) { if (!ext4_valid_extent_idx(inode, ext_idx)) return 0; ext_idx++; entries--; } } return 1; } static int __ext4_ext_check(const char *function, unsigned int line, struct inode *inode, struct ext4_extent_header *eh, int depth) { const char *error_msg; int max = 0; if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) { error_msg = "invalid magic"; goto corrupted; } if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) { error_msg = "unexpected eh_depth"; goto corrupted; } if (unlikely(eh->eh_max == 0)) { error_msg = "invalid eh_max"; goto corrupted; } max = ext4_ext_max_entries(inode, depth); if (unlikely(le16_to_cpu(eh->eh_max) > max)) { error_msg = "too large eh_max"; goto corrupted; } if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) { error_msg = "invalid eh_entries"; goto corrupted; } if (!ext4_valid_extent_entries(inode, eh, depth)) { error_msg = "invalid extent entries"; goto corrupted; } /* Verify checksum on non-root extent tree nodes */ if (ext_depth(inode) != depth && !ext4_extent_block_csum_verify(inode, eh)) { error_msg = "extent tree corrupted"; goto corrupted; } return 0; corrupted: ext4_error_inode(inode, function, line, 0, "bad header/extent: %s - magic %x, " "entries %u, max %u(%u), depth %u(%u)", error_msg, le16_to_cpu(eh->eh_magic), le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max), max, le16_to_cpu(eh->eh_depth), depth); return -EIO; } #define ext4_ext_check(inode, eh, depth) \ __ext4_ext_check(__func__, __LINE__, inode, eh, depth) int ext4_ext_check_inode(struct inode *inode) { return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode)); } static int __ext4_ext_check_block(const char *function, unsigned int line, struct inode *inode, struct ext4_extent_header *eh, int depth, struct buffer_head *bh) { int ret; if (buffer_verified(bh)) return 0; ret = ext4_ext_check(inode, eh, depth); if (ret) return ret; set_buffer_verified(bh); return ret; } #define ext4_ext_check_block(inode, eh, depth, bh) \ __ext4_ext_check_block(__func__, __LINE__, inode, eh, depth, bh) #ifdef EXT_DEBUG static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path) { int k, l = path->p_depth; ext_debug("path:"); for (k = 0; k <= l; k++, path++) { 
if (path->p_idx) { ext_debug(" %d->%llu", le32_to_cpu(path->p_idx->ei_block), ext4_idx_pblock(path->p_idx)); } else if (path->p_ext) { ext_debug(" %d:[%d]%d:%llu ", le32_to_cpu(path->p_ext->ee_block), ext4_ext_is_uninitialized(path->p_ext), ext4_ext_get_actual_len(path->p_ext), ext4_ext_pblock(path->p_ext)); } else ext_debug(" []"); } ext_debug("\n"); } static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path) { int depth = ext_depth(inode); struct ext4_extent_header *eh; struct ext4_extent *ex; int i; if (!path) return; eh = path[depth].p_hdr; ex = EXT_FIRST_EXTENT(eh); ext_debug("Displaying leaf extents for inode %lu\n", inode->i_ino); for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) { ext_debug("%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block), ext4_ext_is_uninitialized(ex), ext4_ext_get_actual_len(ex), ext4_ext_pblock(ex)); } ext_debug("\n"); } static void ext4_ext_show_move(struct inode *inode, struct ext4_ext_path *path, ext4_fsblk_t newblock, int level) { int depth = ext_depth(inode); struct ext4_extent *ex; if (depth != level) { struct ext4_extent_idx *idx; idx = path[level].p_idx; while (idx <= EXT_MAX_INDEX(path[level].p_hdr)) { ext_debug("%d: move %d:%llu in new index %llu\n", level, le32_to_cpu(idx->ei_block), ext4_idx_pblock(idx), newblock); idx++; } return; } ex = path[depth].p_ext; while (ex <= EXT_MAX_EXTENT(path[depth].p_hdr)) { ext_debug("move %d:%llu:[%d]%d in new leaf %llu\n", le32_to_cpu(ex->ee_block), ext4_ext_pblock(ex), ext4_ext_is_uninitialized(ex), ext4_ext_get_actual_len(ex), newblock); ex++; } } #else #define ext4_ext_show_path(inode, path) #define ext4_ext_show_leaf(inode, path) #define ext4_ext_show_move(inode, path, newblock, level) #endif void ext4_ext_drop_refs(struct ext4_ext_path *path) { int depth = path->p_depth; int i; for (i = 0; i <= depth; i++, path++) if (path->p_bh) { brelse(path->p_bh); path->p_bh = NULL; } } /* * ext4_ext_binsearch_idx: * binary search for the closest index of the given block * the header must be checked before calling this */ static void ext4_ext_binsearch_idx(struct inode *inode, struct ext4_ext_path *path, ext4_lblk_t block) { struct ext4_extent_header *eh = path->p_hdr; struct ext4_extent_idx *r, *l, *m; ext_debug("binsearch for %u(idx): ", block); l = EXT_FIRST_INDEX(eh) + 1; r = EXT_LAST_INDEX(eh); while (l <= r) { m = l + (r - l) / 2; if (block < le32_to_cpu(m->ei_block)) r = m - 1; else l = m + 1; ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ei_block), m, le32_to_cpu(m->ei_block), r, le32_to_cpu(r->ei_block)); } path->p_idx = l - 1; ext_debug(" -> %u->%lld ", le32_to_cpu(path->p_idx->ei_block), ext4_idx_pblock(path->p_idx)); #ifdef CHECK_BINSEARCH { struct ext4_extent_idx *chix, *ix; int k; chix = ix = EXT_FIRST_INDEX(eh); for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) { if (k != 0 && le32_to_cpu(ix->ei_block) <= le32_to_cpu(ix[-1].ei_block)) { printk(KERN_DEBUG "k=%d, ix=0x%p, " "first=0x%p\n", k, ix, EXT_FIRST_INDEX(eh)); printk(KERN_DEBUG "%u <= %u\n", le32_to_cpu(ix->ei_block), le32_to_cpu(ix[-1].ei_block)); } BUG_ON(k && le32_to_cpu(ix->ei_block) <= le32_to_cpu(ix[-1].ei_block)); if (block < le32_to_cpu(ix->ei_block)) break; chix = ix; } BUG_ON(chix != path->p_idx); } #endif } /* * ext4_ext_binsearch: * binary search for closest extent of the given block * the header must be checked before calling this */ static void ext4_ext_binsearch(struct inode *inode, struct ext4_ext_path *path, ext4_lblk_t block) { struct ext4_extent_header *eh = path->p_hdr; struct ext4_extent *r, *l, 
*m; if (eh->eh_entries == 0) { /* * this leaf is empty: * we get such a leaf in split/add case */ return; } ext_debug("binsearch for %u: ", block); l = EXT_FIRST_EXTENT(eh) + 1; r = EXT_LAST_EXTENT(eh); while (l <= r) { m = l + (r - l) / 2; if (block < le32_to_cpu(m->ee_block)) r = m - 1; else l = m + 1; ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ee_block), m, le32_to_cpu(m->ee_block), r, le32_to_cpu(r->ee_block)); } path->p_ext = l - 1; ext_debug(" -> %d:%llu:[%d]%d ", le32_to_cpu(path->p_ext->ee_block), ext4_ext_pblock(path->p_ext), ext4_ext_is_uninitialized(path->p_ext), ext4_ext_get_actual_len(path->p_ext)); #ifdef CHECK_BINSEARCH { struct ext4_extent *chex, *ex; int k; chex = ex = EXT_FIRST_EXTENT(eh); for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) { BUG_ON(k && le32_to_cpu(ex->ee_block) <= le32_to_cpu(ex[-1].ee_block)); if (block < le32_to_cpu(ex->ee_block)) break; chex = ex; } BUG_ON(chex != path->p_ext); } #endif } int ext4_ext_tree_init(handle_t *handle, struct inode *inode) { struct ext4_extent_header *eh; eh = ext_inode_hdr(inode); eh->eh_depth = 0; eh->eh_entries = 0; eh->eh_magic = EXT4_EXT_MAGIC; eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0)); ext4_mark_inode_dirty(handle, inode); ext4_ext_invalidate_cache(inode); return 0; } struct ext4_ext_path * ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block, struct ext4_ext_path *path) { struct ext4_extent_header *eh; struct buffer_head *bh; short int depth, i, ppos = 0, alloc = 0; eh = ext_inode_hdr(inode); depth = ext_depth(inode); /* account possible depth increase */ if (!path) { path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 2), GFP_NOFS); if (!path) return ERR_PTR(-ENOMEM); alloc = 1; } path[0].p_hdr = eh; path[0].p_bh = NULL; i = depth; /* walk through the tree */ while (i) { ext_debug("depth %d: num %d, max %d\n", ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max)); ext4_ext_binsearch_idx(inode, path + ppos, block); path[ppos].p_block = ext4_idx_pblock(path[ppos].p_idx); path[ppos].p_depth = i; path[ppos].p_ext = NULL; bh = sb_getblk(inode->i_sb, path[ppos].p_block); if (unlikely(!bh)) goto err; if (!bh_uptodate_or_lock(bh)) { trace_ext4_ext_load_extent(inode, block, path[ppos].p_block); if (bh_submit_read(bh) < 0) { put_bh(bh); goto err; } } eh = ext_block_hdr(bh); ppos++; if (unlikely(ppos > depth)) { put_bh(bh); EXT4_ERROR_INODE(inode, "ppos %d > depth %d", ppos, depth); goto err; } path[ppos].p_bh = bh; path[ppos].p_hdr = eh; i--; if (ext4_ext_check_block(inode, eh, i, bh)) goto err; } path[ppos].p_depth = i; path[ppos].p_ext = NULL; path[ppos].p_idx = NULL; /* find extent */ ext4_ext_binsearch(inode, path + ppos, block); /* if not an empty leaf */ if (path[ppos].p_ext) path[ppos].p_block = ext4_ext_pblock(path[ppos].p_ext); ext4_ext_show_path(inode, path); return path; err: ext4_ext_drop_refs(path); if (alloc) kfree(path); return ERR_PTR(-EIO); } /* * ext4_ext_insert_index: * insert new index [@logical;@ptr] into the block at @curp; * check where to insert: before @curp or after @curp */ static int ext4_ext_insert_index(handle_t *handle, struct inode *inode, struct ext4_ext_path *curp, int logical, ext4_fsblk_t ptr) { struct ext4_extent_idx *ix; int len, err; err = ext4_ext_get_access(handle, inode, curp); if (err) return err; if (unlikely(logical == le32_to_cpu(curp->p_idx->ei_block))) { EXT4_ERROR_INODE(inode, "logical %d == ei_block %d!", logical, le32_to_cpu(curp->p_idx->ei_block)); return -EIO; } if (unlikely(le16_to_cpu(curp->p_hdr->eh_entries) >= 
le16_to_cpu(curp->p_hdr->eh_max))) { EXT4_ERROR_INODE(inode, "eh_entries %d >= eh_max %d!", le16_to_cpu(curp->p_hdr->eh_entries), le16_to_cpu(curp->p_hdr->eh_max)); return -EIO; } if (logical > le32_to_cpu(curp->p_idx->ei_block)) { /* insert after */ ext_debug("insert new index %d after: %llu\n", logical, ptr); ix = curp->p_idx + 1; } else { /* insert before */ ext_debug("insert new index %d before: %llu\n", logical, ptr); ix = curp->p_idx; } len = EXT_LAST_INDEX(curp->p_hdr) - ix + 1; BUG_ON(len < 0); if (len > 0) { ext_debug("insert new index %d: " "move %d indices from 0x%p to 0x%p\n", logical, len, ix, ix + 1); memmove(ix + 1, ix, len * sizeof(struct ext4_extent_idx)); } if (unlikely(ix > EXT_MAX_INDEX(curp->p_hdr))) { EXT4_ERROR_INODE(inode, "ix > EXT_MAX_INDEX!"); return -EIO; } ix->ei_block = cpu_to_le32(logical); ext4_idx_store_pblock(ix, ptr); le16_add_cpu(&curp->p_hdr->eh_entries, 1); if (unlikely(ix > EXT_LAST_INDEX(curp->p_hdr))) { EXT4_ERROR_INODE(inode, "ix > EXT_LAST_INDEX!"); return -EIO; } err = ext4_ext_dirty(handle, inode, curp); ext4_std_error(inode->i_sb, err); return err; } /* * ext4_ext_split: * inserts new subtree into the path, using free index entry * at depth @at: * - allocates all needed blocks (new leaf and all intermediate index blocks) * - makes decision where to split * - moves remaining extents and index entries (right to the split point) * into the newly allocated blocks * - initializes subtree */ static int ext4_ext_split(handle_t *handle, struct inode *inode, unsigned int flags, struct ext4_ext_path *path, struct ext4_extent *newext, int at) { struct buffer_head *bh = NULL; int depth = ext_depth(inode); struct ext4_extent_header *neh; struct ext4_extent_idx *fidx; int i = at, k, m, a; ext4_fsblk_t newblock, oldblock; __le32 border; ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */ int err = 0; /* make decision: where to split? */ /* FIXME: now decision is simplest: at current extent */ /* if current leaf will be split, then we should use * border from split point */ if (unlikely(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr))) { EXT4_ERROR_INODE(inode, "p_ext > EXT_MAX_EXTENT!"); return -EIO; } if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) { border = path[depth].p_ext[1].ee_block; ext_debug("leaf will be split." " next leaf starts at %d\n", le32_to_cpu(border)); } else { border = newext->ee_block; ext_debug("leaf will be added." " next leaf starts at %d\n", le32_to_cpu(border)); } /* * If error occurs, then we break processing * and mark filesystem read-only. index won't * be inserted and tree will be in consistent * state. Next mount will repair buffers too. */ /* * Get array to track all allocated blocks. * We need this to handle errors and free blocks * upon them. 
*/ ablocks = kzalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS); if (!ablocks) return -ENOMEM; /* allocate all needed blocks */ ext_debug("allocate %d blocks for indexes/leaf\n", depth - at); for (a = 0; a < depth - at; a++) { newblock = ext4_ext_new_meta_block(handle, inode, path, newext, &err, flags); if (newblock == 0) goto cleanup; ablocks[a] = newblock; } /* initialize new leaf */ newblock = ablocks[--a]; if (unlikely(newblock == 0)) { EXT4_ERROR_INODE(inode, "newblock == 0!"); err = -EIO; goto cleanup; } bh = sb_getblk(inode->i_sb, newblock); if (!bh) { err = -EIO; goto cleanup; } lock_buffer(bh); err = ext4_journal_get_create_access(handle, bh); if (err) goto cleanup; neh = ext_block_hdr(bh); neh->eh_entries = 0; neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0)); neh->eh_magic = EXT4_EXT_MAGIC; neh->eh_depth = 0; /* move remainder of path[depth] to the new leaf */ if (unlikely(path[depth].p_hdr->eh_entries != path[depth].p_hdr->eh_max)) { EXT4_ERROR_INODE(inode, "eh_entries %d != eh_max %d!", path[depth].p_hdr->eh_entries, path[depth].p_hdr->eh_max); err = -EIO; goto cleanup; } /* start copy from next extent */ m = EXT_MAX_EXTENT(path[depth].p_hdr) - path[depth].p_ext++; ext4_ext_show_move(inode, path, newblock, depth); if (m) { struct ext4_extent *ex; ex = EXT_FIRST_EXTENT(neh); memmove(ex, path[depth].p_ext, sizeof(struct ext4_extent) * m); le16_add_cpu(&neh->eh_entries, m); } ext4_extent_block_csum_set(inode, neh); set_buffer_uptodate(bh); unlock_buffer(bh); err = ext4_handle_dirty_metadata(handle, inode, bh); if (err) goto cleanup; brelse(bh); bh = NULL; /* correct old leaf */ if (m) { err = ext4_ext_get_access(handle, inode, path + depth); if (err) goto cleanup; le16_add_cpu(&path[depth].p_hdr->eh_entries, -m); err = ext4_ext_dirty(handle, inode, path + depth); if (err) goto cleanup; } /* create intermediate indexes */ k = depth - at - 1; if (unlikely(k < 0)) { EXT4_ERROR_INODE(inode, "k %d < 0!", k); err = -EIO; goto cleanup; } if (k) ext_debug("create %d intermediate indices\n", k); /* insert new index into current index block */ /* current depth stored in i var */ i = depth - 1; while (k--) { oldblock = newblock; newblock = ablocks[--a]; bh = sb_getblk(inode->i_sb, newblock); if (!bh) { err = -EIO; goto cleanup; } lock_buffer(bh); err = ext4_journal_get_create_access(handle, bh); if (err) goto cleanup; neh = ext_block_hdr(bh); neh->eh_entries = cpu_to_le16(1); neh->eh_magic = EXT4_EXT_MAGIC; neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0)); neh->eh_depth = cpu_to_le16(depth - i); fidx = EXT_FIRST_INDEX(neh); fidx->ei_block = border; ext4_idx_store_pblock(fidx, oldblock); ext_debug("int.index at %d (block %llu): %u -> %llu\n", i, newblock, le32_to_cpu(border), oldblock); /* move remainder of path[i] to the new index block */ if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) != EXT_LAST_INDEX(path[i].p_hdr))) { EXT4_ERROR_INODE(inode, "EXT_MAX_INDEX != EXT_LAST_INDEX ee_block %d!", le32_to_cpu(path[i].p_ext->ee_block)); err = -EIO; goto cleanup; } /* start copy indexes */ m = EXT_MAX_INDEX(path[i].p_hdr) - path[i].p_idx++; ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx, EXT_MAX_INDEX(path[i].p_hdr)); ext4_ext_show_move(inode, path, newblock, i); if (m) { memmove(++fidx, path[i].p_idx, sizeof(struct ext4_extent_idx) * m); le16_add_cpu(&neh->eh_entries, m); } ext4_extent_block_csum_set(inode, neh); set_buffer_uptodate(bh); unlock_buffer(bh); err = ext4_handle_dirty_metadata(handle, inode, bh); if (err) goto cleanup; brelse(bh); bh = NULL; /* correct old index */ if 
(m) { err = ext4_ext_get_access(handle, inode, path + i); if (err) goto cleanup; le16_add_cpu(&path[i].p_hdr->eh_entries, -m); err = ext4_ext_dirty(handle, inode, path + i); if (err) goto cleanup; } i--; } /* insert new index */ err = ext4_ext_insert_index(handle, inode, path + at, le32_to_cpu(border), newblock); cleanup: if (bh) { if (buffer_locked(bh)) unlock_buffer(bh); brelse(bh); } if (err) { /* free all allocated blocks in error case */ for (i = 0; i < depth; i++) { if (!ablocks[i]) continue; ext4_free_blocks(handle, inode, NULL, ablocks[i], 1, EXT4_FREE_BLOCKS_METADATA); } } kfree(ablocks); return err; } /* * ext4_ext_grow_indepth: * implements tree growing procedure: * - allocates new block * - moves top-level data (index block or leaf) into the new block * - initializes new top-level, creating index that points to the * just created block */ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode, unsigned int flags, struct ext4_extent *newext) { struct ext4_extent_header *neh; struct buffer_head *bh; ext4_fsblk_t newblock; int err = 0; newblock = ext4_ext_new_meta_block(handle, inode, NULL, newext, &err, flags); if (newblock == 0) return err; bh = sb_getblk(inode->i_sb, newblock); if (!bh) { err = -EIO; ext4_std_error(inode->i_sb, err); return err; } lock_buffer(bh); err = ext4_journal_get_create_access(handle, bh); if (err) { unlock_buffer(bh); goto out; } /* move top-level index/leaf into new block */ memmove(bh->b_data, EXT4_I(inode)->i_data, sizeof(EXT4_I(inode)->i_data)); /* set size of new block */ neh = ext_block_hdr(bh); /* old root could have indexes or leaves * so calculate e_max right way */ if (ext_depth(inode)) neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0)); else neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0)); neh->eh_magic = EXT4_EXT_MAGIC; ext4_extent_block_csum_set(inode, neh); set_buffer_uptodate(bh); unlock_buffer(bh); err = ext4_handle_dirty_metadata(handle, inode, bh); if (err) goto out; /* Update top-level index: num,max,pointer */ neh = ext_inode_hdr(inode); neh->eh_entries = cpu_to_le16(1); ext4_idx_store_pblock(EXT_FIRST_INDEX(neh), newblock); if (neh->eh_depth == 0) { /* Root extent block becomes index block */ neh->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode, 0)); EXT_FIRST_INDEX(neh)->ei_block = EXT_FIRST_EXTENT(neh)->ee_block; } ext_debug("new root: num %d(%d), lblock %d, ptr %llu\n", le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max), le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block), ext4_idx_pblock(EXT_FIRST_INDEX(neh))); le16_add_cpu(&neh->eh_depth, 1); ext4_mark_inode_dirty(handle, inode); out: brelse(bh); return err; } /* * ext4_ext_create_new_leaf: * finds empty index and adds new leaf. * if no free index is found, then it requests in-depth growing. 
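 * Note: growing in depth only creates a free slot in the new root,
 * so the function loops back via the "repeat" label and may still
 * have to split the freshly grown tree before a usable leaf exists.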
*/ static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode, unsigned int flags, struct ext4_ext_path *path, struct ext4_extent *newext) { struct ext4_ext_path *curp; int depth, i, err = 0; repeat: i = depth = ext_depth(inode); /* walk up to the tree and look for free index entry */ curp = path + depth; while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) { i--; curp--; } /* we use already allocated block for index block, * so subsequent data blocks should be contiguous */ if (EXT_HAS_FREE_INDEX(curp)) { /* if we found index with free entry, then use that * entry: create all needed subtree and add new leaf */ err = ext4_ext_split(handle, inode, flags, path, newext, i); if (err) goto out; /* refill path */ ext4_ext_drop_refs(path); path = ext4_ext_find_extent(inode, (ext4_lblk_t)le32_to_cpu(newext->ee_block), path); if (IS_ERR(path)) err = PTR_ERR(path); } else { /* tree is full, time to grow in depth */ err = ext4_ext_grow_indepth(handle, inode, flags, newext); if (err) goto out; /* refill path */ ext4_ext_drop_refs(path); path = ext4_ext_find_extent(inode, (ext4_lblk_t)le32_to_cpu(newext->ee_block), path); if (IS_ERR(path)) { err = PTR_ERR(path); goto out; } /* * only first (depth 0 -> 1) produces free space; * in all other cases we have to split the grown tree */ depth = ext_depth(inode); if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) { /* now we need to split */ goto repeat; } } out: return err; } /* * search the closest allocated block to the left for *logical * and returns it at @logical + it's physical address at @phys * if *logical is the smallest allocated block, the function * returns 0 at @phys * return value contains 0 (success) or error code */ static int ext4_ext_search_left(struct inode *inode, struct ext4_ext_path *path, ext4_lblk_t *logical, ext4_fsblk_t *phys) { struct ext4_extent_idx *ix; struct ext4_extent *ex; int depth, ee_len; if (unlikely(path == NULL)) { EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical); return -EIO; } depth = path->p_depth; *phys = 0; if (depth == 0 && path->p_ext == NULL) return 0; /* usually extent in the path covers blocks smaller * then *logical, but it can be that extent is the * first one in the file */ ex = path[depth].p_ext; ee_len = ext4_ext_get_actual_len(ex); if (*logical < le32_to_cpu(ex->ee_block)) { if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) { EXT4_ERROR_INODE(inode, "EXT_FIRST_EXTENT != ex *logical %d ee_block %d!", *logical, le32_to_cpu(ex->ee_block)); return -EIO; } while (--depth >= 0) { ix = path[depth].p_idx; if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) { EXT4_ERROR_INODE(inode, "ix (%d) != EXT_FIRST_INDEX (%d) (depth %d)!", ix != NULL ? le32_to_cpu(ix->ei_block) : 0, EXT_FIRST_INDEX(path[depth].p_hdr) != NULL ? 
le32_to_cpu(EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block) : 0, depth); return -EIO; } } return 0; } if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) { EXT4_ERROR_INODE(inode, "logical %d < ee_block %d + ee_len %d!", *logical, le32_to_cpu(ex->ee_block), ee_len); return -EIO; } *logical = le32_to_cpu(ex->ee_block) + ee_len - 1; *phys = ext4_ext_pblock(ex) + ee_len - 1; return 0; } /* * search the closest allocated block to the right for *logical * and returns it at @logical + it's physical address at @phys * if *logical is the largest allocated block, the function * returns 0 at @phys * return value contains 0 (success) or error code */ static int ext4_ext_search_right(struct inode *inode, struct ext4_ext_path *path, ext4_lblk_t *logical, ext4_fsblk_t *phys, struct ext4_extent **ret_ex) { struct buffer_head *bh = NULL; struct ext4_extent_header *eh; struct ext4_extent_idx *ix; struct ext4_extent *ex; ext4_fsblk_t block; int depth; /* Note, NOT eh_depth; depth from top of tree */ int ee_len; if (unlikely(path == NULL)) { EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical); return -EIO; } depth = path->p_depth; *phys = 0; if (depth == 0 && path->p_ext == NULL) return 0; /* usually extent in the path covers blocks smaller * then *logical, but it can be that extent is the * first one in the file */ ex = path[depth].p_ext; ee_len = ext4_ext_get_actual_len(ex); if (*logical < le32_to_cpu(ex->ee_block)) { if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) { EXT4_ERROR_INODE(inode, "first_extent(path[%d].p_hdr) != ex", depth); return -EIO; } while (--depth >= 0) { ix = path[depth].p_idx; if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) { EXT4_ERROR_INODE(inode, "ix != EXT_FIRST_INDEX *logical %d!", *logical); return -EIO; } } goto found_extent; } if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) { EXT4_ERROR_INODE(inode, "logical %d < ee_block %d + ee_len %d!", *logical, le32_to_cpu(ex->ee_block), ee_len); return -EIO; } if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) { /* next allocated block in this leaf */ ex++; goto found_extent; } /* go up and search for index to the right */ while (--depth >= 0) { ix = path[depth].p_idx; if (ix != EXT_LAST_INDEX(path[depth].p_hdr)) goto got_index; } /* we've gone up to the root and found no index to the right */ return 0; got_index: /* we've found index to the right, let's * follow it and find the closest allocated * block to the right */ ix++; block = ext4_idx_pblock(ix); while (++depth < path->p_depth) { bh = sb_bread(inode->i_sb, block); if (bh == NULL) return -EIO; eh = ext_block_hdr(bh); /* subtract from p_depth to get proper eh_depth */ if (ext4_ext_check_block(inode, eh, path->p_depth - depth, bh)) { put_bh(bh); return -EIO; } ix = EXT_FIRST_INDEX(eh); block = ext4_idx_pblock(ix); put_bh(bh); } bh = sb_bread(inode->i_sb, block); if (bh == NULL) return -EIO; eh = ext_block_hdr(bh); if (ext4_ext_check_block(inode, eh, path->p_depth - depth, bh)) { put_bh(bh); return -EIO; } ex = EXT_FIRST_EXTENT(eh); found_extent: *logical = le32_to_cpu(ex->ee_block); *phys = ext4_ext_pblock(ex); *ret_ex = ex; if (bh) put_bh(bh); return 0; } /* * ext4_ext_next_allocated_block: * returns allocated block in subsequent extent or EXT_MAX_BLOCKS. * NOTE: it considers block number from index entry as * allocated block. Thus, index entries have to be consistent * with leaves. 
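 * (Walking up from the leaf, the first level that still has an entry
 * to its right supplies the answer; if every level is exhausted, the
 * file has no further allocated blocks and EXT_MAX_BLOCKS is returned.)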
*/ static ext4_lblk_t ext4_ext_next_allocated_block(struct ext4_ext_path *path) { int depth; BUG_ON(path == NULL); depth = path->p_depth; if (depth == 0 && path->p_ext == NULL) return EXT_MAX_BLOCKS; while (depth >= 0) { if (depth == path->p_depth) { /* leaf */ if (path[depth].p_ext && path[depth].p_ext != EXT_LAST_EXTENT(path[depth].p_hdr)) return le32_to_cpu(path[depth].p_ext[1].ee_block); } else { /* index */ if (path[depth].p_idx != EXT_LAST_INDEX(path[depth].p_hdr)) return le32_to_cpu(path[depth].p_idx[1].ei_block); } depth--; } return EXT_MAX_BLOCKS; } /* * ext4_ext_next_leaf_block: * returns first allocated block from next leaf or EXT_MAX_BLOCKS */ static ext4_lblk_t ext4_ext_next_leaf_block(struct ext4_ext_path *path) { int depth; BUG_ON(path == NULL); depth = path->p_depth; /* zero-tree has no leaf blocks at all */ if (depth == 0) return EXT_MAX_BLOCKS; /* go to index block */ depth--; while (depth >= 0) { if (path[depth].p_idx != EXT_LAST_INDEX(path[depth].p_hdr)) return (ext4_lblk_t) le32_to_cpu(path[depth].p_idx[1].ei_block); depth--; } return EXT_MAX_BLOCKS; } /* * ext4_ext_correct_indexes: * if leaf gets modified and modified extent is first in the leaf, * then we have to correct all indexes above. * TODO: do we need to correct tree in all cases? */ static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode, struct ext4_ext_path *path) { struct ext4_extent_header *eh; int depth = ext_depth(inode); struct ext4_extent *ex; __le32 border; int k, err = 0; eh = path[depth].p_hdr; ex = path[depth].p_ext; if (unlikely(ex == NULL || eh == NULL)) { EXT4_ERROR_INODE(inode, "ex %p == NULL or eh %p == NULL", ex, eh); return -EIO; } if (depth == 0) { /* there is no tree at all */ return 0; } if (ex != EXT_FIRST_EXTENT(eh)) { /* we correct tree if first leaf got modified only */ return 0; } /* * TODO: we need correction if border is smaller than current one */ k = depth - 1; border = path[depth].p_ext->ee_block; err = ext4_ext_get_access(handle, inode, path + k); if (err) return err; path[k].p_idx->ei_block = border; err = ext4_ext_dirty(handle, inode, path + k); if (err) return err; while (k--) { /* change all left-side indexes */ if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr)) break; err = ext4_ext_get_access(handle, inode, path + k); if (err) break; path[k].p_idx->ei_block = border; err = ext4_ext_dirty(handle, inode, path + k); if (err) break; } return err; } int ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1, struct ext4_extent *ex2) { unsigned short ext1_ee_len, ext2_ee_len, max_len; /* * Make sure that either both extents are uninitialized, or * both are _not_. */ if (ext4_ext_is_uninitialized(ex1) ^ ext4_ext_is_uninitialized(ex2)) return 0; if (ext4_ext_is_uninitialized(ex1)) max_len = EXT_UNINIT_MAX_LEN; else max_len = EXT_INIT_MAX_LEN; ext1_ee_len = ext4_ext_get_actual_len(ex1); ext2_ee_len = ext4_ext_get_actual_len(ex2); if (le32_to_cpu(ex1->ee_block) + ext1_ee_len != le32_to_cpu(ex2->ee_block)) return 0; /* * To allow future support for preallocated extents to be added * as an RO_COMPAT feature, refuse to merge to extents if * this can result in the top bit of ee_len being set. */ if (ext1_ee_len + ext2_ee_len > max_len) return 0; #ifdef AGGRESSIVE_TEST if (ext1_ee_len >= 4) return 0; #endif if (ext4_ext_pblock(ex1) + ext1_ee_len == ext4_ext_pblock(ex2)) return 1; return 0; } /* * This function tries to merge the "ex" extent to the next extent in the tree. * It always tries to merge towards right. 
If you want to merge towards * left, pass "ex - 1" as argument instead of "ex". * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns * 1 if they got merged. */ static int ext4_ext_try_to_merge_right(struct inode *inode, struct ext4_ext_path *path, struct ext4_extent *ex) { struct ext4_extent_header *eh; unsigned int depth, len; int merge_done = 0; int uninitialized = 0; depth = ext_depth(inode); BUG_ON(path[depth].p_hdr == NULL); eh = path[depth].p_hdr; while (ex < EXT_LAST_EXTENT(eh)) { if (!ext4_can_extents_be_merged(inode, ex, ex + 1)) break; /* merge with next extent! */ if (ext4_ext_is_uninitialized(ex)) uninitialized = 1; ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex) + ext4_ext_get_actual_len(ex + 1)); if (uninitialized) ext4_ext_mark_uninitialized(ex); if (ex + 1 < EXT_LAST_EXTENT(eh)) { len = (EXT_LAST_EXTENT(eh) - ex - 1) * sizeof(struct ext4_extent); memmove(ex + 1, ex + 2, len); } le16_add_cpu(&eh->eh_entries, -1); merge_done = 1; WARN_ON(eh->eh_entries == 0); if (!eh->eh_entries) EXT4_ERROR_INODE(inode, "eh->eh_entries = 0!"); } return merge_done; } /* * This function does a very simple check to see if we can collapse * an extent tree with a single extent tree leaf block into the inode. */ static void ext4_ext_try_to_merge_up(handle_t *handle, struct inode *inode, struct ext4_ext_path *path) { size_t s; unsigned max_root = ext4_ext_space_root(inode, 0); ext4_fsblk_t blk; if ((path[0].p_depth != 1) || (le16_to_cpu(path[0].p_hdr->eh_entries) != 1) || (le16_to_cpu(path[1].p_hdr->eh_entries) > max_root)) return; /* * We need to modify the block allocation bitmap and the block * group descriptor to release the extent tree block. If we * can't get the journal credits, give up. */ if (ext4_journal_extend(handle, 2)) return; /* * Copy the extent data up to the inode */ blk = ext4_idx_pblock(path[0].p_idx); s = le16_to_cpu(path[1].p_hdr->eh_entries) * sizeof(struct ext4_extent_idx); s += sizeof(struct ext4_extent_header); memcpy(path[0].p_hdr, path[1].p_hdr, s); path[0].p_depth = 0; path[0].p_ext = EXT_FIRST_EXTENT(path[0].p_hdr) + (path[1].p_ext - EXT_FIRST_EXTENT(path[1].p_hdr)); path[0].p_hdr->eh_max = cpu_to_le16(max_root); brelse(path[1].p_bh); ext4_free_blocks(handle, inode, NULL, blk, 1, EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET); } /* * This function tries to merge the @ex extent to neighbours in the tree. * return 1 if merge left else 0. */ static void ext4_ext_try_to_merge(handle_t *handle, struct inode *inode, struct ext4_ext_path *path, struct ext4_extent *ex) { struct ext4_extent_header *eh; unsigned int depth; int merge_done = 0; depth = ext_depth(inode); BUG_ON(path[depth].p_hdr == NULL); eh = path[depth].p_hdr; if (ex > EXT_FIRST_EXTENT(eh)) merge_done = ext4_ext_try_to_merge_right(inode, path, ex - 1); if (!merge_done) (void) ext4_ext_try_to_merge_right(inode, path, ex); ext4_ext_try_to_merge_up(handle, inode, path); } /* * check if a portion of the "newext" extent overlaps with an * existing extent. * * If there is an overlap discovered, it updates the length of the newext * such that there will be no overlap, and then returns 1. * If there is no overlap found, it returns 0. 
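 * A rough example (block numbers are illustrative only): if newext
 * starts at logical block 100 with length 50 and the next allocated
 * block is 120, ee_len is trimmed to 20 so the new extent stops just
 * before the existing allocation.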
*/ static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi, struct inode *inode, struct ext4_extent *newext, struct ext4_ext_path *path) { ext4_lblk_t b1, b2; unsigned int depth, len1; unsigned int ret = 0; b1 = le32_to_cpu(newext->ee_block); len1 = ext4_ext_get_actual_len(newext); depth = ext_depth(inode); if (!path[depth].p_ext) goto out; b2 = le32_to_cpu(path[depth].p_ext->ee_block); b2 &= ~(sbi->s_cluster_ratio - 1); /* * get the next allocated block if the extent in the path * is before the requested block(s) */ if (b2 < b1) { b2 = ext4_ext_next_allocated_block(path); if (b2 == EXT_MAX_BLOCKS) goto out; b2 &= ~(sbi->s_cluster_ratio - 1); } /* check for wrap through zero on extent logical start block*/ if (b1 + len1 < b1) { len1 = EXT_MAX_BLOCKS - b1; newext->ee_len = cpu_to_le16(len1); ret = 1; } /* check for overlap */ if (b1 + len1 > b2) { newext->ee_len = cpu_to_le16(b2 - b1); ret = 1; } out: return ret; } /* * ext4_ext_insert_extent: * tries to merge requsted extent into the existing extent or * inserts requested extent as new one into the tree, * creating new leaf in the no-space case. */ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode, struct ext4_ext_path *path, struct ext4_extent *newext, int flag) { struct ext4_extent_header *eh; struct ext4_extent *ex, *fex; struct ext4_extent *nearex; /* nearest extent */ struct ext4_ext_path *npath = NULL; int depth, len, err; ext4_lblk_t next; unsigned uninitialized = 0; int flags = 0; if (unlikely(ext4_ext_get_actual_len(newext) == 0)) { EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0"); return -EIO; } depth = ext_depth(inode); ex = path[depth].p_ext; if (unlikely(path[depth].p_hdr == NULL)) { EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth); return -EIO; } /* try to insert block into found extent and return */ if (ex && !(flag & EXT4_GET_BLOCKS_PRE_IO) && ext4_can_extents_be_merged(inode, ex, newext)) { ext_debug("append [%d]%d block to %u:[%d]%d (from %llu)\n", ext4_ext_is_uninitialized(newext), ext4_ext_get_actual_len(newext), le32_to_cpu(ex->ee_block), ext4_ext_is_uninitialized(ex), ext4_ext_get_actual_len(ex), ext4_ext_pblock(ex)); err = ext4_ext_get_access(handle, inode, path + depth); if (err) return err; /* * ext4_can_extents_be_merged should have checked that either * both extents are uninitialized, or both aren't. Thus we * need to check only one of them here. */ if (ext4_ext_is_uninitialized(ex)) uninitialized = 1; ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex) + ext4_ext_get_actual_len(newext)); if (uninitialized) ext4_ext_mark_uninitialized(ex); eh = path[depth].p_hdr; nearex = ex; goto merge; } depth = ext_depth(inode); eh = path[depth].p_hdr; if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) goto has_space; /* probably next leaf has space for us? */ fex = EXT_LAST_EXTENT(eh); next = EXT_MAX_BLOCKS; if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block)) next = ext4_ext_next_leaf_block(path); if (next != EXT_MAX_BLOCKS) { ext_debug("next leaf block - %u\n", next); BUG_ON(npath != NULL); npath = ext4_ext_find_extent(inode, next, NULL); if (IS_ERR(npath)) return PTR_ERR(npath); BUG_ON(npath->p_depth != path->p_depth); eh = npath[depth].p_hdr; if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) { ext_debug("next leaf isn't full(%d)\n", le16_to_cpu(eh->eh_entries)); path = npath; goto has_space; } ext_debug("next leaf has no free space(%d,%d)\n", le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max)); } /* * There is no free space in the found leaf. 
* We're gonna add a new leaf in the tree. */ if (flag & EXT4_GET_BLOCKS_PUNCH_OUT_EXT) flags = EXT4_MB_USE_ROOT_BLOCKS; err = ext4_ext_create_new_leaf(handle, inode, flags, path, newext); if (err) goto cleanup; depth = ext_depth(inode); eh = path[depth].p_hdr; has_space: nearex = path[depth].p_ext; err = ext4_ext_get_access(handle, inode, path + depth); if (err) goto cleanup; if (!nearex) { /* there is no extent in this leaf, create first one */ ext_debug("first extent in the leaf: %u:%llu:[%d]%d\n", le32_to_cpu(newext->ee_block), ext4_ext_pblock(newext), ext4_ext_is_uninitialized(newext), ext4_ext_get_actual_len(newext)); nearex = EXT_FIRST_EXTENT(eh); } else { if (le32_to_cpu(newext->ee_block) > le32_to_cpu(nearex->ee_block)) { /* Insert after */ ext_debug("insert %u:%llu:[%d]%d before: " "nearest %p\n", le32_to_cpu(newext->ee_block), ext4_ext_pblock(newext), ext4_ext_is_uninitialized(newext), ext4_ext_get_actual_len(newext), nearex); nearex++; } else { /* Insert before */ BUG_ON(newext->ee_block == nearex->ee_block); ext_debug("insert %u:%llu:[%d]%d after: " "nearest %p\n", le32_to_cpu(newext->ee_block), ext4_ext_pblock(newext), ext4_ext_is_uninitialized(newext), ext4_ext_get_actual_len(newext), nearex); } len = EXT_LAST_EXTENT(eh) - nearex + 1; if (len > 0) { ext_debug("insert %u:%llu:[%d]%d: " "move %d extents from 0x%p to 0x%p\n", le32_to_cpu(newext->ee_block), ext4_ext_pblock(newext), ext4_ext_is_uninitialized(newext), ext4_ext_get_actual_len(newext), len, nearex, nearex + 1); memmove(nearex + 1, nearex, len * sizeof(struct ext4_extent)); } } le16_add_cpu(&eh->eh_entries, 1); path[depth].p_ext = nearex; nearex->ee_block = newext->ee_block; ext4_ext_store_pblock(nearex, ext4_ext_pblock(newext)); nearex->ee_len = newext->ee_len; merge: /* try to merge extents */ if (!(flag & EXT4_GET_BLOCKS_PRE_IO)) ext4_ext_try_to_merge(handle, inode, path, nearex); /* time to correct all indexes above */ err = ext4_ext_correct_indexes(handle, inode, path); if (err) goto cleanup; err = ext4_ext_dirty(handle, inode, path + path->p_depth); cleanup: if (npath) { ext4_ext_drop_refs(npath); kfree(npath); } ext4_ext_invalidate_cache(inode); return err; } static int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block, ext4_lblk_t num, ext_prepare_callback func, void *cbdata) { struct ext4_ext_path *path = NULL; struct ext4_ext_cache cbex; struct ext4_extent *ex; ext4_lblk_t next, start = 0, end = 0; ext4_lblk_t last = block + num; int depth, exists, err = 0; BUG_ON(func == NULL); BUG_ON(inode == NULL); while (block < last && block != EXT_MAX_BLOCKS) { num = last - block; /* find extent for this block */ down_read(&EXT4_I(inode)->i_data_sem); path = ext4_ext_find_extent(inode, block, path); up_read(&EXT4_I(inode)->i_data_sem); if (IS_ERR(path)) { err = PTR_ERR(path); path = NULL; break; } depth = ext_depth(inode); if (unlikely(path[depth].p_hdr == NULL)) { EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth); err = -EIO; break; } ex = path[depth].p_ext; next = ext4_ext_next_allocated_block(path); exists = 0; if (!ex) { /* there is no extent yet, so try to allocate * all requested space */ start = block; end = block + num; } else if (le32_to_cpu(ex->ee_block) > block) { /* need to allocate space before found extent */ start = block; end = le32_to_cpu(ex->ee_block); if (block + num < end) end = block + num; } else if (block >= le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex)) { /* need to allocate space after found extent */ start = block; end = block + num; if (end >= next) end = next; } 
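/*
 * The cases above produce holes before or after the found extent;
 * the remaining branch handles a request that overlaps an already
 * allocated extent.
 */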
else if (block >= le32_to_cpu(ex->ee_block)) { /* * some part of requested space is covered * by found extent */ start = block; end = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex); if (block + num < end) end = block + num; exists = 1; } else { BUG(); } BUG_ON(end <= start); if (!exists) { cbex.ec_block = start; cbex.ec_len = end - start; cbex.ec_start = 0; } else { cbex.ec_block = le32_to_cpu(ex->ee_block); cbex.ec_len = ext4_ext_get_actual_len(ex); cbex.ec_start = ext4_ext_pblock(ex); } if (unlikely(cbex.ec_len == 0)) { EXT4_ERROR_INODE(inode, "cbex.ec_len == 0"); err = -EIO; break; } err = func(inode, next, &cbex, ex, cbdata); ext4_ext_drop_refs(path); if (err < 0) break; if (err == EXT_REPEAT) continue; else if (err == EXT_BREAK) { err = 0; break; } if (ext_depth(inode) != depth) { /* depth was changed. we have to realloc path */ kfree(path); path = NULL; } block = cbex.ec_block + cbex.ec_len; } if (path) { ext4_ext_drop_refs(path); kfree(path); } return err; } static void ext4_ext_put_in_cache(struct inode *inode, ext4_lblk_t block, __u32 len, ext4_fsblk_t start) { struct ext4_ext_cache *cex; BUG_ON(len == 0); spin_lock(&EXT4_I(inode)->i_block_reservation_lock); trace_ext4_ext_put_in_cache(inode, block, len, start); cex = &EXT4_I(inode)->i_cached_extent; cex->ec_block = block; cex->ec_len = len; cex->ec_start = start; spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); } /* * ext4_ext_put_gap_in_cache: * calculate boundaries of the gap that the requested block fits into * and cache this gap */ static void ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path, ext4_lblk_t block) { int depth = ext_depth(inode); unsigned long len; ext4_lblk_t lblock; struct ext4_extent *ex; ex = path[depth].p_ext; if (ex == NULL) { /* there is no extent yet, so gap is [0;-] */ lblock = 0; len = EXT_MAX_BLOCKS; ext_debug("cache gap(whole file):"); } else if (block < le32_to_cpu(ex->ee_block)) { lblock = block; len = le32_to_cpu(ex->ee_block) - block; ext_debug("cache gap(before): %u [%u:%u]", block, le32_to_cpu(ex->ee_block), ext4_ext_get_actual_len(ex)); } else if (block >= le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex)) { ext4_lblk_t next; lblock = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex); next = ext4_ext_next_allocated_block(path); ext_debug("cache gap(after): [%u:%u] %u", le32_to_cpu(ex->ee_block), ext4_ext_get_actual_len(ex), block); BUG_ON(next == lblock); len = next - lblock; } else { lblock = len = 0; BUG(); } ext_debug(" -> %u:%lu\n", lblock, len); ext4_ext_put_in_cache(inode, lblock, len, 0); } /* * ext4_ext_in_cache() * Checks to see if the given block is in the cache. * If it is, the cached extent is stored in the given * cache extent pointer. * * @inode: The files inode * @block: The block to look for in the cache * @ex: Pointer where the cached extent will be stored * if it contains block * * Return 0 if cache is invalid; 1 if the cache is valid */ static int ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block, struct ext4_extent *ex) { struct ext4_ext_cache *cex; struct ext4_sb_info *sbi; int ret = 0; /* * We borrow i_block_reservation_lock to protect i_cached_extent */ spin_lock(&EXT4_I(inode)->i_block_reservation_lock); cex = &EXT4_I(inode)->i_cached_extent; sbi = EXT4_SB(inode->i_sb); /* has cache valid data? 
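 * (an ec_len of zero appears to mark the single-slot extent cache as
 * empty, judging by the check that follows)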
*/ if (cex->ec_len == 0) goto errout; if (in_range(block, cex->ec_block, cex->ec_len)) { ex->ee_block = cpu_to_le32(cex->ec_block); ext4_ext_store_pblock(ex, cex->ec_start); ex->ee_len = cpu_to_le16(cex->ec_len); ext_debug("%u cached by %u:%u:%llu\n", block, cex->ec_block, cex->ec_len, cex->ec_start); ret = 1; } errout: trace_ext4_ext_in_cache(inode, block, ret); spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); return ret; } /* * ext4_ext_rm_idx: * removes index from the index block. */ static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode, struct ext4_ext_path *path) { int err; ext4_fsblk_t leaf; /* free index block */ path--; leaf = ext4_idx_pblock(path->p_idx); if (unlikely(path->p_hdr->eh_entries == 0)) { EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0"); return -EIO; } err = ext4_ext_get_access(handle, inode, path); if (err) return err; if (path->p_idx != EXT_LAST_INDEX(path->p_hdr)) { int len = EXT_LAST_INDEX(path->p_hdr) - path->p_idx; len *= sizeof(struct ext4_extent_idx); memmove(path->p_idx, path->p_idx + 1, len); } le16_add_cpu(&path->p_hdr->eh_entries, -1); err = ext4_ext_dirty(handle, inode, path); if (err) return err; ext_debug("index is empty, remove it, free block %llu\n", leaf); trace_ext4_ext_rm_idx(inode, leaf); ext4_free_blocks(handle, inode, NULL, leaf, 1, EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET); return err; } /* * ext4_ext_calc_credits_for_single_extent: * This routine returns max. credits that needed to insert an extent * to the extent tree. * When pass the actual path, the caller should calculate credits * under i_data_sem. */ int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks, struct ext4_ext_path *path) { if (path) { int depth = ext_depth(inode); int ret = 0; /* probably there is space in leaf? */ if (le16_to_cpu(path[depth].p_hdr->eh_entries) < le16_to_cpu(path[depth].p_hdr->eh_max)) { /* * There are some space in the leaf tree, no * need to account for leaf block credit * * bitmaps and block group descriptor blocks * and other metadata blocks still need to be * accounted. */ /* 1 bitmap, 1 block group descriptor */ ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb); return ret; } } return ext4_chunk_trans_blocks(inode, nrblocks); } /* * How many index/leaf blocks need to change/allocate to modify nrblocks? * * if nrblocks are fit in a single extent (chunk flag is 1), then * in the worse case, each tree level index/leaf need to be changed * if the tree split due to insert a new extent, then the old tree * index/leaf need to be updated too * * If the nrblocks are discontiguous, they could cause * the whole tree split more than once, but this is really rare. */ int ext4_ext_index_trans_blocks(struct inode *inode, int nrblocks, int chunk) { int index; int depth = ext_depth(inode); if (chunk) index = depth * 2; else index = depth * 3; return index; } static int ext4_remove_blocks(handle_t *handle, struct inode *inode, struct ext4_extent *ex, ext4_fsblk_t *partial_cluster, ext4_lblk_t from, ext4_lblk_t to) { struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); unsigned short ee_len = ext4_ext_get_actual_len(ex); ext4_fsblk_t pblk; int flags = 0; if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) flags |= EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET; else if (ext4_should_journal_data(inode)) flags |= EXT4_FREE_BLOCKS_FORGET; /* * For bigalloc file systems, we never free a partial cluster * at the beginning of the extent. 
Instead, we make a note * that we tried freeing the cluster, and check to see if we * need to free it on a subsequent call to ext4_remove_blocks, * or at the end of the ext4_truncate() operation. */ flags |= EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER; trace_ext4_remove_blocks(inode, ex, from, to, *partial_cluster); /* * If we have a partial cluster, and it's different from the * cluster of the last block, we need to explicitly free the * partial cluster here. */ pblk = ext4_ext_pblock(ex) + ee_len - 1; if (*partial_cluster && (EXT4_B2C(sbi, pblk) != *partial_cluster)) { ext4_free_blocks(handle, inode, NULL, EXT4_C2B(sbi, *partial_cluster), sbi->s_cluster_ratio, flags); *partial_cluster = 0; } #ifdef EXTENTS_STATS { struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); spin_lock(&sbi->s_ext_stats_lock); sbi->s_ext_blocks += ee_len; sbi->s_ext_extents++; if (ee_len < sbi->s_ext_min) sbi->s_ext_min = ee_len; if (ee_len > sbi->s_ext_max) sbi->s_ext_max = ee_len; if (ext_depth(inode) > sbi->s_depth_max) sbi->s_depth_max = ext_depth(inode); spin_unlock(&sbi->s_ext_stats_lock); } #endif if (from >= le32_to_cpu(ex->ee_block) && to == le32_to_cpu(ex->ee_block) + ee_len - 1) { /* tail removal */ ext4_lblk_t num; num = le32_to_cpu(ex->ee_block) + ee_len - from; pblk = ext4_ext_pblock(ex) + ee_len - num; ext_debug("free last %u blocks starting %llu\n", num, pblk); ext4_free_blocks(handle, inode, NULL, pblk, num, flags); /* * If the block range to be freed didn't start at the * beginning of a cluster, and we removed the entire * extent, save the partial cluster here, since we * might need to delete if we determine that the * truncate operation has removed all of the blocks in * the cluster. */ if (pblk & (sbi->s_cluster_ratio - 1) && (ee_len == num)) *partial_cluster = EXT4_B2C(sbi, pblk); else *partial_cluster = 0; } else if (from == le32_to_cpu(ex->ee_block) && to <= le32_to_cpu(ex->ee_block) + ee_len - 1) { /* head removal */ ext4_lblk_t num; ext4_fsblk_t start; num = to - from; start = ext4_ext_pblock(ex); ext_debug("free first %u blocks starting %llu\n", num, start); ext4_free_blocks(handle, inode, NULL, start, num, flags); } else { printk(KERN_INFO "strange request: removal(2) " "%u-%u from %u:%u\n", from, to, le32_to_cpu(ex->ee_block), ee_len); } return 0; } /* * ext4_ext_rm_leaf() Removes the extents associated with the * blocks appearing between "start" and "end", and splits the extents * if "start" and "end" appear in the same extent * * @handle: The journal handle * @inode: The files inode * @path: The path to the leaf * @start: The first block to remove * @end: The last block to remove */ static int ext4_ext_rm_leaf(handle_t *handle, struct inode *inode, struct ext4_ext_path *path, ext4_fsblk_t *partial_cluster, ext4_lblk_t start, ext4_lblk_t end) { struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); int err = 0, correct_index = 0; int depth = ext_depth(inode), credits; struct ext4_extent_header *eh; ext4_lblk_t a, b; unsigned num; ext4_lblk_t ex_ee_block; unsigned short ex_ee_len; unsigned uninitialized = 0; struct ext4_extent *ex; /* the header must be checked already in ext4_ext_remove_space() */ ext_debug("truncate since %u in leaf to %u\n", start, end); if (!path[depth].p_hdr) path[depth].p_hdr = ext_block_hdr(path[depth].p_bh); eh = path[depth].p_hdr; if (unlikely(path[depth].p_hdr == NULL)) { EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth); return -EIO; } /* find where to start removing */ ex = EXT_LAST_EXTENT(eh); ex_ee_block = le32_to_cpu(ex->ee_block); ex_ee_len = 
ext4_ext_get_actual_len(ex); trace_ext4_ext_rm_leaf(inode, start, ex, *partial_cluster); while (ex >= EXT_FIRST_EXTENT(eh) && ex_ee_block + ex_ee_len > start) { if (ext4_ext_is_uninitialized(ex)) uninitialized = 1; else uninitialized = 0; ext_debug("remove ext %u:[%d]%d\n", ex_ee_block, uninitialized, ex_ee_len); path[depth].p_ext = ex; a = ex_ee_block > start ? ex_ee_block : start; b = ex_ee_block+ex_ee_len - 1 < end ? ex_ee_block+ex_ee_len - 1 : end; ext_debug(" border %u:%u\n", a, b); /* If this extent is beyond the end of the hole, skip it */ if (end < ex_ee_block) { ex--; ex_ee_block = le32_to_cpu(ex->ee_block); ex_ee_len = ext4_ext_get_actual_len(ex); continue; } else if (b != ex_ee_block + ex_ee_len - 1) { EXT4_ERROR_INODE(inode, "can not handle truncate %u:%u " "on extent %u:%u", start, end, ex_ee_block, ex_ee_block + ex_ee_len - 1); err = -EIO; goto out; } else if (a != ex_ee_block) { /* remove tail of the extent */ num = a - ex_ee_block; } else { /* remove whole extent: excellent! */ num = 0; } /* * 3 for leaf, sb, and inode plus 2 (bmap and group * descriptor) for each block group; assume two block * groups plus ex_ee_len/blocks_per_block_group for * the worst case */ credits = 7 + 2*(ex_ee_len/EXT4_BLOCKS_PER_GROUP(inode->i_sb)); if (ex == EXT_FIRST_EXTENT(eh)) { correct_index = 1; credits += (ext_depth(inode)) + 1; } credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb); err = ext4_ext_truncate_extend_restart(handle, inode, credits); if (err) goto out; err = ext4_ext_get_access(handle, inode, path + depth); if (err) goto out; err = ext4_remove_blocks(handle, inode, ex, partial_cluster, a, b); if (err) goto out; if (num == 0) /* this extent is removed; mark slot entirely unused */ ext4_ext_store_pblock(ex, 0); ex->ee_len = cpu_to_le16(num); /* * Do not mark uninitialized if all the blocks in the * extent have been removed. */ if (uninitialized && num) ext4_ext_mark_uninitialized(ex); /* * If the extent was completely released, * we need to remove it from the leaf */ if (num == 0) { if (end != EXT_MAX_BLOCKS - 1) { /* * For hole punching, we need to scoot all the * extents up when an extent is removed so that * we dont have blank extents in the middle */ memmove(ex, ex+1, (EXT_LAST_EXTENT(eh) - ex) * sizeof(struct ext4_extent)); /* Now get rid of the one at the end */ memset(EXT_LAST_EXTENT(eh), 0, sizeof(struct ext4_extent)); } le16_add_cpu(&eh->eh_entries, -1); } else *partial_cluster = 0; err = ext4_ext_dirty(handle, inode, path + depth); if (err) goto out; ext_debug("new extent: %u:%u:%llu\n", ex_ee_block, num, ext4_ext_pblock(ex)); ex--; ex_ee_block = le32_to_cpu(ex->ee_block); ex_ee_len = ext4_ext_get_actual_len(ex); } if (correct_index && eh->eh_entries) err = ext4_ext_correct_indexes(handle, inode, path); /* * If there is still a entry in the leaf node, check to see if * it references the partial cluster. This is the only place * where it could; if it doesn't, we can free the cluster. 
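 * (That is, the saved partial cluster is released here only when the
 * surviving extent to its left lives in a different cluster, mirroring
 * the check made inside ext4_remove_blocks().)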
*/ if (*partial_cluster && ex >= EXT_FIRST_EXTENT(eh) && (EXT4_B2C(sbi, ext4_ext_pblock(ex) + ex_ee_len - 1) != *partial_cluster)) { int flags = EXT4_FREE_BLOCKS_FORGET; if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) flags |= EXT4_FREE_BLOCKS_METADATA; ext4_free_blocks(handle, inode, NULL, EXT4_C2B(sbi, *partial_cluster), sbi->s_cluster_ratio, flags); *partial_cluster = 0; } /* if this leaf is free, then we should * remove it from index block above */ if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL) err = ext4_ext_rm_idx(handle, inode, path + depth); out: return err; } /* * ext4_ext_more_to_rm: * returns 1 if current index has to be freed (even partial) */ static int ext4_ext_more_to_rm(struct ext4_ext_path *path) { BUG_ON(path->p_idx == NULL); if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr)) return 0; /* * if truncate on deeper level happened, it wasn't partial, * so we have to consider current index for truncation */ if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block) return 0; return 1; } static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start, ext4_lblk_t end) { struct super_block *sb = inode->i_sb; int depth = ext_depth(inode); struct ext4_ext_path *path = NULL; ext4_fsblk_t partial_cluster = 0; handle_t *handle; int i = 0, err = 0; ext_debug("truncate since %u to %u\n", start, end); /* probably first extent we're gonna free will be last in block */ handle = ext4_journal_start(inode, depth + 1); if (IS_ERR(handle)) return PTR_ERR(handle); again: ext4_ext_invalidate_cache(inode); trace_ext4_ext_remove_space(inode, start, depth); /* * Check if we are removing extents inside the extent tree. If that * is the case, we are going to punch a hole inside the extent tree * so we have to check whether we need to split the extent covering * the last block to remove so we can easily remove the part of it * in ext4_ext_rm_leaf(). */ if (end < EXT_MAX_BLOCKS - 1) { struct ext4_extent *ex; ext4_lblk_t ee_block; /* find extent for this block */ path = ext4_ext_find_extent(inode, end, NULL); if (IS_ERR(path)) { ext4_journal_stop(handle); return PTR_ERR(path); } depth = ext_depth(inode); /* Leaf not may not exist only if inode has no blocks at all */ ex = path[depth].p_ext; if (!ex) { if (depth) { EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth); err = -EIO; } goto out; } ee_block = le32_to_cpu(ex->ee_block); /* * See if the last block is inside the extent, if so split * the extent at 'end' block so we can easily remove the * tail of the first part of the split extent in * ext4_ext_rm_leaf(). */ if (end >= ee_block && end < ee_block + ext4_ext_get_actual_len(ex) - 1) { int split_flag = 0; if (ext4_ext_is_uninitialized(ex)) split_flag = EXT4_EXT_MARK_UNINIT1 | EXT4_EXT_MARK_UNINIT2; /* * Split the extent in two so that 'end' is the last * block in the first new extent */ err = ext4_split_extent_at(handle, inode, path, end + 1, split_flag, EXT4_GET_BLOCKS_PRE_IO | EXT4_GET_BLOCKS_PUNCH_OUT_EXT); if (err < 0) goto out; } } /* * We start scanning from right side, freeing all the blocks * after i_size and walking into the tree depth-wise. 
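 * The walk below is iterative: the path[] array doubles as an explicit
 * stack, with p_idx and p_block at each level recording how far that
 * index block has been processed, so no recursion is needed.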
*/ depth = ext_depth(inode); if (path) { int k = i = depth; while (--k > 0) path[k].p_block = le16_to_cpu(path[k].p_hdr->eh_entries)+1; } else { path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1), GFP_NOFS); if (path == NULL) { ext4_journal_stop(handle); return -ENOMEM; } path[0].p_depth = depth; path[0].p_hdr = ext_inode_hdr(inode); i = 0; if (ext4_ext_check(inode, path[0].p_hdr, depth)) { err = -EIO; goto out; } } err = 0; while (i >= 0 && err == 0) { if (i == depth) { /* this is leaf block */ err = ext4_ext_rm_leaf(handle, inode, path, &partial_cluster, start, end); /* root level has p_bh == NULL, brelse() eats this */ brelse(path[i].p_bh); path[i].p_bh = NULL; i--; continue; } /* this is index block */ if (!path[i].p_hdr) { ext_debug("initialize header\n"); path[i].p_hdr = ext_block_hdr(path[i].p_bh); } if (!path[i].p_idx) { /* this level hasn't been touched yet */ path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr); path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1; ext_debug("init index ptr: hdr 0x%p, num %d\n", path[i].p_hdr, le16_to_cpu(path[i].p_hdr->eh_entries)); } else { /* we were already here, see at next index */ path[i].p_idx--; } ext_debug("level %d - index, first 0x%p, cur 0x%p\n", i, EXT_FIRST_INDEX(path[i].p_hdr), path[i].p_idx); if (ext4_ext_more_to_rm(path + i)) { struct buffer_head *bh; /* go to the next level */ ext_debug("move to level %d (block %llu)\n", i + 1, ext4_idx_pblock(path[i].p_idx)); memset(path + i + 1, 0, sizeof(*path)); bh = sb_bread(sb, ext4_idx_pblock(path[i].p_idx)); if (!bh) { /* should we reset i_size? */ err = -EIO; break; } if (WARN_ON(i + 1 > depth)) { err = -EIO; break; } if (ext4_ext_check_block(inode, ext_block_hdr(bh), depth - i - 1, bh)) { err = -EIO; break; } path[i + 1].p_bh = bh; /* save actual number of indexes since this * number is changed at the next iteration */ path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries); i++; } else { /* we finished processing this index, go up */ if (path[i].p_hdr->eh_entries == 0 && i > 0) { /* index is empty, remove it; * handle must be already prepared by the * truncatei_leaf() */ err = ext4_ext_rm_idx(handle, inode, path + i); } /* root level has p_bh == NULL, brelse() eats this */ brelse(path[i].p_bh); path[i].p_bh = NULL; i--; ext_debug("return to level %d\n", i); } } trace_ext4_ext_remove_space_done(inode, start, depth, partial_cluster, path->p_hdr->eh_entries); /* If we still have something in the partial cluster and we have removed * even the first extent, then we should free the blocks in the partial * cluster as well. 
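 * (Here "removed even the first extent" is detected by the root header
 * having eh_entries == 0, i.e. the whole tree has been emptied.)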
*/ if (partial_cluster && path->p_hdr->eh_entries == 0) { int flags = EXT4_FREE_BLOCKS_FORGET; if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) flags |= EXT4_FREE_BLOCKS_METADATA; ext4_free_blocks(handle, inode, NULL, EXT4_C2B(EXT4_SB(sb), partial_cluster), EXT4_SB(sb)->s_cluster_ratio, flags); partial_cluster = 0; } /* TODO: flexible tree reduction should be here */ if (path->p_hdr->eh_entries == 0) { /* * truncate to zero freed all the tree, * so we need to correct eh_depth */ err = ext4_ext_get_access(handle, inode, path); if (err == 0) { ext_inode_hdr(inode)->eh_depth = 0; ext_inode_hdr(inode)->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0)); err = ext4_ext_dirty(handle, inode, path); } } out: ext4_ext_drop_refs(path); kfree(path); if (err == -EAGAIN) { path = NULL; goto again; } ext4_journal_stop(handle); return err; } /* * called at mount time */ void ext4_ext_init(struct super_block *sb) { /* * possible initialization would be here */ if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) { #if defined(AGGRESSIVE_TEST) || defined(CHECK_BINSEARCH) || defined(EXTENTS_STATS) printk(KERN_INFO "EXT4-fs: file extents enabled" #ifdef AGGRESSIVE_TEST ", aggressive tests" #endif #ifdef CHECK_BINSEARCH ", check binsearch" #endif #ifdef EXTENTS_STATS ", stats" #endif "\n"); #endif #ifdef EXTENTS_STATS spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock); EXT4_SB(sb)->s_ext_min = 1 << 30; EXT4_SB(sb)->s_ext_max = 0; #endif } } /* * called at umount time */ void ext4_ext_release(struct super_block *sb) { if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) return; #ifdef EXTENTS_STATS if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) { struct ext4_sb_info *sbi = EXT4_SB(sb); printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n", sbi->s_ext_blocks, sbi->s_ext_extents, sbi->s_ext_blocks / sbi->s_ext_extents); printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n", sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max); } #endif } /* FIXME!! we need to try to merge to left or right after zero-out */ static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex) { ext4_fsblk_t ee_pblock; unsigned int ee_len; int ret; ee_len = ext4_ext_get_actual_len(ex); ee_pblock = ext4_ext_pblock(ex); ret = sb_issue_zeroout(inode->i_sb, ee_pblock, ee_len, GFP_NOFS); if (ret > 0) ret = 0; return ret; } /* * ext4_split_extent_at() splits an extent at given block. * * @handle: the journal handle * @inode: the file inode * @path: the path to the extent * @split: the logical block where the extent is splitted. * @split_flags: indicates if the extent could be zeroout if split fails, and * the states(init or uninit) of new extents. * @flags: flags used to insert new extent to extent tree. * * * Splits extent [a, b] into two extents [a, @split) and [@split, b], states * of which are deterimined by split_flag. * * There are two cases: * a> the extent are splitted into two extent. * b> split is not needed, and just mark the extent. * * return 0 on success. 
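 * As a small illustration (numbers are only an example), an extent
 * covering logical blocks [100, 149] split at 120 becomes [100, 119]
 * and [120, 149], with the second half starting at the physical block
 * that originally backed logical block 120.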
*/ static int ext4_split_extent_at(handle_t *handle, struct inode *inode, struct ext4_ext_path *path, ext4_lblk_t split, int split_flag, int flags) { ext4_fsblk_t newblock; ext4_lblk_t ee_block; struct ext4_extent *ex, newex, orig_ex; struct ext4_extent *ex2 = NULL; unsigned int ee_len, depth; int err = 0; ext_debug("ext4_split_extents_at: inode %lu, logical" "block %llu\n", inode->i_ino, (unsigned long long)split); ext4_ext_show_leaf(inode, path); depth = ext_depth(inode); ex = path[depth].p_ext; ee_block = le32_to_cpu(ex->ee_block); ee_len = ext4_ext_get_actual_len(ex); newblock = split - ee_block + ext4_ext_pblock(ex); BUG_ON(split < ee_block || split >= (ee_block + ee_len)); err = ext4_ext_get_access(handle, inode, path + depth); if (err) goto out; if (split == ee_block) { /* * case b: block @split is the block that the extent begins with * then we just change the state of the extent, and splitting * is not needed. */ if (split_flag & EXT4_EXT_MARK_UNINIT2) ext4_ext_mark_uninitialized(ex); else ext4_ext_mark_initialized(ex); if (!(flags & EXT4_GET_BLOCKS_PRE_IO)) ext4_ext_try_to_merge(handle, inode, path, ex); err = ext4_ext_dirty(handle, inode, path + path->p_depth); goto out; } /* case a */ memcpy(&orig_ex, ex, sizeof(orig_ex)); ex->ee_len = cpu_to_le16(split - ee_block); if (split_flag & EXT4_EXT_MARK_UNINIT1) ext4_ext_mark_uninitialized(ex); /* * path may lead to new leaf, not to original leaf any more * after ext4_ext_insert_extent() returns, */ err = ext4_ext_dirty(handle, inode, path + depth); if (err) goto fix_extent_len; ex2 = &newex; ex2->ee_block = cpu_to_le32(split); ex2->ee_len = cpu_to_le16(ee_len - (split - ee_block)); ext4_ext_store_pblock(ex2, newblock); if (split_flag & EXT4_EXT_MARK_UNINIT2) ext4_ext_mark_uninitialized(ex2); err = ext4_ext_insert_extent(handle, inode, path, &newex, flags); if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) { err = ext4_ext_zeroout(inode, &orig_ex); if (err) goto fix_extent_len; /* update the extent length and mark as initialized */ ex->ee_len = cpu_to_le16(ee_len); ext4_ext_try_to_merge(handle, inode, path, ex); err = ext4_ext_dirty(handle, inode, path + path->p_depth); goto out; } else if (err) goto fix_extent_len; out: ext4_ext_show_leaf(inode, path); return err; fix_extent_len: ex->ee_len = orig_ex.ee_len; ext4_ext_dirty(handle, inode, path + depth); return err; } /* * ext4_split_extents() splits an extent and mark extent which is covered * by @map as split_flags indicates * * It may result in splitting the extent into multiple extents (upto three) * There are three possibilities: * a> There is no split required * b> Splits in two extents: Split is happening at either end of the extent * c> Splits in three extents: Somone is splitting in middle of the extent * */ static int ext4_split_extent(handle_t *handle, struct inode *inode, struct ext4_ext_path *path, struct ext4_map_blocks *map, int split_flag, int flags) { ext4_lblk_t ee_block; struct ext4_extent *ex; unsigned int ee_len, depth; int err = 0; int uninitialized; int split_flag1, flags1; depth = ext_depth(inode); ex = path[depth].p_ext; ee_block = le32_to_cpu(ex->ee_block); ee_len = ext4_ext_get_actual_len(ex); uninitialized = ext4_ext_is_uninitialized(ex); if (map->m_lblk + map->m_len < ee_block + ee_len) { split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT ? 
EXT4_EXT_MAY_ZEROOUT : 0; flags1 = flags | EXT4_GET_BLOCKS_PRE_IO; if (uninitialized) split_flag1 |= EXT4_EXT_MARK_UNINIT1 | EXT4_EXT_MARK_UNINIT2; err = ext4_split_extent_at(handle, inode, path, map->m_lblk + map->m_len, split_flag1, flags1); if (err) goto out; } ext4_ext_drop_refs(path); path = ext4_ext_find_extent(inode, map->m_lblk, path); if (IS_ERR(path)) return PTR_ERR(path); if (map->m_lblk >= ee_block) { split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT ? EXT4_EXT_MAY_ZEROOUT : 0; if (uninitialized) split_flag1 |= EXT4_EXT_MARK_UNINIT1; if (split_flag & EXT4_EXT_MARK_UNINIT2) split_flag1 |= EXT4_EXT_MARK_UNINIT2; err = ext4_split_extent_at(handle, inode, path, map->m_lblk, split_flag1, flags); if (err) goto out; } ext4_ext_show_leaf(inode, path); out: return err ? err : map->m_len; } /* * This function is called by ext4_ext_map_blocks() if someone tries to write * to an uninitialized extent. It may result in splitting the uninitialized * extent into multiple extents (up to three - one initialized and two * uninitialized). * There are three possibilities: * a> There is no split required: Entire extent should be initialized * b> Splits in two extents: Write is happening at either end of the extent * c> Splits in three extents: Somone is writing in middle of the extent * * Pre-conditions: * - The extent pointed to by 'path' is uninitialized. * - The extent pointed to by 'path' contains a superset * of the logical span [map->m_lblk, map->m_lblk + map->m_len). * * Post-conditions on success: * - the returned value is the number of blocks beyond map->l_lblk * that are allocated and initialized. * It is guaranteed to be >= map->m_len. */ static int ext4_ext_convert_to_initialized(handle_t *handle, struct inode *inode, struct ext4_map_blocks *map, struct ext4_ext_path *path) { struct ext4_sb_info *sbi; struct ext4_extent_header *eh; struct ext4_map_blocks split_map; struct ext4_extent zero_ex; struct ext4_extent *ex; ext4_lblk_t ee_block, eof_block; unsigned int ee_len, depth; int allocated, max_zeroout = 0; int err = 0; int split_flag = 0; ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical" "block %llu, max_blocks %u\n", inode->i_ino, (unsigned long long)map->m_lblk, map->m_len); sbi = EXT4_SB(inode->i_sb); eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits; if (eof_block < map->m_lblk + map->m_len) eof_block = map->m_lblk + map->m_len; depth = ext_depth(inode); eh = path[depth].p_hdr; ex = path[depth].p_ext; ee_block = le32_to_cpu(ex->ee_block); ee_len = ext4_ext_get_actual_len(ex); allocated = ee_len - (map->m_lblk - ee_block); trace_ext4_ext_convert_to_initialized_enter(inode, map, ex); /* Pre-conditions */ BUG_ON(!ext4_ext_is_uninitialized(ex)); BUG_ON(!in_range(map->m_lblk, ee_block, ee_len)); /* * Attempt to transfer newly initialized blocks from the currently * uninitialized extent to its left neighbor. This is much cheaper * than an insertion followed by a merge as those involve costly * memmove() calls. This is the common case in steady state for * workloads doing fallocate(FALLOC_FL_KEEP_SIZE) followed by append * writes. * * Limitations of the current logic: * - L1: we only deal with writes at the start of the extent. * The approach could be extended to writes at the end * of the extent but this scenario was deemed less common. * - L2: we do not deal with writes covering the whole extent. * This would require removing the extent if the transfer * is possible. 
* - L3: we only attempt to merge with an extent stored in the * same extent tree node. */ if ((map->m_lblk == ee_block) && /*L1*/ (map->m_len < ee_len) && /*L2*/ (ex > EXT_FIRST_EXTENT(eh))) { /*L3*/ struct ext4_extent *prev_ex; ext4_lblk_t prev_lblk; ext4_fsblk_t prev_pblk, ee_pblk; unsigned int prev_len, write_len; prev_ex = ex - 1; prev_lblk = le32_to_cpu(prev_ex->ee_block); prev_len = ext4_ext_get_actual_len(prev_ex); prev_pblk = ext4_ext_pblock(prev_ex); ee_pblk = ext4_ext_pblock(ex); write_len = map->m_len; /* * A transfer of blocks from 'ex' to 'prev_ex' is allowed * upon those conditions: * - C1: prev_ex is initialized, * - C2: prev_ex is logically abutting ex, * - C3: prev_ex is physically abutting ex, * - C4: prev_ex can receive the additional blocks without * overflowing the (initialized) length limit. */ if ((!ext4_ext_is_uninitialized(prev_ex)) && /*C1*/ ((prev_lblk + prev_len) == ee_block) && /*C2*/ ((prev_pblk + prev_len) == ee_pblk) && /*C3*/ (prev_len < (EXT_INIT_MAX_LEN - write_len))) { /*C4*/ err = ext4_ext_get_access(handle, inode, path + depth); if (err) goto out; trace_ext4_ext_convert_to_initialized_fastpath(inode, map, ex, prev_ex); /* Shift the start of ex by 'write_len' blocks */ ex->ee_block = cpu_to_le32(ee_block + write_len); ext4_ext_store_pblock(ex, ee_pblk + write_len); ex->ee_len = cpu_to_le16(ee_len - write_len); ext4_ext_mark_uninitialized(ex); /* Restore the flag */ /* Extend prev_ex by 'write_len' blocks */ prev_ex->ee_len = cpu_to_le16(prev_len + write_len); /* Mark the block containing both extents as dirty */ ext4_ext_dirty(handle, inode, path + depth); /* Update path to point to the right extent */ path[depth].p_ext = prev_ex; /* Result: number of initialized blocks past m_lblk */ allocated = write_len; goto out; } } WARN_ON(map->m_lblk < ee_block); /* * It is safe to convert extent to initialized via explicit * zeroout only if extent is fully insde i_size or new_size. */ split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0; if (EXT4_EXT_MAY_ZEROOUT & split_flag) max_zeroout = sbi->s_extent_max_zeroout_kb >> inode->i_sb->s_blocksize_bits; /* If extent is less than s_max_zeroout_kb, zeroout directly */ if (max_zeroout && (ee_len <= max_zeroout)) { err = ext4_ext_zeroout(inode, ex); if (err) goto out; err = ext4_ext_get_access(handle, inode, path + depth); if (err) goto out; ext4_ext_mark_initialized(ex); ext4_ext_try_to_merge(handle, inode, path, ex); err = ext4_ext_dirty(handle, inode, path + path->p_depth); goto out; } /* * four cases: * 1. split the extent into three extents. * 2. split the extent into two extents, zeroout the first half. * 3. split the extent into two extents, zeroout the second half. * 4. split the extent into two extents with out zeroout. 
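 * In the zeroout cases, split_map below is widened beyond the caller's
 * request so that the zeroed range ends up in the initialized half and
 * fewer extents (and fewer tree modifications) are needed.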
*/ split_map.m_lblk = map->m_lblk; split_map.m_len = map->m_len; if (max_zeroout && (allocated > map->m_len)) { if (allocated <= max_zeroout) { /* case 3 */ zero_ex.ee_block = cpu_to_le32(map->m_lblk); zero_ex.ee_len = cpu_to_le16(allocated); ext4_ext_store_pblock(&zero_ex, ext4_ext_pblock(ex) + map->m_lblk - ee_block); err = ext4_ext_zeroout(inode, &zero_ex); if (err) goto out; split_map.m_lblk = map->m_lblk; split_map.m_len = allocated; } else if (map->m_lblk - ee_block + map->m_len < max_zeroout) { /* case 2 */ if (map->m_lblk != ee_block) { zero_ex.ee_block = ex->ee_block; zero_ex.ee_len = cpu_to_le16(map->m_lblk - ee_block); ext4_ext_store_pblock(&zero_ex, ext4_ext_pblock(ex)); err = ext4_ext_zeroout(inode, &zero_ex); if (err) goto out; } split_map.m_lblk = ee_block; split_map.m_len = map->m_lblk - ee_block + map->m_len; allocated = map->m_len; } } allocated = ext4_split_extent(handle, inode, path, &split_map, split_flag, 0); if (allocated < 0) err = allocated; out: return err ? err : allocated; } /* * This function is called by ext4_ext_map_blocks() from * ext4_get_blocks_dio_write() when DIO to write * to an uninitialized extent. * * Writing to an uninitialized extent may result in splitting the uninitialized * extent into multiple initialized/uninitialized extents (up to three) * There are three possibilities: * a> There is no split required: Entire extent should be uninitialized * b> Splits in two extents: Write is happening at either end of the extent * c> Splits in three extents: Somone is writing in middle of the extent * * One of more index blocks maybe needed if the extent tree grow after * the uninitialized extent split. To prevent ENOSPC occur at the IO * complete, we need to split the uninitialized extent before DIO submit * the IO. The uninitialized extent called at this time will be split * into three uninitialized extent(at most). After IO complete, the part * being filled will be convert to initialized by the end_io callback function * via ext4_convert_unwritten_extents(). * * Returns the size of uninitialized extent to be written on success. */ static int ext4_split_unwritten_extents(handle_t *handle, struct inode *inode, struct ext4_map_blocks *map, struct ext4_ext_path *path, int flags) { ext4_lblk_t eof_block; ext4_lblk_t ee_block; struct ext4_extent *ex; unsigned int ee_len; int split_flag = 0, depth; ext_debug("ext4_split_unwritten_extents: inode %lu, logical" "block %llu, max_blocks %u\n", inode->i_ino, (unsigned long long)map->m_lblk, map->m_len); eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits; if (eof_block < map->m_lblk + map->m_len) eof_block = map->m_lblk + map->m_len; /* * It is safe to convert extent to initialized via explicit * zeroout only if extent is fully insde i_size or new_size. */ depth = ext_depth(inode); ex = path[depth].p_ext; ee_block = le32_to_cpu(ex->ee_block); ee_len = ext4_ext_get_actual_len(ex); split_flag |= ee_block + ee_len <= eof_block ? 
EXT4_EXT_MAY_ZEROOUT : 0; split_flag |= EXT4_EXT_MARK_UNINIT2; flags |= EXT4_GET_BLOCKS_PRE_IO; return ext4_split_extent(handle, inode, path, map, split_flag, flags); } static int ext4_convert_unwritten_extents_endio(handle_t *handle, struct inode *inode, struct ext4_ext_path *path) { struct ext4_extent *ex; int depth; int err = 0; depth = ext_depth(inode); ex = path[depth].p_ext; ext_debug("ext4_convert_unwritten_extents_endio: inode %lu, logical" "block %llu, max_blocks %u\n", inode->i_ino, (unsigned long long)le32_to_cpu(ex->ee_block), ext4_ext_get_actual_len(ex)); err = ext4_ext_get_access(handle, inode, path + depth); if (err) goto out; /* first mark the extent as initialized */ ext4_ext_mark_initialized(ex); /* note: ext4_ext_correct_indexes() isn't needed here because * borders are not changed */ ext4_ext_try_to_merge(handle, inode, path, ex); /* Mark modified extent as dirty */ err = ext4_ext_dirty(handle, inode, path + path->p_depth); out: ext4_ext_show_leaf(inode, path); return err; } static void unmap_underlying_metadata_blocks(struct block_device *bdev, sector_t block, int count) { int i; for (i = 0; i < count; i++) unmap_underlying_metadata(bdev, block + i); } /* * Handle EOFBLOCKS_FL flag, clearing it if necessary */ static int check_eofblocks_fl(handle_t *handle, struct inode *inode, ext4_lblk_t lblk, struct ext4_ext_path *path, unsigned int len) { int i, depth; struct ext4_extent_header *eh; struct ext4_extent *last_ex; if (!ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS)) return 0; depth = ext_depth(inode); eh = path[depth].p_hdr; /* * We're going to remove EOFBLOCKS_FL entirely in future so we * do not care for this case anymore. Simply remove the flag * if there are no extents. */ if (unlikely(!eh->eh_entries)) goto out; last_ex = EXT_LAST_EXTENT(eh); /* * We should clear the EOFBLOCKS_FL flag if we are writing the * last block in the last extent in the file. We test this by * first checking to see if the caller to * ext4_ext_get_blocks() was interested in the last block (or * a block beyond the last block) in the current extent. If * this turns out to be false, we can bail out from this * function immediately. */ if (lblk + len < le32_to_cpu(last_ex->ee_block) + ext4_ext_get_actual_len(last_ex)) return 0; /* * If the caller does appear to be planning to write at or * beyond the end of the current extent, we then test to see * if the current extent is the last extent in the file, by * checking to make sure it was reached via the rightmost node * at each level of the tree. */ for (i = depth-1; i >= 0; i--) if (path[i].p_idx != EXT_LAST_INDEX(path[i].p_hdr)) return 0; out: ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS); return ext4_mark_inode_dirty(handle, inode); } /** * ext4_find_delalloc_range: find delayed allocated block in the given range. * * Goes through the buffer heads in the range [lblk_start, lblk_end] and returns * whether there are any buffers marked for delayed allocation. It returns '1' * on the first delalloc'ed buffer head found. If no buffer head in the given * range is marked for delalloc, it returns 0. * lblk_start should always be <= lblk_end. * search_hint_reverse is to indicate that searching in reverse from lblk_end to * lblk_start might be more efficient (i.e., we will likely hit the delalloc'ed * block sooner). This is useful when blocks are truncated sequentially from * lblk_start towards lblk_end. 
*/ static int ext4_find_delalloc_range(struct inode *inode, ext4_lblk_t lblk_start, ext4_lblk_t lblk_end, int search_hint_reverse) { struct address_space *mapping = inode->i_mapping; struct buffer_head *head, *bh = NULL; struct page *page; ext4_lblk_t i, pg_lblk; pgoff_t index; if (!test_opt(inode->i_sb, DELALLOC)) return 0; /* reverse search wont work if fs block size is less than page size */ if (inode->i_blkbits < PAGE_CACHE_SHIFT) search_hint_reverse = 0; if (search_hint_reverse) i = lblk_end; else i = lblk_start; index = i >> (PAGE_CACHE_SHIFT - inode->i_blkbits); while ((i >= lblk_start) && (i <= lblk_end)) { page = find_get_page(mapping, index); if (!page) goto nextpage; if (!page_has_buffers(page)) goto nextpage; head = page_buffers(page); if (!head) goto nextpage; bh = head; pg_lblk = index << (PAGE_CACHE_SHIFT - inode->i_blkbits); do { if (unlikely(pg_lblk < lblk_start)) { /* * This is possible when fs block size is less * than page size and our cluster starts/ends in * middle of the page. So we need to skip the * initial few blocks till we reach the 'lblk' */ pg_lblk++; continue; } /* Check if the buffer is delayed allocated and that it * is not yet mapped. (when da-buffers are mapped during * their writeout, their da_mapped bit is set.) */ if (buffer_delay(bh) && !buffer_da_mapped(bh)) { page_cache_release(page); trace_ext4_find_delalloc_range(inode, lblk_start, lblk_end, search_hint_reverse, 1, i); return 1; } if (search_hint_reverse) i--; else i++; } while ((i >= lblk_start) && (i <= lblk_end) && ((bh = bh->b_this_page) != head)); nextpage: if (page) page_cache_release(page); /* * Move to next page. 'i' will be the first lblk in the next * page. */ if (search_hint_reverse) index--; else index++; i = index << (PAGE_CACHE_SHIFT - inode->i_blkbits); } trace_ext4_find_delalloc_range(inode, lblk_start, lblk_end, search_hint_reverse, 0, 0); return 0; } int ext4_find_delalloc_cluster(struct inode *inode, ext4_lblk_t lblk, int search_hint_reverse) { struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); ext4_lblk_t lblk_start, lblk_end; lblk_start = lblk & (~(sbi->s_cluster_ratio - 1)); lblk_end = lblk_start + sbi->s_cluster_ratio - 1; return ext4_find_delalloc_range(inode, lblk_start, lblk_end, search_hint_reverse); } /** * Determines how many complete clusters (out of those specified by the 'map') * are under delalloc and were reserved quota for. * This function is called when we are writing out the blocks that were * originally written with their allocation delayed, but then the space was * allocated using fallocate() before the delayed allocation could be resolved. * The cases to look for are: * ('=' indicated delayed allocated blocks * '-' indicates non-delayed allocated blocks) * (a) partial clusters towards beginning and/or end outside of allocated range * are not delalloc'ed. * Ex: * |----c---=|====c====|====c====|===-c----| * |++++++ allocated ++++++| * ==> 4 complete clusters in above example * * (b) partial cluster (outside of allocated range) towards either end is * marked for delayed allocation. In this case, we will exclude that * cluster. * Ex: * |----====c========|========c========| * |++++++ allocated ++++++| * ==> 1 complete clusters in above example * * Ex: * |================c================| * |++++++ allocated ++++++| * ==> 0 complete clusters in above example * * The ext4_da_update_reserve_space will be called only if we * determine here that there were some "entire" clusters that span * this 'allocated' range. 
* In the non-bigalloc case, this function will just end up returning num_blks * without ever calling ext4_find_delalloc_range. */ static unsigned int get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start, unsigned int num_blks) { struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); ext4_lblk_t alloc_cluster_start, alloc_cluster_end; ext4_lblk_t lblk_from, lblk_to, c_offset; unsigned int allocated_clusters = 0; alloc_cluster_start = EXT4_B2C(sbi, lblk_start); alloc_cluster_end = EXT4_B2C(sbi, lblk_start + num_blks - 1); /* max possible clusters for this allocation */ allocated_clusters = alloc_cluster_end - alloc_cluster_start + 1; trace_ext4_get_reserved_cluster_alloc(inode, lblk_start, num_blks); /* Check towards left side */ c_offset = lblk_start & (sbi->s_cluster_ratio - 1); if (c_offset) { lblk_from = lblk_start & (~(sbi->s_cluster_ratio - 1)); lblk_to = lblk_from + c_offset - 1; if (ext4_find_delalloc_range(inode, lblk_from, lblk_to, 0)) allocated_clusters--; } /* Now check towards right. */ c_offset = (lblk_start + num_blks) & (sbi->s_cluster_ratio - 1); if (allocated_clusters && c_offset) { lblk_from = lblk_start + num_blks; lblk_to = lblk_from + (sbi->s_cluster_ratio - c_offset) - 1; if (ext4_find_delalloc_range(inode, lblk_from, lblk_to, 0)) allocated_clusters--; } return allocated_clusters; } static int ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode, struct ext4_map_blocks *map, struct ext4_ext_path *path, int flags, unsigned int allocated, ext4_fsblk_t newblock) { int ret = 0; int err = 0; ext4_io_end_t *io = ext4_inode_aio(inode); ext_debug("ext4_ext_handle_uninitialized_extents: inode %lu, logical " "block %llu, max_blocks %u, flags %x, allocated %u\n", inode->i_ino, (unsigned long long)map->m_lblk, map->m_len, flags, allocated); ext4_ext_show_leaf(inode, path); trace_ext4_ext_handle_uninitialized_extents(inode, map, allocated, newblock); /* get_block() before submit the IO, split the extent */ if ((flags & EXT4_GET_BLOCKS_PRE_IO)) { ret = ext4_split_unwritten_extents(handle, inode, map, path, flags); if (ret <= 0) goto out; /* * Flag the inode(non aio case) or end_io struct (aio case) * that this IO needs to conversion to written when IO is * completed */ if (io) ext4_set_io_unwritten_flag(inode, io); else ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN); if (ext4_should_dioread_nolock(inode)) map->m_flags |= EXT4_MAP_UNINIT; goto out; } /* IO end_io complete, convert the filled extent to written */ if ((flags & EXT4_GET_BLOCKS_CONVERT)) { ret = ext4_convert_unwritten_extents_endio(handle, inode, path); if (ret >= 0) { ext4_update_inode_fsync_trans(handle, inode, 1); err = check_eofblocks_fl(handle, inode, map->m_lblk, path, map->m_len); } else err = ret; goto out2; } /* buffered IO case */ /* * repeat fallocate creation request * we already have an unwritten extent */ if (flags & EXT4_GET_BLOCKS_UNINIT_EXT) goto map_out; /* buffered READ or buffered write_begin() lookup */ if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) { /* * We have blocks reserved already. We * return allocated blocks so that delalloc * won't do block reservation for us. But * the buffer head will be unmapped so that * a read from the block returns 0s. 
*/ map->m_flags |= EXT4_MAP_UNWRITTEN; goto out1; } /* buffered write, writepage time, convert*/ ret = ext4_ext_convert_to_initialized(handle, inode, map, path); if (ret >= 0) ext4_update_inode_fsync_trans(handle, inode, 1); out: if (ret <= 0) { err = ret; goto out2; } else allocated = ret; map->m_flags |= EXT4_MAP_NEW; /* * if we allocated more blocks than requested * we need to make sure we unmap the extra block * allocated. The actual needed block will get * unmapped later when we find the buffer_head marked * new. */ if (allocated > map->m_len) { unmap_underlying_metadata_blocks(inode->i_sb->s_bdev, newblock + map->m_len, allocated - map->m_len); allocated = map->m_len; } /* * If we have done fallocate with the offset that is already * delayed allocated, we would have block reservation * and quota reservation done in the delayed write path. * But fallocate would have already updated quota and block * count for this offset. So cancel these reservation */ if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) { unsigned int reserved_clusters; reserved_clusters = get_reserved_cluster_alloc(inode, map->m_lblk, map->m_len); if (reserved_clusters) ext4_da_update_reserve_space(inode, reserved_clusters, 0); } map_out: map->m_flags |= EXT4_MAP_MAPPED; if ((flags & EXT4_GET_BLOCKS_KEEP_SIZE) == 0) { err = check_eofblocks_fl(handle, inode, map->m_lblk, path, map->m_len); if (err < 0) goto out2; } out1: if (allocated > map->m_len) allocated = map->m_len; ext4_ext_show_leaf(inode, path); map->m_pblk = newblock; map->m_len = allocated; out2: if (path) { ext4_ext_drop_refs(path); kfree(path); } return err ? err : allocated; } /* * get_implied_cluster_alloc - check to see if the requested * allocation (in the map structure) overlaps with a cluster already * allocated in an extent. * @sb The filesystem superblock structure * @map The requested lblk->pblk mapping * @ex The extent structure which might contain an implied * cluster allocation * * This function is called by ext4_ext_map_blocks() after we failed to * find blocks that were already in the inode's extent tree. Hence, * we know that the beginning of the requested region cannot overlap * the extent from the inode's extent tree. There are three cases we * want to catch. The first is this case: * * |--- cluster # N--| * |--- extent ---| |---- requested region ---| * |==========| * * The second case that we need to test for is this one: * * |--------- cluster # N ----------------| * |--- requested region --| |------- extent ----| * |=======================| * * The third case is when the requested region lies between two extents * within the same cluster: * |------------- cluster # N-------------| * |----- ex -----| |---- ex_right ----| * |------ requested region ------| * |================| * * In each of the above cases, we need to set the map->m_pblk and * map->m_len so it corresponds to the return the extent labelled as * "|====|" from cluster #N, since it is already in use for data in * cluster EXT4_B2C(sbi, map->m_lblk). We will then return 1 to * signal to ext4_ext_map_blocks() that map->m_pblk should be treated * as a new "allocated" block region. Otherwise, we will return 0 and * ext4_ext_map_blocks() will then allocate one or more new clusters * by calling ext4_mb_new_blocks(). 
*/ static int get_implied_cluster_alloc(struct super_block *sb, struct ext4_map_blocks *map, struct ext4_extent *ex, struct ext4_ext_path *path) { struct ext4_sb_info *sbi = EXT4_SB(sb); ext4_lblk_t c_offset = map->m_lblk & (sbi->s_cluster_ratio-1); ext4_lblk_t ex_cluster_start, ex_cluster_end; ext4_lblk_t rr_cluster_start; ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block); ext4_fsblk_t ee_start = ext4_ext_pblock(ex); unsigned short ee_len = ext4_ext_get_actual_len(ex); /* The extent passed in that we are trying to match */ ex_cluster_start = EXT4_B2C(sbi, ee_block); ex_cluster_end = EXT4_B2C(sbi, ee_block + ee_len - 1); /* The requested region passed into ext4_map_blocks() */ rr_cluster_start = EXT4_B2C(sbi, map->m_lblk); if ((rr_cluster_start == ex_cluster_end) || (rr_cluster_start == ex_cluster_start)) { if (rr_cluster_start == ex_cluster_end) ee_start += ee_len - 1; map->m_pblk = (ee_start & ~(sbi->s_cluster_ratio - 1)) + c_offset; map->m_len = min(map->m_len, (unsigned) sbi->s_cluster_ratio - c_offset); /* * Check for and handle this case: * * |--------- cluster # N-------------| * |------- extent ----| * |--- requested region ---| * |===========| */ if (map->m_lblk < ee_block) map->m_len = min(map->m_len, ee_block - map->m_lblk); /* * Check for the case where there is already another allocated * block to the right of 'ex' but before the end of the cluster. * * |------------- cluster # N-------------| * |----- ex -----| |---- ex_right ----| * |------ requested region ------| * |================| */ if (map->m_lblk > ee_block) { ext4_lblk_t next = ext4_ext_next_allocated_block(path); map->m_len = min(map->m_len, next - map->m_lblk); } trace_ext4_get_implied_cluster_alloc_exit(sb, map, 1); return 1; } trace_ext4_get_implied_cluster_alloc_exit(sb, map, 0); return 0; } /* * Block allocation/map/preallocation routine for extents based files * * * Need to be called with * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system block * (ie, create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem) * * return > 0, number of of blocks already mapped/allocated * if create == 0 and these are pre-allocated blocks * buffer head is unmapped * otherwise blocks are mapped * * return = 0, if plain look up failed (blocks have not been allocated) * buffer head is unmapped * * return < 0, error case. 
*/ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, struct ext4_map_blocks *map, int flags) { struct ext4_ext_path *path = NULL; struct ext4_extent newex, *ex, *ex2; struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); ext4_fsblk_t newblock = 0; int free_on_err = 0, err = 0, depth, ret; unsigned int allocated = 0, offset = 0; unsigned int allocated_clusters = 0; struct ext4_allocation_request ar; ext4_io_end_t *io = ext4_inode_aio(inode); ext4_lblk_t cluster_offset; int set_unwritten = 0; ext_debug("blocks %u/%u requested for inode %lu\n", map->m_lblk, map->m_len, inode->i_ino); trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags); /* check in cache */ if (ext4_ext_in_cache(inode, map->m_lblk, &newex)) { if (!newex.ee_start_lo && !newex.ee_start_hi) { if ((sbi->s_cluster_ratio > 1) && ext4_find_delalloc_cluster(inode, map->m_lblk, 0)) map->m_flags |= EXT4_MAP_FROM_CLUSTER; if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) { /* * block isn't allocated yet and * user doesn't want to allocate it */ goto out2; } /* we should allocate requested block */ } else { /* block is already allocated */ if (sbi->s_cluster_ratio > 1) map->m_flags |= EXT4_MAP_FROM_CLUSTER; newblock = map->m_lblk - le32_to_cpu(newex.ee_block) + ext4_ext_pblock(&newex); /* number of remaining blocks in the extent */ allocated = ext4_ext_get_actual_len(&newex) - (map->m_lblk - le32_to_cpu(newex.ee_block)); goto out; } } /* find extent for this block */ path = ext4_ext_find_extent(inode, map->m_lblk, NULL); if (IS_ERR(path)) { err = PTR_ERR(path); path = NULL; goto out2; } depth = ext_depth(inode); /* * consistent leaf must not be empty; * this situation is possible, though, _during_ tree modification; * this is why assert can't be put in ext4_ext_find_extent() */ if (unlikely(path[depth].p_ext == NULL && depth != 0)) { EXT4_ERROR_INODE(inode, "bad extent address " "lblock: %lu, depth: %d pblock %lld", (unsigned long) map->m_lblk, depth, path[depth].p_block); err = -EIO; goto out2; } ex = path[depth].p_ext; if (ex) { ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block); ext4_fsblk_t ee_start = ext4_ext_pblock(ex); unsigned short ee_len; /* * Uninitialized extents are treated as holes, except that * we split out initialized portions during a write. */ ee_len = ext4_ext_get_actual_len(ex); trace_ext4_ext_show_extent(inode, ee_block, ee_start, ee_len); /* if found extent covers block, simply return it */ if (in_range(map->m_lblk, ee_block, ee_len)) { newblock = map->m_lblk - ee_block + ee_start; /* number of remaining blocks in the extent */ allocated = ee_len - (map->m_lblk - ee_block); ext_debug("%u fit into %u:%d -> %llu\n", map->m_lblk, ee_block, ee_len, newblock); /* * Do not put uninitialized extent * in the cache */ if (!ext4_ext_is_uninitialized(ex)) { ext4_ext_put_in_cache(inode, ee_block, ee_len, ee_start); goto out; } ret = ext4_ext_handle_uninitialized_extents( handle, inode, map, path, flags, allocated, newblock); return ret; } } if ((sbi->s_cluster_ratio > 1) && ext4_find_delalloc_cluster(inode, map->m_lblk, 0)) map->m_flags |= EXT4_MAP_FROM_CLUSTER; /* * requested block isn't allocated yet; * we couldn't try to create block if create flag is zero */ if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) { /* * put just found gap into cache to speed up * subsequent requests */ ext4_ext_put_gap_in_cache(inode, path, map->m_lblk); goto out2; } /* * Okay, we need to do block allocation. 
*/ map->m_flags &= ~EXT4_MAP_FROM_CLUSTER; newex.ee_block = cpu_to_le32(map->m_lblk); cluster_offset = map->m_lblk & (sbi->s_cluster_ratio-1); /* * If we are doing bigalloc, check to see if the extent returned * by ext4_ext_find_extent() implies a cluster we can use. */ if (cluster_offset && ex && get_implied_cluster_alloc(inode->i_sb, map, ex, path)) { ar.len = allocated = map->m_len; newblock = map->m_pblk; map->m_flags |= EXT4_MAP_FROM_CLUSTER; goto got_allocated_blocks; } /* find neighbour allocated blocks */ ar.lleft = map->m_lblk; err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft); if (err) goto out2; ar.lright = map->m_lblk; ex2 = NULL; err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright, &ex2); if (err) goto out2; /* Check if the extent after searching to the right implies a * cluster we can use. */ if ((sbi->s_cluster_ratio > 1) && ex2 && get_implied_cluster_alloc(inode->i_sb, map, ex2, path)) { ar.len = allocated = map->m_len; newblock = map->m_pblk; map->m_flags |= EXT4_MAP_FROM_CLUSTER; goto got_allocated_blocks; } /* * See if request is beyond maximum number of blocks we can have in * a single extent. For an initialized extent this limit is * EXT_INIT_MAX_LEN and for an uninitialized extent this limit is * EXT_UNINIT_MAX_LEN. */ if (map->m_len > EXT_INIT_MAX_LEN && !(flags & EXT4_GET_BLOCKS_UNINIT_EXT)) map->m_len = EXT_INIT_MAX_LEN; else if (map->m_len > EXT_UNINIT_MAX_LEN && (flags & EXT4_GET_BLOCKS_UNINIT_EXT)) map->m_len = EXT_UNINIT_MAX_LEN; /* Check if we can really insert (m_lblk)::(m_lblk + m_len) extent */ newex.ee_len = cpu_to_le16(map->m_len); err = ext4_ext_check_overlap(sbi, inode, &newex, path); if (err) allocated = ext4_ext_get_actual_len(&newex); else allocated = map->m_len; /* allocate new block */ ar.inode = inode; ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk); ar.logical = map->m_lblk; /* * We calculate the offset from the beginning of the cluster * for the logical block number, since when we allocate a * physical cluster, the physical block should start at the * same offset from the beginning of the cluster. This is * needed so that future calls to get_implied_cluster_alloc() * work correctly. */ offset = map->m_lblk & (sbi->s_cluster_ratio - 1); ar.len = EXT4_NUM_B2C(sbi, offset+allocated); ar.goal -= offset; ar.logical -= offset; if (S_ISREG(inode->i_mode)) ar.flags = EXT4_MB_HINT_DATA; else /* disable in-core preallocation for non-regular files */ ar.flags = 0; if (flags & EXT4_GET_BLOCKS_NO_NORMALIZE) ar.flags |= EXT4_MB_HINT_NOPREALLOC; newblock = ext4_mb_new_blocks(handle, &ar, &err); if (!newblock) goto out2; ext_debug("allocate new block: goal %llu, found %llu/%u\n", ar.goal, newblock, allocated); free_on_err = 1; allocated_clusters = ar.len; ar.len = EXT4_C2B(sbi, ar.len) - offset; if (ar.len > allocated) ar.len = allocated; got_allocated_blocks: /* try to insert new extent into found leaf and return */ ext4_ext_store_pblock(&newex, newblock + offset); newex.ee_len = cpu_to_le16(ar.len); /* Mark uninitialized */ if (flags & EXT4_GET_BLOCKS_UNINIT_EXT){ ext4_ext_mark_uninitialized(&newex); /* * io_end structure was created for every IO write to an * uninitialized extent. To avoid unnecessary conversion, * here we flag the IO that really needs the conversion. * For non asycn direct IO case, flag the inode state * that we need to perform conversion when IO is done. 
*/ if ((flags & EXT4_GET_BLOCKS_PRE_IO)) set_unwritten = 1; if (ext4_should_dioread_nolock(inode)) map->m_flags |= EXT4_MAP_UNINIT; } err = 0; if ((flags & EXT4_GET_BLOCKS_KEEP_SIZE) == 0) err = check_eofblocks_fl(handle, inode, map->m_lblk, path, ar.len); if (!err) err = ext4_ext_insert_extent(handle, inode, path, &newex, flags); if (!err && set_unwritten) { if (io) ext4_set_io_unwritten_flag(inode, io); else ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN); } if (err && free_on_err) { int fb_flags = flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE ? EXT4_FREE_BLOCKS_NO_QUOT_UPDATE : 0; /* free data blocks we just allocated */ /* not a good idea to call discard here directly, * but otherwise we'd need to call it every free() */ ext4_discard_preallocations(inode); ext4_free_blocks(handle, inode, NULL, ext4_ext_pblock(&newex), ext4_ext_get_actual_len(&newex), fb_flags); goto out2; } /* previous routine could use block we allocated */ newblock = ext4_ext_pblock(&newex); allocated = ext4_ext_get_actual_len(&newex); if (allocated > map->m_len) allocated = map->m_len; map->m_flags |= EXT4_MAP_NEW; /* * Update reserved blocks/metadata blocks after successful * block allocation which had been deferred till now. */ if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) { unsigned int reserved_clusters; /* * Check how many clusters we had reserved this allocated range */ reserved_clusters = get_reserved_cluster_alloc(inode, map->m_lblk, allocated); if (map->m_flags & EXT4_MAP_FROM_CLUSTER) { if (reserved_clusters) { /* * We have clusters reserved for this range. * But since we are not doing actual allocation * and are simply using blocks from previously * allocated cluster, we should release the * reservation and not claim quota. */ ext4_da_update_reserve_space(inode, reserved_clusters, 0); } } else { BUG_ON(allocated_clusters < reserved_clusters); /* We will claim quota for all newly allocated blocks.*/ ext4_da_update_reserve_space(inode, allocated_clusters, 1); if (reserved_clusters < allocated_clusters) { struct ext4_inode_info *ei = EXT4_I(inode); int reservation = allocated_clusters - reserved_clusters; /* * It seems we claimed few clusters outside of * the range of this allocation. We should give * it back to the reservation pool. This can * happen in the following case: * * * Suppose s_cluster_ratio is 4 (i.e., each * cluster has 4 blocks. Thus, the clusters * are [0-3],[4-7],[8-11]... * * First comes delayed allocation write for * logical blocks 10 & 11. Since there were no * previous delayed allocated blocks in the * range [8-11], we would reserve 1 cluster * for this write. * * Next comes write for logical blocks 3 to 8. * In this case, we will reserve 2 clusters * (for [0-3] and [4-7]; and not for [8-11] as * that range has a delayed allocated blocks. * Thus total reserved clusters now becomes 3. * * Now, during the delayed allocation writeout * time, we will first write blocks [3-8] and * allocate 3 clusters for writing these * blocks. Also, we would claim all these * three clusters above. * * Now when we come here to writeout the * blocks [10-11], we would expect to claim * the reservation of 1 cluster we had made * (and we would claim it since there are no * more delayed allocated blocks in the range * [8-11]. But our reserved cluster count had * already gone to 0. 
* * Thus, at the step 4 above when we determine * that there are still some unwritten delayed * allocated blocks outside of our current * block range, we should increment the * reserved clusters count so that when the * remaining blocks finally gets written, we * could claim them. */ dquot_reserve_block(inode, EXT4_C2B(sbi, reservation)); spin_lock(&ei->i_block_reservation_lock); ei->i_reserved_data_blocks += reservation; spin_unlock(&ei->i_block_reservation_lock); } } } /* * Cache the extent and update transaction to commit on fdatasync only * when it is _not_ an uninitialized extent. */ if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0) { ext4_ext_put_in_cache(inode, map->m_lblk, allocated, newblock); ext4_update_inode_fsync_trans(handle, inode, 1); } else ext4_update_inode_fsync_trans(handle, inode, 0); out: if (allocated > map->m_len) allocated = map->m_len; ext4_ext_show_leaf(inode, path); map->m_flags |= EXT4_MAP_MAPPED; map->m_pblk = newblock; map->m_len = allocated; out2: if (path) { ext4_ext_drop_refs(path); kfree(path); } trace_ext4_ext_map_blocks_exit(inode, map->m_lblk, newblock, map->m_len, err ? err : allocated); return err ? err : allocated; } void ext4_ext_truncate(struct inode *inode) { struct address_space *mapping = inode->i_mapping; struct super_block *sb = inode->i_sb; ext4_lblk_t last_block; handle_t *handle; loff_t page_len; int err = 0; /* * finish any pending end_io work so we won't run the risk of * converting any truncated blocks to initialized later */ ext4_flush_unwritten_io(inode); /* * probably first extent we're gonna free will be last in block */ err = ext4_writepage_trans_blocks(inode); handle = ext4_journal_start(inode, err); if (IS_ERR(handle)) return; if (inode->i_size % PAGE_CACHE_SIZE != 0) { page_len = PAGE_CACHE_SIZE - (inode->i_size & (PAGE_CACHE_SIZE - 1)); err = ext4_discard_partial_page_buffers(handle, mapping, inode->i_size, page_len, 0); if (err) goto out_stop; } if (ext4_orphan_add(handle, inode)) goto out_stop; down_write(&EXT4_I(inode)->i_data_sem); ext4_ext_invalidate_cache(inode); ext4_discard_preallocations(inode); /* * TODO: optimization is possible here. * Probably we need not scan at all, * because page truncation is enough. */ /* we have to know where to truncate from in crash case */ EXT4_I(inode)->i_disksize = inode->i_size; ext4_mark_inode_dirty(handle, inode); last_block = (inode->i_size + sb->s_blocksize - 1) >> EXT4_BLOCK_SIZE_BITS(sb); err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1); /* In a multi-transaction truncate, we only make the final * transaction synchronous. */ if (IS_SYNC(inode)) ext4_handle_sync(handle); up_write(&EXT4_I(inode)->i_data_sem); out_stop: /* * If this was a simple ftruncate() and the file will remain alive, * then we need to clear up the orphan record which we created above. * However, if this was a real unlink then we were called by * ext4_delete_inode(), and we allow that function to clean up the * orphan info for us. */ if (inode->i_nlink) ext4_orphan_del(handle, inode); inode->i_mtime = inode->i_ctime = ext4_current_time(inode); ext4_mark_inode_dirty(handle, inode); ext4_journal_stop(handle); } static void ext4_falloc_update_inode(struct inode *inode, int mode, loff_t new_size, int update_ctime) { struct timespec now; if (update_ctime) { now = current_fs_time(inode->i_sb); if (!timespec_equal(&inode->i_ctime, &now)) inode->i_ctime = now; } /* * Update only when preallocation was requested beyond * the file size. 
*/ if (!(mode & FALLOC_FL_KEEP_SIZE)) { if (new_size > i_size_read(inode)) i_size_write(inode, new_size); if (new_size > EXT4_I(inode)->i_disksize) ext4_update_i_disksize(inode, new_size); } else { /* * Mark that we allocate beyond EOF so the subsequent truncate * can proceed even if the new size is the same as i_size. */ if (new_size > i_size_read(inode)) ext4_set_inode_flag(inode, EXT4_INODE_EOFBLOCKS); } } /* * preallocate space for a file. This implements ext4's fallocate file * operation, which gets called from sys_fallocate system call. * For block-mapped files, posix_fallocate should fall back to the method * of writing zeroes to the required new blocks (the same behavior which is * expected for file systems which do not support fallocate() system call). */ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len) { struct inode *inode = file->f_path.dentry->d_inode; handle_t *handle; loff_t new_size; unsigned int max_blocks; int ret = 0; int ret2 = 0; int retries = 0; int flags; struct ext4_map_blocks map; unsigned int credits, blkbits = inode->i_blkbits; /* * currently supporting (pre)allocate mode for extent-based * files _only_ */ if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) return -EOPNOTSUPP; /* Return error if mode is not supported */ if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE)) return -EOPNOTSUPP; if (mode & FALLOC_FL_PUNCH_HOLE) return ext4_punch_hole(file, offset, len); trace_ext4_fallocate_enter(inode, offset, len, mode); map.m_lblk = offset >> blkbits; /* * We can't just convert len to max_blocks because * If blocksize = 4096 offset = 3072 and len = 2048 */ max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits) - map.m_lblk; /* * credits to insert 1 extent into extent tree */ credits = ext4_chunk_trans_blocks(inode, max_blocks); mutex_lock(&inode->i_mutex); ret = inode_newsize_ok(inode, (len + offset)); if (ret) { mutex_unlock(&inode->i_mutex); trace_ext4_fallocate_exit(inode, offset, max_blocks, ret); return ret; } flags = EXT4_GET_BLOCKS_CREATE_UNINIT_EXT; if (mode & FALLOC_FL_KEEP_SIZE) flags |= EXT4_GET_BLOCKS_KEEP_SIZE; /* * Don't normalize the request if it can fit in one extent so * that it doesn't get unnecessarily split into multiple * extents. 
*/ if (len <= EXT_UNINIT_MAX_LEN << blkbits) flags |= EXT4_GET_BLOCKS_NO_NORMALIZE; /* Prevent race condition between unwritten */ ext4_flush_unwritten_io(inode); retry: while (ret >= 0 && ret < max_blocks) { map.m_lblk = map.m_lblk + ret; map.m_len = max_blocks = max_blocks - ret; handle = ext4_journal_start(inode, credits); if (IS_ERR(handle)) { ret = PTR_ERR(handle); break; } ret = ext4_map_blocks(handle, inode, &map, flags); if (ret <= 0) { #ifdef EXT4FS_DEBUG WARN_ON(ret <= 0); printk(KERN_ERR "%s: ext4_ext_map_blocks " "returned error inode#%lu, block=%u, " "max_blocks=%u", __func__, inode->i_ino, map.m_lblk, max_blocks); #endif ext4_mark_inode_dirty(handle, inode); ret2 = ext4_journal_stop(handle); break; } if ((map.m_lblk + ret) >= (EXT4_BLOCK_ALIGN(offset + len, blkbits) >> blkbits)) new_size = offset + len; else new_size = ((loff_t) map.m_lblk + ret) << blkbits; ext4_falloc_update_inode(inode, mode, new_size, (map.m_flags & EXT4_MAP_NEW)); ext4_mark_inode_dirty(handle, inode); if ((file->f_flags & O_SYNC) && ret >= max_blocks) ext4_handle_sync(handle); ret2 = ext4_journal_stop(handle); if (ret2) break; } if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) { ret = 0; goto retry; } mutex_unlock(&inode->i_mutex); trace_ext4_fallocate_exit(inode, offset, max_blocks, ret > 0 ? ret2 : ret); return ret > 0 ? ret2 : ret; } /* * This function convert a range of blocks to written extents * The caller of this function will pass the start offset and the size. * all unwritten extents within this range will be converted to * written extents. * * This function is called from the direct IO end io call back * function, to convert the fallocated extents after IO is completed. * Returns 0 on success. */ int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset, ssize_t len) { handle_t *handle; unsigned int max_blocks; int ret = 0; int ret2 = 0; struct ext4_map_blocks map; unsigned int credits, blkbits = inode->i_blkbits; map.m_lblk = offset >> blkbits; /* * We can't just convert len to max_blocks because * If blocksize = 4096 offset = 3072 and len = 2048 */ max_blocks = ((EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits) - map.m_lblk); /* * credits to insert 1 extent into extent tree */ credits = ext4_chunk_trans_blocks(inode, max_blocks); while (ret >= 0 && ret < max_blocks) { map.m_lblk += ret; map.m_len = (max_blocks -= ret); handle = ext4_journal_start(inode, credits); if (IS_ERR(handle)) { ret = PTR_ERR(handle); break; } ret = ext4_map_blocks(handle, inode, &map, EXT4_GET_BLOCKS_IO_CONVERT_EXT); if (ret <= 0) { WARN_ON(ret <= 0); ext4_msg(inode->i_sb, KERN_ERR, "%s:%d: inode #%lu: block %u: len %u: " "ext4_ext_map_blocks returned %d", __func__, __LINE__, inode->i_ino, map.m_lblk, map.m_len, ret); } ext4_mark_inode_dirty(handle, inode); ret2 = ext4_journal_stop(handle); if (ret <= 0 || ret2 ) break; } return ret > 0 ? ret2 : ret; } /* * Callback function called for each extent to gather FIEMAP information. */ static int ext4_ext_fiemap_cb(struct inode *inode, ext4_lblk_t next, struct ext4_ext_cache *newex, struct ext4_extent *ex, void *data) { __u64 logical; __u64 physical; __u64 length; __u32 flags = 0; int ret = 0; struct fiemap_extent_info *fieinfo = data; unsigned char blksize_bits; blksize_bits = inode->i_sb->s_blocksize_bits; logical = (__u64)newex->ec_block << blksize_bits; if (newex->ec_start == 0) { /* * No extent in extent-tree contains block @newex->ec_start, * then the block may stay in 1)a hole or 2)delayed-extent. 
* * Holes or delayed-extents are processed as follows. * 1. lookup dirty pages with specified range in pagecache. * If no page is got, then there is no delayed-extent and * return with EXT_CONTINUE. * 2. find the 1st mapped buffer, * 3. check if the mapped buffer is both in the request range * and a delayed buffer. If not, there is no delayed-extent, * then return. * 4. a delayed-extent is found, the extent will be collected. */ ext4_lblk_t end = 0; pgoff_t last_offset; pgoff_t offset; pgoff_t index; pgoff_t start_index = 0; struct page **pages = NULL; struct buffer_head *bh = NULL; struct buffer_head *head = NULL; unsigned int nr_pages = PAGE_SIZE / sizeof(struct page *); pages = kmalloc(PAGE_SIZE, GFP_KERNEL); if (pages == NULL) return -ENOMEM; offset = logical >> PAGE_SHIFT; repeat: last_offset = offset; head = NULL; ret = find_get_pages_tag(inode->i_mapping, &offset, PAGECACHE_TAG_DIRTY, nr_pages, pages); if (!(flags & FIEMAP_EXTENT_DELALLOC)) { /* First time, try to find a mapped buffer. */ if (ret == 0) { out: for (index = 0; index < ret; index++) page_cache_release(pages[index]); /* just a hole. */ kfree(pages); return EXT_CONTINUE; } index = 0; next_page: /* Try to find the 1st mapped buffer. */ end = ((__u64)pages[index]->index << PAGE_SHIFT) >> blksize_bits; if (!page_has_buffers(pages[index])) goto out; head = page_buffers(pages[index]); if (!head) goto out; index++; bh = head; do { if (end >= newex->ec_block + newex->ec_len) /* The buffer is out of * the request range. */ goto out; if (buffer_mapped(bh) && end >= newex->ec_block) { start_index = index - 1; /* get the 1st mapped buffer. */ goto found_mapped_buffer; } bh = bh->b_this_page; end++; } while (bh != head); /* No mapped buffer in the range found in this page, * We need to look up next page. */ if (index >= ret) { /* There is no page left, but we need to limit * newex->ec_len. */ newex->ec_len = end - newex->ec_block; goto out; } goto next_page; } else { /*Find contiguous delayed buffers. */ if (ret > 0 && pages[0]->index == last_offset) head = page_buffers(pages[0]); bh = head; index = 1; start_index = 0; } found_mapped_buffer: if (bh != NULL && buffer_delay(bh)) { /* 1st or contiguous delayed buffer found. */ if (!(flags & FIEMAP_EXTENT_DELALLOC)) { /* * 1st delayed buffer found, record * the start of extent. */ flags |= FIEMAP_EXTENT_DELALLOC; newex->ec_block = end; logical = (__u64)end << blksize_bits; } /* Find contiguous delayed buffers. */ do { if (!buffer_delay(bh)) goto found_delayed_extent; bh = bh->b_this_page; end++; } while (bh != head); for (; index < ret; index++) { if (!page_has_buffers(pages[index])) { bh = NULL; break; } head = page_buffers(pages[index]); if (!head) { bh = NULL; break; } if (pages[index]->index != pages[start_index]->index + index - start_index) { /* Blocks are not contiguous. */ bh = NULL; break; } bh = head; do { if (!buffer_delay(bh)) /* Delayed-extent ends. */ goto found_delayed_extent; bh = bh->b_this_page; end++; } while (bh != head); } } else if (!(flags & FIEMAP_EXTENT_DELALLOC)) /* a hole found. */ goto out; found_delayed_extent: newex->ec_len = min(end - newex->ec_block, (ext4_lblk_t)EXT_INIT_MAX_LEN); if (ret == nr_pages && bh != NULL && newex->ec_len < EXT_INIT_MAX_LEN && buffer_delay(bh)) { /* Have not collected an extent and continue. 
*/ for (index = 0; index < ret; index++) page_cache_release(pages[index]); goto repeat; } for (index = 0; index < ret; index++) page_cache_release(pages[index]); kfree(pages); } physical = (__u64)newex->ec_start << blksize_bits; length = (__u64)newex->ec_len << blksize_bits; if (ex && ext4_ext_is_uninitialized(ex)) flags |= FIEMAP_EXTENT_UNWRITTEN; if (next == EXT_MAX_BLOCKS) flags |= FIEMAP_EXTENT_LAST; ret = fiemap_fill_next_extent(fieinfo, logical, physical, length, flags); if (ret < 0) return ret; if (ret == 1) return EXT_BREAK; return EXT_CONTINUE; } /* fiemap flags we can handle specified here */ #define EXT4_FIEMAP_FLAGS (FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR) static int ext4_xattr_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo) { __u64 physical = 0; __u64 length; __u32 flags = FIEMAP_EXTENT_LAST; int blockbits = inode->i_sb->s_blocksize_bits; int error = 0; /* in-inode? */ if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) { struct ext4_iloc iloc; int offset; /* offset of xattr in inode */ error = ext4_get_inode_loc(inode, &iloc); if (error) return error; physical = iloc.bh->b_blocknr << blockbits; offset = EXT4_GOOD_OLD_INODE_SIZE + EXT4_I(inode)->i_extra_isize; physical += offset; length = EXT4_SB(inode->i_sb)->s_inode_size - offset; flags |= FIEMAP_EXTENT_DATA_INLINE; brelse(iloc.bh); } else { /* external block */ physical = EXT4_I(inode)->i_file_acl << blockbits; length = inode->i_sb->s_blocksize; } if (physical) error = fiemap_fill_next_extent(fieinfo, 0, physical, length, flags); return (error < 0 ? error : 0); } /* * ext4_ext_punch_hole * * Punches a hole of "length" bytes in a file starting * at byte "offset" * * @inode: The inode of the file to punch a hole in * @offset: The starting byte offset of the hole * @length: The length of the hole * * Returns the number of blocks removed or negative on err */ int ext4_ext_punch_hole(struct file *file, loff_t offset, loff_t length) { struct inode *inode = file->f_path.dentry->d_inode; struct super_block *sb = inode->i_sb; ext4_lblk_t first_block, stop_block; struct address_space *mapping = inode->i_mapping; handle_t *handle; loff_t first_page, last_page, page_len; loff_t first_page_offset, last_page_offset; int credits, err = 0; /* * Write out all dirty pages to avoid race conditions * Then release them. 
*/ if (mapping->nrpages && mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) { err = filemap_write_and_wait_range(mapping, offset, offset + length - 1); if (err) return err; } mutex_lock(&inode->i_mutex); /* It's not possible punch hole on append only file */ if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) { err = -EPERM; goto out_mutex; } if (IS_SWAPFILE(inode)) { err = -ETXTBSY; goto out_mutex; } /* No need to punch hole beyond i_size */ if (offset >= inode->i_size) goto out_mutex; /* * If the hole extends beyond i_size, set the hole * to end after the page that contains i_size */ if (offset + length > inode->i_size) { length = inode->i_size + PAGE_CACHE_SIZE - (inode->i_size & (PAGE_CACHE_SIZE - 1)) - offset; } first_page = (offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; last_page = (offset + length) >> PAGE_CACHE_SHIFT; first_page_offset = first_page << PAGE_CACHE_SHIFT; last_page_offset = last_page << PAGE_CACHE_SHIFT; /* Now release the pages */ if (last_page_offset > first_page_offset) { truncate_pagecache_range(inode, first_page_offset, last_page_offset - 1); } /* Wait all existing dio workers, newcomers will block on i_mutex */ ext4_inode_block_unlocked_dio(inode); err = ext4_flush_unwritten_io(inode); if (err) goto out_dio; inode_dio_wait(inode); credits = ext4_writepage_trans_blocks(inode); handle = ext4_journal_start(inode, credits); if (IS_ERR(handle)) { err = PTR_ERR(handle); goto out_dio; } /* * Now we need to zero out the non-page-aligned data in the * pages at the start and tail of the hole, and unmap the buffer * heads for the block aligned regions of the page that were * completely zeroed. */ if (first_page > last_page) { /* * If the file space being truncated is contained within a page * just zero out and unmap the middle of that page */ err = ext4_discard_partial_page_buffers(handle, mapping, offset, length, 0); if (err) goto out; } else { /* * zero out and unmap the partial page that contains * the start of the hole */ page_len = first_page_offset - offset; if (page_len > 0) { err = ext4_discard_partial_page_buffers(handle, mapping, offset, page_len, 0); if (err) goto out; } /* * zero out and unmap the partial page that contains * the end of the hole */ page_len = offset + length - last_page_offset; if (page_len > 0) { err = ext4_discard_partial_page_buffers(handle, mapping, last_page_offset, page_len, 0); if (err) goto out; } } /* * If i_size is contained in the last page, we need to * unmap and zero the partial page after i_size */ if (inode->i_size >> PAGE_CACHE_SHIFT == last_page && inode->i_size % PAGE_CACHE_SIZE != 0) { page_len = PAGE_CACHE_SIZE - (inode->i_size & (PAGE_CACHE_SIZE - 1)); if (page_len > 0) { err = ext4_discard_partial_page_buffers(handle, mapping, inode->i_size, page_len, 0); if (err) goto out; } } first_block = (offset + sb->s_blocksize - 1) >> EXT4_BLOCK_SIZE_BITS(sb); stop_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb); /* If there are no blocks to remove, return now */ if (first_block >= stop_block) goto out; down_write(&EXT4_I(inode)->i_data_sem); ext4_ext_invalidate_cache(inode); ext4_discard_preallocations(inode); err = ext4_ext_remove_space(inode, first_block, stop_block - 1); ext4_ext_invalidate_cache(inode); ext4_discard_preallocations(inode); if (IS_SYNC(inode)) ext4_handle_sync(handle); up_write(&EXT4_I(inode)->i_data_sem); out: inode->i_mtime = inode->i_ctime = ext4_current_time(inode); ext4_mark_inode_dirty(handle, inode); ext4_journal_stop(handle); out_dio: ext4_inode_resume_unlocked_dio(inode); out_mutex: 
mutex_unlock(&inode->i_mutex); return err; } int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, __u64 start, __u64 len) { ext4_lblk_t start_blk; int error = 0; /* fallback to generic here if not in extents fmt */ if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) return generic_block_fiemap(inode, fieinfo, start, len, ext4_get_block); if (fiemap_check_flags(fieinfo, EXT4_FIEMAP_FLAGS)) return -EBADR; if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) { error = ext4_xattr_fiemap(inode, fieinfo); } else { ext4_lblk_t len_blks; __u64 last_blk; start_blk = start >> inode->i_sb->s_blocksize_bits; last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits; if (last_blk >= EXT_MAX_BLOCKS) last_blk = EXT_MAX_BLOCKS-1; len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1; /* * Walk the extent tree gathering extent information. * ext4_ext_fiemap_cb will push extents back to user. */ error = ext4_ext_walk_space(inode, start_blk, len_blks, ext4_ext_fiemap_cb, fieinfo); } return error; }
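/*
 * Illustrative user-space sketch (not kernel code) of the bigalloc
 * cluster accounting performed by get_reserved_cluster_alloc() above.
 * EXT4_B2C() is re-derived locally as a right shift by log2 of the
 * cluster ratio, and the ext4_find_delalloc_range() probes at the
 * partial clusters on either end are deliberately left out, so this
 * only models the "max possible clusters" part of that function.
 */
#include <stdio.h>

typedef unsigned int lblk_t;

/* EXT4_B2C() equivalent: cluster_bits = log2(s_cluster_ratio) */
static lblk_t block_to_cluster(lblk_t block, unsigned int cluster_bits)
{
	return block >> cluster_bits;
}

/*
 * Number of clusters touched by the block range
 * [lblk_start, lblk_start + num_blks), mirroring the
 * alloc_cluster_start/alloc_cluster_end arithmetic above.
 */
static unsigned int covered_clusters(lblk_t lblk_start, unsigned int num_blks,
				     unsigned int cluster_bits)
{
	lblk_t first = block_to_cluster(lblk_start, cluster_bits);
	lblk_t last = block_to_cluster(lblk_start + num_blks - 1, cluster_bits);

	return last - first + 1;
}

int main(void)
{
	/* s_cluster_ratio = 4 (cluster_bits = 2), as in the worked example
	 * in the reserved-cluster comment of ext4_ext_map_blocks(): */

	/* write of blocks 10-11 stays inside cluster [8-11] -> 1 */
	printf("%u\n", covered_clusters(10, 2, 2));

	/* write of blocks 3-8 spans clusters [0-3], [4-7], [8-11] -> 3 */
	printf("%u\n", covered_clusters(3, 6, 2));
	return 0;
}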
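/*
 * Second illustrative user-space sketch (hypothetical helper names,
 * not kernel code): why ext4_fallocate() and
 * ext4_convert_unwritten_extents() above cannot derive max_blocks
 * from 'len' alone.  A byte range that straddles a block boundary
 * touches more blocks than len/blocksize suggests, so the end offset
 * is rounded up to a block boundary and the start block subtracted,
 * as in those functions; the EXT4_BLOCK_ALIGN() round-up is
 * re-derived here from the usual align-up definition.
 */
#include <stdio.h>

static unsigned long long block_align(unsigned long long size,
				      unsigned int blkbits)
{
	unsigned long long blksize = 1ULL << blkbits;

	/* round 'size' up to the next block boundary */
	return (size + blksize - 1) & ~(blksize - 1);
}

static unsigned int range_to_blocks(unsigned long long offset,
				    unsigned long long len,
				    unsigned int blkbits)
{
	unsigned long long first_blk = offset >> blkbits;
	unsigned long long end_blk = block_align(offset + len, blkbits) >> blkbits;

	return (unsigned int)(end_blk - first_blk);
}

int main(void)
{
	/* blocksize 4096 (blkbits 12), offset 3072, len 2048: the range
	 * crosses a block boundary and covers 2 blocks, although
	 * len / blocksize alone would suggest 0 whole blocks. */
	printf("%u\n", range_to_blocks(3072, 2048, 12));
	return 0;
}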
/* * IPv6 tunneling device * Linux INET6 implementation * * Authors: * Ville Nuorvala <vnuorval@tcs.hut.fi> * Yasuyuki Kozakai <kozakai@linux-ipv6.org> * * Based on: * linux/net/ipv6/sit.c and linux/net/ipv4/ipip.c * * RFC 2473 * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * */ #include <linux/module.h> #include <linux/capability.h> #include <linux/errno.h> #include <linux/types.h> #include <linux/sockios.h> #include <linux/icmp.h> #include <linux/if.h> #include <linux/in.h> #include <linux/ip.h> #include <linux/if_tunnel.h> #include <linux/net.h> #include <linux/in6.h> #include <linux/netdevice.h> #include <linux/if_arp.h> #include <linux/icmpv6.h> #include <linux/init.h> #include <linux/route.h> #include <linux/rtnetlink.h> #include <linux/netfilter_ipv6.h> #include <asm/uaccess.h> #include <asm/atomic.h> #include <net/icmp.h> #include <net/ip.h> #include <net/ipv6.h> #include <net/ip6_route.h> #include <net/addrconf.h> #include <net/ip6_tunnel.h> #include <net/xfrm.h> #include <net/dsfield.h> #include <net/inet_ecn.h> #include <net/net_namespace.h> #include <net/netns/generic.h> MODULE_AUTHOR("Ville Nuorvala"); MODULE_DESCRIPTION("IPv6 tunneling device"); MODULE_LICENSE("GPL"); #define IPV6_TLV_TEL_DST_SIZE 8 #ifdef IP6_TNL_DEBUG #define IP6_TNL_TRACE(x...) printk(KERN_DEBUG "%s:" x "\n", __func__) #else #define IP6_TNL_TRACE(x...) do {;} while(0) #endif #define IPV6_TCLASS_MASK (IPV6_FLOWINFO_MASK & ~IPV6_FLOWLABEL_MASK) #define IPV6_TCLASS_SHIFT 20 #define HASH_SIZE 32 #define HASH(addr) ((__force u32)((addr)->s6_addr32[0] ^ (addr)->s6_addr32[1] ^ \ (addr)->s6_addr32[2] ^ (addr)->s6_addr32[3]) & \ (HASH_SIZE - 1)) static void ip6_tnl_dev_init(struct net_device *dev); static void ip6_tnl_dev_setup(struct net_device *dev); static int ip6_tnl_net_id __read_mostly; struct ip6_tnl_net { /* the IPv6 tunnel fallback device */ struct net_device *fb_tnl_dev; /* lists for storing tunnels in use */ struct ip6_tnl *tnls_r_l[HASH_SIZE]; struct ip6_tnl *tnls_wc[1]; struct ip6_tnl **tnls[2]; }; /* * Locking : hash tables are protected by RCU and a spinlock */ static DEFINE_SPINLOCK(ip6_tnl_lock); static inline struct dst_entry *ip6_tnl_dst_check(struct ip6_tnl *t) { struct dst_entry *dst = t->dst_cache; if (dst && dst->obsolete && dst->ops->check(dst, t->dst_cookie) == NULL) { t->dst_cache = NULL; dst_release(dst); return NULL; } return dst; } static inline void ip6_tnl_dst_reset(struct ip6_tnl *t) { dst_release(t->dst_cache); t->dst_cache = NULL; } static inline void ip6_tnl_dst_store(struct ip6_tnl *t, struct dst_entry *dst) { struct rt6_info *rt = (struct rt6_info *) dst; t->dst_cookie = rt->rt6i_node ? 
rt->rt6i_node->fn_sernum : 0; dst_release(t->dst_cache); t->dst_cache = dst; } /** * ip6_tnl_lookup - fetch tunnel matching the end-point addresses * @remote: the address of the tunnel exit-point * @local: the address of the tunnel entry-point * * Return: * tunnel matching given end-points if found, * else fallback tunnel if its device is up, * else %NULL **/ #define for_each_ip6_tunnel_rcu(start) \ for (t = rcu_dereference(start); t; t = rcu_dereference(t->next)) static struct ip6_tnl * ip6_tnl_lookup(struct net *net, struct in6_addr *remote, struct in6_addr *local) { unsigned h0 = HASH(remote); unsigned h1 = HASH(local); struct ip6_tnl *t; struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[h0 ^ h1]) { if (ipv6_addr_equal(local, &t->parms.laddr) && ipv6_addr_equal(remote, &t->parms.raddr) && (t->dev->flags & IFF_UP)) return t; } t = rcu_dereference(ip6n->tnls_wc[0]); if (t && (t->dev->flags & IFF_UP)) return t; return NULL; } /** * ip6_tnl_bucket - get head of list matching given tunnel parameters * @p: parameters containing tunnel end-points * * Description: * ip6_tnl_bucket() returns the head of the list matching the * &struct in6_addr entries laddr and raddr in @p. * * Return: head of IPv6 tunnel list **/ static struct ip6_tnl ** ip6_tnl_bucket(struct ip6_tnl_net *ip6n, struct ip6_tnl_parm *p) { struct in6_addr *remote = &p->raddr; struct in6_addr *local = &p->laddr; unsigned h = 0; int prio = 0; if (!ipv6_addr_any(remote) || !ipv6_addr_any(local)) { prio = 1; h = HASH(remote) ^ HASH(local); } return &ip6n->tnls[prio][h]; } /** * ip6_tnl_link - add tunnel to hash table * @t: tunnel to be added **/ static void ip6_tnl_link(struct ip6_tnl_net *ip6n, struct ip6_tnl *t) { struct ip6_tnl **tp = ip6_tnl_bucket(ip6n, &t->parms); spin_lock_bh(&ip6_tnl_lock); t->next = *tp; rcu_assign_pointer(*tp, t); spin_unlock_bh(&ip6_tnl_lock); } /** * ip6_tnl_unlink - remove tunnel from hash table * @t: tunnel to be removed **/ static void ip6_tnl_unlink(struct ip6_tnl_net *ip6n, struct ip6_tnl *t) { struct ip6_tnl **tp; for (tp = ip6_tnl_bucket(ip6n, &t->parms); *tp; tp = &(*tp)->next) { if (t == *tp) { spin_lock_bh(&ip6_tnl_lock); *tp = t->next; spin_unlock_bh(&ip6_tnl_lock); break; } } } /** * ip6_tnl_create() - create a new tunnel * @p: tunnel parameters * @pt: pointer to new tunnel * * Description: * Create tunnel matching given parameters. * * Return: * created tunnel or NULL **/ static struct ip6_tnl *ip6_tnl_create(struct net *net, struct ip6_tnl_parm *p) { struct net_device *dev; struct ip6_tnl *t; char name[IFNAMSIZ]; int err; struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); if (p->name[0]) strlcpy(name, p->name, IFNAMSIZ); else sprintf(name, "ip6tnl%%d"); dev = alloc_netdev(sizeof (*t), name, ip6_tnl_dev_setup); if (dev == NULL) goto failed; dev_net_set(dev, net); if (strchr(name, '%')) { if (dev_alloc_name(dev, name) < 0) goto failed_free; } t = netdev_priv(dev); t->parms = *p; ip6_tnl_dev_init(dev); if ((err = register_netdevice(dev)) < 0) goto failed_free; dev_hold(dev); ip6_tnl_link(ip6n, t); return t; failed_free: free_netdev(dev); failed: return NULL; } /** * ip6_tnl_locate - find or create tunnel matching given parameters * @p: tunnel parameters * @create: != 0 if allowed to create new tunnel if no match found * * Description: * ip6_tnl_locate() first tries to locate an existing tunnel * based on @parms. If this is unsuccessful, but @create is set a new * tunnel device is created and registered for use. 
* * Return: * matching tunnel or NULL **/ static struct ip6_tnl *ip6_tnl_locate(struct net *net, struct ip6_tnl_parm *p, int create) { struct in6_addr *remote = &p->raddr; struct in6_addr *local = &p->laddr; struct ip6_tnl *t; struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); for (t = *ip6_tnl_bucket(ip6n, p); t; t = t->next) { if (ipv6_addr_equal(local, &t->parms.laddr) && ipv6_addr_equal(remote, &t->parms.raddr)) return t; } if (!create) return NULL; return ip6_tnl_create(net, p); } /** * ip6_tnl_dev_uninit - tunnel device uninitializer * @dev: the device to be destroyed * * Description: * ip6_tnl_dev_uninit() removes tunnel from its list **/ static void ip6_tnl_dev_uninit(struct net_device *dev) { struct ip6_tnl *t = netdev_priv(dev); struct net *net = dev_net(dev); struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); if (dev == ip6n->fb_tnl_dev) { spin_lock_bh(&ip6_tnl_lock); ip6n->tnls_wc[0] = NULL; spin_unlock_bh(&ip6_tnl_lock); } else { ip6_tnl_unlink(ip6n, t); } ip6_tnl_dst_reset(t); dev_put(dev); } /** * parse_tvl_tnl_enc_lim - handle encapsulation limit option * @skb: received socket buffer * * Return: * 0 if none was found, * else index to encapsulation limit **/ static __u16 parse_tlv_tnl_enc_lim(struct sk_buff *skb, __u8 * raw) { struct ipv6hdr *ipv6h = (struct ipv6hdr *) raw; __u8 nexthdr = ipv6h->nexthdr; __u16 off = sizeof (*ipv6h); while (ipv6_ext_hdr(nexthdr) && nexthdr != NEXTHDR_NONE) { __u16 optlen = 0; struct ipv6_opt_hdr *hdr; if (raw + off + sizeof (*hdr) > skb->data && !pskb_may_pull(skb, raw - skb->data + off + sizeof (*hdr))) break; hdr = (struct ipv6_opt_hdr *) (raw + off); if (nexthdr == NEXTHDR_FRAGMENT) { struct frag_hdr *frag_hdr = (struct frag_hdr *) hdr; if (frag_hdr->frag_off) break; optlen = 8; } else if (nexthdr == NEXTHDR_AUTH) { optlen = (hdr->hdrlen + 2) << 2; } else { optlen = ipv6_optlen(hdr); } if (nexthdr == NEXTHDR_DEST) { __u16 i = off + 2; while (1) { struct ipv6_tlv_tnl_enc_lim *tel; /* No more room for encapsulation limit */ if (i + sizeof (*tel) > off + optlen) break; tel = (struct ipv6_tlv_tnl_enc_lim *) &raw[i]; /* return index of option if found and valid */ if (tel->type == IPV6_TLV_TNL_ENCAP_LIMIT && tel->length == 1) return i; /* else jump to next option */ if (tel->type) i += tel->length + 2; else i++; } } nexthdr = hdr->nexthdr; off += optlen; } return 0; } /** * ip6_tnl_err - tunnel error handler * * Description: * ip6_tnl_err() should handle errors in the tunnel according * to the specifications in RFC 2473. **/ static int ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt, u8 *type, u8 *code, int *msg, __u32 *info, int offset) { struct ipv6hdr *ipv6h = (struct ipv6hdr *) skb->data; struct ip6_tnl *t; int rel_msg = 0; u8 rel_type = ICMPV6_DEST_UNREACH; u8 rel_code = ICMPV6_ADDR_UNREACH; __u32 rel_info = 0; __u16 len; int err = -ENOENT; /* If the packet doesn't contain the original IPv6 header we are in trouble since we might need the source address for further processing of the error. 
*/ rcu_read_lock(); if ((t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->daddr, &ipv6h->saddr)) == NULL) goto out; if (t->parms.proto != ipproto && t->parms.proto != 0) goto out; err = 0; switch (*type) { __u32 teli; struct ipv6_tlv_tnl_enc_lim *tel; __u32 mtu; case ICMPV6_DEST_UNREACH: if (net_ratelimit()) printk(KERN_WARNING "%s: Path to destination invalid " "or inactive!\n", t->parms.name); rel_msg = 1; break; case ICMPV6_TIME_EXCEED: if ((*code) == ICMPV6_EXC_HOPLIMIT) { if (net_ratelimit()) printk(KERN_WARNING "%s: Too small hop limit or " "routing loop in tunnel!\n", t->parms.name); rel_msg = 1; } break; case ICMPV6_PARAMPROB: teli = 0; if ((*code) == ICMPV6_HDR_FIELD) teli = parse_tlv_tnl_enc_lim(skb, skb->data); if (teli && teli == *info - 2) { tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli]; if (tel->encap_limit == 0) { if (net_ratelimit()) printk(KERN_WARNING "%s: Too small encapsulation " "limit or routing loop in " "tunnel!\n", t->parms.name); rel_msg = 1; } } else if (net_ratelimit()) { printk(KERN_WARNING "%s: Recipient unable to parse tunneled " "packet!\n ", t->parms.name); } break; case ICMPV6_PKT_TOOBIG: mtu = *info - offset; if (mtu < IPV6_MIN_MTU) mtu = IPV6_MIN_MTU; t->dev->mtu = mtu; if ((len = sizeof (*ipv6h) + ntohs(ipv6h->payload_len)) > mtu) { rel_type = ICMPV6_PKT_TOOBIG; rel_code = 0; rel_info = mtu; rel_msg = 1; } break; } *type = rel_type; *code = rel_code; *info = rel_info; *msg = rel_msg; out: rcu_read_unlock(); return err; } static int ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, u8 type, u8 code, int offset, __be32 info) { int rel_msg = 0; u8 rel_type = type; u8 rel_code = code; __u32 rel_info = ntohl(info); int err; struct sk_buff *skb2; struct iphdr *eiph; struct flowi fl; struct rtable *rt; err = ip6_tnl_err(skb, IPPROTO_IPIP, opt, &rel_type, &rel_code, &rel_msg, &rel_info, offset); if (err < 0) return err; if (rel_msg == 0) return 0; switch (rel_type) { case ICMPV6_DEST_UNREACH: if (rel_code != ICMPV6_ADDR_UNREACH) return 0; rel_type = ICMP_DEST_UNREACH; rel_code = ICMP_HOST_UNREACH; break; case ICMPV6_PKT_TOOBIG: if (rel_code != 0) return 0; rel_type = ICMP_DEST_UNREACH; rel_code = ICMP_FRAG_NEEDED; break; default: return 0; } if (!pskb_may_pull(skb, offset + sizeof(struct iphdr))) return 0; skb2 = skb_clone(skb, GFP_ATOMIC); if (!skb2) return 0; skb_dst_drop(skb2); skb_pull(skb2, offset); skb_reset_network_header(skb2); eiph = ip_hdr(skb2); /* Try to guess incoming interface */ memset(&fl, 0, sizeof(fl)); fl.fl4_dst = eiph->saddr; fl.fl4_tos = RT_TOS(eiph->tos); fl.proto = IPPROTO_IPIP; if (ip_route_output_key(dev_net(skb->dev), &rt, &fl)) goto out; skb2->dev = rt->u.dst.dev; /* route "incoming" packet */ if (rt->rt_flags & RTCF_LOCAL) { ip_rt_put(rt); rt = NULL; fl.fl4_dst = eiph->daddr; fl.fl4_src = eiph->saddr; fl.fl4_tos = eiph->tos; if (ip_route_output_key(dev_net(skb->dev), &rt, &fl) || rt->u.dst.dev->type != ARPHRD_TUNNEL) { ip_rt_put(rt); goto out; } skb_dst_set(skb2, (struct dst_entry *)rt); } else { ip_rt_put(rt); if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos, skb2->dev) || skb_dst(skb2)->dev->type != ARPHRD_TUNNEL) goto out; } /* change mtu on this route */ if (rel_type == ICMP_DEST_UNREACH && rel_code == ICMP_FRAG_NEEDED) { if (rel_info > dst_mtu(skb_dst(skb2))) goto out; skb_dst(skb2)->ops->update_pmtu(skb_dst(skb2), rel_info); } icmp_send(skb2, rel_type, rel_code, htonl(rel_info)); out: kfree_skb(skb2); return 0; } static int ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, u8 type, u8 
code, int offset, __be32 info) { int rel_msg = 0; u8 rel_type = type; u8 rel_code = code; __u32 rel_info = ntohl(info); int err; err = ip6_tnl_err(skb, IPPROTO_IPV6, opt, &rel_type, &rel_code, &rel_msg, &rel_info, offset); if (err < 0) return err; if (rel_msg && pskb_may_pull(skb, offset + sizeof(struct ipv6hdr))) { struct rt6_info *rt; struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); if (!skb2) return 0; skb_dst_drop(skb2); skb_pull(skb2, offset); skb_reset_network_header(skb2); /* Try to guess incoming interface */ rt = rt6_lookup(dev_net(skb->dev), &ipv6_hdr(skb2)->saddr, NULL, 0, 0); if (rt && rt->rt6i_dev) skb2->dev = rt->rt6i_dev; icmpv6_send(skb2, rel_type, rel_code, rel_info, skb2->dev); if (rt) dst_release(&rt->u.dst); kfree_skb(skb2); } return 0; } static void ip4ip6_dscp_ecn_decapsulate(struct ip6_tnl *t, struct ipv6hdr *ipv6h, struct sk_buff *skb) { __u8 dsfield = ipv6_get_dsfield(ipv6h) & ~INET_ECN_MASK; if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY) ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, dsfield); if (INET_ECN_is_ce(dsfield)) IP_ECN_set_ce(ip_hdr(skb)); } static void ip6ip6_dscp_ecn_decapsulate(struct ip6_tnl *t, struct ipv6hdr *ipv6h, struct sk_buff *skb) { if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY) ipv6_copy_dscp(ipv6_get_dsfield(ipv6h), ipv6_hdr(skb)); if (INET_ECN_is_ce(ipv6_get_dsfield(ipv6h))) IP6_ECN_set_ce(ipv6_hdr(skb)); } /* called with rcu_read_lock() */ static inline int ip6_tnl_rcv_ctl(struct ip6_tnl *t) { struct ip6_tnl_parm *p = &t->parms; int ret = 0; struct net *net = dev_net(t->dev); if (p->flags & IP6_TNL_F_CAP_RCV) { struct net_device *ldev = NULL; if (p->link) ldev = dev_get_by_index_rcu(net, p->link); if ((ipv6_addr_is_multicast(&p->laddr) || likely(ipv6_chk_addr(net, &p->laddr, ldev, 0))) && likely(!ipv6_chk_addr(net, &p->raddr, NULL, 0))) ret = 1; } return ret; } /** * ip6_tnl_rcv - decapsulate IPv6 packet and retransmit it locally * @skb: received socket buffer * @protocol: ethernet protocol ID * @dscp_ecn_decapsulate: the function to decapsulate DSCP code and ECN * * Return: 0 **/ static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol, __u8 ipproto, void (*dscp_ecn_decapsulate)(struct ip6_tnl *t, struct ipv6hdr *ipv6h, struct sk_buff *skb)) { struct ip6_tnl *t; struct ipv6hdr *ipv6h = ipv6_hdr(skb); rcu_read_lock(); if ((t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr, &ipv6h->daddr)) != NULL) { if (t->parms.proto != ipproto && t->parms.proto != 0) { rcu_read_unlock(); goto discard; } if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) { rcu_read_unlock(); goto discard; } if (!ip6_tnl_rcv_ctl(t)) { t->dev->stats.rx_dropped++; rcu_read_unlock(); goto discard; } secpath_reset(skb); skb->mac_header = skb->network_header; skb_reset_network_header(skb); skb->protocol = htons(protocol); skb->pkt_type = PACKET_HOST; memset(skb->cb, 0, sizeof(struct inet6_skb_parm)); skb->dev = t->dev; skb_dst_drop(skb); nf_reset(skb); dscp_ecn_decapsulate(t, ipv6h, skb); t->dev->stats.rx_packets++; t->dev->stats.rx_bytes += skb->len; netif_rx(skb); rcu_read_unlock(); return 0; } rcu_read_unlock(); return 1; discard: kfree_skb(skb); return 0; } static int ip4ip6_rcv(struct sk_buff *skb) { return ip6_tnl_rcv(skb, ETH_P_IP, IPPROTO_IPIP, ip4ip6_dscp_ecn_decapsulate); } static int ip6ip6_rcv(struct sk_buff *skb) { return ip6_tnl_rcv(skb, ETH_P_IPV6, IPPROTO_IPV6, ip6ip6_dscp_ecn_decapsulate); } struct ipv6_tel_txoption { struct ipv6_txoptions ops; __u8 dst_opt[8]; }; static void init_tel_txopt(struct ipv6_tel_txoption *opt, __u8 encap_limit) { memset(opt, 
0, sizeof(struct ipv6_tel_txoption)); opt->dst_opt[2] = IPV6_TLV_TNL_ENCAP_LIMIT; opt->dst_opt[3] = 1; opt->dst_opt[4] = encap_limit; opt->dst_opt[5] = IPV6_TLV_PADN; opt->dst_opt[6] = 1; opt->ops.dst0opt = (struct ipv6_opt_hdr *) opt->dst_opt; opt->ops.opt_nflen = 8; } /** * ip6_tnl_addr_conflict - compare packet addresses to tunnel's own * @t: the outgoing tunnel device * @hdr: IPv6 header from the incoming packet * * Description: * Avoid trivial tunneling loop by checking that tunnel exit-point * doesn't match source of incoming packet. * * Return: * 1 if conflict, * 0 else **/ static inline int ip6_tnl_addr_conflict(struct ip6_tnl *t, struct ipv6hdr *hdr) { return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr); } static inline int ip6_tnl_xmit_ctl(struct ip6_tnl *t) { struct ip6_tnl_parm *p = &t->parms; int ret = 0; struct net *net = dev_net(t->dev); if (p->flags & IP6_TNL_F_CAP_XMIT) { struct net_device *ldev = NULL; rcu_read_lock(); if (p->link) ldev = dev_get_by_index_rcu(net, p->link); if (unlikely(!ipv6_chk_addr(net, &p->laddr, ldev, 0))) printk(KERN_WARNING "%s xmit: Local address not yet configured!\n", p->name); else if (!ipv6_addr_is_multicast(&p->raddr) && unlikely(ipv6_chk_addr(net, &p->raddr, NULL, 0))) printk(KERN_WARNING "%s xmit: Routing loop! " "Remote address found on this node!\n", p->name); else ret = 1; rcu_read_unlock(); } return ret; } /** * ip6_tnl_xmit2 - encapsulate packet and send * @skb: the outgoing socket buffer * @dev: the outgoing tunnel device * @dsfield: dscp code for outer header * @fl: flow of tunneled packet * @encap_limit: encapsulation limit * @pmtu: Path MTU is stored if packet is too big * * Description: * Build new header and do some sanity checks on the packet before sending * it. * * Return: * 0 on success * -1 fail * %-EMSGSIZE message too big. return mtu in this case. **/ static int ip6_tnl_xmit2(struct sk_buff *skb, struct net_device *dev, __u8 dsfield, struct flowi *fl, int encap_limit, __u32 *pmtu) { struct net *net = dev_net(dev); struct ip6_tnl *t = netdev_priv(dev); struct net_device_stats *stats = &t->dev->stats; struct ipv6hdr *ipv6h = ipv6_hdr(skb); struct ipv6_tel_txoption opt; struct dst_entry *dst; struct net_device *tdev; int mtu; unsigned int max_headroom = sizeof(struct ipv6hdr); u8 proto; int err = -1; int pkt_len; if ((dst = ip6_tnl_dst_check(t)) != NULL) dst_hold(dst); else { dst = ip6_route_output(net, NULL, fl); if (dst->error || xfrm_lookup(net, &dst, fl, NULL, 0) < 0) goto tx_err_link_failure; } tdev = dst->dev; if (tdev == dev) { stats->collisions++; if (net_ratelimit()) printk(KERN_WARNING "%s: Local routing loop detected!\n", t->parms.name); goto tx_err_dst_release; } mtu = dst_mtu(dst) - sizeof (*ipv6h); if (encap_limit >= 0) { max_headroom += 8; mtu -= 8; } if (mtu < IPV6_MIN_MTU) mtu = IPV6_MIN_MTU; if (skb_dst(skb)) skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu); if (skb->len > mtu) { *pmtu = mtu; err = -EMSGSIZE; goto tx_err_dst_release; } /* * Okay, now see if we can stuff it in the buffer as-is. 
*/ max_headroom += LL_RESERVED_SPACE(tdev); if (skb_headroom(skb) < max_headroom || skb_shared(skb) || (skb_cloned(skb) && !skb_clone_writable(skb, 0))) { struct sk_buff *new_skb; if (!(new_skb = skb_realloc_headroom(skb, max_headroom))) goto tx_err_dst_release; if (skb->sk) skb_set_owner_w(new_skb, skb->sk); kfree_skb(skb); skb = new_skb; } skb_dst_drop(skb); skb_dst_set(skb, dst_clone(dst)); skb->transport_header = skb->network_header; proto = fl->proto; if (encap_limit >= 0) { init_tel_txopt(&opt, encap_limit); ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL); } skb_push(skb, sizeof(struct ipv6hdr)); skb_reset_network_header(skb); ipv6h = ipv6_hdr(skb); *(__be32*)ipv6h = fl->fl6_flowlabel | htonl(0x60000000); dsfield = INET_ECN_encapsulate(0, dsfield); ipv6_change_dsfield(ipv6h, ~INET_ECN_MASK, dsfield); ipv6h->hop_limit = t->parms.hop_limit; ipv6h->nexthdr = proto; ipv6_addr_copy(&ipv6h->saddr, &fl->fl6_src); ipv6_addr_copy(&ipv6h->daddr, &fl->fl6_dst); nf_reset(skb); pkt_len = skb->len; err = ip6_local_out(skb); if (net_xmit_eval(err) == 0) { stats->tx_bytes += pkt_len; stats->tx_packets++; } else { stats->tx_errors++; stats->tx_aborted_errors++; } ip6_tnl_dst_store(t, dst); return 0; tx_err_link_failure: stats->tx_carrier_errors++; dst_link_failure(skb); tx_err_dst_release: dst_release(dst); return err; } static inline int ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) { struct ip6_tnl *t = netdev_priv(dev); struct iphdr *iph = ip_hdr(skb); int encap_limit = -1; struct flowi fl; __u8 dsfield; __u32 mtu; int err; if ((t->parms.proto != IPPROTO_IPIP && t->parms.proto != 0) || !ip6_tnl_xmit_ctl(t)) return -1; if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) encap_limit = t->parms.encap_limit; memcpy(&fl, &t->fl, sizeof (fl)); fl.proto = IPPROTO_IPIP; dsfield = ipv4_get_dsfield(iph); if ((t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)) fl.fl6_flowlabel |= htonl((__u32)iph->tos << IPV6_TCLASS_SHIFT) & IPV6_TCLASS_MASK; err = ip6_tnl_xmit2(skb, dev, dsfield, &fl, encap_limit, &mtu); if (err != 0) { /* XXX: send ICMP error even if DF is not set. 
*/ if (err == -EMSGSIZE) icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu)); return -1; } return 0; } static inline int ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) { struct ip6_tnl *t = netdev_priv(dev); struct ipv6hdr *ipv6h = ipv6_hdr(skb); int encap_limit = -1; __u16 offset; struct flowi fl; __u8 dsfield; __u32 mtu; int err; if ((t->parms.proto != IPPROTO_IPV6 && t->parms.proto != 0) || !ip6_tnl_xmit_ctl(t) || ip6_tnl_addr_conflict(t, ipv6h)) return -1; offset = parse_tlv_tnl_enc_lim(skb, skb_network_header(skb)); if (offset > 0) { struct ipv6_tlv_tnl_enc_lim *tel; tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset]; if (tel->encap_limit == 0) { icmpv6_send(skb, ICMPV6_PARAMPROB, ICMPV6_HDR_FIELD, offset + 2, skb->dev); return -1; } encap_limit = tel->encap_limit - 1; } else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) encap_limit = t->parms.encap_limit; memcpy(&fl, &t->fl, sizeof (fl)); fl.proto = IPPROTO_IPV6; dsfield = ipv6_get_dsfield(ipv6h); if ((t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)) fl.fl6_flowlabel |= (*(__be32 *) ipv6h & IPV6_TCLASS_MASK); if ((t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)) fl.fl6_flowlabel |= (*(__be32 *) ipv6h & IPV6_FLOWLABEL_MASK); err = ip6_tnl_xmit2(skb, dev, dsfield, &fl, encap_limit, &mtu); if (err != 0) { if (err == -EMSGSIZE) icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev); return -1; } return 0; } static netdev_tx_t ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) { struct ip6_tnl *t = netdev_priv(dev); struct net_device_stats *stats = &t->dev->stats; int ret; switch (skb->protocol) { case htons(ETH_P_IP): ret = ip4ip6_tnl_xmit(skb, dev); break; case htons(ETH_P_IPV6): ret = ip6ip6_tnl_xmit(skb, dev); break; default: goto tx_err; } if (ret < 0) goto tx_err; return NETDEV_TX_OK; tx_err: stats->tx_errors++; stats->tx_dropped++; kfree_skb(skb); return NETDEV_TX_OK; } static void ip6_tnl_set_cap(struct ip6_tnl *t) { struct ip6_tnl_parm *p = &t->parms; int ltype = ipv6_addr_type(&p->laddr); int rtype = ipv6_addr_type(&p->raddr); p->flags &= ~(IP6_TNL_F_CAP_XMIT|IP6_TNL_F_CAP_RCV); if (ltype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) && rtype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) && !((ltype|rtype) & IPV6_ADDR_LOOPBACK) && (!((ltype|rtype) & IPV6_ADDR_LINKLOCAL) || p->link)) { if (ltype&IPV6_ADDR_UNICAST) p->flags |= IP6_TNL_F_CAP_XMIT; if (rtype&IPV6_ADDR_UNICAST) p->flags |= IP6_TNL_F_CAP_RCV; } } static void ip6_tnl_link_config(struct ip6_tnl *t) { struct net_device *dev = t->dev; struct ip6_tnl_parm *p = &t->parms; struct flowi *fl = &t->fl; memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr)); memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr)); /* Set up flowi template */ ipv6_addr_copy(&fl->fl6_src, &p->laddr); ipv6_addr_copy(&fl->fl6_dst, &p->raddr); fl->oif = p->link; fl->fl6_flowlabel = 0; if (!(p->flags&IP6_TNL_F_USE_ORIG_TCLASS)) fl->fl6_flowlabel |= IPV6_TCLASS_MASK & p->flowinfo; if (!(p->flags&IP6_TNL_F_USE_ORIG_FLOWLABEL)) fl->fl6_flowlabel |= IPV6_FLOWLABEL_MASK & p->flowinfo; ip6_tnl_set_cap(t); if (p->flags&IP6_TNL_F_CAP_XMIT && p->flags&IP6_TNL_F_CAP_RCV) dev->flags |= IFF_POINTOPOINT; else dev->flags &= ~IFF_POINTOPOINT; dev->iflink = p->link; if (p->flags & IP6_TNL_F_CAP_XMIT) { int strict = (ipv6_addr_type(&p->raddr) & (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL)); struct rt6_info *rt = rt6_lookup(dev_net(dev), &p->raddr, &p->laddr, p->link, strict); if (rt == NULL) return; if (rt->rt6i_dev) { dev->hard_header_len = rt->rt6i_dev->hard_header_len + 
sizeof (struct ipv6hdr); dev->mtu = rt->rt6i_dev->mtu - sizeof (struct ipv6hdr); if (dev->mtu < IPV6_MIN_MTU) dev->mtu = IPV6_MIN_MTU; } dst_release(&rt->u.dst); } } /** * ip6_tnl_change - update the tunnel parameters * @t: tunnel to be changed * @p: tunnel configuration parameters * * Description: * ip6_tnl_change() updates the tunnel parameters **/ static int ip6_tnl_change(struct ip6_tnl *t, struct ip6_tnl_parm *p) { ipv6_addr_copy(&t->parms.laddr, &p->laddr); ipv6_addr_copy(&t->parms.raddr, &p->raddr); t->parms.flags = p->flags; t->parms.hop_limit = p->hop_limit; t->parms.encap_limit = p->encap_limit; t->parms.flowinfo = p->flowinfo; t->parms.link = p->link; t->parms.proto = p->proto; ip6_tnl_dst_reset(t); ip6_tnl_link_config(t); return 0; } /** * ip6_tnl_ioctl - configure ipv6 tunnels from userspace * @dev: virtual device associated with tunnel * @ifr: parameters passed from userspace * @cmd: command to be performed * * Description: * ip6_tnl_ioctl() is used for managing IPv6 tunnels * from userspace. * * The possible commands are the following: * %SIOCGETTUNNEL: get tunnel parameters for device * %SIOCADDTUNNEL: add tunnel matching given tunnel parameters * %SIOCCHGTUNNEL: change tunnel parameters to those given * %SIOCDELTUNNEL: delete tunnel * * The fallback device "ip6tnl0", created during module * initialization, can be used for creating other tunnel devices. * * Return: * 0 on success, * %-EFAULT if unable to copy data to or from userspace, * %-EPERM if current process hasn't %CAP_NET_ADMIN set * %-EINVAL if passed tunnel parameters are invalid, * %-EEXIST if changing a tunnel's parameters would cause a conflict * %-ENODEV if attempting to change or delete a nonexisting device **/ static int ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { int err = 0; struct ip6_tnl_parm p; struct ip6_tnl *t = NULL; struct net *net = dev_net(dev); struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); switch (cmd) { case SIOCGETTUNNEL: if (dev == ip6n->fb_tnl_dev) { if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof (p))) { err = -EFAULT; break; } t = ip6_tnl_locate(net, &p, 0); } if (t == NULL) t = netdev_priv(dev); memcpy(&p, &t->parms, sizeof (p)); if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof (p))) { err = -EFAULT; } break; case SIOCADDTUNNEL: case SIOCCHGTUNNEL: err = -EPERM; if (!capable(CAP_NET_ADMIN)) break; err = -EFAULT; if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof (p))) break; err = -EINVAL; if (p.proto != IPPROTO_IPV6 && p.proto != IPPROTO_IPIP && p.proto != 0) break; t = ip6_tnl_locate(net, &p, cmd == SIOCADDTUNNEL); if (dev != ip6n->fb_tnl_dev && cmd == SIOCCHGTUNNEL) { if (t != NULL) { if (t->dev != dev) { err = -EEXIST; break; } } else t = netdev_priv(dev); ip6_tnl_unlink(ip6n, t); err = ip6_tnl_change(t, &p); ip6_tnl_link(ip6n, t); netdev_state_change(dev); } if (t) { err = 0; if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms, sizeof (p))) err = -EFAULT; } else err = (cmd == SIOCADDTUNNEL ? 
-ENOBUFS : -ENOENT); break; case SIOCDELTUNNEL: err = -EPERM; if (!capable(CAP_NET_ADMIN)) break; if (dev == ip6n->fb_tnl_dev) { err = -EFAULT; if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof (p))) break; err = -ENOENT; if ((t = ip6_tnl_locate(net, &p, 0)) == NULL) break; err = -EPERM; if (t->dev == ip6n->fb_tnl_dev) break; dev = t->dev; } err = 0; unregister_netdevice(dev); break; default: err = -EINVAL; } return err; } /** * ip6_tnl_change_mtu - change mtu manually for tunnel device * @dev: virtual device associated with tunnel * @new_mtu: the new mtu * * Return: * 0 on success, * %-EINVAL if mtu too small **/ static int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu) { if (new_mtu < IPV6_MIN_MTU) { return -EINVAL; } dev->mtu = new_mtu; return 0; } static const struct net_device_ops ip6_tnl_netdev_ops = { .ndo_uninit = ip6_tnl_dev_uninit, .ndo_start_xmit = ip6_tnl_xmit, .ndo_do_ioctl = ip6_tnl_ioctl, .ndo_change_mtu = ip6_tnl_change_mtu, }; /** * ip6_tnl_dev_setup - setup virtual tunnel device * @dev: virtual device associated with tunnel * * Description: * Initialize function pointers and device parameters **/ static void ip6_tnl_dev_setup(struct net_device *dev) { dev->netdev_ops = &ip6_tnl_netdev_ops; dev->destructor = free_netdev; dev->type = ARPHRD_TUNNEL6; dev->hard_header_len = LL_MAX_HEADER + sizeof (struct ipv6hdr); dev->mtu = ETH_DATA_LEN - sizeof (struct ipv6hdr); dev->flags |= IFF_NOARP; dev->addr_len = sizeof(struct in6_addr); dev->features |= NETIF_F_NETNS_LOCAL; } /** * ip6_tnl_dev_init_gen - general initializer for all tunnel devices * @dev: virtual device associated with tunnel **/ static inline void ip6_tnl_dev_init_gen(struct net_device *dev) { struct ip6_tnl *t = netdev_priv(dev); t->dev = dev; strcpy(t->parms.name, dev->name); } /** * ip6_tnl_dev_init - initializer for all non fallback tunnel devices * @dev: virtual device associated with tunnel **/ static void ip6_tnl_dev_init(struct net_device *dev) { struct ip6_tnl *t = netdev_priv(dev); ip6_tnl_dev_init_gen(dev); ip6_tnl_link_config(t); } /** * ip6_fb_tnl_dev_init - initializer for fallback tunnel device * @dev: fallback device * * Return: 0 **/ static void __net_init ip6_fb_tnl_dev_init(struct net_device *dev) { struct ip6_tnl *t = netdev_priv(dev); struct net *net = dev_net(dev); struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); ip6_tnl_dev_init_gen(dev); t->parms.proto = IPPROTO_IPV6; dev_hold(dev); ip6n->tnls_wc[0] = t; } static struct xfrm6_tunnel ip4ip6_handler = { .handler = ip4ip6_rcv, .err_handler = ip4ip6_err, .priority = 1, }; static struct xfrm6_tunnel ip6ip6_handler = { .handler = ip6ip6_rcv, .err_handler = ip6ip6_err, .priority = 1, }; static void __net_exit ip6_tnl_destroy_tunnels(struct ip6_tnl_net *ip6n) { int h; struct ip6_tnl *t; LIST_HEAD(list); for (h = 0; h < HASH_SIZE; h++) { t = ip6n->tnls_r_l[h]; while (t != NULL) { unregister_netdevice_queue(t->dev, &list); t = t->next; } } t = ip6n->tnls_wc[0]; unregister_netdevice_queue(t->dev, &list); unregister_netdevice_many(&list); } static int __net_init ip6_tnl_init_net(struct net *net) { struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); int err; ip6n->tnls[0] = ip6n->tnls_wc; ip6n->tnls[1] = ip6n->tnls_r_l; err = -ENOMEM; ip6n->fb_tnl_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6tnl0", ip6_tnl_dev_setup); if (!ip6n->fb_tnl_dev) goto err_alloc_dev; dev_net_set(ip6n->fb_tnl_dev, net); ip6_fb_tnl_dev_init(ip6n->fb_tnl_dev); err = register_netdev(ip6n->fb_tnl_dev); if (err < 0) goto err_register; return 0; 
err_register: free_netdev(ip6n->fb_tnl_dev); err_alloc_dev: return err; } static void __net_exit ip6_tnl_exit_net(struct net *net) { struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); rtnl_lock(); ip6_tnl_destroy_tunnels(ip6n); rtnl_unlock(); } static struct pernet_operations ip6_tnl_net_ops = { .init = ip6_tnl_init_net, .exit = ip6_tnl_exit_net, .id = &ip6_tnl_net_id, .size = sizeof(struct ip6_tnl_net), }; /** * ip6_tunnel_init - register protocol and reserve needed resources * * Return: 0 on success **/ static int __init ip6_tunnel_init(void) { int err; err = register_pernet_device(&ip6_tnl_net_ops); if (err < 0) goto out_pernet; err = xfrm6_tunnel_register(&ip4ip6_handler, AF_INET); if (err < 0) { printk(KERN_ERR "ip6_tunnel init: can't register ip4ip6\n"); goto out_ip4ip6; } err = xfrm6_tunnel_register(&ip6ip6_handler, AF_INET6); if (err < 0) { printk(KERN_ERR "ip6_tunnel init: can't register ip6ip6\n"); goto out_ip6ip6; } return 0; out_ip6ip6: xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET); out_ip4ip6: unregister_pernet_device(&ip6_tnl_net_ops); out_pernet: return err; } /** * ip6_tunnel_cleanup - free resources and unregister protocol **/ static void __exit ip6_tunnel_cleanup(void) { if (xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET)) printk(KERN_INFO "ip6_tunnel close: can't deregister ip4ip6\n"); if (xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6)) printk(KERN_INFO "ip6_tunnel close: can't deregister ip6ip6\n"); unregister_pernet_device(&ip6_tnl_net_ops); } module_init(ip6_tunnel_init); module_exit(ip6_tunnel_cleanup);
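/*
 * Editor's illustration (not part of the original driver source): the
 * ip6_tnl_ioctl() documentation above describes the SIOCGETTUNNEL /
 * SIOCADDTUNNEL / SIOCCHGTUNNEL / SIOCDELTUNNEL interface and notes that
 * new tunnels are created through the fallback device "ip6tnl0".  The
 * sketch below shows, in rough iproute2 style, how a userspace tool is
 * assumed to drive that interface.  The device names and addresses are
 * purely illustrative, the exact header arrangement may need adjusting
 * for a given libc/kernel header combination, and error handling is
 * minimal.  As noted in the documentation above, the call needs
 * CAP_NET_ADMIN.
 */
#if 0 /* userspace sketch only - never compiled as part of this module */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>              /* struct ifreq, IFNAMSIZ */
#include <netinet/in.h>          /* IPPROTO_IPV6, struct in6_addr */
#include <arpa/inet.h>           /* inet_pton() */
#include <linux/if_tunnel.h>     /* SIOCADDTUNNEL */
#include <linux/ip6_tunnel.h>    /* struct ip6_tnl_parm */

int main(void)
{
	struct ip6_tnl_parm p;
	struct ifreq ifr;
	int fd = socket(AF_INET6, SOCK_DGRAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	/* Tunnel parameters examined by ip6_tnl_ioctl()/ip6_tnl_locate() */
	memset(&p, 0, sizeof(p));
	strncpy(p.name, "ip6tnl1", IFNAMSIZ - 1);	/* name of the new device */
	p.proto = IPPROTO_IPV6;				/* ip6ip6 tunnel */
	p.hop_limit = 64;
	p.encap_limit = 4;
	inet_pton(AF_INET6, "2001:db8::1", &p.laddr);	/* example endpoints */
	inet_pton(AF_INET6, "2001:db8::2", &p.raddr);

	/* The add request is issued against the fallback device "ip6tnl0" */
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "ip6tnl0", IFNAMSIZ - 1);
	ifr.ifr_ifru.ifru_data = (void *)&p;

	if (ioctl(fd, SIOCADDTUNNEL, &ifr) < 0)
		perror("SIOCADDTUNNEL");

	close(fd);
	return 0;
}
#endif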
./CrossVul/dataset_final_sorted/CWE-362/c/good_3459_1
crossvul-cpp_data_good_4741_0
/* * Copyright (C) 1991, 1992 Linus Torvalds */ /* * 'tty_io.c' gives an orthogonal feeling to tty's, be they consoles * or rs-channels. It also implements echoing, cooked mode etc. * * Kill-line thanks to John T Kohl, who also corrected VMIN = VTIME = 0. * * Modified by Theodore Ts'o, 9/14/92, to dynamically allocate the * tty_struct and tty_queue structures. Previously there was an array * of 256 tty_struct's which was statically allocated, and the * tty_queue structures were allocated at boot time. Both are now * dynamically allocated only when the tty is open. * * Also restructured routines so that there is more of a separation * between the high-level tty routines (tty_io.c and tty_ioctl.c) and * the low-level tty routines (serial.c, pty.c, console.c). This * makes for cleaner and more compact code. -TYT, 9/17/92 * * Modified by Fred N. van Kempen, 01/29/93, to add line disciplines * which can be dynamically activated and de-activated by the line * discipline handling modules (like SLIP). * * NOTE: pay no attention to the line discipline code (yet); its * interface is still subject to change in this version... * -- TYT, 1/31/92 * * Added functionality to the OPOST tty handling. No delays, but all * other bits should be there. * -- Nick Holloway <alfie@dcs.warwick.ac.uk>, 27th May 1993. * * Rewrote canonical mode and added more termios flags. * -- julian@uhunix.uhcc.hawaii.edu (J. Cowley), 13Jan94 * * Reorganized FASYNC support so mouse code can share it. * -- ctm@ardi.com, 9Sep95 * * New TIOCLINUX variants added. * -- mj@k332.feld.cvut.cz, 19-Nov-95 * * Restrict vt switching via ioctl() * -- grif@cs.ucr.edu, 5-Dec-95 * * Move console and virtual terminal code to more appropriate files, * implement CONFIG_VT and generalize console device interface. * -- Marko Kohtala <Marko.Kohtala@hut.fi>, March 97 * * Rewrote tty_init_dev and tty_release_dev to eliminate races. * -- Bill Hawes <whawes@star.net>, June 97 * * Added devfs support. * -- C. Scott Ananian <cananian@alumni.princeton.edu>, 13-Jan-1998 * * Added support for a Unix98-style ptmx device. * -- C. Scott Ananian <cananian@alumni.princeton.edu>, 14-Jan-1998 * * Reduced memory usage for older ARM systems * -- Russell King <rmk@arm.linux.org.uk> * * Move do_SAK() into process context. Less stack use in devfs functions. * alloc_tty_struct() always uses kmalloc() * -- Andrew Morton <andrewm@uow.edu.eu> 17Mar01 */ #include <linux/types.h> #include <linux/major.h> #include <linux/errno.h> #include <linux/signal.h> #include <linux/fcntl.h> #include <linux/sched.h> #include <linux/interrupt.h> #include <linux/tty.h> #include <linux/tty_driver.h> #include <linux/tty_flip.h> #include <linux/devpts_fs.h> #include <linux/file.h> #include <linux/fdtable.h> #include <linux/console.h> #include <linux/timer.h> #include <linux/ctype.h> #include <linux/kd.h> #include <linux/mm.h> #include <linux/string.h> #include <linux/slab.h> #include <linux/poll.h> #include <linux/proc_fs.h> #include <linux/init.h> #include <linux/module.h> #include <linux/device.h> #include <linux/wait.h> #include <linux/bitops.h> #include <linux/delay.h> #include <linux/seq_file.h> #include <linux/serial.h> #include <linux/ratelimit.h> #include <linux/uaccess.h> #include <linux/kbd_kern.h> #include <linux/vt_kern.h> #include <linux/selection.h> #include <linux/kmod.h> #include <linux/nsproxy.h> #undef TTY_DEBUG_HANGUP #ifdef TTY_DEBUG_HANGUP # define tty_debug_hangup(tty, f, args...) tty_debug(tty, f, ##args) #else # define tty_debug_hangup(tty, f, args...) 
do { } while (0) #endif #define TTY_PARANOIA_CHECK 1 #define CHECK_TTY_COUNT 1 struct ktermios tty_std_termios = { /* for the benefit of tty drivers */ .c_iflag = ICRNL | IXON, .c_oflag = OPOST | ONLCR, .c_cflag = B38400 | CS8 | CREAD | HUPCL, .c_lflag = ISIG | ICANON | ECHO | ECHOE | ECHOK | ECHOCTL | ECHOKE | IEXTEN, .c_cc = INIT_C_CC, .c_ispeed = 38400, .c_ospeed = 38400 }; EXPORT_SYMBOL(tty_std_termios); /* This list gets poked at by procfs and various bits of boot up code. This could do with some rationalisation such as pulling the tty proc function into this file */ LIST_HEAD(tty_drivers); /* linked list of tty drivers */ /* Mutex to protect creating and releasing a tty. This is shared with vt.c for deeply disgusting hack reasons */ DEFINE_MUTEX(tty_mutex); EXPORT_SYMBOL(tty_mutex); /* Spinlock to protect the tty->tty_files list */ DEFINE_SPINLOCK(tty_files_lock); static ssize_t tty_read(struct file *, char __user *, size_t, loff_t *); static ssize_t tty_write(struct file *, const char __user *, size_t, loff_t *); ssize_t redirected_tty_write(struct file *, const char __user *, size_t, loff_t *); static unsigned int tty_poll(struct file *, poll_table *); static int tty_open(struct inode *, struct file *); long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg); #ifdef CONFIG_COMPAT static long tty_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg); #else #define tty_compat_ioctl NULL #endif static int __tty_fasync(int fd, struct file *filp, int on); static int tty_fasync(int fd, struct file *filp, int on); static void release_tty(struct tty_struct *tty, int idx); /** * free_tty_struct - free a disused tty * @tty: tty struct to free * * Free the write buffers, tty queue and tty memory itself. * * Locking: none. Must be called after tty is definitely unused */ void free_tty_struct(struct tty_struct *tty) { if (!tty) return; put_device(tty->dev); kfree(tty->write_buf); tty->magic = 0xDEADDEAD; kfree(tty); } static inline struct tty_struct *file_tty(struct file *file) { return ((struct tty_file_private *)file->private_data)->tty; } int tty_alloc_file(struct file *file) { struct tty_file_private *priv; priv = kmalloc(sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; file->private_data = priv; return 0; } /* Associate a new file with the tty structure */ void tty_add_file(struct tty_struct *tty, struct file *file) { struct tty_file_private *priv = file->private_data; priv->tty = tty; priv->file = file; spin_lock(&tty_files_lock); list_add(&priv->list, &tty->tty_files); spin_unlock(&tty_files_lock); } /** * tty_free_file - free file->private_data * * This shall be used only for fail path handling when tty_add_file was not * called yet. */ void tty_free_file(struct file *file) { struct tty_file_private *priv = file->private_data; file->private_data = NULL; kfree(priv); } /* Delete file from its tty */ static void tty_del_file(struct file *file) { struct tty_file_private *priv = file->private_data; spin_lock(&tty_files_lock); list_del(&priv->list); spin_unlock(&tty_files_lock); tty_free_file(file); } #define TTY_NUMBER(tty) ((tty)->index + (tty)->driver->name_base) /** * tty_name - return tty naming * @tty: tty structure * * Convert a tty structure into a name. The name reflects the kernel * naming policy and if udev is in use may not reflect user space * * Locking: none */ const char *tty_name(const struct tty_struct *tty) { if (!tty) /* Hmm. NULL pointer. That's fun. 
*/ return "NULL tty"; return tty->name; } EXPORT_SYMBOL(tty_name); const char *tty_driver_name(const struct tty_struct *tty) { if (!tty || !tty->driver) return ""; return tty->driver->name; } static int tty_paranoia_check(struct tty_struct *tty, struct inode *inode, const char *routine) { #ifdef TTY_PARANOIA_CHECK if (!tty) { pr_warn("(%d:%d): %s: NULL tty\n", imajor(inode), iminor(inode), routine); return 1; } if (tty->magic != TTY_MAGIC) { pr_warn("(%d:%d): %s: bad magic number\n", imajor(inode), iminor(inode), routine); return 1; } #endif return 0; } /* Caller must hold tty_lock */ static int check_tty_count(struct tty_struct *tty, const char *routine) { #ifdef CHECK_TTY_COUNT struct list_head *p; int count = 0; spin_lock(&tty_files_lock); list_for_each(p, &tty->tty_files) { count++; } spin_unlock(&tty_files_lock); if (tty->driver->type == TTY_DRIVER_TYPE_PTY && tty->driver->subtype == PTY_TYPE_SLAVE && tty->link && tty->link->count) count++; if (tty->count != count) { tty_warn(tty, "%s: tty->count(%d) != #fd's(%d)\n", routine, tty->count, count); return count; } #endif return 0; } /** * get_tty_driver - find device of a tty * @dev_t: device identifier * @index: returns the index of the tty * * This routine returns a tty driver structure, given a device number * and also passes back the index number. * * Locking: caller must hold tty_mutex */ static struct tty_driver *get_tty_driver(dev_t device, int *index) { struct tty_driver *p; list_for_each_entry(p, &tty_drivers, tty_drivers) { dev_t base = MKDEV(p->major, p->minor_start); if (device < base || device >= base + p->num) continue; *index = device - base; return tty_driver_kref_get(p); } return NULL; } #ifdef CONFIG_CONSOLE_POLL /** * tty_find_polling_driver - find device of a polled tty * @name: name string to match * @line: pointer to resulting tty line nr * * This routine returns a tty driver structure, given a name * and the condition that the tty driver is capable of polled * operation. */ struct tty_driver *tty_find_polling_driver(char *name, int *line) { struct tty_driver *p, *res = NULL; int tty_line = 0; int len; char *str, *stp; for (str = name; *str; str++) if ((*str >= '0' && *str <= '9') || *str == ',') break; if (!*str) return NULL; len = str - name; tty_line = simple_strtoul(str, &str, 10); mutex_lock(&tty_mutex); /* Search through the tty devices to look for a match */ list_for_each_entry(p, &tty_drivers, tty_drivers) { if (strncmp(name, p->name, len) != 0) continue; stp = str; if (*stp == ',') stp++; if (*stp == '\0') stp = NULL; if (tty_line >= 0 && tty_line < p->num && p->ops && p->ops->poll_init && !p->ops->poll_init(p, tty_line, stp)) { res = tty_driver_kref_get(p); *line = tty_line; break; } } mutex_unlock(&tty_mutex); return res; } EXPORT_SYMBOL_GPL(tty_find_polling_driver); #endif /** * tty_check_change - check for POSIX terminal changes * @tty: tty to check * * If we try to write to, or set the state of, a terminal and we're * not in the foreground, send a SIGTTOU. If the signal is blocked or * ignored, go ahead and perform the operation. 
(POSIX 7.2) * * Locking: ctrl_lock */ int __tty_check_change(struct tty_struct *tty, int sig) { unsigned long flags; struct pid *pgrp, *tty_pgrp; int ret = 0; if (current->signal->tty != tty) return 0; rcu_read_lock(); pgrp = task_pgrp(current); spin_lock_irqsave(&tty->ctrl_lock, flags); tty_pgrp = tty->pgrp; spin_unlock_irqrestore(&tty->ctrl_lock, flags); if (tty_pgrp && pgrp != tty->pgrp) { if (is_ignored(sig)) { if (sig == SIGTTIN) ret = -EIO; } else if (is_current_pgrp_orphaned()) ret = -EIO; else { kill_pgrp(pgrp, sig, 1); set_thread_flag(TIF_SIGPENDING); ret = -ERESTARTSYS; } } rcu_read_unlock(); if (!tty_pgrp) tty_warn(tty, "sig=%d, tty->pgrp == NULL!\n", sig); return ret; } int tty_check_change(struct tty_struct *tty) { return __tty_check_change(tty, SIGTTOU); } EXPORT_SYMBOL(tty_check_change); static ssize_t hung_up_tty_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { return 0; } static ssize_t hung_up_tty_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { return -EIO; } /* No kernel lock held - none needed ;) */ static unsigned int hung_up_tty_poll(struct file *filp, poll_table *wait) { return POLLIN | POLLOUT | POLLERR | POLLHUP | POLLRDNORM | POLLWRNORM; } static long hung_up_tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { return cmd == TIOCSPGRP ? -ENOTTY : -EIO; } static long hung_up_tty_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { return cmd == TIOCSPGRP ? -ENOTTY : -EIO; } static const struct file_operations tty_fops = { .llseek = no_llseek, .read = tty_read, .write = tty_write, .poll = tty_poll, .unlocked_ioctl = tty_ioctl, .compat_ioctl = tty_compat_ioctl, .open = tty_open, .release = tty_release, .fasync = tty_fasync, }; static const struct file_operations console_fops = { .llseek = no_llseek, .read = tty_read, .write = redirected_tty_write, .poll = tty_poll, .unlocked_ioctl = tty_ioctl, .compat_ioctl = tty_compat_ioctl, .open = tty_open, .release = tty_release, .fasync = tty_fasync, }; static const struct file_operations hung_up_tty_fops = { .llseek = no_llseek, .read = hung_up_tty_read, .write = hung_up_tty_write, .poll = hung_up_tty_poll, .unlocked_ioctl = hung_up_tty_ioctl, .compat_ioctl = hung_up_tty_compat_ioctl, .release = tty_release, }; static DEFINE_SPINLOCK(redirect_lock); static struct file *redirect; void proc_clear_tty(struct task_struct *p) { unsigned long flags; struct tty_struct *tty; spin_lock_irqsave(&p->sighand->siglock, flags); tty = p->signal->tty; p->signal->tty = NULL; spin_unlock_irqrestore(&p->sighand->siglock, flags); tty_kref_put(tty); } /** * proc_set_tty - set the controlling terminal * * Only callable by the session leader and only if it does not already have * a controlling terminal. 
* * Caller must hold: tty_lock() * a readlock on tasklist_lock * sighand lock */ static void __proc_set_tty(struct tty_struct *tty) { unsigned long flags; spin_lock_irqsave(&tty->ctrl_lock, flags); /* * The session and fg pgrp references will be non-NULL if * tiocsctty() is stealing the controlling tty */ put_pid(tty->session); put_pid(tty->pgrp); tty->pgrp = get_pid(task_pgrp(current)); spin_unlock_irqrestore(&tty->ctrl_lock, flags); tty->session = get_pid(task_session(current)); if (current->signal->tty) { tty_debug(tty, "current tty %s not NULL!!\n", current->signal->tty->name); tty_kref_put(current->signal->tty); } put_pid(current->signal->tty_old_pgrp); current->signal->tty = tty_kref_get(tty); current->signal->tty_old_pgrp = NULL; } static void proc_set_tty(struct tty_struct *tty) { spin_lock_irq(&current->sighand->siglock); __proc_set_tty(tty); spin_unlock_irq(&current->sighand->siglock); } struct tty_struct *get_current_tty(void) { struct tty_struct *tty; unsigned long flags; spin_lock_irqsave(&current->sighand->siglock, flags); tty = tty_kref_get(current->signal->tty); spin_unlock_irqrestore(&current->sighand->siglock, flags); return tty; } EXPORT_SYMBOL_GPL(get_current_tty); static void session_clear_tty(struct pid *session) { struct task_struct *p; do_each_pid_task(session, PIDTYPE_SID, p) { proc_clear_tty(p); } while_each_pid_task(session, PIDTYPE_SID, p); } /** * tty_wakeup - request more data * @tty: terminal * * Internal and external helper for wakeups of tty. This function * informs the line discipline if present that the driver is ready * to receive more output data. */ void tty_wakeup(struct tty_struct *tty) { struct tty_ldisc *ld; if (test_bit(TTY_DO_WRITE_WAKEUP, &tty->flags)) { ld = tty_ldisc_ref(tty); if (ld) { if (ld->ops->write_wakeup) ld->ops->write_wakeup(tty); tty_ldisc_deref(ld); } } wake_up_interruptible_poll(&tty->write_wait, POLLOUT); } EXPORT_SYMBOL_GPL(tty_wakeup); /** * tty_signal_session_leader - sends SIGHUP to session leader * @tty controlling tty * @exit_session if non-zero, signal all foreground group processes * * Send SIGHUP and SIGCONT to the session leader and its process group. * Optionally, signal all processes in the foreground process group. * * Returns the number of processes in the session with this tty * as their controlling terminal. This value is used to drop * tty references for those processes. */ static int tty_signal_session_leader(struct tty_struct *tty, int exit_session) { struct task_struct *p; int refs = 0; struct pid *tty_pgrp = NULL; read_lock(&tasklist_lock); if (tty->session) { do_each_pid_task(tty->session, PIDTYPE_SID, p) { spin_lock_irq(&p->sighand->siglock); if (p->signal->tty == tty) { p->signal->tty = NULL; /* We defer the dereferences outside fo the tasklist lock */ refs++; } if (!p->signal->leader) { spin_unlock_irq(&p->sighand->siglock); continue; } __group_send_sig_info(SIGHUP, SEND_SIG_PRIV, p); __group_send_sig_info(SIGCONT, SEND_SIG_PRIV, p); put_pid(p->signal->tty_old_pgrp); /* A noop */ spin_lock(&tty->ctrl_lock); tty_pgrp = get_pid(tty->pgrp); if (tty->pgrp) p->signal->tty_old_pgrp = get_pid(tty->pgrp); spin_unlock(&tty->ctrl_lock); spin_unlock_irq(&p->sighand->siglock); } while_each_pid_task(tty->session, PIDTYPE_SID, p); } read_unlock(&tasklist_lock); if (tty_pgrp) { if (exit_session) kill_pgrp(tty_pgrp, SIGHUP, exit_session); put_pid(tty_pgrp); } return refs; } /** * __tty_hangup - actual handler for hangup events * @work: tty device * * This can be called by a "kworker" kernel thread. 
That is process * synchronous but doesn't hold any locks, so we need to make sure we * have the appropriate locks for what we're doing. * * The hangup event clears any pending redirections onto the hung up * device. It ensures future writes will error and it does the needed * line discipline hangup and signal delivery. The tty object itself * remains intact. * * Locking: * BTM * redirect lock for undoing redirection * file list lock for manipulating list of ttys * tty_ldiscs_lock from called functions * termios_rwsem resetting termios data * tasklist_lock to walk task list for hangup event * ->siglock to protect ->signal/->sighand */ static void __tty_hangup(struct tty_struct *tty, int exit_session) { struct file *cons_filp = NULL; struct file *filp, *f = NULL; struct tty_file_private *priv; int closecount = 0, n; int refs; if (!tty) return; spin_lock(&redirect_lock); if (redirect && file_tty(redirect) == tty) { f = redirect; redirect = NULL; } spin_unlock(&redirect_lock); tty_lock(tty); if (test_bit(TTY_HUPPED, &tty->flags)) { tty_unlock(tty); return; } /* inuse_filps is protected by the single tty lock, this really needs to change if we want to flush the workqueue with the lock held */ check_tty_count(tty, "tty_hangup"); spin_lock(&tty_files_lock); /* This breaks for file handles being sent over AF_UNIX sockets ? */ list_for_each_entry(priv, &tty->tty_files, list) { filp = priv->file; if (filp->f_op->write == redirected_tty_write) cons_filp = filp; if (filp->f_op->write != tty_write) continue; closecount++; __tty_fasync(-1, filp, 0); /* can't block */ filp->f_op = &hung_up_tty_fops; } spin_unlock(&tty_files_lock); refs = tty_signal_session_leader(tty, exit_session); /* Account for the p->signal references we killed */ while (refs--) tty_kref_put(tty); tty_ldisc_hangup(tty); spin_lock_irq(&tty->ctrl_lock); clear_bit(TTY_THROTTLED, &tty->flags); clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); put_pid(tty->session); put_pid(tty->pgrp); tty->session = NULL; tty->pgrp = NULL; tty->ctrl_status = 0; spin_unlock_irq(&tty->ctrl_lock); /* * If one of the devices matches a console pointer, we * cannot just call hangup() because that will cause * tty->count and state->count to go out of sync. * So we just call close() the right number of times. */ if (cons_filp) { if (tty->ops->close) for (n = 0; n < closecount; n++) tty->ops->close(tty, cons_filp); } else if (tty->ops->hangup) tty->ops->hangup(tty); /* * We don't want to have driver/ldisc interactions beyond * the ones we did here. The driver layer expects no * calls after ->hangup() from the ldisc side. However we * can't yet guarantee all that. */ set_bit(TTY_HUPPED, &tty->flags); tty_unlock(tty); if (f) fput(f); } static void do_tty_hangup(struct work_struct *work) { struct tty_struct *tty = container_of(work, struct tty_struct, hangup_work); __tty_hangup(tty, 0); } /** * tty_hangup - trigger a hangup event * @tty: tty to hangup * * A carrier loss (virtual or otherwise) has occurred on this like * schedule a hangup sequence to run after this event. */ void tty_hangup(struct tty_struct *tty) { tty_debug_hangup(tty, "hangup\n"); schedule_work(&tty->hangup_work); } EXPORT_SYMBOL(tty_hangup); /** * tty_vhangup - process vhangup * @tty: tty to hangup * * The user has asked via system call for the terminal to be hung up. * We do this synchronously so that when the syscall returns the process * is complete. That guarantee is necessary for security reasons. 
*/ void tty_vhangup(struct tty_struct *tty) { tty_debug_hangup(tty, "vhangup\n"); __tty_hangup(tty, 0); } EXPORT_SYMBOL(tty_vhangup); /** * tty_vhangup_self - process vhangup for own ctty * * Perform a vhangup on the current controlling tty */ void tty_vhangup_self(void) { struct tty_struct *tty; tty = get_current_tty(); if (tty) { tty_vhangup(tty); tty_kref_put(tty); } } /** * tty_vhangup_session - hangup session leader exit * @tty: tty to hangup * * The session leader is exiting and hanging up its controlling terminal. * Every process in the foreground process group is signalled SIGHUP. * * We do this synchronously so that when the syscall returns the process * is complete. That guarantee is necessary for security reasons. */ static void tty_vhangup_session(struct tty_struct *tty) { tty_debug_hangup(tty, "session hangup\n"); __tty_hangup(tty, 1); } /** * tty_hung_up_p - was tty hung up * @filp: file pointer of tty * * Return true if the tty has been subject to a vhangup or a carrier * loss */ int tty_hung_up_p(struct file *filp) { return (filp->f_op == &hung_up_tty_fops); } EXPORT_SYMBOL(tty_hung_up_p); /** * disassociate_ctty - disconnect controlling tty * @on_exit: true if exiting so need to "hang up" the session * * This function is typically called only by the session leader, when * it wants to disassociate itself from its controlling tty. * * It performs the following functions: * (1) Sends a SIGHUP and SIGCONT to the foreground process group * (2) Clears the tty from being controlling the session * (3) Clears the controlling tty for all processes in the * session group. * * The argument on_exit is set to 1 if called when a process is * exiting; it is 0 if called by the ioctl TIOCNOTTY. * * Locking: * BTM is taken for hysterical raisins, and held when * called from no_tty(). * tty_mutex is taken to protect tty * ->siglock is taken to protect ->signal/->sighand * tasklist_lock is taken to walk process list for sessions * ->siglock is taken to protect ->signal/->sighand */ void disassociate_ctty(int on_exit) { struct tty_struct *tty; if (!current->signal->leader) return; tty = get_current_tty(); if (tty) { if (on_exit && tty->driver->type != TTY_DRIVER_TYPE_PTY) { tty_vhangup_session(tty); } else { struct pid *tty_pgrp = tty_get_pgrp(tty); if (tty_pgrp) { kill_pgrp(tty_pgrp, SIGHUP, on_exit); if (!on_exit) kill_pgrp(tty_pgrp, SIGCONT, on_exit); put_pid(tty_pgrp); } } tty_kref_put(tty); } else if (on_exit) { struct pid *old_pgrp; spin_lock_irq(&current->sighand->siglock); old_pgrp = current->signal->tty_old_pgrp; current->signal->tty_old_pgrp = NULL; spin_unlock_irq(&current->sighand->siglock); if (old_pgrp) { kill_pgrp(old_pgrp, SIGHUP, on_exit); kill_pgrp(old_pgrp, SIGCONT, on_exit); put_pid(old_pgrp); } return; } spin_lock_irq(&current->sighand->siglock); put_pid(current->signal->tty_old_pgrp); current->signal->tty_old_pgrp = NULL; tty = tty_kref_get(current->signal->tty); if (tty) { unsigned long flags; spin_lock_irqsave(&tty->ctrl_lock, flags); put_pid(tty->session); put_pid(tty->pgrp); tty->session = NULL; tty->pgrp = NULL; spin_unlock_irqrestore(&tty->ctrl_lock, flags); tty_kref_put(tty); } else tty_debug_hangup(tty, "no current tty\n"); spin_unlock_irq(&current->sighand->siglock); /* Now clear signal->tty under the lock */ read_lock(&tasklist_lock); session_clear_tty(task_session(current)); read_unlock(&tasklist_lock); } /** * * no_tty - Ensure the current process does not have a controlling tty */ void no_tty(void) { /* FIXME: Review locking here. 
The tty_lock never covered any race between a new association and proc_clear_tty but possible we need to protect against this anyway */ struct task_struct *tsk = current; disassociate_ctty(0); proc_clear_tty(tsk); } /** * stop_tty - propagate flow control * @tty: tty to stop * * Perform flow control to the driver. May be called * on an already stopped device and will not re-call the driver * method. * * This functionality is used by both the line disciplines for * halting incoming flow and by the driver. It may therefore be * called from any context, may be under the tty atomic_write_lock * but not always. * * Locking: * flow_lock */ void __stop_tty(struct tty_struct *tty) { if (tty->stopped) return; tty->stopped = 1; if (tty->ops->stop) tty->ops->stop(tty); } void stop_tty(struct tty_struct *tty) { unsigned long flags; spin_lock_irqsave(&tty->flow_lock, flags); __stop_tty(tty); spin_unlock_irqrestore(&tty->flow_lock, flags); } EXPORT_SYMBOL(stop_tty); /** * start_tty - propagate flow control * @tty: tty to start * * Start a tty that has been stopped if at all possible. If this * tty was previous stopped and is now being started, the driver * start method is invoked and the line discipline woken. * * Locking: * flow_lock */ void __start_tty(struct tty_struct *tty) { if (!tty->stopped || tty->flow_stopped) return; tty->stopped = 0; if (tty->ops->start) tty->ops->start(tty); tty_wakeup(tty); } void start_tty(struct tty_struct *tty) { unsigned long flags; spin_lock_irqsave(&tty->flow_lock, flags); __start_tty(tty); spin_unlock_irqrestore(&tty->flow_lock, flags); } EXPORT_SYMBOL(start_tty); static void tty_update_time(struct timespec *time) { unsigned long sec = get_seconds(); /* * We only care if the two values differ in anything other than the * lower three bits (i.e every 8 seconds). If so, then we can update * the time of the tty device, otherwise it could be construded as a * security leak to let userspace know the exact timing of the tty. */ if ((sec ^ time->tv_sec) & ~7) time->tv_sec = sec; } /** * tty_read - read method for tty device files * @file: pointer to tty file * @buf: user buffer * @count: size of user buffer * @ppos: unused * * Perform the read system call function on this terminal device. Checks * for hung up devices before calling the line discipline method. * * Locking: * Locks the line discipline internally while needed. Multiple * read calls may be outstanding in parallel. 
*/ static ssize_t tty_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { int i; struct inode *inode = file_inode(file); struct tty_struct *tty = file_tty(file); struct tty_ldisc *ld; if (tty_paranoia_check(tty, inode, "tty_read")) return -EIO; if (!tty || (test_bit(TTY_IO_ERROR, &tty->flags))) return -EIO; /* We want to wait for the line discipline to sort out in this situation */ ld = tty_ldisc_ref_wait(tty); if (ld->ops->read) i = ld->ops->read(tty, file, buf, count); else i = -EIO; tty_ldisc_deref(ld); if (i > 0) tty_update_time(&inode->i_atime); return i; } static void tty_write_unlock(struct tty_struct *tty) { mutex_unlock(&tty->atomic_write_lock); wake_up_interruptible_poll(&tty->write_wait, POLLOUT); } static int tty_write_lock(struct tty_struct *tty, int ndelay) { if (!mutex_trylock(&tty->atomic_write_lock)) { if (ndelay) return -EAGAIN; if (mutex_lock_interruptible(&tty->atomic_write_lock)) return -ERESTARTSYS; } return 0; } /* * Split writes up in sane blocksizes to avoid * denial-of-service type attacks */ static inline ssize_t do_tty_write( ssize_t (*write)(struct tty_struct *, struct file *, const unsigned char *, size_t), struct tty_struct *tty, struct file *file, const char __user *buf, size_t count) { ssize_t ret, written = 0; unsigned int chunk; ret = tty_write_lock(tty, file->f_flags & O_NDELAY); if (ret < 0) return ret; /* * We chunk up writes into a temporary buffer. This * simplifies low-level drivers immensely, since they * don't have locking issues and user mode accesses. * * But if TTY_NO_WRITE_SPLIT is set, we should use a * big chunk-size.. * * The default chunk-size is 2kB, because the NTTY * layer has problems with bigger chunks. It will * claim to be able to handle more characters than * it actually does. * * FIXME: This can probably go away now except that 64K chunks * are too likely to fail unless switched to vmalloc... */ chunk = 2048; if (test_bit(TTY_NO_WRITE_SPLIT, &tty->flags)) chunk = 65536; if (count < chunk) chunk = count; /* write_buf/write_cnt is protected by the atomic_write_lock mutex */ if (tty->write_cnt < chunk) { unsigned char *buf_chunk; if (chunk < 1024) chunk = 1024; buf_chunk = kmalloc(chunk, GFP_KERNEL); if (!buf_chunk) { ret = -ENOMEM; goto out; } kfree(tty->write_buf); tty->write_cnt = chunk; tty->write_buf = buf_chunk; } /* Do the write .. */ for (;;) { size_t size = count; if (size > chunk) size = chunk; ret = -EFAULT; if (copy_from_user(tty->write_buf, buf, size)) break; ret = write(tty, file, tty->write_buf, size); if (ret <= 0) break; written += ret; buf += ret; count -= ret; if (!count) break; ret = -ERESTARTSYS; if (signal_pending(current)) break; cond_resched(); } if (written) { tty_update_time(&file_inode(file)->i_mtime); ret = written; } out: tty_write_unlock(tty); return ret; } /** * tty_write_message - write a message to a certain tty, not just the console. * @tty: the destination tty_struct * @msg: the message to write * * This is used for messages that need to be redirected to a specific tty. * We don't put it into the syslog queue right now maybe in the future if * really needed. * * We must still hold the BTM and test the CLOSING flag for the moment. 
*/ void tty_write_message(struct tty_struct *tty, char *msg) { if (tty) { mutex_lock(&tty->atomic_write_lock); tty_lock(tty); if (tty->ops->write && tty->count > 0) tty->ops->write(tty, msg, strlen(msg)); tty_unlock(tty); tty_write_unlock(tty); } return; } /** * tty_write - write method for tty device file * @file: tty file pointer * @buf: user data to write * @count: bytes to write * @ppos: unused * * Write data to a tty device via the line discipline. * * Locking: * Locks the line discipline as required * Writes to the tty driver are serialized by the atomic_write_lock * and are then processed in chunks to the device. The line discipline * write method will not be invoked in parallel for each device. */ static ssize_t tty_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { struct tty_struct *tty = file_tty(file); struct tty_ldisc *ld; ssize_t ret; if (tty_paranoia_check(tty, file_inode(file), "tty_write")) return -EIO; if (!tty || !tty->ops->write || (test_bit(TTY_IO_ERROR, &tty->flags))) return -EIO; /* Short term debug to catch buggy drivers */ if (tty->ops->write_room == NULL) tty_err(tty, "missing write_room method\n"); ld = tty_ldisc_ref_wait(tty); if (!ld->ops->write) ret = -EIO; else ret = do_tty_write(ld->ops->write, tty, file, buf, count); tty_ldisc_deref(ld); return ret; } ssize_t redirected_tty_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { struct file *p = NULL; spin_lock(&redirect_lock); if (redirect) p = get_file(redirect); spin_unlock(&redirect_lock); if (p) { ssize_t res; res = vfs_write(p, buf, count, &p->f_pos); fput(p); return res; } return tty_write(file, buf, count, ppos); } /** * tty_send_xchar - send priority character * * Send a high priority character to the tty even if stopped * * Locking: none for xchar method, write ordering for write method. */ int tty_send_xchar(struct tty_struct *tty, char ch) { int was_stopped = tty->stopped; if (tty->ops->send_xchar) { down_read(&tty->termios_rwsem); tty->ops->send_xchar(tty, ch); up_read(&tty->termios_rwsem); return 0; } if (tty_write_lock(tty, 0) < 0) return -ERESTARTSYS; down_read(&tty->termios_rwsem); if (was_stopped) start_tty(tty); tty->ops->write(tty, &ch, 1); if (was_stopped) stop_tty(tty); up_read(&tty->termios_rwsem); tty_write_unlock(tty); return 0; } static char ptychar[] = "pqrstuvwxyzabcde"; /** * pty_line_name - generate name for a pty * @driver: the tty driver in use * @index: the minor number * @p: output buffer of at least 6 bytes * * Generate a name from a driver reference and write it to the output * buffer. * * Locking: None */ static void pty_line_name(struct tty_driver *driver, int index, char *p) { int i = index + driver->name_base; /* ->name is initialized to "ttyp", but "tty" is expected */ sprintf(p, "%s%c%x", driver->subtype == PTY_TYPE_SLAVE ? "tty" : driver->name, ptychar[i >> 4 & 0xf], i & 0xf); } /** * tty_line_name - generate name for a tty * @driver: the tty driver in use * @index: the minor number * @p: output buffer of at least 7 bytes * * Generate a name from a driver reference and write it to the output * buffer. * * Locking: None */ static ssize_t tty_line_name(struct tty_driver *driver, int index, char *p) { if (driver->flags & TTY_DRIVER_UNNUMBERED_NODE) return sprintf(p, "%s", driver->name); else return sprintf(p, "%s%d", driver->name, index + driver->name_base); } /** * tty_driver_lookup_tty() - find an existing tty, if any * @driver: the driver for the tty * @idx: the minor number * * Return the tty, if found. 
If not found, return NULL or ERR_PTR() if the * driver lookup() method returns an error. * * Locking: tty_mutex must be held. If the tty is found, bump the tty kref. */ static struct tty_struct *tty_driver_lookup_tty(struct tty_driver *driver, struct inode *inode, int idx) { struct tty_struct *tty; if (driver->ops->lookup) tty = driver->ops->lookup(driver, inode, idx); else tty = driver->ttys[idx]; if (!IS_ERR(tty)) tty_kref_get(tty); return tty; } /** * tty_init_termios - helper for termios setup * @tty: the tty to set up * * Initialise the termios structures for this tty. Thus runs under * the tty_mutex currently so we can be relaxed about ordering. */ int tty_init_termios(struct tty_struct *tty) { struct ktermios *tp; int idx = tty->index; if (tty->driver->flags & TTY_DRIVER_RESET_TERMIOS) tty->termios = tty->driver->init_termios; else { /* Check for lazy saved data */ tp = tty->driver->termios[idx]; if (tp != NULL) tty->termios = *tp; else tty->termios = tty->driver->init_termios; } /* Compatibility until drivers always set this */ tty->termios.c_ispeed = tty_termios_input_baud_rate(&tty->termios); tty->termios.c_ospeed = tty_termios_baud_rate(&tty->termios); return 0; } EXPORT_SYMBOL_GPL(tty_init_termios); int tty_standard_install(struct tty_driver *driver, struct tty_struct *tty) { int ret = tty_init_termios(tty); if (ret) return ret; tty_driver_kref_get(driver); tty->count++; driver->ttys[tty->index] = tty; return 0; } EXPORT_SYMBOL_GPL(tty_standard_install); /** * tty_driver_install_tty() - install a tty entry in the driver * @driver: the driver for the tty * @tty: the tty * * Install a tty object into the driver tables. The tty->index field * will be set by the time this is called. This method is responsible * for ensuring any need additional structures are allocated and * configured. * * Locking: tty_mutex for now */ static int tty_driver_install_tty(struct tty_driver *driver, struct tty_struct *tty) { return driver->ops->install ? driver->ops->install(driver, tty) : tty_standard_install(driver, tty); } /** * tty_driver_remove_tty() - remove a tty from the driver tables * @driver: the driver for the tty * @idx: the minor number * * Remvoe a tty object from the driver tables. The tty->index field * will be set by the time this is called. * * Locking: tty_mutex for now */ void tty_driver_remove_tty(struct tty_driver *driver, struct tty_struct *tty) { if (driver->ops->remove) driver->ops->remove(driver, tty); else driver->ttys[tty->index] = NULL; } /* * tty_reopen() - fast re-open of an open tty * @tty - the tty to open * * Return 0 on success, -errno on error. * Re-opens on master ptys are not allowed and return -EIO. * * Locking: Caller must hold tty_lock */ static int tty_reopen(struct tty_struct *tty) { struct tty_driver *driver = tty->driver; if (driver->type == TTY_DRIVER_TYPE_PTY && driver->subtype == PTY_TYPE_MASTER) return -EIO; if (!tty->count) return -EAGAIN; if (test_bit(TTY_EXCLUSIVE, &tty->flags) && !capable(CAP_SYS_ADMIN)) return -EBUSY; tty->count++; WARN_ON(!tty->ldisc); return 0; } /** * tty_init_dev - initialise a tty device * @driver: tty driver we are opening a device on * @idx: device index * @ret_tty: returned tty structure * * Prepare a tty device. This may not be a "new" clean device but * could also be an active device. The pty drivers require special * handling because of this. * * Locking: * The function is called under the tty_mutex, which * protects us from the tty struct or driver itself going away. 
* * On exit the tty device has the line discipline attached and * a reference count of 1. If a pair was created for pty/tty use * and the other was a pty master then it too has a reference count of 1. * * WSH 06/09/97: Rewritten to remove races and properly clean up after a * failed open. The new code protects the open with a mutex, so it's * really quite straightforward. The mutex locking can probably be * relaxed for the (most common) case of reopening a tty. */ struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx) { struct tty_struct *tty; int retval; /* * First time open is complex, especially for PTY devices. * This code guarantees that either everything succeeds and the * TTY is ready for operation, or else the table slots are vacated * and the allocated memory released. (Except that the termios * and locked termios may be retained.) */ if (!try_module_get(driver->owner)) return ERR_PTR(-ENODEV); tty = alloc_tty_struct(driver, idx); if (!tty) { retval = -ENOMEM; goto err_module_put; } tty_lock(tty); retval = tty_driver_install_tty(driver, tty); if (retval < 0) goto err_deinit_tty; if (!tty->port) tty->port = driver->ports[idx]; WARN_RATELIMIT(!tty->port, "%s: %s driver does not set tty->port. This will crash the kernel later. Fix the driver!\n", __func__, tty->driver->name); tty->port->itty = tty; /* * Structures all installed ... call the ldisc open routines. * If we fail here just call release_tty to clean up. No need * to decrement the use counts, as release_tty doesn't care. */ retval = tty_ldisc_setup(tty, tty->link); if (retval) goto err_release_tty; /* Return the tty locked so that it cannot vanish under the caller */ return tty; err_deinit_tty: tty_unlock(tty); deinitialize_tty_struct(tty); free_tty_struct(tty); err_module_put: module_put(driver->owner); return ERR_PTR(retval); /* call the tty release_tty routine to clean out this slot */ err_release_tty: tty_unlock(tty); tty_info_ratelimited(tty, "ldisc open failed (%d), clearing slot %d\n", retval, idx); release_tty(tty, idx); return ERR_PTR(retval); } void tty_free_termios(struct tty_struct *tty) { struct ktermios *tp; int idx = tty->index; /* If the port is going to reset then it has no termios to save */ if (tty->driver->flags & TTY_DRIVER_RESET_TERMIOS) return; /* Stash the termios data */ tp = tty->driver->termios[idx]; if (tp == NULL) { tp = kmalloc(sizeof(struct ktermios), GFP_KERNEL); if (tp == NULL) return; tty->driver->termios[idx] = tp; } *tp = tty->termios; } EXPORT_SYMBOL(tty_free_termios); /** * tty_flush_works - flush all works of a tty/pty pair * @tty: tty device to flush works for (or either end of a pty pair) * * Sync flush all works belonging to @tty (and the 'other' tty). */ static void tty_flush_works(struct tty_struct *tty) { flush_work(&tty->SAK_work); flush_work(&tty->hangup_work); if (tty->link) { flush_work(&tty->link->SAK_work); flush_work(&tty->link->hangup_work); } } /** * release_one_tty - release tty structure memory * @kref: kref of tty we are obliterating * * Releases memory associated with a tty structure, and clears out the * driver table slots. This function is called when a device is no longer * in use. It also gets called when setup of a device fails. * * Locking: * takes the file list lock internally when working on the list * of ttys that the driver keeps. 
* * This method gets called from a work queue so that the driver private * cleanup ops can sleep (needed for USB at least) */ static void release_one_tty(struct work_struct *work) { struct tty_struct *tty = container_of(work, struct tty_struct, hangup_work); struct tty_driver *driver = tty->driver; struct module *owner = driver->owner; if (tty->ops->cleanup) tty->ops->cleanup(tty); tty->magic = 0; tty_driver_kref_put(driver); module_put(owner); spin_lock(&tty_files_lock); list_del_init(&tty->tty_files); spin_unlock(&tty_files_lock); put_pid(tty->pgrp); put_pid(tty->session); free_tty_struct(tty); } static void queue_release_one_tty(struct kref *kref) { struct tty_struct *tty = container_of(kref, struct tty_struct, kref); /* The hangup queue is now free so we can reuse it rather than waste a chunk of memory for each port */ INIT_WORK(&tty->hangup_work, release_one_tty); schedule_work(&tty->hangup_work); } /** * tty_kref_put - release a tty kref * @tty: tty device * * Release a reference to a tty device and if need be let the kref * layer destruct the object for us */ void tty_kref_put(struct tty_struct *tty) { if (tty) kref_put(&tty->kref, queue_release_one_tty); } EXPORT_SYMBOL(tty_kref_put); /** * release_tty - release tty structure memory * * Release both @tty and a possible linked partner (think pty pair), * and decrement the refcount of the backing module. * * Locking: * tty_mutex * takes the file list lock internally when working on the list * of ttys that the driver keeps. * */ static void release_tty(struct tty_struct *tty, int idx) { /* This should always be true but check for the moment */ WARN_ON(tty->index != idx); WARN_ON(!mutex_is_locked(&tty_mutex)); if (tty->ops->shutdown) tty->ops->shutdown(tty); tty_free_termios(tty); tty_driver_remove_tty(tty->driver, tty); tty->port->itty = NULL; if (tty->link) tty->link->port->itty = NULL; tty_buffer_cancel_work(tty->port); tty_kref_put(tty->link); tty_kref_put(tty); } /** * tty_release_checks - check a tty before real release * @tty: tty to check * @o_tty: link of @tty (if any) * @idx: index of the tty * * Performs some paranoid checking before true release of the @tty. * This is a no-op unless TTY_PARANOIA_CHECK is defined. */ static int tty_release_checks(struct tty_struct *tty, int idx) { #ifdef TTY_PARANOIA_CHECK if (idx < 0 || idx >= tty->driver->num) { tty_debug(tty, "bad idx %d\n", idx); return -1; } /* not much to check for devpts */ if (tty->driver->flags & TTY_DRIVER_DEVPTS_MEM) return 0; if (tty != tty->driver->ttys[idx]) { tty_debug(tty, "bad driver table[%d] = %p\n", idx, tty->driver->ttys[idx]); return -1; } if (tty->driver->other) { struct tty_struct *o_tty = tty->link; if (o_tty != tty->driver->other->ttys[idx]) { tty_debug(tty, "bad other table[%d] = %p\n", idx, tty->driver->other->ttys[idx]); return -1; } if (o_tty->link != tty) { tty_debug(tty, "bad link = %p\n", o_tty->link); return -1; } } #endif return 0; } /** * tty_release - vfs callback for close * @inode: inode of tty * @filp: file pointer for handle to tty * * Called the last time each file handle is closed that references * this tty. There may however be several such references. * * Locking: * Takes bkl. See tty_release_dev * * Even releasing the tty structures is a tricky business.. We have * to be very careful that the structures are all released at the * same time, as interrupts might otherwise get the wrong pointers. * * WSH 09/09/97: rewritten to avoid some nasty race conditions that could * lead to double frees or releasing memory still in use. 
*/ int tty_release(struct inode *inode, struct file *filp) { struct tty_struct *tty = file_tty(filp); struct tty_struct *o_tty = NULL; int do_sleep, final; int idx; long timeout = 0; int once = 1; if (tty_paranoia_check(tty, inode, __func__)) return 0; tty_lock(tty); check_tty_count(tty, __func__); __tty_fasync(-1, filp, 0); idx = tty->index; if (tty->driver->type == TTY_DRIVER_TYPE_PTY && tty->driver->subtype == PTY_TYPE_MASTER) o_tty = tty->link; if (tty_release_checks(tty, idx)) { tty_unlock(tty); return 0; } tty_debug_hangup(tty, "releasing (count=%d)\n", tty->count); if (tty->ops->close) tty->ops->close(tty, filp); /* If tty is pty master, lock the slave pty (stable lock order) */ tty_lock_slave(o_tty); /* * Sanity check: if tty->count is going to zero, there shouldn't be * any waiters on tty->read_wait or tty->write_wait. We test the * wait queues and kick everyone out _before_ actually starting to * close. This ensures that we won't block while releasing the tty * structure. * * The test for the o_tty closing is necessary, since the master and * slave sides may close in any order. If the slave side closes out * first, its count will be one, since the master side holds an open. * Thus this test wouldn't be triggered at the time the slave closed, * so we do it now. */ while (1) { do_sleep = 0; if (tty->count <= 1) { if (waitqueue_active(&tty->read_wait)) { wake_up_poll(&tty->read_wait, POLLIN); do_sleep++; } if (waitqueue_active(&tty->write_wait)) { wake_up_poll(&tty->write_wait, POLLOUT); do_sleep++; } } if (o_tty && o_tty->count <= 1) { if (waitqueue_active(&o_tty->read_wait)) { wake_up_poll(&o_tty->read_wait, POLLIN); do_sleep++; } if (waitqueue_active(&o_tty->write_wait)) { wake_up_poll(&o_tty->write_wait, POLLOUT); do_sleep++; } } if (!do_sleep) break; if (once) { once = 0; tty_warn(tty, "read/write wait queue active!\n"); } schedule_timeout_killable(timeout); if (timeout < 120 * HZ) timeout = 2 * timeout + 1; else timeout = MAX_SCHEDULE_TIMEOUT; } if (o_tty) { if (--o_tty->count < 0) { tty_warn(tty, "bad slave count (%d)\n", o_tty->count); o_tty->count = 0; } } if (--tty->count < 0) { tty_warn(tty, "bad tty->count (%d)\n", tty->count); tty->count = 0; } /* * We've decremented tty->count, so we need to remove this file * descriptor off the tty->tty_files list; this serves two * purposes: * - check_tty_count sees the correct number of file descriptors * associated with this tty. * - do_tty_hangup no longer sees this file descriptor as * something that needs to be handled for hangups. */ tty_del_file(filp); /* * Perform some housekeeping before deciding whether to return. * * If _either_ side is closing, make sure there aren't any * processes that still think tty or o_tty is their controlling * tty. */ if (!tty->count) { read_lock(&tasklist_lock); session_clear_tty(tty->session); if (o_tty) session_clear_tty(o_tty->session); read_unlock(&tasklist_lock); } /* check whether both sides are closing ... 
*/ final = !tty->count && !(o_tty && o_tty->count); tty_unlock_slave(o_tty); tty_unlock(tty); /* At this point, the tty->count == 0 should ensure a dead tty cannot be re-opened by a racing opener */ if (!final) return 0; tty_debug_hangup(tty, "final close\n"); /* * Ask the line discipline code to release its structures */ tty_ldisc_release(tty); /* Wait for pending work before tty destruction commmences */ tty_flush_works(tty); tty_debug_hangup(tty, "freeing structure\n"); /* * The release_tty function takes care of the details of clearing * the slots and preserving the termios structure. The tty_unlock_pair * should be safe as we keep a kref while the tty is locked (so the * unlock never unlocks a freed tty). */ mutex_lock(&tty_mutex); release_tty(tty, idx); mutex_unlock(&tty_mutex); return 0; } /** * tty_open_current_tty - get locked tty of current task * @device: device number * @filp: file pointer to tty * @return: locked tty of the current task iff @device is /dev/tty * * Performs a re-open of the current task's controlling tty. * * We cannot return driver and index like for the other nodes because * devpts will not work then. It expects inodes to be from devpts FS. */ static struct tty_struct *tty_open_current_tty(dev_t device, struct file *filp) { struct tty_struct *tty; int retval; if (device != MKDEV(TTYAUX_MAJOR, 0)) return NULL; tty = get_current_tty(); if (!tty) return ERR_PTR(-ENXIO); filp->f_flags |= O_NONBLOCK; /* Don't let /dev/tty block */ /* noctty = 1; */ tty_lock(tty); tty_kref_put(tty); /* safe to drop the kref now */ retval = tty_reopen(tty); if (retval < 0) { tty_unlock(tty); tty = ERR_PTR(retval); } return tty; } /** * tty_lookup_driver - lookup a tty driver for a given device file * @device: device number * @filp: file pointer to tty * @noctty: set if the device should not become a controlling tty * @index: index for the device in the @return driver * @return: driver for this inode (with increased refcount) * * If @return is not erroneous, the caller is responsible to decrement the * refcount by tty_driver_kref_put. * * Locking: tty_mutex protects get_tty_driver */ static struct tty_driver *tty_lookup_driver(dev_t device, struct file *filp, int *noctty, int *index) { struct tty_driver *driver; switch (device) { #ifdef CONFIG_VT case MKDEV(TTY_MAJOR, 0): { extern struct tty_driver *console_driver; driver = tty_driver_kref_get(console_driver); *index = fg_console; *noctty = 1; break; } #endif case MKDEV(TTYAUX_MAJOR, 1): { struct tty_driver *console_driver = console_device(index); if (console_driver) { driver = tty_driver_kref_get(console_driver); if (driver) { /* Don't let /dev/console block */ filp->f_flags |= O_NONBLOCK; *noctty = 1; break; } } return ERR_PTR(-ENODEV); } default: driver = get_tty_driver(device, index); if (!driver) return ERR_PTR(-ENODEV); break; } return driver; } /** * tty_open - open a tty device * @inode: inode of device file * @filp: file pointer to tty * * tty_open and tty_release keep up the tty count that contains the * number of opens done on a tty. We cannot use the inode-count, as * different inodes might point to the same tty. * * Open-counting is needed for pty masters, as well as for keeping * track of serial lines: DTR is dropped when the last close happens. * (This is not done solely through tty->count, now. - Ted 1/27/92) * * The termios state of a pty is reset on first open so that * settings don't persist across reuse. * * Locking: tty_mutex protects tty, tty_lookup_driver and tty_init_dev. * tty->count should protect the rest. 
* ->siglock protects ->signal/->sighand * * Note: the tty_unlock/lock cases without a ref are only safe due to * tty_mutex */ static int tty_open(struct inode *inode, struct file *filp) { struct tty_struct *tty; int noctty, retval; struct tty_driver *driver = NULL; int index; dev_t device = inode->i_rdev; unsigned saved_flags = filp->f_flags; nonseekable_open(inode, filp); retry_open: retval = tty_alloc_file(filp); if (retval) return -ENOMEM; noctty = filp->f_flags & O_NOCTTY; index = -1; retval = 0; tty = tty_open_current_tty(device, filp); if (!tty) { mutex_lock(&tty_mutex); driver = tty_lookup_driver(device, filp, &noctty, &index); if (IS_ERR(driver)) { retval = PTR_ERR(driver); goto err_unlock; } /* check whether we're reopening an existing tty */ tty = tty_driver_lookup_tty(driver, inode, index); if (IS_ERR(tty)) { retval = PTR_ERR(tty); goto err_unlock; } if (tty) { mutex_unlock(&tty_mutex); retval = tty_lock_interruptible(tty); if (retval) { if (retval == -EINTR) retval = -ERESTARTSYS; goto err_unref; } /* safe to drop the kref from tty_driver_lookup_tty() */ tty_kref_put(tty); retval = tty_reopen(tty); if (retval < 0) { tty_unlock(tty); tty = ERR_PTR(retval); } } else { /* Returns with the tty_lock held for now */ tty = tty_init_dev(driver, index); mutex_unlock(&tty_mutex); } tty_driver_kref_put(driver); } if (IS_ERR(tty)) { retval = PTR_ERR(tty); if (retval != -EAGAIN || signal_pending(current)) goto err_file; tty_free_file(filp); schedule(); goto retry_open; } tty_add_file(tty, filp); check_tty_count(tty, __func__); if (tty->driver->type == TTY_DRIVER_TYPE_PTY && tty->driver->subtype == PTY_TYPE_MASTER) noctty = 1; tty_debug_hangup(tty, "opening (count=%d)\n", tty->count); if (tty->ops->open) retval = tty->ops->open(tty, filp); else retval = -ENODEV; filp->f_flags = saved_flags; if (retval) { tty_debug_hangup(tty, "open error %d, releasing\n", retval); tty_unlock(tty); /* need to call tty_release without BTM */ tty_release(inode, filp); if (retval != -ERESTARTSYS) return retval; if (signal_pending(current)) return retval; schedule(); /* * Need to reset f_op in case a hangup happened. */ if (tty_hung_up_p(filp)) filp->f_op = &tty_fops; goto retry_open; } clear_bit(TTY_HUPPED, &tty->flags); read_lock(&tasklist_lock); spin_lock_irq(&current->sighand->siglock); if (!noctty && current->signal->leader && !current->signal->tty && tty->session == NULL) { /* * Don't let a process that only has write access to the tty * obtain the privileges associated with having a tty as * controlling terminal (being able to reopen it with full * access through /dev/tty, being able to perform pushback). * Many distributions set the group of all ttys to "tty" and * grant write-only access to all terminals for setgid tty * binaries, which should not imply full privileges on all ttys. * * This could theoretically break old code that performs open() * on a write-only file descriptor. In that case, it might be * necessary to also permit this if * inode_permission(inode, MAY_READ) == 0. 
*/ if (filp->f_mode & FMODE_READ) __proc_set_tty(tty); } spin_unlock_irq(&current->sighand->siglock); read_unlock(&tasklist_lock); tty_unlock(tty); return 0; err_unlock: mutex_unlock(&tty_mutex); err_unref: /* after locks to avoid deadlock */ if (!IS_ERR_OR_NULL(driver)) tty_driver_kref_put(driver); err_file: tty_free_file(filp); return retval; } /** * tty_poll - check tty status * @filp: file being polled * @wait: poll wait structures to update * * Call the line discipline polling method to obtain the poll * status of the device. * * Locking: locks called line discipline but ldisc poll method * may be re-entered freely by other callers. */ static unsigned int tty_poll(struct file *filp, poll_table *wait) { struct tty_struct *tty = file_tty(filp); struct tty_ldisc *ld; int ret = 0; if (tty_paranoia_check(tty, file_inode(filp), "tty_poll")) return 0; ld = tty_ldisc_ref_wait(tty); if (ld->ops->poll) ret = ld->ops->poll(tty, filp, wait); tty_ldisc_deref(ld); return ret; } static int __tty_fasync(int fd, struct file *filp, int on) { struct tty_struct *tty = file_tty(filp); struct tty_ldisc *ldisc; unsigned long flags; int retval = 0; if (tty_paranoia_check(tty, file_inode(filp), "tty_fasync")) goto out; retval = fasync_helper(fd, filp, on, &tty->fasync); if (retval <= 0) goto out; ldisc = tty_ldisc_ref(tty); if (ldisc) { if (ldisc->ops->fasync) ldisc->ops->fasync(tty, on); tty_ldisc_deref(ldisc); } if (on) { enum pid_type type; struct pid *pid; spin_lock_irqsave(&tty->ctrl_lock, flags); if (tty->pgrp) { pid = tty->pgrp; type = PIDTYPE_PGID; } else { pid = task_pid(current); type = PIDTYPE_PID; } get_pid(pid); spin_unlock_irqrestore(&tty->ctrl_lock, flags); __f_setown(filp, pid, type, 0); put_pid(pid); retval = 0; } out: return retval; } static int tty_fasync(int fd, struct file *filp, int on) { struct tty_struct *tty = file_tty(filp); int retval; tty_lock(tty); retval = __tty_fasync(fd, filp, on); tty_unlock(tty); return retval; } /** * tiocsti - fake input character * @tty: tty to fake input into * @p: pointer to character * * Fake input to a tty device. Does the necessary locking and * input management. * * FIXME: does not honour flow control ?? * * Locking: * Called functions take tty_ldiscs_lock * current->signal->tty check is safe without locks * * FIXME: may race normal receive processing */ static int tiocsti(struct tty_struct *tty, char __user *p) { char ch, mbz = 0; struct tty_ldisc *ld; if ((current->signal->tty != tty) && !capable(CAP_SYS_ADMIN)) return -EPERM; if (get_user(ch, p)) return -EFAULT; tty_audit_tiocsti(tty, ch); ld = tty_ldisc_ref_wait(tty); ld->ops->receive_buf(tty, &ch, &mbz, 1); tty_ldisc_deref(ld); return 0; } /** * tiocgwinsz - implement window query ioctl * @tty; tty * @arg: user buffer for result * * Copies the kernel idea of the window size into the user buffer. * * Locking: tty->winsize_mutex is taken to ensure the winsize data * is consistent. */ static int tiocgwinsz(struct tty_struct *tty, struct winsize __user *arg) { int err; mutex_lock(&tty->winsize_mutex); err = copy_to_user(arg, &tty->winsize, sizeof(*arg)); mutex_unlock(&tty->winsize_mutex); return err ? 
-EFAULT: 0; } /** * tty_do_resize - resize event * @tty: tty being resized * @rows: rows (character) * @cols: cols (character) * * Update the termios variables and send the necessary signals to * perform a terminal resize correctly */ int tty_do_resize(struct tty_struct *tty, struct winsize *ws) { struct pid *pgrp; /* Lock the tty */ mutex_lock(&tty->winsize_mutex); if (!memcmp(ws, &tty->winsize, sizeof(*ws))) goto done; /* Signal the foreground process group */ pgrp = tty_get_pgrp(tty); if (pgrp) kill_pgrp(pgrp, SIGWINCH, 1); put_pid(pgrp); tty->winsize = *ws; done: mutex_unlock(&tty->winsize_mutex); return 0; } EXPORT_SYMBOL(tty_do_resize); /** * tiocswinsz - implement window size set ioctl * @tty: tty side of tty * @arg: user buffer for result * * Copies the user idea of the window size to the kernel. Traditionally * this is just advisory information but for the Linux console it * actually has driver level meaning and triggers a VC resize. * * Locking: * Driver dependent. The default do_resize method takes the * tty termios mutex and ctrl_lock. The console takes its own lock * then calls into the default method. */ static int tiocswinsz(struct tty_struct *tty, struct winsize __user *arg) { struct winsize tmp_ws; if (copy_from_user(&tmp_ws, arg, sizeof(*arg))) return -EFAULT; if (tty->ops->resize) return tty->ops->resize(tty, &tmp_ws); else return tty_do_resize(tty, &tmp_ws); } /** * tioccons - allow admin to move logical console * @file: the file to become console * * Allow the administrator to move the redirected console device * * Locking: uses redirect_lock to guard the redirect information */ static int tioccons(struct file *file) { if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (file->f_op->write == redirected_tty_write) { struct file *f; spin_lock(&redirect_lock); f = redirect; redirect = NULL; spin_unlock(&redirect_lock); if (f) fput(f); return 0; } spin_lock(&redirect_lock); if (redirect) { spin_unlock(&redirect_lock); return -EBUSY; } redirect = get_file(file); spin_unlock(&redirect_lock); return 0; } /** * fionbio - non blocking ioctl * @file: file to set blocking value * @p: user parameter * * Historical tty interfaces had a blocking control ioctl before * the generic functionality existed. This piece of history is preserved * in the expected tty API of posix OS's. * * Locking: none, the open file handle ensures it won't go away. */ static int fionbio(struct file *file, int __user *p) { int nonblock; if (get_user(nonblock, p)) return -EFAULT; spin_lock(&file->f_lock); if (nonblock) file->f_flags |= O_NONBLOCK; else file->f_flags &= ~O_NONBLOCK; spin_unlock(&file->f_lock); return 0; } /** * tiocsctty - set controlling tty * @tty: tty structure * @arg: user argument * * This ioctl is used to manage job control. It permits a session * leader to set this tty as the controlling tty for the session. * * Locking: * Takes tty_lock() to serialize proc_set_tty() for this tty * Takes tasklist_lock internally to walk sessions * Takes ->siglock() when updating signal->tty */ static int tiocsctty(struct tty_struct *tty, struct file *file, int arg) { int ret = 0; tty_lock(tty); read_lock(&tasklist_lock); if (current->signal->leader && (task_session(current) == tty->session)) goto unlock; /* * The process must be a session leader and * not have a controlling tty already. */ if (!current->signal->leader || current->signal->tty) { ret = -EPERM; goto unlock; } if (tty->session) { /* * This tty is already the controlling * tty for another session group!
*/ if (arg == 1 && capable(CAP_SYS_ADMIN)) { /* * Steal it away */ session_clear_tty(tty->session); } else { ret = -EPERM; goto unlock; } } /* See the comment in tty_open(). */ if ((file->f_mode & FMODE_READ) == 0 && !capable(CAP_SYS_ADMIN)) { ret = -EPERM; goto unlock; } proc_set_tty(tty); unlock: read_unlock(&tasklist_lock); tty_unlock(tty); return ret; } /** * tty_get_pgrp - return a ref counted pgrp pid * @tty: tty to read * * Returns a refcounted instance of the pid struct for the process * group controlling the tty. */ struct pid *tty_get_pgrp(struct tty_struct *tty) { unsigned long flags; struct pid *pgrp; spin_lock_irqsave(&tty->ctrl_lock, flags); pgrp = get_pid(tty->pgrp); spin_unlock_irqrestore(&tty->ctrl_lock, flags); return pgrp; } EXPORT_SYMBOL_GPL(tty_get_pgrp); /* * This checks not only the pgrp, but falls back on the pid if no * satisfactory pgrp is found. I dunno - gdb doesn't work correctly * without this... * * The caller must hold rcu lock or the tasklist lock. */ static struct pid *session_of_pgrp(struct pid *pgrp) { struct task_struct *p; struct pid *sid = NULL; p = pid_task(pgrp, PIDTYPE_PGID); if (p == NULL) p = pid_task(pgrp, PIDTYPE_PID); if (p != NULL) sid = task_session(p); return sid; } /** * tiocgpgrp - get process group * @tty: tty passed by user * @real_tty: tty side of the tty passed by the user if a pty else the tty * @p: returned pid * * Obtain the process group of the tty. If there is no process group * return an error. * * Locking: none. Reference to current->signal->tty is safe. */ static int tiocgpgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t __user *p) { struct pid *pid; int ret; /* * (tty == real_tty) is a cheap way of * testing if the tty is NOT a master pty. */ if (tty == real_tty && current->signal->tty != real_tty) return -ENOTTY; pid = tty_get_pgrp(real_tty); ret = put_user(pid_vnr(pid), p); put_pid(pid); return ret; } /** * tiocspgrp - attempt to set process group * @tty: tty passed by user * @real_tty: tty side device matching tty passed by user * @p: pid pointer * * Set the process group of the tty to the session passed. Only * permitted where the tty session is our session. * * Locking: RCU, ctrl lock */ static int tiocspgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t __user *p) { struct pid *pgrp; pid_t pgrp_nr; int retval = tty_check_change(real_tty); if (retval == -EIO) return -ENOTTY; if (retval) return retval; if (!current->signal->tty || (current->signal->tty != real_tty) || (real_tty->session != task_session(current))) return -ENOTTY; if (get_user(pgrp_nr, p)) return -EFAULT; if (pgrp_nr < 0) return -EINVAL; rcu_read_lock(); pgrp = find_vpid(pgrp_nr); retval = -ESRCH; if (!pgrp) goto out_unlock; retval = -EPERM; if (session_of_pgrp(pgrp) != task_session(current)) goto out_unlock; retval = 0; spin_lock_irq(&tty->ctrl_lock); put_pid(real_tty->pgrp); real_tty->pgrp = get_pid(pgrp); spin_unlock_irq(&tty->ctrl_lock); out_unlock: rcu_read_unlock(); return retval; } /** * tiocgsid - get session id * @tty: tty passed by user * @real_tty: tty side of the tty passed by the user if a pty else the tty * @p: pointer to returned session id * * Obtain the session id of the tty. If there is no session * return an error. * * Locking: none. Reference to current->signal->tty is safe. */ static int tiocgsid(struct tty_struct *tty, struct tty_struct *real_tty, pid_t __user *p) { /* * (tty == real_tty) is a cheap way of * testing if the tty is NOT a master pty. 
*/ if (tty == real_tty && current->signal->tty != real_tty) return -ENOTTY; if (!real_tty->session) return -ENOTTY; return put_user(pid_vnr(real_tty->session), p); } /** * tiocsetd - set line discipline * @tty: tty device * @p: pointer to user data * * Set the line discipline according to user request. * * Locking: see tty_set_ldisc, this function is just a helper */ static int tiocsetd(struct tty_struct *tty, int __user *p) { int ldisc; int ret; if (get_user(ldisc, p)) return -EFAULT; ret = tty_set_ldisc(tty, ldisc); return ret; } /** * tiocgetd - get line discipline * @tty: tty device * @p: pointer to user data * * Retrieves the line discipline id directly from the ldisc. * * Locking: waits for ldisc reference (in case the line discipline * is changing or the tty is being hung up) */ static int tiocgetd(struct tty_struct *tty, int __user *p) { struct tty_ldisc *ld; int ret; ld = tty_ldisc_ref_wait(tty); ret = put_user(ld->ops->num, p); tty_ldisc_deref(ld); return ret; } /** * send_break - perform a timed break * @tty: device to break on * @duration: timeout in ms * * Perform a timed break on hardware that lacks its own driver level * timed break functionality. * * Locking: * atomic_write_lock serializes * */ static int send_break(struct tty_struct *tty, unsigned int duration) { int retval; if (tty->ops->break_ctl == NULL) return 0; if (tty->driver->flags & TTY_DRIVER_HARDWARE_BREAK) retval = tty->ops->break_ctl(tty, duration); else { /* Do the work ourselves */ if (tty_write_lock(tty, 0) < 0) return -EINTR; retval = tty->ops->break_ctl(tty, -1); if (retval) goto out; if (!signal_pending(current)) msleep_interruptible(duration); retval = tty->ops->break_ctl(tty, 0); out: tty_write_unlock(tty); if (signal_pending(current)) retval = -EINTR; } return retval; } /** * tty_tiocmget - get modem status * @tty: tty device * @file: user file pointer * @p: pointer to result * * Obtain the modem status bits from the tty driver if the feature * is supported. Return -EINVAL if it is not available. * * Locking: none (up to the driver) */ static int tty_tiocmget(struct tty_struct *tty, int __user *p) { int retval = -EINVAL; if (tty->ops->tiocmget) { retval = tty->ops->tiocmget(tty); if (retval >= 0) retval = put_user(retval, p); } return retval; } /** * tty_tiocmset - set modem status * @tty: tty device * @cmd: command - clear bits, set bits or set all * @p: pointer to desired bits * * Set the modem status bits from the tty driver if the feature * is supported. Return -EINVAL if it is not available.
* * Locking: none (up to the driver) */ static int tty_tiocmset(struct tty_struct *tty, unsigned int cmd, unsigned __user *p) { int retval; unsigned int set, clear, val; if (tty->ops->tiocmset == NULL) return -EINVAL; retval = get_user(val, p); if (retval) return retval; set = clear = 0; switch (cmd) { case TIOCMBIS: set = val; break; case TIOCMBIC: clear = val; break; case TIOCMSET: set = val; clear = ~val; break; } set &= TIOCM_DTR|TIOCM_RTS|TIOCM_OUT1|TIOCM_OUT2|TIOCM_LOOP; clear &= TIOCM_DTR|TIOCM_RTS|TIOCM_OUT1|TIOCM_OUT2|TIOCM_LOOP; return tty->ops->tiocmset(tty, set, clear); } static int tty_tiocgicount(struct tty_struct *tty, void __user *arg) { int retval = -EINVAL; struct serial_icounter_struct icount; memset(&icount, 0, sizeof(icount)); if (tty->ops->get_icount) retval = tty->ops->get_icount(tty, &icount); if (retval != 0) return retval; if (copy_to_user(arg, &icount, sizeof(icount))) return -EFAULT; return 0; } static void tty_warn_deprecated_flags(struct serial_struct __user *ss) { static DEFINE_RATELIMIT_STATE(depr_flags, DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST); char comm[TASK_COMM_LEN]; int flags; if (get_user(flags, &ss->flags)) return; flags &= ASYNC_DEPRECATED; if (flags && __ratelimit(&depr_flags)) pr_warning("%s: '%s' is using deprecated serial flags (with no effect): %.8x\n", __func__, get_task_comm(comm, current), flags); } /* * if pty, return the slave side (real_tty) * otherwise, return self */ static struct tty_struct *tty_pair_get_tty(struct tty_struct *tty) { if (tty->driver->type == TTY_DRIVER_TYPE_PTY && tty->driver->subtype == PTY_TYPE_MASTER) tty = tty->link; return tty; } /* * Split this up, as gcc can choke on it otherwise.. */ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct tty_struct *tty = file_tty(file); struct tty_struct *real_tty; void __user *p = (void __user *)arg; int retval; struct tty_ldisc *ld; if (tty_paranoia_check(tty, file_inode(file), "tty_ioctl")) return -EINVAL; real_tty = tty_pair_get_tty(tty); /* * Factor out some common prep work */ switch (cmd) { case TIOCSETD: case TIOCSBRK: case TIOCCBRK: case TCSBRK: case TCSBRKP: retval = tty_check_change(tty); if (retval) return retval; if (cmd != TIOCCBRK) { tty_wait_until_sent(tty, 0); if (signal_pending(current)) return -EINTR; } break; } /* * Now do the stuff. */ switch (cmd) { case TIOCSTI: return tiocsti(tty, p); case TIOCGWINSZ: return tiocgwinsz(real_tty, p); case TIOCSWINSZ: return tiocswinsz(real_tty, p); case TIOCCONS: return real_tty != tty ? 
-EINVAL : tioccons(file); case FIONBIO: return fionbio(file, p); case TIOCEXCL: set_bit(TTY_EXCLUSIVE, &tty->flags); return 0; case TIOCNXCL: clear_bit(TTY_EXCLUSIVE, &tty->flags); return 0; case TIOCGEXCL: { int excl = test_bit(TTY_EXCLUSIVE, &tty->flags); return put_user(excl, (int __user *)p); } case TIOCNOTTY: if (current->signal->tty != tty) return -ENOTTY; no_tty(); return 0; case TIOCSCTTY: return tiocsctty(real_tty, file, arg); case TIOCGPGRP: return tiocgpgrp(tty, real_tty, p); case TIOCSPGRP: return tiocspgrp(tty, real_tty, p); case TIOCGSID: return tiocgsid(tty, real_tty, p); case TIOCGETD: return tiocgetd(tty, p); case TIOCSETD: return tiocsetd(tty, p); case TIOCVHANGUP: if (!capable(CAP_SYS_ADMIN)) return -EPERM; tty_vhangup(tty); return 0; case TIOCGDEV: { unsigned int ret = new_encode_dev(tty_devnum(real_tty)); return put_user(ret, (unsigned int __user *)p); } /* * Break handling */ case TIOCSBRK: /* Turn break on, unconditionally */ if (tty->ops->break_ctl) return tty->ops->break_ctl(tty, -1); return 0; case TIOCCBRK: /* Turn break off, unconditionally */ if (tty->ops->break_ctl) return tty->ops->break_ctl(tty, 0); return 0; case TCSBRK: /* SVID version: non-zero arg --> no break */ /* non-zero arg means wait for all output data * to be sent (performed above) but don't send break. * This is used by the tcdrain() termios function. */ if (!arg) return send_break(tty, 250); return 0; case TCSBRKP: /* support for POSIX tcsendbreak() */ return send_break(tty, arg ? arg*100 : 250); case TIOCMGET: return tty_tiocmget(tty, p); case TIOCMSET: case TIOCMBIC: case TIOCMBIS: return tty_tiocmset(tty, cmd, p); case TIOCGICOUNT: retval = tty_tiocgicount(tty, p); /* For the moment allow fall through to the old method */ if (retval != -EINVAL) return retval; break; case TCFLSH: switch (arg) { case TCIFLUSH: case TCIOFLUSH: /* flush tty buffer and allow ldisc to process ioctl */ tty_buffer_flush(tty, NULL); break; } break; case TIOCSSERIAL: tty_warn_deprecated_flags(p); break; } if (tty->ops->ioctl) { retval = tty->ops->ioctl(tty, cmd, arg); if (retval != -ENOIOCTLCMD) return retval; } ld = tty_ldisc_ref_wait(tty); retval = -EINVAL; if (ld->ops->ioctl) { retval = ld->ops->ioctl(tty, file, cmd, arg); if (retval == -ENOIOCTLCMD) retval = -ENOTTY; } tty_ldisc_deref(ld); return retval; } #ifdef CONFIG_COMPAT static long tty_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct tty_struct *tty = file_tty(file); struct tty_ldisc *ld; int retval = -ENOIOCTLCMD; if (tty_paranoia_check(tty, file_inode(file), "tty_ioctl")) return -EINVAL; if (tty->ops->compat_ioctl) { retval = tty->ops->compat_ioctl(tty, cmd, arg); if (retval != -ENOIOCTLCMD) return retval; } ld = tty_ldisc_ref_wait(tty); if (ld->ops->compat_ioctl) retval = ld->ops->compat_ioctl(tty, file, cmd, arg); else retval = n_tty_compat_ioctl_helper(tty, file, cmd, arg); tty_ldisc_deref(ld); return retval; } #endif static int this_tty(const void *t, struct file *file, unsigned fd) { if (likely(file->f_op->read != tty_read)) return 0; return file_tty(file) != t ? 0 : fd + 1; } /* * This implements the "Secure Attention Key" --- the idea is to * prevent trojan horses by killing all processes associated with this * tty when the user hits the "Secure Attention Key". Required for * super-paranoid applications --- see the Orange Book for more details. * * This code could be nicer; ideally it should send a HUP, wait a few * seconds, then send a INT, and then a KILL signal. 
But you then * have to coordinate with the init process, since all processes associated * with the current tty must be dead before the new getty is allowed * to spawn. * * Now, if it would be correct ;-/ The current code has a nasty hole - * it doesn't catch files in flight. We may send the descriptor to ourselves * via AF_UNIX socket, close it and later fetch from socket. FIXME. * * Nasty bug: do_SAK is being called in interrupt context. This can * deadlock. We punt it up to process context. AKPM - 16Mar2001 */ void __do_SAK(struct tty_struct *tty) { #ifdef TTY_SOFT_SAK tty_hangup(tty); #else struct task_struct *g, *p; struct pid *session; int i; if (!tty) return; session = tty->session; tty_ldisc_flush(tty); tty_driver_flush_buffer(tty); read_lock(&tasklist_lock); /* Kill the entire session */ do_each_pid_task(session, PIDTYPE_SID, p) { tty_notice(tty, "SAK: killed process %d (%s): by session\n", task_pid_nr(p), p->comm); send_sig(SIGKILL, p, 1); } while_each_pid_task(session, PIDTYPE_SID, p); /* Now kill any processes that happen to have the tty open */ do_each_thread(g, p) { if (p->signal->tty == tty) { tty_notice(tty, "SAK: killed process %d (%s): by controlling tty\n", task_pid_nr(p), p->comm); send_sig(SIGKILL, p, 1); continue; } task_lock(p); i = iterate_fd(p->files, 0, this_tty, tty); if (i != 0) { tty_notice(tty, "SAK: killed process %d (%s): by fd#%d\n", task_pid_nr(p), p->comm, i - 1); force_sig(SIGKILL, p); } task_unlock(p); } while_each_thread(g, p); read_unlock(&tasklist_lock); #endif } static void do_SAK_work(struct work_struct *work) { struct tty_struct *tty = container_of(work, struct tty_struct, SAK_work); __do_SAK(tty); } /* * The tq handling here is a little racy - tty->SAK_work may already be queued. * Fortunately we don't need to worry, because if ->SAK_work is already queued, * the values which we write to it will be identical to the values which it * already has. --akpm */ void do_SAK(struct tty_struct *tty) { if (!tty) return; schedule_work(&tty->SAK_work); } EXPORT_SYMBOL(do_SAK); static int dev_match_devt(struct device *dev, const void *data) { const dev_t *devt = data; return dev->devt == *devt; } /* Must put_device() after it's unused! */ static struct device *tty_get_device(struct tty_struct *tty) { dev_t devt = tty_devnum(tty); return class_find_device(tty_class, NULL, &devt, dev_match_devt); } /** * alloc_tty_struct * * This subroutine allocates and initializes a tty structure. 
* * Locking: none - tty in question is not exposed at this point */ struct tty_struct *alloc_tty_struct(struct tty_driver *driver, int idx) { struct tty_struct *tty; tty = kzalloc(sizeof(*tty), GFP_KERNEL); if (!tty) return NULL; kref_init(&tty->kref); tty->magic = TTY_MAGIC; tty_ldisc_init(tty); tty->session = NULL; tty->pgrp = NULL; mutex_init(&tty->legacy_mutex); mutex_init(&tty->throttle_mutex); init_rwsem(&tty->termios_rwsem); mutex_init(&tty->winsize_mutex); init_ldsem(&tty->ldisc_sem); init_waitqueue_head(&tty->write_wait); init_waitqueue_head(&tty->read_wait); INIT_WORK(&tty->hangup_work, do_tty_hangup); mutex_init(&tty->atomic_write_lock); spin_lock_init(&tty->ctrl_lock); spin_lock_init(&tty->flow_lock); INIT_LIST_HEAD(&tty->tty_files); INIT_WORK(&tty->SAK_work, do_SAK_work); tty->driver = driver; tty->ops = driver->ops; tty->index = idx; tty_line_name(driver, idx, tty->name); tty->dev = tty_get_device(tty); return tty; } /** * deinitialize_tty_struct * @tty: tty to deinitialize * * This subroutine deinitializes a tty structure that has been newly * allocated but tty_release cannot be called on that yet. * * Locking: none - tty in question must not be exposed at this point */ void deinitialize_tty_struct(struct tty_struct *tty) { tty_ldisc_deinit(tty); } /** * tty_put_char - write one character to a tty * @tty: tty * @ch: character * * Write one byte to the tty using the provided put_char method * if present. Returns the number of characters successfully output. * * Note: the specific put_char operation in the driver layer may go * away soon. Don't call it directly, use this method */ int tty_put_char(struct tty_struct *tty, unsigned char ch) { if (tty->ops->put_char) return tty->ops->put_char(tty, ch); return tty->ops->write(tty, &ch, 1); } EXPORT_SYMBOL_GPL(tty_put_char); struct class *tty_class; static int tty_cdev_add(struct tty_driver *driver, dev_t dev, unsigned int index, unsigned int count) { int err; /* init here, since reused cdevs cause crashes */ driver->cdevs[index] = cdev_alloc(); if (!driver->cdevs[index]) return -ENOMEM; driver->cdevs[index]->ops = &tty_fops; driver->cdevs[index]->owner = driver->owner; err = cdev_add(driver->cdevs[index], dev, count); if (err) kobject_put(&driver->cdevs[index]->kobj); return err; } /** * tty_register_device - register a tty device * @driver: the tty driver that describes the tty device * @index: the index in the tty driver for this tty device * @device: a struct device that is associated with this tty device. * This field is optional, if there is no known struct device * for this tty device it can be set to NULL safely. * * Returns a pointer to the struct device for this tty device * (or ERR_PTR(-EFOO) on error). * * This call is required to be made to register an individual tty device * if the tty driver's flags have the TTY_DRIVER_DYNAMIC_DEV bit set. If * that bit is not set, this function should not be called by a tty * driver. * * Locking: ?? */ struct device *tty_register_device(struct tty_driver *driver, unsigned index, struct device *device) { return tty_register_device_attr(driver, index, device, NULL, NULL); } EXPORT_SYMBOL(tty_register_device); static void tty_device_create_release(struct device *dev) { dev_dbg(dev, "releasing...\n"); kfree(dev); } /** * tty_register_device_attr - register a tty device * @driver: the tty driver that describes the tty device * @index: the index in the tty driver for this tty device * @device: a struct device that is associated with this tty device. 
* This field is optional; if there is no known struct device * for this tty device it can be set to NULL safely. * @drvdata: Driver data to be set to device. * @attr_grp: Attribute group to be set on device. * * Returns a pointer to the struct device for this tty device * (or ERR_PTR(-EFOO) on error). * * This call is required to be made to register an individual tty device * if the tty driver's flags have the TTY_DRIVER_DYNAMIC_DEV bit set. If * that bit is not set, this function should not be called by a tty * driver. * * Locking: ?? */ struct device *tty_register_device_attr(struct tty_driver *driver, unsigned index, struct device *device, void *drvdata, const struct attribute_group **attr_grp) { char name[64]; dev_t devt = MKDEV(driver->major, driver->minor_start) + index; struct device *dev = NULL; int retval = -ENODEV; bool cdev = false; if (index >= driver->num) { pr_err("%s: Attempt to register invalid tty line number (%d)\n", driver->name, index); return ERR_PTR(-EINVAL); } if (driver->type == TTY_DRIVER_TYPE_PTY) pty_line_name(driver, index, name); else tty_line_name(driver, index, name); if (!(driver->flags & TTY_DRIVER_DYNAMIC_ALLOC)) { retval = tty_cdev_add(driver, devt, index, 1); if (retval) goto error; cdev = true; } dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) { retval = -ENOMEM; goto error; } dev->devt = devt; dev->class = tty_class; dev->parent = device; dev->release = tty_device_create_release; dev_set_name(dev, "%s", name); dev->groups = attr_grp; dev_set_drvdata(dev, drvdata); retval = device_register(dev); if (retval) goto error; return dev; error: put_device(dev); if (cdev) { cdev_del(driver->cdevs[index]); driver->cdevs[index] = NULL; } return ERR_PTR(retval); } EXPORT_SYMBOL_GPL(tty_register_device_attr); /** * tty_unregister_device - unregister a tty device * @driver: the tty driver that describes the tty device * @index: the index in the tty driver for this tty device * * If a tty device is registered with a call to tty_register_device() then * this function must be called when the tty device is gone. * * Locking: ?? */ void tty_unregister_device(struct tty_driver *driver, unsigned index) { device_destroy(tty_class, MKDEV(driver->major, driver->minor_start) + index); if (!(driver->flags & TTY_DRIVER_DYNAMIC_ALLOC)) { cdev_del(driver->cdevs[index]); driver->cdevs[index] = NULL; } } EXPORT_SYMBOL(tty_unregister_device); /** * __tty_alloc_driver -- allocate tty driver * @lines: count of lines this driver can handle at most * @owner: module which is responsible for this driver * @flags: some of TTY_DRIVER_* flags, will be set in driver->flags * * This should not be called directly, some of the provided macros should be * used instead. Use IS_ERR and friends on @retval.
*/ struct tty_driver *__tty_alloc_driver(unsigned int lines, struct module *owner, unsigned long flags) { struct tty_driver *driver; unsigned int cdevs = 1; int err; if (!lines || (flags & TTY_DRIVER_UNNUMBERED_NODE && lines > 1)) return ERR_PTR(-EINVAL); driver = kzalloc(sizeof(struct tty_driver), GFP_KERNEL); if (!driver) return ERR_PTR(-ENOMEM); kref_init(&driver->kref); driver->magic = TTY_DRIVER_MAGIC; driver->num = lines; driver->owner = owner; driver->flags = flags; if (!(flags & TTY_DRIVER_DEVPTS_MEM)) { driver->ttys = kcalloc(lines, sizeof(*driver->ttys), GFP_KERNEL); driver->termios = kcalloc(lines, sizeof(*driver->termios), GFP_KERNEL); if (!driver->ttys || !driver->termios) { err = -ENOMEM; goto err_free_all; } } if (!(flags & TTY_DRIVER_DYNAMIC_ALLOC)) { driver->ports = kcalloc(lines, sizeof(*driver->ports), GFP_KERNEL); if (!driver->ports) { err = -ENOMEM; goto err_free_all; } cdevs = lines; } driver->cdevs = kcalloc(cdevs, sizeof(*driver->cdevs), GFP_KERNEL); if (!driver->cdevs) { err = -ENOMEM; goto err_free_all; } return driver; err_free_all: kfree(driver->ports); kfree(driver->ttys); kfree(driver->termios); kfree(driver->cdevs); kfree(driver); return ERR_PTR(err); } EXPORT_SYMBOL(__tty_alloc_driver); static void destruct_tty_driver(struct kref *kref) { struct tty_driver *driver = container_of(kref, struct tty_driver, kref); int i; struct ktermios *tp; if (driver->flags & TTY_DRIVER_INSTALLED) { /* * Free the termios and termios_locked structures because * we don't want to get memory leaks when modular tty * drivers are removed from the kernel. */ for (i = 0; i < driver->num; i++) { tp = driver->termios[i]; if (tp) { driver->termios[i] = NULL; kfree(tp); } if (!(driver->flags & TTY_DRIVER_DYNAMIC_DEV)) tty_unregister_device(driver, i); } proc_tty_unregister_driver(driver); if (driver->flags & TTY_DRIVER_DYNAMIC_ALLOC) cdev_del(driver->cdevs[0]); } kfree(driver->cdevs); kfree(driver->ports); kfree(driver->termios); kfree(driver->ttys); kfree(driver); } void tty_driver_kref_put(struct tty_driver *driver) { kref_put(&driver->kref, destruct_tty_driver); } EXPORT_SYMBOL(tty_driver_kref_put); void tty_set_operations(struct tty_driver *driver, const struct tty_operations *op) { driver->ops = op; }; EXPORT_SYMBOL(tty_set_operations); void put_tty_driver(struct tty_driver *d) { tty_driver_kref_put(d); } EXPORT_SYMBOL(put_tty_driver); /* * Called by a tty driver to register itself. 
*/ int tty_register_driver(struct tty_driver *driver) { int error; int i; dev_t dev; struct device *d; if (!driver->major) { error = alloc_chrdev_region(&dev, driver->minor_start, driver->num, driver->name); if (!error) { driver->major = MAJOR(dev); driver->minor_start = MINOR(dev); } } else { dev = MKDEV(driver->major, driver->minor_start); error = register_chrdev_region(dev, driver->num, driver->name); } if (error < 0) goto err; if (driver->flags & TTY_DRIVER_DYNAMIC_ALLOC) { error = tty_cdev_add(driver, dev, 0, driver->num); if (error) goto err_unreg_char; } mutex_lock(&tty_mutex); list_add(&driver->tty_drivers, &tty_drivers); mutex_unlock(&tty_mutex); if (!(driver->flags & TTY_DRIVER_DYNAMIC_DEV)) { for (i = 0; i < driver->num; i++) { d = tty_register_device(driver, i, NULL); if (IS_ERR(d)) { error = PTR_ERR(d); goto err_unreg_devs; } } } proc_tty_register_driver(driver); driver->flags |= TTY_DRIVER_INSTALLED; return 0; err_unreg_devs: for (i--; i >= 0; i--) tty_unregister_device(driver, i); mutex_lock(&tty_mutex); list_del(&driver->tty_drivers); mutex_unlock(&tty_mutex); err_unreg_char: unregister_chrdev_region(dev, driver->num); err: return error; } EXPORT_SYMBOL(tty_register_driver); /* * Called by a tty driver to unregister itself. */ int tty_unregister_driver(struct tty_driver *driver) { #if 0 /* FIXME */ if (driver->refcount) return -EBUSY; #endif unregister_chrdev_region(MKDEV(driver->major, driver->minor_start), driver->num); mutex_lock(&tty_mutex); list_del(&driver->tty_drivers); mutex_unlock(&tty_mutex); return 0; } EXPORT_SYMBOL(tty_unregister_driver); dev_t tty_devnum(struct tty_struct *tty) { return MKDEV(tty->driver->major, tty->driver->minor_start) + tty->index; } EXPORT_SYMBOL(tty_devnum); void tty_default_fops(struct file_operations *fops) { *fops = tty_fops; } /* * Initialize the console device. This is called *early*, so * we can't necessarily depend on lots of kernel help here. * Just do some early initializations, and do the complex setup * later. */ void __init console_init(void) { initcall_t *call; /* Setup the default TTY line discipline. */ tty_ldisc_begin(); /* * set up the console device so that later boot sequences can * inform about problems etc.. */ call = __con_initcall_start; while (call < __con_initcall_end) { (*call)(); call++; } } static char *tty_devnode(struct device *dev, umode_t *mode) { if (!mode) return NULL; if (dev->devt == MKDEV(TTYAUX_MAJOR, 0) || dev->devt == MKDEV(TTYAUX_MAJOR, 2)) *mode = 0666; return NULL; } static int __init tty_class_init(void) { tty_class = class_create(THIS_MODULE, "tty"); if (IS_ERR(tty_class)) return PTR_ERR(tty_class); tty_class->devnode = tty_devnode; return 0; } postcore_initcall(tty_class_init); /* 3/2004 jmc: why do these devices exist? */ static struct cdev tty_cdev, console_cdev; static ssize_t show_cons_active(struct device *dev, struct device_attribute *attr, char *buf) { struct console *cs[16]; int i = 0; struct console *c; ssize_t count = 0; console_lock(); for_each_console(c) { if (!c->device) continue; if (!c->write) continue; if ((c->flags & CON_ENABLED) == 0) continue; cs[i++] = c; if (i >= ARRAY_SIZE(cs)) break; } while (i--) { int index = cs[i]->index; struct tty_driver *drv = cs[i]->device(cs[i], &index); /* don't resolve tty0 as some programs depend on it */ if (drv && (cs[i]->index > 0 || drv->major != TTY_MAJOR)) count += tty_line_name(drv, index, buf + count); else count += sprintf(buf + count, "%s%d", cs[i]->name, cs[i]->index); count += sprintf(buf + count, "%c", i ? 
' ':'\n'); } console_unlock(); return count; } static DEVICE_ATTR(active, S_IRUGO, show_cons_active, NULL); static struct attribute *cons_dev_attrs[] = { &dev_attr_active.attr, NULL }; ATTRIBUTE_GROUPS(cons_dev); static struct device *consdev; void console_sysfs_notify(void) { if (consdev) sysfs_notify(&consdev->kobj, NULL, "active"); } /* * Ok, now we can initialize the rest of the tty devices and can count * on memory allocations, interrupts etc.. */ int __init tty_init(void) { cdev_init(&tty_cdev, &tty_fops); if (cdev_add(&tty_cdev, MKDEV(TTYAUX_MAJOR, 0), 1) || register_chrdev_region(MKDEV(TTYAUX_MAJOR, 0), 1, "/dev/tty") < 0) panic("Couldn't register /dev/tty driver\n"); device_create(tty_class, NULL, MKDEV(TTYAUX_MAJOR, 0), NULL, "tty"); cdev_init(&console_cdev, &console_fops); if (cdev_add(&console_cdev, MKDEV(TTYAUX_MAJOR, 1), 1) || register_chrdev_region(MKDEV(TTYAUX_MAJOR, 1), 1, "/dev/console") < 0) panic("Couldn't register /dev/console driver\n"); consdev = device_create_with_groups(tty_class, NULL, MKDEV(TTYAUX_MAJOR, 1), NULL, cons_dev_groups, "console"); if (IS_ERR(consdev)) consdev = NULL; #ifdef CONFIG_VT vty_init(&console_fops); #endif return 0; }
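/*
 * Illustrative sketch (not part of tty_io.c): a minimal, hypothetical tty
 * driver module showing how the registration API above fits together --
 * alloc_tty_driver(), tty_set_operations(), tty_register_driver(), and a
 * tty_port linked per line so that tty_init_dev() finds driver->ports[]
 * populated (see the WARN_RATELIMIT there).  The driver name, device name,
 * line count and do-nothing ops below are assumptions made purely for this
 * sketch; it is wrapped in #if 0 so it has no effect on the surrounding file.
 */
#if 0
#include <linux/module.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_port.h>

#define EX_NR_LINES 4	/* hypothetical number of lines */

static struct tty_driver *ex_driver;
static struct tty_port ex_ports[EX_NR_LINES];

static int ex_open(struct tty_struct *tty, struct file *filp)
{
	return 0;	/* nothing to do in this sketch */
}

static void ex_close(struct tty_struct *tty, struct file *filp)
{
}

static int ex_write(struct tty_struct *tty, const unsigned char *buf, int count)
{
	return count;	/* pretend all data was consumed */
}

static int ex_write_room(struct tty_struct *tty)
{
	return 4096;
}

static const struct tty_operations ex_ops = {
	.open		= ex_open,
	.close		= ex_close,
	.write		= ex_write,
	.write_room	= ex_write_room,
};

static int __init ex_init(void)
{
	int i, ret;

	ex_driver = alloc_tty_driver(EX_NR_LINES);
	if (!ex_driver)
		return -ENOMEM;

	/* major left at 0 so tty_register_driver() allocates one dynamically */
	ex_driver->driver_name	= "ex_serial";
	ex_driver->name		= "ttyEX";
	ex_driver->type		= TTY_DRIVER_TYPE_SERIAL;
	ex_driver->subtype	= SERIAL_TYPE_NORMAL;
	ex_driver->init_termios	= tty_std_termios;
	ex_driver->flags	= TTY_DRIVER_REAL_RAW;
	tty_set_operations(ex_driver, &ex_ops);

	/* Give tty_init_dev() a port for each line via driver->ports[] */
	for (i = 0; i < EX_NR_LINES; i++) {
		tty_port_init(&ex_ports[i]);
		tty_port_link_device(&ex_ports[i], ex_driver, i);
	}

	ret = tty_register_driver(ex_driver);
	if (ret) {
		for (i = 0; i < EX_NR_LINES; i++)
			tty_port_destroy(&ex_ports[i]);
		put_tty_driver(ex_driver);
		return ret;
	}
	return 0;
}

static void __exit ex_exit(void)
{
	int i;

	tty_unregister_driver(ex_driver);
	for (i = 0; i < EX_NR_LINES; i++)
		tty_port_destroy(&ex_ports[i]);
	put_tty_driver(ex_driver);
}

module_init(ex_init);
module_exit(ex_exit);
MODULE_LICENSE("GPL");
#endif /* illustrative sketch */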
./CrossVul/dataset_final_sorted/CWE-362/c/good_4741_0
crossvul-cpp_data_bad_3014_0
/* * Copyright (C) 2001, 2002 Sistina Software (UK) Limited. * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved. * * This file is released under the GPL. */ #include "dm-core.h" #include "dm-rq.h" #include "dm-uevent.h" #include <linux/init.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/sched/signal.h> #include <linux/blkpg.h> #include <linux/bio.h> #include <linux/mempool.h> #include <linux/dax.h> #include <linux/slab.h> #include <linux/idr.h> #include <linux/uio.h> #include <linux/hdreg.h> #include <linux/delay.h> #include <linux/wait.h> #include <linux/pr.h> #include <linux/refcount.h> #define DM_MSG_PREFIX "core" /* * Cookies are numeric values sent with CHANGE and REMOVE * uevents while resuming, removing or renaming the device. */ #define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE" #define DM_COOKIE_LENGTH 24 static const char *_name = DM_NAME; static unsigned int major = 0; static unsigned int _major = 0; static DEFINE_IDR(_minor_idr); static DEFINE_SPINLOCK(_minor_lock); static void do_deferred_remove(struct work_struct *w); static DECLARE_WORK(deferred_remove_work, do_deferred_remove); static struct workqueue_struct *deferred_remove_workqueue; atomic_t dm_global_event_nr = ATOMIC_INIT(0); DECLARE_WAIT_QUEUE_HEAD(dm_global_eventq); void dm_issue_global_event(void) { atomic_inc(&dm_global_event_nr); wake_up(&dm_global_eventq); } /* * One of these is allocated per bio. */ struct dm_io { struct mapped_device *md; blk_status_t status; atomic_t io_count; struct bio *bio; unsigned long start_time; spinlock_t endio_lock; struct dm_stats_aux stats_aux; }; #define MINOR_ALLOCED ((void *)-1) /* * Bits for the md->flags field. */ #define DMF_BLOCK_IO_FOR_SUSPEND 0 #define DMF_SUSPENDED 1 #define DMF_FROZEN 2 #define DMF_FREEING 3 #define DMF_DELETING 4 #define DMF_NOFLUSH_SUSPENDING 5 #define DMF_DEFERRED_REMOVE 6 #define DMF_SUSPENDED_INTERNALLY 7 #define DM_NUMA_NODE NUMA_NO_NODE static int dm_numa_node = DM_NUMA_NODE; /* * For mempools pre-allocation at the table loading time. */ struct dm_md_mempools { mempool_t *io_pool; struct bio_set *bs; }; struct table_device { struct list_head list; refcount_t count; struct dm_dev dm_dev; }; static struct kmem_cache *_io_cache; static struct kmem_cache *_rq_tio_cache; static struct kmem_cache *_rq_cache; /* * Bio-based DM's mempools' reserved IOs set by the user. 
*/ #define RESERVED_BIO_BASED_IOS 16 static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS; static int __dm_get_module_param_int(int *module_param, int min, int max) { int param = ACCESS_ONCE(*module_param); int modified_param = 0; bool modified = true; if (param < min) modified_param = min; else if (param > max) modified_param = max; else modified = false; if (modified) { (void)cmpxchg(module_param, param, modified_param); param = modified_param; } return param; } unsigned __dm_get_module_param(unsigned *module_param, unsigned def, unsigned max) { unsigned param = ACCESS_ONCE(*module_param); unsigned modified_param = 0; if (!param) modified_param = def; else if (param > max) modified_param = max; if (modified_param) { (void)cmpxchg(module_param, param, modified_param); param = modified_param; } return param; } unsigned dm_get_reserved_bio_based_ios(void) { return __dm_get_module_param(&reserved_bio_based_ios, RESERVED_BIO_BASED_IOS, DM_RESERVED_MAX_IOS); } EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios); static unsigned dm_get_numa_node(void) { return __dm_get_module_param_int(&dm_numa_node, DM_NUMA_NODE, num_online_nodes() - 1); } static int __init local_init(void) { int r = -ENOMEM; /* allocate a slab for the dm_ios */ _io_cache = KMEM_CACHE(dm_io, 0); if (!_io_cache) return r; _rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0); if (!_rq_tio_cache) goto out_free_io_cache; _rq_cache = kmem_cache_create("dm_old_clone_request", sizeof(struct request), __alignof__(struct request), 0, NULL); if (!_rq_cache) goto out_free_rq_tio_cache; r = dm_uevent_init(); if (r) goto out_free_rq_cache; deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1); if (!deferred_remove_workqueue) { r = -ENOMEM; goto out_uevent_exit; } _major = major; r = register_blkdev(_major, _name); if (r < 0) goto out_free_workqueue; if (!_major) _major = r; return 0; out_free_workqueue: destroy_workqueue(deferred_remove_workqueue); out_uevent_exit: dm_uevent_exit(); out_free_rq_cache: kmem_cache_destroy(_rq_cache); out_free_rq_tio_cache: kmem_cache_destroy(_rq_tio_cache); out_free_io_cache: kmem_cache_destroy(_io_cache); return r; } static void local_exit(void) { flush_scheduled_work(); destroy_workqueue(deferred_remove_workqueue); kmem_cache_destroy(_rq_cache); kmem_cache_destroy(_rq_tio_cache); kmem_cache_destroy(_io_cache); unregister_blkdev(_major, _name); dm_uevent_exit(); _major = 0; DMINFO("cleaned up"); } static int (*_inits[])(void) __initdata = { local_init, dm_target_init, dm_linear_init, dm_stripe_init, dm_io_init, dm_kcopyd_init, dm_interface_init, dm_statistics_init, }; static void (*_exits[])(void) = { local_exit, dm_target_exit, dm_linear_exit, dm_stripe_exit, dm_io_exit, dm_kcopyd_exit, dm_interface_exit, dm_statistics_exit, }; static int __init dm_init(void) { const int count = ARRAY_SIZE(_inits); int r, i; for (i = 0; i < count; i++) { r = _inits[i](); if (r) goto bad; } return 0; bad: while (i--) _exits[i](); return r; } static void __exit dm_exit(void) { int i = ARRAY_SIZE(_exits); while (i--) _exits[i](); /* * Should be empty by this point. 
*/ idr_destroy(&_minor_idr); } /* * Block device functions */ int dm_deleting_md(struct mapped_device *md) { return test_bit(DMF_DELETING, &md->flags); } static int dm_blk_open(struct block_device *bdev, fmode_t mode) { struct mapped_device *md; spin_lock(&_minor_lock); md = bdev->bd_disk->private_data; if (!md) goto out; if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) { md = NULL; goto out; } dm_get(md); atomic_inc(&md->open_count); out: spin_unlock(&_minor_lock); return md ? 0 : -ENXIO; } static void dm_blk_close(struct gendisk *disk, fmode_t mode) { struct mapped_device *md; spin_lock(&_minor_lock); md = disk->private_data; if (WARN_ON(!md)) goto out; if (atomic_dec_and_test(&md->open_count) && (test_bit(DMF_DEFERRED_REMOVE, &md->flags))) queue_work(deferred_remove_workqueue, &deferred_remove_work); dm_put(md); out: spin_unlock(&_minor_lock); } int dm_open_count(struct mapped_device *md) { return atomic_read(&md->open_count); } /* * Guarantees nothing is using the device before it's deleted. */ int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred) { int r = 0; spin_lock(&_minor_lock); if (dm_open_count(md)) { r = -EBUSY; if (mark_deferred) set_bit(DMF_DEFERRED_REMOVE, &md->flags); } else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags)) r = -EEXIST; else set_bit(DMF_DELETING, &md->flags); spin_unlock(&_minor_lock); return r; } int dm_cancel_deferred_remove(struct mapped_device *md) { int r = 0; spin_lock(&_minor_lock); if (test_bit(DMF_DELETING, &md->flags)) r = -EBUSY; else clear_bit(DMF_DEFERRED_REMOVE, &md->flags); spin_unlock(&_minor_lock); return r; } static void do_deferred_remove(struct work_struct *w) { dm_deferred_remove(); } sector_t dm_get_size(struct mapped_device *md) { return get_capacity(md->disk); } struct request_queue *dm_get_md_queue(struct mapped_device *md) { return md->queue; } struct dm_stats *dm_get_stats(struct mapped_device *md) { return &md->stats; } static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo) { struct mapped_device *md = bdev->bd_disk->private_data; return dm_get_geometry(md, geo); } static int dm_grab_bdev_for_ioctl(struct mapped_device *md, struct block_device **bdev, fmode_t *mode) { struct dm_target *tgt; struct dm_table *map; int srcu_idx, r; retry: r = -ENOTTY; map = dm_get_live_table(md, &srcu_idx); if (!map || !dm_table_get_size(map)) goto out; /* We only support devices that have a single target */ if (dm_table_get_num_targets(map) != 1) goto out; tgt = dm_table_get_target(map, 0); if (!tgt->type->prepare_ioctl) goto out; if (dm_suspended_md(md)) { r = -EAGAIN; goto out; } r = tgt->type->prepare_ioctl(tgt, bdev, mode); if (r < 0) goto out; bdgrab(*bdev); dm_put_live_table(md, srcu_idx); return r; out: dm_put_live_table(md, srcu_idx); if (r == -ENOTCONN && !fatal_signal_pending(current)) { msleep(10); goto retry; } return r; } static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) { struct mapped_device *md = bdev->bd_disk->private_data; int r; r = dm_grab_bdev_for_ioctl(md, &bdev, &mode); if (r < 0) return r; if (r > 0) { /* * Target determined this ioctl is being issued against a * subset of the parent bdev; require extra privileges. 
*/ if (!capable(CAP_SYS_RAWIO)) { DMWARN_LIMIT( "%s: sending ioctl %x to DM device without required privilege.", current->comm, cmd); r = -ENOIOCTLCMD; goto out; } } r = __blkdev_driver_ioctl(bdev, mode, cmd, arg); out: bdput(bdev); return r; } static struct dm_io *alloc_io(struct mapped_device *md) { return mempool_alloc(md->io_pool, GFP_NOIO); } static void free_io(struct mapped_device *md, struct dm_io *io) { mempool_free(io, md->io_pool); } static void free_tio(struct dm_target_io *tio) { bio_put(&tio->clone); } int md_in_flight(struct mapped_device *md) { return atomic_read(&md->pending[READ]) + atomic_read(&md->pending[WRITE]); } static void start_io_acct(struct dm_io *io) { struct mapped_device *md = io->md; struct bio *bio = io->bio; int cpu; int rw = bio_data_dir(bio); io->start_time = jiffies; cpu = part_stat_lock(); part_round_stats(md->queue, cpu, &dm_disk(md)->part0); part_stat_unlock(); atomic_set(&dm_disk(md)->part0.in_flight[rw], atomic_inc_return(&md->pending[rw])); if (unlikely(dm_stats_used(&md->stats))) dm_stats_account_io(&md->stats, bio_data_dir(bio), bio->bi_iter.bi_sector, bio_sectors(bio), false, 0, &io->stats_aux); } static void end_io_acct(struct dm_io *io) { struct mapped_device *md = io->md; struct bio *bio = io->bio; unsigned long duration = jiffies - io->start_time; int pending; int rw = bio_data_dir(bio); generic_end_io_acct(md->queue, rw, &dm_disk(md)->part0, io->start_time); if (unlikely(dm_stats_used(&md->stats))) dm_stats_account_io(&md->stats, bio_data_dir(bio), bio->bi_iter.bi_sector, bio_sectors(bio), true, duration, &io->stats_aux); /* * After this is decremented the bio must not be touched if it is * a flush. */ pending = atomic_dec_return(&md->pending[rw]); atomic_set(&dm_disk(md)->part0.in_flight[rw], pending); pending += atomic_read(&md->pending[rw^0x1]); /* nudge anyone waiting on suspend queue */ if (!pending) wake_up(&md->wait); } /* * Add the bio to the list of deferred io. */ static void queue_io(struct mapped_device *md, struct bio *bio) { unsigned long flags; spin_lock_irqsave(&md->deferred_lock, flags); bio_list_add(&md->deferred, bio); spin_unlock_irqrestore(&md->deferred_lock, flags); queue_work(md->wq, &md->work); } /* * Everyone (including functions in this file), should use this * function to access the md->map field, and make sure they call * dm_put_live_table() when finished. */ struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier) { *srcu_idx = srcu_read_lock(&md->io_barrier); return srcu_dereference(md->map, &md->io_barrier); } void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier) { srcu_read_unlock(&md->io_barrier, srcu_idx); } void dm_sync_table(struct mapped_device *md) { synchronize_srcu(&md->io_barrier); synchronize_rcu_expedited(); } /* * A fast alternative to dm_get_live_table/dm_put_live_table. * The caller must not block between these two functions. */ static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU) { rcu_read_lock(); return rcu_dereference(md->map); } static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU) { rcu_read_unlock(); } /* * Open a table device so we can use it as a map destination. 
*/ static int open_table_device(struct table_device *td, dev_t dev, struct mapped_device *md) { static char *_claim_ptr = "I belong to device-mapper"; struct block_device *bdev; int r; BUG_ON(td->dm_dev.bdev); bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _claim_ptr); if (IS_ERR(bdev)) return PTR_ERR(bdev); r = bd_link_disk_holder(bdev, dm_disk(md)); if (r) { blkdev_put(bdev, td->dm_dev.mode | FMODE_EXCL); return r; } td->dm_dev.bdev = bdev; td->dm_dev.dax_dev = dax_get_by_host(bdev->bd_disk->disk_name); return 0; } /* * Close a table device that we've been using. */ static void close_table_device(struct table_device *td, struct mapped_device *md) { if (!td->dm_dev.bdev) return; bd_unlink_disk_holder(td->dm_dev.bdev, dm_disk(md)); blkdev_put(td->dm_dev.bdev, td->dm_dev.mode | FMODE_EXCL); put_dax(td->dm_dev.dax_dev); td->dm_dev.bdev = NULL; td->dm_dev.dax_dev = NULL; } static struct table_device *find_table_device(struct list_head *l, dev_t dev, fmode_t mode) { struct table_device *td; list_for_each_entry(td, l, list) if (td->dm_dev.bdev->bd_dev == dev && td->dm_dev.mode == mode) return td; return NULL; } int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode, struct dm_dev **result) { int r; struct table_device *td; mutex_lock(&md->table_devices_lock); td = find_table_device(&md->table_devices, dev, mode); if (!td) { td = kmalloc_node(sizeof(*td), GFP_KERNEL, md->numa_node_id); if (!td) { mutex_unlock(&md->table_devices_lock); return -ENOMEM; } td->dm_dev.mode = mode; td->dm_dev.bdev = NULL; if ((r = open_table_device(td, dev, md))) { mutex_unlock(&md->table_devices_lock); kfree(td); return r; } format_dev_t(td->dm_dev.name, dev); refcount_set(&td->count, 1); list_add(&td->list, &md->table_devices); } else { refcount_inc(&td->count); } mutex_unlock(&md->table_devices_lock); *result = &td->dm_dev; return 0; } EXPORT_SYMBOL_GPL(dm_get_table_device); void dm_put_table_device(struct mapped_device *md, struct dm_dev *d) { struct table_device *td = container_of(d, struct table_device, dm_dev); mutex_lock(&md->table_devices_lock); if (refcount_dec_and_test(&td->count)) { close_table_device(td, md); list_del(&td->list); kfree(td); } mutex_unlock(&md->table_devices_lock); } EXPORT_SYMBOL(dm_put_table_device); static void free_table_devices(struct list_head *devices) { struct list_head *tmp, *next; list_for_each_safe(tmp, next, devices) { struct table_device *td = list_entry(tmp, struct table_device, list); DMWARN("dm_destroy: %s still exists with %d references", td->dm_dev.name, refcount_read(&td->count)); kfree(td); } } /* * Get the geometry associated with a dm device */ int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo) { *geo = md->geometry; return 0; } /* * Set the geometry of a device. */ int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo) { sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors; if (geo->start > sz) { DMWARN("Start sector is beyond the geometry limits."); return -EINVAL; } md->geometry = *geo; return 0; } /*----------------------------------------------------------------- * CRUD START: * A more elegant soln is in the works that uses the queue * merge fn, unfortunately there are a couple of changes to * the block layer that I want to make for this. So in the * interests of getting something for people to use I give * you this clearly demarcated crap. 
*---------------------------------------------------------------*/ static int __noflush_suspending(struct mapped_device *md) { return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); } /* * Decrements the number of outstanding ios that a bio has been * cloned into, completing the original io if necc. */ static void dec_pending(struct dm_io *io, blk_status_t error) { unsigned long flags; blk_status_t io_error; struct bio *bio; struct mapped_device *md = io->md; /* Push-back supersedes any I/O errors */ if (unlikely(error)) { spin_lock_irqsave(&io->endio_lock, flags); if (!(io->status == BLK_STS_DM_REQUEUE && __noflush_suspending(md))) io->status = error; spin_unlock_irqrestore(&io->endio_lock, flags); } if (atomic_dec_and_test(&io->io_count)) { if (io->status == BLK_STS_DM_REQUEUE) { /* * Target requested pushing back the I/O. */ spin_lock_irqsave(&md->deferred_lock, flags); if (__noflush_suspending(md)) bio_list_add_head(&md->deferred, io->bio); else /* noflush suspend was interrupted. */ io->status = BLK_STS_IOERR; spin_unlock_irqrestore(&md->deferred_lock, flags); } io_error = io->status; bio = io->bio; end_io_acct(io); free_io(md, io); if (io_error == BLK_STS_DM_REQUEUE) return; if ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size) { /* * Preflush done for flush with data, reissue * without REQ_PREFLUSH. */ bio->bi_opf &= ~REQ_PREFLUSH; queue_io(md, bio); } else { /* done with normal IO or empty flush */ bio->bi_status = io_error; bio_endio(bio); } } } void disable_write_same(struct mapped_device *md) { struct queue_limits *limits = dm_get_queue_limits(md); /* device doesn't really support WRITE SAME, disable it */ limits->max_write_same_sectors = 0; } void disable_write_zeroes(struct mapped_device *md) { struct queue_limits *limits = dm_get_queue_limits(md); /* device doesn't really support WRITE ZEROES, disable it */ limits->max_write_zeroes_sectors = 0; } static void clone_endio(struct bio *bio) { blk_status_t error = bio->bi_status; struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); struct dm_io *io = tio->io; struct mapped_device *md = tio->io->md; dm_endio_fn endio = tio->ti->type->end_io; if (unlikely(error == BLK_STS_TARGET)) { if (bio_op(bio) == REQ_OP_WRITE_SAME && !bio->bi_disk->queue->limits.max_write_same_sectors) disable_write_same(md); if (bio_op(bio) == REQ_OP_WRITE_ZEROES && !bio->bi_disk->queue->limits.max_write_zeroes_sectors) disable_write_zeroes(md); } if (endio) { int r = endio(tio->ti, bio, &error); switch (r) { case DM_ENDIO_REQUEUE: error = BLK_STS_DM_REQUEUE; /*FALLTHRU*/ case DM_ENDIO_DONE: break; case DM_ENDIO_INCOMPLETE: /* The target will handle the io */ return; default: DMWARN("unimplemented target endio return value: %d", r); BUG(); } } free_tio(tio); dec_pending(io, error); } /* * Return maximum size of I/O possible at the supplied sector up to the current * target boundary. */ static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti) { sector_t target_offset = dm_target_offset(ti, sector); return ti->len - target_offset; } static sector_t max_io_len(sector_t sector, struct dm_target *ti) { sector_t len = max_io_len_target_boundary(sector, ti); sector_t offset, max_len; /* * Does the target need to split even further? 
*/ if (ti->max_io_len) { offset = dm_target_offset(ti, sector); if (unlikely(ti->max_io_len & (ti->max_io_len - 1))) max_len = sector_div(offset, ti->max_io_len); else max_len = offset & (ti->max_io_len - 1); max_len = ti->max_io_len - max_len; if (len > max_len) len = max_len; } return len; } int dm_set_target_max_io_len(struct dm_target *ti, sector_t len) { if (len > UINT_MAX) { DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)", (unsigned long long)len, UINT_MAX); ti->error = "Maximum size of target IO is too large"; return -EINVAL; } ti->max_io_len = (uint32_t) len; return 0; } EXPORT_SYMBOL_GPL(dm_set_target_max_io_len); static struct dm_target *dm_dax_get_live_target(struct mapped_device *md, sector_t sector, int *srcu_idx) { struct dm_table *map; struct dm_target *ti; map = dm_get_live_table(md, srcu_idx); if (!map) return NULL; ti = dm_table_find_target(map, sector); if (!dm_target_is_valid(ti)) return NULL; return ti; } static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages, void **kaddr, pfn_t *pfn) { struct mapped_device *md = dax_get_private(dax_dev); sector_t sector = pgoff * PAGE_SECTORS; struct dm_target *ti; long len, ret = -EIO; int srcu_idx; ti = dm_dax_get_live_target(md, sector, &srcu_idx); if (!ti) goto out; if (!ti->type->direct_access) goto out; len = max_io_len(sector, ti) / PAGE_SECTORS; if (len < 1) goto out; nr_pages = min(len, nr_pages); if (ti->type->direct_access) ret = ti->type->direct_access(ti, pgoff, nr_pages, kaddr, pfn); out: dm_put_live_table(md, srcu_idx); return ret; } static size_t dm_dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i) { struct mapped_device *md = dax_get_private(dax_dev); sector_t sector = pgoff * PAGE_SECTORS; struct dm_target *ti; long ret = 0; int srcu_idx; ti = dm_dax_get_live_target(md, sector, &srcu_idx); if (!ti) goto out; if (!ti->type->dax_copy_from_iter) { ret = copy_from_iter(addr, bytes, i); goto out; } ret = ti->type->dax_copy_from_iter(ti, pgoff, addr, bytes, i); out: dm_put_live_table(md, srcu_idx); return ret; } /* * A target may call dm_accept_partial_bio only from the map routine. It is * allowed for all bio types except REQ_PREFLUSH. * * dm_accept_partial_bio informs the dm that the target only wants to process * additional n_sectors sectors of the bio and the rest of the data should be * sent in a next bio. * * A diagram that explains the arithmetics: * +--------------------+---------------+-------+ * | 1 | 2 | 3 | * +--------------------+---------------+-------+ * * <-------------- *tio->len_ptr ---------------> * <------- bi_size -------> * <-- n_sectors --> * * Region 1 was already iterated over with bio_advance or similar function. * (it may be empty if the target doesn't use bio_advance) * Region 2 is the remaining bio size that the target wants to process. * (it may be empty if region 1 is non-empty, although there is no reason * to make it empty) * The target requires that region 3 is to be sent in the next bio. * * If the target wants to receive multiple copies of the bio (via num_*bios, etc), * the partially processed part (the sum of regions 1+2) must be the same for all * copies of the bio. 
*/ void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors) { struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT; BUG_ON(bio->bi_opf & REQ_PREFLUSH); BUG_ON(bi_size > *tio->len_ptr); BUG_ON(n_sectors > bi_size); *tio->len_ptr -= bi_size - n_sectors; bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT; } EXPORT_SYMBOL_GPL(dm_accept_partial_bio); /* * The zone descriptors obtained with a zone report indicate * zone positions within the target device. The zone descriptors * must be remapped to match their position within the dm device. * A target may call dm_remap_zone_report after completion of a * REQ_OP_ZONE_REPORT bio to remap the zone descriptors obtained * from the target device mapping to the dm device. */ void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start) { #ifdef CONFIG_BLK_DEV_ZONED struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); struct bio *report_bio = tio->io->bio; struct blk_zone_report_hdr *hdr = NULL; struct blk_zone *zone; unsigned int nr_rep = 0; unsigned int ofst; struct bio_vec bvec; struct bvec_iter iter; void *addr; if (bio->bi_status) return; /* * Remap the start sector of the reported zones. For sequential zones, * also remap the write pointer position. */ bio_for_each_segment(bvec, report_bio, iter) { addr = kmap_atomic(bvec.bv_page); /* Remember the report header in the first page */ if (!hdr) { hdr = addr; ofst = sizeof(struct blk_zone_report_hdr); } else ofst = 0; /* Set zones start sector */ while (hdr->nr_zones && ofst < bvec.bv_len) { zone = addr + ofst; if (zone->start >= start + ti->len) { hdr->nr_zones = 0; break; } zone->start = zone->start + ti->begin - start; if (zone->type != BLK_ZONE_TYPE_CONVENTIONAL) { if (zone->cond == BLK_ZONE_COND_FULL) zone->wp = zone->start + zone->len; else if (zone->cond == BLK_ZONE_COND_EMPTY) zone->wp = zone->start; else zone->wp = zone->wp + ti->begin - start; } ofst += sizeof(struct blk_zone); hdr->nr_zones--; nr_rep++; } if (addr != hdr) kunmap_atomic(addr); if (!hdr->nr_zones) break; } if (hdr) { hdr->nr_zones = nr_rep; kunmap_atomic(hdr); } bio_advance(report_bio, report_bio->bi_iter.bi_size); #else /* !CONFIG_BLK_DEV_ZONED */ bio->bi_status = BLK_STS_NOTSUPP; #endif } EXPORT_SYMBOL_GPL(dm_remap_zone_report); /* * Flush current->bio_list when the target map method blocks. * This fixes deadlocks in snapshot and possibly in other targets. 
*/ struct dm_offload { struct blk_plug plug; struct blk_plug_cb cb; }; static void flush_current_bio_list(struct blk_plug_cb *cb, bool from_schedule) { struct dm_offload *o = container_of(cb, struct dm_offload, cb); struct bio_list list; struct bio *bio; int i; INIT_LIST_HEAD(&o->cb.list); if (unlikely(!current->bio_list)) return; for (i = 0; i < 2; i++) { list = current->bio_list[i]; bio_list_init(&current->bio_list[i]); while ((bio = bio_list_pop(&list))) { struct bio_set *bs = bio->bi_pool; if (unlikely(!bs) || bs == fs_bio_set || !bs->rescue_workqueue) { bio_list_add(&current->bio_list[i], bio); continue; } spin_lock(&bs->rescue_lock); bio_list_add(&bs->rescue_list, bio); queue_work(bs->rescue_workqueue, &bs->rescue_work); spin_unlock(&bs->rescue_lock); } } } static void dm_offload_start(struct dm_offload *o) { blk_start_plug(&o->plug); o->cb.callback = flush_current_bio_list; list_add(&o->cb.list, &current->plug->cb_list); } static void dm_offload_end(struct dm_offload *o) { list_del(&o->cb.list); blk_finish_plug(&o->plug); } static void __map_bio(struct dm_target_io *tio) { int r; sector_t sector; struct dm_offload o; struct bio *clone = &tio->clone; struct dm_target *ti = tio->ti; clone->bi_end_io = clone_endio; /* * Map the clone. If r == 0 we don't need to do * anything, the target has assumed ownership of * this io. */ atomic_inc(&tio->io->io_count); sector = clone->bi_iter.bi_sector; dm_offload_start(&o); r = ti->type->map(ti, clone); dm_offload_end(&o); switch (r) { case DM_MAPIO_SUBMITTED: break; case DM_MAPIO_REMAPPED: /* the bio has been remapped so dispatch it */ trace_block_bio_remap(clone->bi_disk->queue, clone, bio_dev(tio->io->bio), sector); generic_make_request(clone); break; case DM_MAPIO_KILL: dec_pending(tio->io, BLK_STS_IOERR); free_tio(tio); break; case DM_MAPIO_REQUEUE: dec_pending(tio->io, BLK_STS_DM_REQUEUE); free_tio(tio); break; default: DMWARN("unimplemented target map return value: %d", r); BUG(); } } struct clone_info { struct mapped_device *md; struct dm_table *map; struct bio *bio; struct dm_io *io; sector_t sector; unsigned sector_count; }; static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len) { bio->bi_iter.bi_sector = sector; bio->bi_iter.bi_size = to_bytes(len); } /* * Creates a bio that consists of range of complete bvecs. 
*/ static int clone_bio(struct dm_target_io *tio, struct bio *bio, sector_t sector, unsigned len) { struct bio *clone = &tio->clone; __bio_clone_fast(clone, bio); if (unlikely(bio_integrity(bio) != NULL)) { int r; if (unlikely(!dm_target_has_integrity(tio->ti->type) && !dm_target_passes_integrity(tio->ti->type))) { DMWARN("%s: the target %s doesn't support integrity data.", dm_device_name(tio->io->md), tio->ti->type->name); return -EIO; } r = bio_integrity_clone(clone, bio, GFP_NOIO); if (r < 0) return r; } if (bio_op(bio) != REQ_OP_ZONE_REPORT) bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector)); clone->bi_iter.bi_size = to_bytes(len); if (unlikely(bio_integrity(bio) != NULL)) bio_integrity_trim(clone); return 0; } static struct dm_target_io *alloc_tio(struct clone_info *ci, struct dm_target *ti, unsigned target_bio_nr) { struct dm_target_io *tio; struct bio *clone; clone = bio_alloc_bioset(GFP_NOIO, 0, ci->md->bs); tio = container_of(clone, struct dm_target_io, clone); tio->io = ci->io; tio->ti = ti; tio->target_bio_nr = target_bio_nr; return tio; } static void __clone_and_map_simple_bio(struct clone_info *ci, struct dm_target *ti, unsigned target_bio_nr, unsigned *len) { struct dm_target_io *tio = alloc_tio(ci, ti, target_bio_nr); struct bio *clone = &tio->clone; tio->len_ptr = len; __bio_clone_fast(clone, ci->bio); if (len) bio_setup_sector(clone, ci->sector, *len); __map_bio(tio); } static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti, unsigned num_bios, unsigned *len) { unsigned target_bio_nr; for (target_bio_nr = 0; target_bio_nr < num_bios; target_bio_nr++) __clone_and_map_simple_bio(ci, ti, target_bio_nr, len); } static int __send_empty_flush(struct clone_info *ci) { unsigned target_nr = 0; struct dm_target *ti; BUG_ON(bio_has_data(ci->bio)); while ((ti = dm_table_get_target(ci->map, target_nr++))) __send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL); return 0; } static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti, sector_t sector, unsigned *len) { struct bio *bio = ci->bio; struct dm_target_io *tio; unsigned target_bio_nr; unsigned num_target_bios = 1; int r = 0; /* * Does the target want to receive duplicate copies of the bio? 
*/ if (bio_data_dir(bio) == WRITE && ti->num_write_bios) num_target_bios = ti->num_write_bios(ti, bio); for (target_bio_nr = 0; target_bio_nr < num_target_bios; target_bio_nr++) { tio = alloc_tio(ci, ti, target_bio_nr); tio->len_ptr = len; r = clone_bio(tio, bio, sector, *len); if (r < 0) { free_tio(tio); break; } __map_bio(tio); } return r; } typedef unsigned (*get_num_bios_fn)(struct dm_target *ti); static unsigned get_num_discard_bios(struct dm_target *ti) { return ti->num_discard_bios; } static unsigned get_num_write_same_bios(struct dm_target *ti) { return ti->num_write_same_bios; } static unsigned get_num_write_zeroes_bios(struct dm_target *ti) { return ti->num_write_zeroes_bios; } typedef bool (*is_split_required_fn)(struct dm_target *ti); static bool is_split_required_for_discard(struct dm_target *ti) { return ti->split_discard_bios; } static int __send_changing_extent_only(struct clone_info *ci, get_num_bios_fn get_num_bios, is_split_required_fn is_split_required) { struct dm_target *ti; unsigned len; unsigned num_bios; do { ti = dm_table_find_target(ci->map, ci->sector); if (!dm_target_is_valid(ti)) return -EIO; /* * Even though the device advertised support for this type of * request, that does not mean every target supports it, and * reconfiguration might also have changed that since the * check was performed. */ num_bios = get_num_bios ? get_num_bios(ti) : 0; if (!num_bios) return -EOPNOTSUPP; if (is_split_required && !is_split_required(ti)) len = min((sector_t)ci->sector_count, max_io_len_target_boundary(ci->sector, ti)); else len = min((sector_t)ci->sector_count, max_io_len(ci->sector, ti)); __send_duplicate_bios(ci, ti, num_bios, &len); ci->sector += len; } while (ci->sector_count -= len); return 0; } static int __send_discard(struct clone_info *ci) { return __send_changing_extent_only(ci, get_num_discard_bios, is_split_required_for_discard); } static int __send_write_same(struct clone_info *ci) { return __send_changing_extent_only(ci, get_num_write_same_bios, NULL); } static int __send_write_zeroes(struct clone_info *ci) { return __send_changing_extent_only(ci, get_num_write_zeroes_bios, NULL); } /* * Select the correct strategy for processing a non-flush bio. */ static int __split_and_process_non_flush(struct clone_info *ci) { struct bio *bio = ci->bio; struct dm_target *ti; unsigned len; int r; if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) return __send_discard(ci); else if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME)) return __send_write_same(ci); else if (unlikely(bio_op(bio) == REQ_OP_WRITE_ZEROES)) return __send_write_zeroes(ci); ti = dm_table_find_target(ci->map, ci->sector); if (!dm_target_is_valid(ti)) return -EIO; if (bio_op(bio) == REQ_OP_ZONE_REPORT) len = ci->sector_count; else len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count); r = __clone_and_map_data_bio(ci, ti, ci->sector, &len); if (r < 0) return r; ci->sector += len; ci->sector_count -= len; return 0; } /* * Entry point to split a bio into clones and submit them to the targets. 
*/ static void __split_and_process_bio(struct mapped_device *md, struct dm_table *map, struct bio *bio) { struct clone_info ci; int error = 0; if (unlikely(!map)) { bio_io_error(bio); return; } ci.map = map; ci.md = md; ci.io = alloc_io(md); ci.io->status = 0; atomic_set(&ci.io->io_count, 1); ci.io->bio = bio; ci.io->md = md; spin_lock_init(&ci.io->endio_lock); ci.sector = bio->bi_iter.bi_sector; start_io_acct(ci.io); if (bio->bi_opf & REQ_PREFLUSH) { ci.bio = &ci.md->flush_bio; ci.sector_count = 0; error = __send_empty_flush(&ci); /* dec_pending submits any data associated with flush */ } else if (bio_op(bio) == REQ_OP_ZONE_RESET) { ci.bio = bio; ci.sector_count = 0; error = __split_and_process_non_flush(&ci); } else { ci.bio = bio; ci.sector_count = bio_sectors(bio); while (ci.sector_count && !error) error = __split_and_process_non_flush(&ci); } /* drop the extra reference count */ dec_pending(ci.io, errno_to_blk_status(error)); } /*----------------------------------------------------------------- * CRUD END *---------------------------------------------------------------*/ /* * The request function that just remaps the bio built up by * dm_merge_bvec. */ static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio) { int rw = bio_data_dir(bio); struct mapped_device *md = q->queuedata; int srcu_idx; struct dm_table *map; map = dm_get_live_table(md, &srcu_idx); generic_start_io_acct(q, rw, bio_sectors(bio), &dm_disk(md)->part0); /* if we're suspended, we have to queue this io for later */ if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) { dm_put_live_table(md, srcu_idx); if (!(bio->bi_opf & REQ_RAHEAD)) queue_io(md, bio); else bio_io_error(bio); return BLK_QC_T_NONE; } __split_and_process_bio(md, map, bio); dm_put_live_table(md, srcu_idx); return BLK_QC_T_NONE; } static int dm_any_congested(void *congested_data, int bdi_bits) { int r = bdi_bits; struct mapped_device *md = congested_data; struct dm_table *map; if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { if (dm_request_based(md)) { /* * With request-based DM we only need to check the * top-level queue for congestion. */ r = md->queue->backing_dev_info->wb.state & bdi_bits; } else { map = dm_get_live_table_fast(md); if (map) r = dm_table_any_congested(map, bdi_bits); dm_put_live_table_fast(md); } } return r; } /*----------------------------------------------------------------- * An IDR is used to keep track of allocated minor numbers. *---------------------------------------------------------------*/ static void free_minor(int minor) { spin_lock(&_minor_lock); idr_remove(&_minor_idr, minor); spin_unlock(&_minor_lock); } /* * See if the device with a specific minor # is free. */ static int specific_minor(int minor) { int r; if (minor >= (1 << MINORBITS)) return -EINVAL; idr_preload(GFP_KERNEL); spin_lock(&_minor_lock); r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT); spin_unlock(&_minor_lock); idr_preload_end(); if (r < 0) return r == -ENOSPC ? 
-EBUSY : r; return 0; } static int next_free_minor(int *minor) { int r; idr_preload(GFP_KERNEL); spin_lock(&_minor_lock); r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT); spin_unlock(&_minor_lock); idr_preload_end(); if (r < 0) return r; *minor = r; return 0; } static const struct block_device_operations dm_blk_dops; static const struct dax_operations dm_dax_ops; static void dm_wq_work(struct work_struct *work); void dm_init_md_queue(struct mapped_device *md) { /* * Request-based dm devices cannot be stacked on top of bio-based dm * devices. The type of this dm device may not have been decided yet. * The type is decided at the first table loading time. * To prevent problematic device stacking, clear the queue flag * for request stacking support until then. * * This queue is new, so no concurrency on the queue_flags. */ queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue); /* * Initialize data that will only be used by a non-blk-mq DM queue * - must do so here (in alloc_dev callchain) before queue is used */ md->queue->queuedata = md; md->queue->backing_dev_info->congested_data = md; } void dm_init_normal_md_queue(struct mapped_device *md) { md->use_blk_mq = false; dm_init_md_queue(md); /* * Initialize aspects of queue that aren't relevant for blk-mq */ md->queue->backing_dev_info->congested_fn = dm_any_congested; } static void cleanup_mapped_device(struct mapped_device *md) { if (md->wq) destroy_workqueue(md->wq); if (md->kworker_task) kthread_stop(md->kworker_task); mempool_destroy(md->io_pool); if (md->bs) bioset_free(md->bs); if (md->dax_dev) { kill_dax(md->dax_dev); put_dax(md->dax_dev); md->dax_dev = NULL; } if (md->disk) { spin_lock(&_minor_lock); md->disk->private_data = NULL; spin_unlock(&_minor_lock); del_gendisk(md->disk); put_disk(md->disk); } if (md->queue) blk_cleanup_queue(md->queue); cleanup_srcu_struct(&md->io_barrier); if (md->bdev) { bdput(md->bdev); md->bdev = NULL; } dm_mq_cleanup_mapped_device(md); } /* * Allocate and initialise a blank device with a given minor. 
*/ static struct mapped_device *alloc_dev(int minor) { int r, numa_node_id = dm_get_numa_node(); struct dax_device *dax_dev; struct mapped_device *md; void *old_md; md = kvzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id); if (!md) { DMWARN("unable to allocate device, out of memory."); return NULL; } if (!try_module_get(THIS_MODULE)) goto bad_module_get; /* get a minor number for the dev */ if (minor == DM_ANY_MINOR) r = next_free_minor(&minor); else r = specific_minor(minor); if (r < 0) goto bad_minor; r = init_srcu_struct(&md->io_barrier); if (r < 0) goto bad_io_barrier; md->numa_node_id = numa_node_id; md->use_blk_mq = dm_use_blk_mq_default(); md->init_tio_pdu = false; md->type = DM_TYPE_NONE; mutex_init(&md->suspend_lock); mutex_init(&md->type_lock); mutex_init(&md->table_devices_lock); spin_lock_init(&md->deferred_lock); atomic_set(&md->holders, 1); atomic_set(&md->open_count, 0); atomic_set(&md->event_nr, 0); atomic_set(&md->uevent_seq, 0); INIT_LIST_HEAD(&md->uevent_list); INIT_LIST_HEAD(&md->table_devices); spin_lock_init(&md->uevent_lock); md->queue = blk_alloc_queue_node(GFP_KERNEL, numa_node_id); if (!md->queue) goto bad; dm_init_md_queue(md); md->disk = alloc_disk_node(1, numa_node_id); if (!md->disk) goto bad; atomic_set(&md->pending[0], 0); atomic_set(&md->pending[1], 0); init_waitqueue_head(&md->wait); INIT_WORK(&md->work, dm_wq_work); init_waitqueue_head(&md->eventq); init_completion(&md->kobj_holder.completion); md->kworker_task = NULL; md->disk->major = _major; md->disk->first_minor = minor; md->disk->fops = &dm_blk_dops; md->disk->queue = md->queue; md->disk->private_data = md; sprintf(md->disk->disk_name, "dm-%d", minor); dax_dev = alloc_dax(md, md->disk->disk_name, &dm_dax_ops); if (!dax_dev) goto bad; md->dax_dev = dax_dev; add_disk(md->disk); format_dev_t(md->name, MKDEV(_major, minor)); md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0); if (!md->wq) goto bad; md->bdev = bdget_disk(md->disk, 0); if (!md->bdev) goto bad; bio_init(&md->flush_bio, NULL, 0); bio_set_dev(&md->flush_bio, md->bdev); md->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC; dm_stats_init(&md->stats); /* Populate the mapping, nobody knows we exist yet */ spin_lock(&_minor_lock); old_md = idr_replace(&_minor_idr, md, minor); spin_unlock(&_minor_lock); BUG_ON(old_md != MINOR_ALLOCED); return md; bad: cleanup_mapped_device(md); bad_io_barrier: free_minor(minor); bad_minor: module_put(THIS_MODULE); bad_module_get: kvfree(md); return NULL; } static void unlock_fs(struct mapped_device *md); static void free_dev(struct mapped_device *md) { int minor = MINOR(disk_devt(md->disk)); unlock_fs(md); cleanup_mapped_device(md); free_table_devices(&md->table_devices); dm_stats_cleanup(&md->stats); free_minor(minor); module_put(THIS_MODULE); kvfree(md); } static void __bind_mempools(struct mapped_device *md, struct dm_table *t) { struct dm_md_mempools *p = dm_table_get_md_mempools(t); if (md->bs) { /* The md already has necessary mempools. */ if (dm_table_bio_based(t)) { /* * Reload bioset because front_pad may have changed * because a different table was loaded. */ bioset_free(md->bs); md->bs = p->bs; p->bs = NULL; } /* * There's no need to reload with request-based dm * because the size of front_pad doesn't change. * Note for future: If you are to reload bioset, * prep-ed requests in the queue may refer * to bio from the old bioset, so you must walk * through the queue to unprep. 
*/ goto out; } BUG_ON(!p || md->io_pool || md->bs); md->io_pool = p->io_pool; p->io_pool = NULL; md->bs = p->bs; p->bs = NULL; out: /* mempool bind completed, no longer need any mempools in the table */ dm_table_free_md_mempools(t); } /* * Bind a table to the device. */ static void event_callback(void *context) { unsigned long flags; LIST_HEAD(uevents); struct mapped_device *md = (struct mapped_device *) context; spin_lock_irqsave(&md->uevent_lock, flags); list_splice_init(&md->uevent_list, &uevents); spin_unlock_irqrestore(&md->uevent_lock, flags); dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj); atomic_inc(&md->event_nr); wake_up(&md->eventq); dm_issue_global_event(); } /* * Protected by md->suspend_lock obtained by dm_swap_table(). */ static void __set_size(struct mapped_device *md, sector_t size) { lockdep_assert_held(&md->suspend_lock); set_capacity(md->disk, size); i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT); } /* * Returns old map, which caller must destroy. */ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t, struct queue_limits *limits) { struct dm_table *old_map; struct request_queue *q = md->queue; sector_t size; lockdep_assert_held(&md->suspend_lock); size = dm_table_get_size(t); /* * Wipe any geometry if the size of the table changed. */ if (size != dm_get_size(md)) memset(&md->geometry, 0, sizeof(md->geometry)); __set_size(md, size); dm_table_event_callback(t, event_callback, md); /* * The queue hasn't been stopped yet, if the old table type wasn't * for request-based during suspension. So stop it to prevent * I/O mapping before resume. * This must be done before setting the queue restrictions, * because request-based dm may be run just after the setting. */ if (dm_table_request_based(t)) { dm_stop_queue(q); /* * Leverage the fact that request-based DM targets are * immutable singletons and establish md->immutable_target * - used to optimize both dm_request_fn and dm_mq_queue_rq */ md->immutable_target = dm_table_get_immutable_target(t); } __bind_mempools(md, t); old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); rcu_assign_pointer(md->map, (void *)t); md->immutable_target_type = dm_table_get_immutable_target_type(t); dm_table_set_restrictions(t, q, limits); if (old_map) dm_sync_table(md); return old_map; } /* * Returns unbound table for the caller to free. */ static struct dm_table *__unbind(struct mapped_device *md) { struct dm_table *map = rcu_dereference_protected(md->map, 1); if (!map) return NULL; dm_table_event_callback(map, NULL, NULL); RCU_INIT_POINTER(md->map, NULL); dm_sync_table(md); return map; } /* * Constructor for a new device. */ int dm_create(int minor, struct mapped_device **result) { struct mapped_device *md; md = alloc_dev(minor); if (!md) return -ENXIO; dm_sysfs_init(md); *result = md; return 0; } /* * Functions to manage md->type. * All are required to hold md->type_lock. */ void dm_lock_md_type(struct mapped_device *md) { mutex_lock(&md->type_lock); } void dm_unlock_md_type(struct mapped_device *md) { mutex_unlock(&md->type_lock); } void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type) { BUG_ON(!mutex_is_locked(&md->type_lock)); md->type = type; } enum dm_queue_mode dm_get_md_type(struct mapped_device *md) { return md->type; } struct target_type *dm_get_immutable_target_type(struct mapped_device *md) { return md->immutable_target_type; } /* * The queue_limits are only valid as long as you have a reference * count on 'md'. 
*/ struct queue_limits *dm_get_queue_limits(struct mapped_device *md) { BUG_ON(!atomic_read(&md->holders)); return &md->queue->limits; } EXPORT_SYMBOL_GPL(dm_get_queue_limits); /* * Setup the DM device's queue based on md's type */ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t) { int r; enum dm_queue_mode type = dm_get_md_type(md); switch (type) { case DM_TYPE_REQUEST_BASED: r = dm_old_init_request_queue(md, t); if (r) { DMERR("Cannot initialize queue for request-based mapped device"); return r; } break; case DM_TYPE_MQ_REQUEST_BASED: r = dm_mq_init_request_queue(md, t); if (r) { DMERR("Cannot initialize queue for request-based dm-mq mapped device"); return r; } break; case DM_TYPE_BIO_BASED: case DM_TYPE_DAX_BIO_BASED: dm_init_normal_md_queue(md); blk_queue_make_request(md->queue, dm_make_request); /* * DM handles splitting bios as needed. Free the bio_split bioset * since it won't be used (saves 1 process per bio-based DM device). */ bioset_free(md->queue->bio_split); md->queue->bio_split = NULL; if (type == DM_TYPE_DAX_BIO_BASED) queue_flag_set_unlocked(QUEUE_FLAG_DAX, md->queue); break; case DM_TYPE_NONE: WARN_ON_ONCE(true); break; } return 0; } struct mapped_device *dm_get_md(dev_t dev) { struct mapped_device *md; unsigned minor = MINOR(dev); if (MAJOR(dev) != _major || minor >= (1 << MINORBITS)) return NULL; spin_lock(&_minor_lock); md = idr_find(&_minor_idr, minor); if (md) { if ((md == MINOR_ALLOCED || (MINOR(disk_devt(dm_disk(md))) != minor) || dm_deleting_md(md) || test_bit(DMF_FREEING, &md->flags))) { md = NULL; goto out; } dm_get(md); } out: spin_unlock(&_minor_lock); return md; } EXPORT_SYMBOL_GPL(dm_get_md); void *dm_get_mdptr(struct mapped_device *md) { return md->interface_ptr; } void dm_set_mdptr(struct mapped_device *md, void *ptr) { md->interface_ptr = ptr; } void dm_get(struct mapped_device *md) { atomic_inc(&md->holders); BUG_ON(test_bit(DMF_FREEING, &md->flags)); } int dm_hold(struct mapped_device *md) { spin_lock(&_minor_lock); if (test_bit(DMF_FREEING, &md->flags)) { spin_unlock(&_minor_lock); return -EBUSY; } dm_get(md); spin_unlock(&_minor_lock); return 0; } EXPORT_SYMBOL_GPL(dm_hold); const char *dm_device_name(struct mapped_device *md) { return md->name; } EXPORT_SYMBOL_GPL(dm_device_name); static void __dm_destroy(struct mapped_device *md, bool wait) { struct request_queue *q = dm_get_md_queue(md); struct dm_table *map; int srcu_idx; might_sleep(); spin_lock(&_minor_lock); idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md)))); set_bit(DMF_FREEING, &md->flags); spin_unlock(&_minor_lock); blk_set_queue_dying(q); if (dm_request_based(md) && md->kworker_task) kthread_flush_worker(&md->kworker); /* * Take suspend_lock so that presuspend and postsuspend methods * do not race with internal suspend. */ mutex_lock(&md->suspend_lock); map = dm_get_live_table(md, &srcu_idx); if (!dm_suspended_md(md)) { dm_table_presuspend_targets(map); dm_table_postsuspend_targets(map); } /* dm_put_live_table must be before msleep, otherwise deadlock is possible */ dm_put_live_table(md, srcu_idx); mutex_unlock(&md->suspend_lock); /* * Rare, but there may be I/O requests still going to complete, * for example. Wait for all references to disappear. * No one should increment the reference count of the mapped_device, * after the mapped_device state becomes DMF_FREEING. */ if (wait) while (atomic_read(&md->holders)) msleep(1); else if (atomic_read(&md->holders)) DMWARN("%s: Forcibly removing mapped_device still in use! 
(%d users)", dm_device_name(md), atomic_read(&md->holders)); dm_sysfs_exit(md); dm_table_destroy(__unbind(md)); free_dev(md); } void dm_destroy(struct mapped_device *md) { __dm_destroy(md, true); } void dm_destroy_immediate(struct mapped_device *md) { __dm_destroy(md, false); } void dm_put(struct mapped_device *md) { atomic_dec(&md->holders); } EXPORT_SYMBOL_GPL(dm_put); static int dm_wait_for_completion(struct mapped_device *md, long task_state) { int r = 0; DEFINE_WAIT(wait); while (1) { prepare_to_wait(&md->wait, &wait, task_state); if (!md_in_flight(md)) break; if (signal_pending_state(task_state, current)) { r = -EINTR; break; } io_schedule(); } finish_wait(&md->wait, &wait); return r; } /* * Process the deferred bios */ static void dm_wq_work(struct work_struct *work) { struct mapped_device *md = container_of(work, struct mapped_device, work); struct bio *c; int srcu_idx; struct dm_table *map; map = dm_get_live_table(md, &srcu_idx); while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { spin_lock_irq(&md->deferred_lock); c = bio_list_pop(&md->deferred); spin_unlock_irq(&md->deferred_lock); if (!c) break; if (dm_request_based(md)) generic_make_request(c); else __split_and_process_bio(md, map, c); } dm_put_live_table(md, srcu_idx); } static void dm_queue_flush(struct mapped_device *md) { clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); smp_mb__after_atomic(); queue_work(md->wq, &md->work); } /* * Swap in a new table, returning the old one for the caller to destroy. */ struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table) { struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL); struct queue_limits limits; int r; mutex_lock(&md->suspend_lock); /* device must be suspended */ if (!dm_suspended_md(md)) goto out; /* * If the new table has no data devices, retain the existing limits. * This helps multipath with queue_if_no_path if all paths disappear, * then new I/O is queued based on these limits, and then some paths * reappear. */ if (dm_table_has_no_data_devices(table)) { live_map = dm_get_live_table_fast(md); if (live_map) limits = md->queue->limits; dm_put_live_table_fast(md); } if (!live_map) { r = dm_calculate_queue_limits(table, &limits); if (r) { map = ERR_PTR(r); goto out; } } map = __bind(md, table, &limits); dm_issue_global_event(); out: mutex_unlock(&md->suspend_lock); return map; } /* * Functions to lock and unlock any filesystem running on the * device. */ static int lock_fs(struct mapped_device *md) { int r; WARN_ON(md->frozen_sb); md->frozen_sb = freeze_bdev(md->bdev); if (IS_ERR(md->frozen_sb)) { r = PTR_ERR(md->frozen_sb); md->frozen_sb = NULL; return r; } set_bit(DMF_FROZEN, &md->flags); return 0; } static void unlock_fs(struct mapped_device *md) { if (!test_bit(DMF_FROZEN, &md->flags)) return; thaw_bdev(md->bdev, md->frozen_sb); md->frozen_sb = NULL; clear_bit(DMF_FROZEN, &md->flags); } /* * @suspend_flags: DM_SUSPEND_LOCKFS_FLAG and/or DM_SUSPEND_NOFLUSH_FLAG * @task_state: e.g. TASK_INTERRUPTIBLE or TASK_UNINTERRUPTIBLE * @dmf_suspended_flag: DMF_SUSPENDED or DMF_SUSPENDED_INTERNALLY * * If __dm_suspend returns 0, the device is completely quiescent * now. There is no request-processing activity. All new requests * are being added to md->deferred list. 
*/ static int __dm_suspend(struct mapped_device *md, struct dm_table *map, unsigned suspend_flags, long task_state, int dmf_suspended_flag) { bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG; bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG; int r; lockdep_assert_held(&md->suspend_lock); /* * DMF_NOFLUSH_SUSPENDING must be set before presuspend. * This flag is cleared before dm_suspend returns. */ if (noflush) set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); else pr_debug("%s: suspending with flush\n", dm_device_name(md)); /* * This gets reverted if there's an error later and the targets * provide the .presuspend_undo hook. */ dm_table_presuspend_targets(map); /* * Flush I/O to the device. * Any I/O submitted after lock_fs() may not be flushed. * noflush takes precedence over do_lockfs. * (lock_fs() flushes I/Os and waits for them to complete.) */ if (!noflush && do_lockfs) { r = lock_fs(md); if (r) { dm_table_presuspend_undo_targets(map); return r; } } /* * Here we must make sure that no processes are submitting requests * to target drivers i.e. no one may be executing * __split_and_process_bio. This is called from dm_request and * dm_wq_work. * * To get all processes out of __split_and_process_bio in dm_request, * we take the write lock. To prevent any process from reentering * __split_and_process_bio from dm_request and quiesce the thread * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call * flush_workqueue(md->wq). */ set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); if (map) synchronize_srcu(&md->io_barrier); /* * Stop md->queue before flushing md->wq in case request-based * dm defers requests to md->wq from md->queue. */ if (dm_request_based(md)) { dm_stop_queue(md->queue); if (md->kworker_task) kthread_flush_worker(&md->kworker); } flush_workqueue(md->wq); /* * At this point no more requests are entering target request routines. * We call dm_wait_for_completion to wait for all existing requests * to finish. */ r = dm_wait_for_completion(md, task_state); if (!r) set_bit(dmf_suspended_flag, &md->flags); if (noflush) clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); if (map) synchronize_srcu(&md->io_barrier); /* were we interrupted ? */ if (r < 0) { dm_queue_flush(md); if (dm_request_based(md)) dm_start_queue(md->queue); unlock_fs(md); dm_table_presuspend_undo_targets(map); /* pushback list is already flushed, so skip flush */ } return r; } /* * We need to be able to change a mapping table under a mounted * filesystem. For example we might want to move some data in * the background. Before the table can be swapped with * dm_bind_table, dm_suspend must be called to flush any in * flight bios and ensure that any further io gets deferred. */ /* * Suspend mechanism in request-based dm. * * 1. Flush all I/Os by lock_fs() if needed. * 2. Stop dispatching any I/O by stopping the request_queue. * 3. Wait for all in-flight I/Os to be completed or requeued. * * To abort suspend, start the request_queue. 
*/ int dm_suspend(struct mapped_device *md, unsigned suspend_flags) { struct dm_table *map = NULL; int r = 0; retry: mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING); if (dm_suspended_md(md)) { r = -EINVAL; goto out_unlock; } if (dm_suspended_internally_md(md)) { /* already internally suspended, wait for internal resume */ mutex_unlock(&md->suspend_lock); r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE); if (r) return r; goto retry; } map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED); if (r) goto out_unlock; dm_table_postsuspend_targets(map); out_unlock: mutex_unlock(&md->suspend_lock); return r; } static int __dm_resume(struct mapped_device *md, struct dm_table *map) { if (map) { int r = dm_table_resume_targets(map); if (r) return r; } dm_queue_flush(md); /* * Flushing deferred I/Os must be done after targets are resumed * so that mapping of targets can work correctly. * Request-based dm is queueing the deferred I/Os in its request_queue. */ if (dm_request_based(md)) dm_start_queue(md->queue); unlock_fs(md); return 0; } int dm_resume(struct mapped_device *md) { int r; struct dm_table *map = NULL; retry: r = -EINVAL; mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING); if (!dm_suspended_md(md)) goto out; if (dm_suspended_internally_md(md)) { /* already internally suspended, wait for internal resume */ mutex_unlock(&md->suspend_lock); r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE); if (r) return r; goto retry; } map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); if (!map || !dm_table_get_size(map)) goto out; r = __dm_resume(md, map); if (r) goto out; clear_bit(DMF_SUSPENDED, &md->flags); out: mutex_unlock(&md->suspend_lock); return r; } /* * Internal suspend/resume works like userspace-driven suspend. It waits * until all bios finish and prevents issuing new bios to the target drivers. * It may be used only from the kernel. */ static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags) { struct dm_table *map = NULL; lockdep_assert_held(&md->suspend_lock); if (md->internal_suspend_count++) return; /* nested internal suspend */ if (dm_suspended_md(md)) { set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); return; /* nest suspend */ } map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); /* * Using TASK_UNINTERRUPTIBLE because only NOFLUSH internal suspend is * supported. Properly supporting a TASK_INTERRUPTIBLE internal suspend * would require changing .presuspend to return an error -- avoid this * until there is a need for more elaborate variants of internal suspend. 
*/ (void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE, DMF_SUSPENDED_INTERNALLY); dm_table_postsuspend_targets(map); } static void __dm_internal_resume(struct mapped_device *md) { BUG_ON(!md->internal_suspend_count); if (--md->internal_suspend_count) return; /* resume from nested internal suspend */ if (dm_suspended_md(md)) goto done; /* resume from nested suspend */ /* * NOTE: existing callers don't need to call dm_table_resume_targets * (which may fail -- so best to avoid it for now by passing NULL map) */ (void) __dm_resume(md, NULL); done: clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); smp_mb__after_atomic(); wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY); } void dm_internal_suspend_noflush(struct mapped_device *md) { mutex_lock(&md->suspend_lock); __dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG); mutex_unlock(&md->suspend_lock); } EXPORT_SYMBOL_GPL(dm_internal_suspend_noflush); void dm_internal_resume(struct mapped_device *md) { mutex_lock(&md->suspend_lock); __dm_internal_resume(md); mutex_unlock(&md->suspend_lock); } EXPORT_SYMBOL_GPL(dm_internal_resume); /* * Fast variants of internal suspend/resume hold md->suspend_lock, * which prevents interaction with userspace-driven suspend. */ void dm_internal_suspend_fast(struct mapped_device *md) { mutex_lock(&md->suspend_lock); if (dm_suspended_md(md) || dm_suspended_internally_md(md)) return; set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); synchronize_srcu(&md->io_barrier); flush_workqueue(md->wq); dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE); } EXPORT_SYMBOL_GPL(dm_internal_suspend_fast); void dm_internal_resume_fast(struct mapped_device *md) { if (dm_suspended_md(md) || dm_suspended_internally_md(md)) goto done; dm_queue_flush(md); done: mutex_unlock(&md->suspend_lock); } EXPORT_SYMBOL_GPL(dm_internal_resume_fast); /*----------------------------------------------------------------- * Event notification. *---------------------------------------------------------------*/ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action, unsigned cookie) { char udev_cookie[DM_COOKIE_LENGTH]; char *envp[] = { udev_cookie, NULL }; if (!cookie) return kobject_uevent(&disk_to_dev(md->disk)->kobj, action); else { snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u", DM_COOKIE_ENV_VAR_NAME, cookie); return kobject_uevent_env(&disk_to_dev(md->disk)->kobj, action, envp); } } uint32_t dm_next_uevent_seq(struct mapped_device *md) { return atomic_add_return(1, &md->uevent_seq); } uint32_t dm_get_event_nr(struct mapped_device *md) { return atomic_read(&md->event_nr); } int dm_wait_event(struct mapped_device *md, int event_nr) { return wait_event_interruptible(md->eventq, (event_nr != atomic_read(&md->event_nr))); } void dm_uevent_add(struct mapped_device *md, struct list_head *elist) { unsigned long flags; spin_lock_irqsave(&md->uevent_lock, flags); list_add(elist, &md->uevent_list); spin_unlock_irqrestore(&md->uevent_lock, flags); } /* * The gendisk is only valid as long as you have a reference * count on 'md'. 
*/ struct gendisk *dm_disk(struct mapped_device *md) { return md->disk; } EXPORT_SYMBOL_GPL(dm_disk); struct kobject *dm_kobject(struct mapped_device *md) { return &md->kobj_holder.kobj; } struct mapped_device *dm_get_from_kobject(struct kobject *kobj) { struct mapped_device *md; md = container_of(kobj, struct mapped_device, kobj_holder.kobj); if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) return NULL; dm_get(md); return md; } int dm_suspended_md(struct mapped_device *md) { return test_bit(DMF_SUSPENDED, &md->flags); } int dm_suspended_internally_md(struct mapped_device *md) { return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); } int dm_test_deferred_remove_flag(struct mapped_device *md) { return test_bit(DMF_DEFERRED_REMOVE, &md->flags); } int dm_suspended(struct dm_target *ti) { return dm_suspended_md(dm_table_get_md(ti->table)); } EXPORT_SYMBOL_GPL(dm_suspended); int dm_noflush_suspending(struct dm_target *ti) { return __noflush_suspending(dm_table_get_md(ti->table)); } EXPORT_SYMBOL_GPL(dm_noflush_suspending); struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type, unsigned integrity, unsigned per_io_data_size) { struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id); unsigned int pool_size = 0; unsigned int front_pad; if (!pools) return NULL; switch (type) { case DM_TYPE_BIO_BASED: case DM_TYPE_DAX_BIO_BASED: pool_size = dm_get_reserved_bio_based_ios(); front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone); pools->io_pool = mempool_create_slab_pool(pool_size, _io_cache); if (!pools->io_pool) goto out; break; case DM_TYPE_REQUEST_BASED: case DM_TYPE_MQ_REQUEST_BASED: pool_size = dm_get_reserved_rq_based_ios(); front_pad = offsetof(struct dm_rq_clone_bio_info, clone); /* per_io_data_size is used for blk-mq pdu at queue allocation */ break; default: BUG(); } pools->bs = bioset_create(pool_size, front_pad, BIOSET_NEED_RESCUER); if (!pools->bs) goto out; if (integrity && bioset_integrity_create(pools->bs, pool_size)) goto out; return pools; out: dm_free_md_mempools(pools); return NULL; } void dm_free_md_mempools(struct dm_md_mempools *pools) { if (!pools) return; mempool_destroy(pools->io_pool); if (pools->bs) bioset_free(pools->bs); kfree(pools); } struct dm_pr { u64 old_key; u64 new_key; u32 flags; bool fail_early; }; static int dm_call_pr(struct block_device *bdev, iterate_devices_callout_fn fn, void *data) { struct mapped_device *md = bdev->bd_disk->private_data; struct dm_table *table; struct dm_target *ti; int ret = -ENOTTY, srcu_idx; table = dm_get_live_table(md, &srcu_idx); if (!table || !dm_table_get_size(table)) goto out; /* We only support devices that have a single target */ if (dm_table_get_num_targets(table) != 1) goto out; ti = dm_table_get_target(table, 0); ret = -EINVAL; if (!ti->type->iterate_devices) goto out; ret = ti->type->iterate_devices(ti, fn, data); out: dm_put_live_table(md, srcu_idx); return ret; } /* * For register / unregister we need to manually call out to every path. 
*/ static int __dm_pr_register(struct dm_target *ti, struct dm_dev *dev, sector_t start, sector_t len, void *data) { struct dm_pr *pr = data; const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops; if (!ops || !ops->pr_register) return -EOPNOTSUPP; return ops->pr_register(dev->bdev, pr->old_key, pr->new_key, pr->flags); } static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key, u32 flags) { struct dm_pr pr = { .old_key = old_key, .new_key = new_key, .flags = flags, .fail_early = true, }; int ret; ret = dm_call_pr(bdev, __dm_pr_register, &pr); if (ret && new_key) { /* unregister all paths if we failed to register any path */ pr.old_key = new_key; pr.new_key = 0; pr.flags = 0; pr.fail_early = false; dm_call_pr(bdev, __dm_pr_register, &pr); } return ret; } static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type, u32 flags) { struct mapped_device *md = bdev->bd_disk->private_data; const struct pr_ops *ops; fmode_t mode; int r; r = dm_grab_bdev_for_ioctl(md, &bdev, &mode); if (r < 0) return r; ops = bdev->bd_disk->fops->pr_ops; if (ops && ops->pr_reserve) r = ops->pr_reserve(bdev, key, type, flags); else r = -EOPNOTSUPP; bdput(bdev); return r; } static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type) { struct mapped_device *md = bdev->bd_disk->private_data; const struct pr_ops *ops; fmode_t mode; int r; r = dm_grab_bdev_for_ioctl(md, &bdev, &mode); if (r < 0) return r; ops = bdev->bd_disk->fops->pr_ops; if (ops && ops->pr_release) r = ops->pr_release(bdev, key, type); else r = -EOPNOTSUPP; bdput(bdev); return r; } static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key, enum pr_type type, bool abort) { struct mapped_device *md = bdev->bd_disk->private_data; const struct pr_ops *ops; fmode_t mode; int r; r = dm_grab_bdev_for_ioctl(md, &bdev, &mode); if (r < 0) return r; ops = bdev->bd_disk->fops->pr_ops; if (ops && ops->pr_preempt) r = ops->pr_preempt(bdev, old_key, new_key, type, abort); else r = -EOPNOTSUPP; bdput(bdev); return r; } static int dm_pr_clear(struct block_device *bdev, u64 key) { struct mapped_device *md = bdev->bd_disk->private_data; const struct pr_ops *ops; fmode_t mode; int r; r = dm_grab_bdev_for_ioctl(md, &bdev, &mode); if (r < 0) return r; ops = bdev->bd_disk->fops->pr_ops; if (ops && ops->pr_clear) r = ops->pr_clear(bdev, key); else r = -EOPNOTSUPP; bdput(bdev); return r; } static const struct pr_ops dm_pr_ops = { .pr_register = dm_pr_register, .pr_reserve = dm_pr_reserve, .pr_release = dm_pr_release, .pr_preempt = dm_pr_preempt, .pr_clear = dm_pr_clear, }; static const struct block_device_operations dm_blk_dops = { .open = dm_blk_open, .release = dm_blk_close, .ioctl = dm_blk_ioctl, .getgeo = dm_blk_getgeo, .pr_ops = &dm_pr_ops, .owner = THIS_MODULE }; static const struct dax_operations dm_dax_ops = { .direct_access = dm_dax_direct_access, .copy_from_iter = dm_dax_copy_from_iter, }; /* * module hooks */ module_init(dm_init); module_exit(dm_exit); module_param(major, uint, 0); MODULE_PARM_DESC(major, "The major number of the device mapper"); module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools"); module_param(dm_numa_node, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations"); MODULE_DESCRIPTION(DM_NAME " driver"); MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>"); MODULE_LICENSE("GPL");
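/*
 * A minimal sketch, not part of the driver above, of roughly what a bio-based
 * target's ->map hook does with the return codes that __map_bio() dispatches
 * on: DM_MAPIO_REMAPPED asks DM core to trace the remap and submit the clone
 * itself, while DM_MAPIO_SUBMITTED would mean the target has taken ownership
 * of the bio. The names example_dev and example_target_map are hypothetical;
 * the helpers used (bio_set_dev, dm_target_offset) are the same ones used
 * elsewhere in this file. Kept under #if 0 so it is never built.
 */
#if 0
struct example_dev {
	struct dm_dev *dev;	/* hypothetical backing device, e.g. from dm_get_device() */
};

static int example_target_map(struct dm_target *ti, struct bio *bio)
{
	struct example_dev *ed = ti->private;

	/* Point the clone at the backing device and rebase its sector. */
	bio_set_dev(bio, ed->dev->bdev);
	bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);

	/* DM core submits the remapped clone via generic_make_request(). */
	return DM_MAPIO_REMAPPED;
}
#endif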
./CrossVul/dataset_final_sorted/CWE-362/c/bad_3014_0
crossvul-cpp_data_good_3275_1
/* * inode.c - part of debugfs, a tiny little debug file system * * Copyright (C) 2004 Greg Kroah-Hartman <greg@kroah.com> * Copyright (C) 2004 IBM Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * debugfs is for people to use instead of /proc or /sys. * See Documentation/DocBook/kernel-api for more details. * */ #include <linux/module.h> #include <linux/fs.h> #include <linux/mount.h> #include <linux/pagemap.h> #include <linux/init.h> #include <linux/kobject.h> #include <linux/namei.h> #include <linux/debugfs.h> #include <linux/fsnotify.h> #include <linux/string.h> #include <linux/seq_file.h> #include <linux/parser.h> #include <linux/magic.h> #include <linux/slab.h> #include <linux/srcu.h> #include "internal.h" #define DEBUGFS_DEFAULT_MODE 0700 DEFINE_SRCU(debugfs_srcu); static struct vfsmount *debugfs_mount; static int debugfs_mount_count; static bool debugfs_registered; static struct inode *debugfs_get_inode(struct super_block *sb) { struct inode *inode = new_inode(sb); if (inode) { inode->i_ino = get_next_ino(); inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode); } return inode; } struct debugfs_mount_opts { kuid_t uid; kgid_t gid; umode_t mode; }; enum { Opt_uid, Opt_gid, Opt_mode, Opt_err }; static const match_table_t tokens = { {Opt_uid, "uid=%u"}, {Opt_gid, "gid=%u"}, {Opt_mode, "mode=%o"}, {Opt_err, NULL} }; struct debugfs_fs_info { struct debugfs_mount_opts mount_opts; }; static int debugfs_parse_options(char *data, struct debugfs_mount_opts *opts) { substring_t args[MAX_OPT_ARGS]; int option; int token; kuid_t uid; kgid_t gid; char *p; opts->mode = DEBUGFS_DEFAULT_MODE; while ((p = strsep(&data, ",")) != NULL) { if (!*p) continue; token = match_token(p, tokens, args); switch (token) { case Opt_uid: if (match_int(&args[0], &option)) return -EINVAL; uid = make_kuid(current_user_ns(), option); if (!uid_valid(uid)) return -EINVAL; opts->uid = uid; break; case Opt_gid: if (match_int(&args[0], &option)) return -EINVAL; gid = make_kgid(current_user_ns(), option); if (!gid_valid(gid)) return -EINVAL; opts->gid = gid; break; case Opt_mode: if (match_octal(&args[0], &option)) return -EINVAL; opts->mode = option & S_IALLUGO; break; /* * We might like to report bad mount options here; * but traditionally debugfs has ignored all mount options */ } } return 0; } static int debugfs_apply_options(struct super_block *sb) { struct debugfs_fs_info *fsi = sb->s_fs_info; struct inode *inode = d_inode(sb->s_root); struct debugfs_mount_opts *opts = &fsi->mount_opts; inode->i_mode &= ~S_IALLUGO; inode->i_mode |= opts->mode; inode->i_uid = opts->uid; inode->i_gid = opts->gid; return 0; } static int debugfs_remount(struct super_block *sb, int *flags, char *data) { int err; struct debugfs_fs_info *fsi = sb->s_fs_info; sync_filesystem(sb); err = debugfs_parse_options(data, &fsi->mount_opts); if (err) goto fail; debugfs_apply_options(sb); fail: return err; } static int debugfs_show_options(struct seq_file *m, struct dentry *root) { struct debugfs_fs_info *fsi = root->d_sb->s_fs_info; struct debugfs_mount_opts *opts = &fsi->mount_opts; if (!uid_eq(opts->uid, GLOBAL_ROOT_UID)) seq_printf(m, ",uid=%u", from_kuid_munged(&init_user_ns, opts->uid)); if (!gid_eq(opts->gid, GLOBAL_ROOT_GID)) seq_printf(m, ",gid=%u", from_kgid_munged(&init_user_ns, opts->gid)); if (opts->mode != DEBUGFS_DEFAULT_MODE) seq_printf(m, ",mode=%o", opts->mode); return 0; } 
static void debugfs_evict_inode(struct inode *inode) { truncate_inode_pages_final(&inode->i_data); clear_inode(inode); if (S_ISLNK(inode->i_mode)) kfree(inode->i_link); } static const struct super_operations debugfs_super_operations = { .statfs = simple_statfs, .remount_fs = debugfs_remount, .show_options = debugfs_show_options, .evict_inode = debugfs_evict_inode, }; static struct vfsmount *debugfs_automount(struct path *path) { debugfs_automount_t f; f = (debugfs_automount_t)path->dentry->d_fsdata; return f(path->dentry, d_inode(path->dentry)->i_private); } static const struct dentry_operations debugfs_dops = { .d_delete = always_delete_dentry, .d_automount = debugfs_automount, }; static int debug_fill_super(struct super_block *sb, void *data, int silent) { static const struct tree_descr debug_files[] = {{""}}; struct debugfs_fs_info *fsi; int err; save_mount_options(sb, data); fsi = kzalloc(sizeof(struct debugfs_fs_info), GFP_KERNEL); sb->s_fs_info = fsi; if (!fsi) { err = -ENOMEM; goto fail; } err = debugfs_parse_options(data, &fsi->mount_opts); if (err) goto fail; err = simple_fill_super(sb, DEBUGFS_MAGIC, debug_files); if (err) goto fail; sb->s_op = &debugfs_super_operations; sb->s_d_op = &debugfs_dops; debugfs_apply_options(sb); return 0; fail: kfree(fsi); sb->s_fs_info = NULL; return err; } static struct dentry *debug_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { return mount_single(fs_type, flags, data, debug_fill_super); } static struct file_system_type debug_fs_type = { .owner = THIS_MODULE, .name = "debugfs", .mount = debug_mount, .kill_sb = kill_litter_super, }; MODULE_ALIAS_FS("debugfs"); /** * debugfs_lookup() - look up an existing debugfs file * @name: a pointer to a string containing the name of the file to look up. * @parent: a pointer to the parent dentry of the file. * * This function will return a pointer to a dentry if it succeeds. If the file * doesn't exist or an error occurs, %NULL will be returned. The returned * dentry must be passed to dput() when it is no longer needed. * * If debugfs is not enabled in the kernel, the value -%ENODEV will be * returned. */ struct dentry *debugfs_lookup(const char *name, struct dentry *parent) { struct dentry *dentry; if (IS_ERR(parent)) return NULL; if (!parent) parent = debugfs_mount->mnt_root; inode_lock(d_inode(parent)); dentry = lookup_one_len(name, parent, strlen(name)); inode_unlock(d_inode(parent)); if (IS_ERR(dentry)) return NULL; if (!d_really_is_positive(dentry)) { dput(dentry); return NULL; } return dentry; } EXPORT_SYMBOL_GPL(debugfs_lookup); static struct dentry *start_creating(const char *name, struct dentry *parent) { struct dentry *dentry; int error; pr_debug("debugfs: creating file '%s'\n",name); if (IS_ERR(parent)) return parent; error = simple_pin_fs(&debug_fs_type, &debugfs_mount, &debugfs_mount_count); if (error) return ERR_PTR(error); /* If the parent is not specified, we create it in the root. * We need the root dentry to do this, which is in the super * block. A pointer to that is in the struct vfsmount that we * have around. 
*/ if (!parent) parent = debugfs_mount->mnt_root; inode_lock(d_inode(parent)); dentry = lookup_one_len(name, parent, strlen(name)); if (!IS_ERR(dentry) && d_really_is_positive(dentry)) { dput(dentry); dentry = ERR_PTR(-EEXIST); } if (IS_ERR(dentry)) { inode_unlock(d_inode(parent)); simple_release_fs(&debugfs_mount, &debugfs_mount_count); } return dentry; } static struct dentry *failed_creating(struct dentry *dentry) { inode_unlock(d_inode(dentry->d_parent)); dput(dentry); simple_release_fs(&debugfs_mount, &debugfs_mount_count); return NULL; } static struct dentry *end_creating(struct dentry *dentry) { inode_unlock(d_inode(dentry->d_parent)); return dentry; } static struct dentry *__debugfs_create_file(const char *name, umode_t mode, struct dentry *parent, void *data, const struct file_operations *proxy_fops, const struct file_operations *real_fops) { struct dentry *dentry; struct inode *inode; if (!(mode & S_IFMT)) mode |= S_IFREG; BUG_ON(!S_ISREG(mode)); dentry = start_creating(name, parent); if (IS_ERR(dentry)) return NULL; inode = debugfs_get_inode(dentry->d_sb); if (unlikely(!inode)) return failed_creating(dentry); inode->i_mode = mode; inode->i_private = data; inode->i_fop = proxy_fops; dentry->d_fsdata = (void *)real_fops; d_instantiate(dentry, inode); fsnotify_create(d_inode(dentry->d_parent), dentry); return end_creating(dentry); } /** * debugfs_create_file - create a file in the debugfs filesystem * @name: a pointer to a string containing the name of the file to create. * @mode: the permission that the file should have. * @parent: a pointer to the parent dentry for this file. This should be a * directory dentry if set. If this parameter is NULL, then the * file will be created in the root of the debugfs filesystem. * @data: a pointer to something that the caller will want to get to later * on. The inode.i_private pointer will point to this value on * the open() call. * @fops: a pointer to a struct file_operations that should be used for * this file. * * This is the basic "create a file" function for debugfs. It allows for a * wide range of flexibility in creating a file, or a directory (if you want * to create a directory, the debugfs_create_dir() function is * recommended to be used instead.) * * This function will return a pointer to a dentry if it succeeds. This * pointer must be passed to the debugfs_remove() function when the file is * to be removed (no automatic cleanup happens if your module is unloaded, * you are responsible here.) If an error occurs, %NULL will be returned. * * If debugfs is not enabled in the kernel, the value -%ENODEV will be * returned. */ struct dentry *debugfs_create_file(const char *name, umode_t mode, struct dentry *parent, void *data, const struct file_operations *fops) { return __debugfs_create_file(name, mode, parent, data, fops ? &debugfs_full_proxy_file_operations : &debugfs_noop_file_operations, fops); } EXPORT_SYMBOL_GPL(debugfs_create_file); /** * debugfs_create_file_unsafe - create a file in the debugfs filesystem * @name: a pointer to a string containing the name of the file to create. * @mode: the permission that the file should have. * @parent: a pointer to the parent dentry for this file. This should be a * directory dentry if set. If this parameter is NULL, then the * file will be created in the root of the debugfs filesystem. * @data: a pointer to something that the caller will want to get to later * on. The inode.i_private pointer will point to this value on * the open() call. 
* @fops: a pointer to a struct file_operations that should be used for * this file. * * debugfs_create_file_unsafe() is completely analogous to * debugfs_create_file(), the only difference being that the fops * handed it will not get protected against file removals by the * debugfs core. * * It is your responsibility to protect your struct file_operation * methods against file removals by means of debugfs_use_file_start() * and debugfs_use_file_finish(). ->open() is still protected by * debugfs though. * * Any struct file_operations defined by means of * DEFINE_DEBUGFS_ATTRIBUTE() is protected against file removals and * thus, may be used here. */ struct dentry *debugfs_create_file_unsafe(const char *name, umode_t mode, struct dentry *parent, void *data, const struct file_operations *fops) { return __debugfs_create_file(name, mode, parent, data, fops ? &debugfs_open_proxy_file_operations : &debugfs_noop_file_operations, fops); } EXPORT_SYMBOL_GPL(debugfs_create_file_unsafe); /** * debugfs_create_file_size - create a file in the debugfs filesystem * @name: a pointer to a string containing the name of the file to create. * @mode: the permission that the file should have. * @parent: a pointer to the parent dentry for this file. This should be a * directory dentry if set. If this parameter is NULL, then the * file will be created in the root of the debugfs filesystem. * @data: a pointer to something that the caller will want to get to later * on. The inode.i_private pointer will point to this value on * the open() call. * @fops: a pointer to a struct file_operations that should be used for * this file. * @file_size: initial file size * * This is the basic "create a file" function for debugfs. It allows for a * wide range of flexibility in creating a file, or a directory (if you want * to create a directory, the debugfs_create_dir() function is * recommended to be used instead.) * * This function will return a pointer to a dentry if it succeeds. This * pointer must be passed to the debugfs_remove() function when the file is * to be removed (no automatic cleanup happens if your module is unloaded, * you are responsible here.) If an error occurs, %NULL will be returned. * * If debugfs is not enabled in the kernel, the value -%ENODEV will be * returned. */ struct dentry *debugfs_create_file_size(const char *name, umode_t mode, struct dentry *parent, void *data, const struct file_operations *fops, loff_t file_size) { struct dentry *de = debugfs_create_file(name, mode, parent, data, fops); if (de) d_inode(de)->i_size = file_size; return de; } EXPORT_SYMBOL_GPL(debugfs_create_file_size); /** * debugfs_create_dir - create a directory in the debugfs filesystem * @name: a pointer to a string containing the name of the directory to * create. * @parent: a pointer to the parent dentry for this file. This should be a * directory dentry if set. If this parameter is NULL, then the * directory will be created in the root of the debugfs filesystem. * * This function creates a directory in debugfs with the given name. * * This function will return a pointer to a dentry if it succeeds. This * pointer must be passed to the debugfs_remove() function when the file is * to be removed (no automatic cleanup happens if your module is unloaded, * you are responsible here.) If an error occurs, %NULL will be returned. * * If debugfs is not enabled in the kernel, the value -%ENODEV will be * returned. 
*/ struct dentry *debugfs_create_dir(const char *name, struct dentry *parent) { struct dentry *dentry = start_creating(name, parent); struct inode *inode; if (IS_ERR(dentry)) return NULL; inode = debugfs_get_inode(dentry->d_sb); if (unlikely(!inode)) return failed_creating(dentry); inode->i_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO; inode->i_op = &simple_dir_inode_operations; inode->i_fop = &simple_dir_operations; /* directory inodes start off with i_nlink == 2 (for "." entry) */ inc_nlink(inode); d_instantiate(dentry, inode); inc_nlink(d_inode(dentry->d_parent)); fsnotify_mkdir(d_inode(dentry->d_parent), dentry); return end_creating(dentry); } EXPORT_SYMBOL_GPL(debugfs_create_dir); /** * debugfs_create_automount - create automount point in the debugfs filesystem * @name: a pointer to a string containing the name of the file to create. * @parent: a pointer to the parent dentry for this file. This should be a * directory dentry if set. If this parameter is NULL, then the * file will be created in the root of the debugfs filesystem. * @f: function to be called when pathname resolution steps on that one. * @data: opaque argument to pass to f(). * * @f should return what ->d_automount() would. */ struct dentry *debugfs_create_automount(const char *name, struct dentry *parent, debugfs_automount_t f, void *data) { struct dentry *dentry = start_creating(name, parent); struct inode *inode; if (IS_ERR(dentry)) return NULL; inode = debugfs_get_inode(dentry->d_sb); if (unlikely(!inode)) return failed_creating(dentry); make_empty_dir_inode(inode); inode->i_flags |= S_AUTOMOUNT; inode->i_private = data; dentry->d_fsdata = (void *)f; /* directory inodes start off with i_nlink == 2 (for "." entry) */ inc_nlink(inode); d_instantiate(dentry, inode); inc_nlink(d_inode(dentry->d_parent)); fsnotify_mkdir(d_inode(dentry->d_parent), dentry); return end_creating(dentry); } EXPORT_SYMBOL(debugfs_create_automount); /** * debugfs_create_symlink- create a symbolic link in the debugfs filesystem * @name: a pointer to a string containing the name of the symbolic link to * create. * @parent: a pointer to the parent dentry for this symbolic link. This * should be a directory dentry if set. If this parameter is NULL, * then the symbolic link will be created in the root of the debugfs * filesystem. * @target: a pointer to a string containing the path to the target of the * symbolic link. * * This function creates a symbolic link with the given name in debugfs that * links to the given target path. * * This function will return a pointer to a dentry if it succeeds. This * pointer must be passed to the debugfs_remove() function when the symbolic * link is to be removed (no automatic cleanup happens if your module is * unloaded, you are responsible here.) If an error occurs, %NULL will be * returned. * * If debugfs is not enabled in the kernel, the value -%ENODEV will be * returned. 
*/ struct dentry *debugfs_create_symlink(const char *name, struct dentry *parent, const char *target) { struct dentry *dentry; struct inode *inode; char *link = kstrdup(target, GFP_KERNEL); if (!link) return NULL; dentry = start_creating(name, parent); if (IS_ERR(dentry)) { kfree(link); return NULL; } inode = debugfs_get_inode(dentry->d_sb); if (unlikely(!inode)) { kfree(link); return failed_creating(dentry); } inode->i_mode = S_IFLNK | S_IRWXUGO; inode->i_op = &simple_symlink_inode_operations; inode->i_link = link; d_instantiate(dentry, inode); return end_creating(dentry); } EXPORT_SYMBOL_GPL(debugfs_create_symlink); static int __debugfs_remove(struct dentry *dentry, struct dentry *parent) { int ret = 0; if (simple_positive(dentry)) { dget(dentry); if (d_is_dir(dentry)) ret = simple_rmdir(d_inode(parent), dentry); else simple_unlink(d_inode(parent), dentry); if (!ret) d_delete(dentry); dput(dentry); } return ret; } /** * debugfs_remove - removes a file or directory from the debugfs filesystem * @dentry: a pointer to a the dentry of the file or directory to be * removed. If this parameter is NULL or an error value, nothing * will be done. * * This function removes a file or directory in debugfs that was previously * created with a call to another debugfs function (like * debugfs_create_file() or variants thereof.) * * This function is required to be called in order for the file to be * removed, no automatic cleanup of files will happen when a module is * removed, you are responsible here. */ void debugfs_remove(struct dentry *dentry) { struct dentry *parent; int ret; if (IS_ERR_OR_NULL(dentry)) return; parent = dentry->d_parent; inode_lock(d_inode(parent)); ret = __debugfs_remove(dentry, parent); inode_unlock(d_inode(parent)); if (!ret) simple_release_fs(&debugfs_mount, &debugfs_mount_count); synchronize_srcu(&debugfs_srcu); } EXPORT_SYMBOL_GPL(debugfs_remove); /** * debugfs_remove_recursive - recursively removes a directory * @dentry: a pointer to a the dentry of the directory to be removed. If this * parameter is NULL or an error value, nothing will be done. * * This function recursively removes a directory tree in debugfs that * was previously created with a call to another debugfs function * (like debugfs_create_file() or variants thereof.) * * This function is required to be called in order for the file to be * removed, no automatic cleanup of files will happen when a module is * removed, you are responsible here. */ void debugfs_remove_recursive(struct dentry *dentry) { struct dentry *child, *parent; if (IS_ERR_OR_NULL(dentry)) return; parent = dentry; down: inode_lock(d_inode(parent)); loop: /* * The parent->d_subdirs is protected by the d_lock. Outside that * lock, the child can be unlinked and set to be freed which can * use the d_u.d_child as the rcu head and corrupt this list. */ spin_lock(&parent->d_lock); list_for_each_entry(child, &parent->d_subdirs, d_child) { if (!simple_positive(child)) continue; /* perhaps simple_empty(child) makes more sense */ if (!list_empty(&child->d_subdirs)) { spin_unlock(&parent->d_lock); inode_unlock(d_inode(parent)); parent = child; goto down; } spin_unlock(&parent->d_lock); if (!__debugfs_remove(child, parent)) simple_release_fs(&debugfs_mount, &debugfs_mount_count); /* * The parent->d_lock protects agaist child from unlinking * from d_subdirs. When releasing the parent->d_lock we can * no longer trust that the next pointer is valid. * Restart the loop. We'll skip this one with the * simple_positive() check. 
*/ goto loop; } spin_unlock(&parent->d_lock); inode_unlock(d_inode(parent)); child = parent; parent = parent->d_parent; inode_lock(d_inode(parent)); if (child != dentry) /* go up */ goto loop; if (!__debugfs_remove(child, parent)) simple_release_fs(&debugfs_mount, &debugfs_mount_count); inode_unlock(d_inode(parent)); synchronize_srcu(&debugfs_srcu); } EXPORT_SYMBOL_GPL(debugfs_remove_recursive); /** * debugfs_rename - rename a file/directory in the debugfs filesystem * @old_dir: a pointer to the parent dentry for the renamed object. This * should be a directory dentry. * @old_dentry: dentry of an object to be renamed. * @new_dir: a pointer to the parent dentry where the object should be * moved. This should be a directory dentry. * @new_name: a pointer to a string containing the target name. * * This function renames a file/directory in debugfs. The target must not * exist for rename to succeed. * * This function will return a pointer to old_dentry (which is updated to * reflect renaming) if it succeeds. If an error occurs, %NULL will be * returned. * * If debugfs is not enabled in the kernel, the value -%ENODEV will be * returned. */ struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry, struct dentry *new_dir, const char *new_name) { int error; struct dentry *dentry = NULL, *trap; struct name_snapshot old_name; trap = lock_rename(new_dir, old_dir); /* Source or destination directories don't exist? */ if (d_really_is_negative(old_dir) || d_really_is_negative(new_dir)) goto exit; /* Source does not exist, cyclic rename, or mountpoint? */ if (d_really_is_negative(old_dentry) || old_dentry == trap || d_mountpoint(old_dentry)) goto exit; dentry = lookup_one_len(new_name, new_dir, strlen(new_name)); /* Lookup failed, cyclic rename or target exists? */ if (IS_ERR(dentry) || dentry == trap || d_really_is_positive(dentry)) goto exit; take_dentry_name_snapshot(&old_name, old_dentry); error = simple_rename(d_inode(old_dir), old_dentry, d_inode(new_dir), dentry, 0); if (error) { release_dentry_name_snapshot(&old_name); goto exit; } d_move(old_dentry, dentry); fsnotify_move(d_inode(old_dir), d_inode(new_dir), old_name.name, d_is_dir(old_dentry), NULL, old_dentry); release_dentry_name_snapshot(&old_name); unlock_rename(new_dir, old_dir); dput(dentry); return old_dentry; exit: if (dentry && !IS_ERR(dentry)) dput(dentry); unlock_rename(new_dir, old_dir); return NULL; } EXPORT_SYMBOL_GPL(debugfs_rename); /** * debugfs_initialized - Tells whether debugfs has been registered */ bool debugfs_initialized(void) { return debugfs_registered; } EXPORT_SYMBOL_GPL(debugfs_initialized); static int __init debugfs_init(void) { int retval; retval = sysfs_create_mount_point(kernel_kobj, "debug"); if (retval) return retval; retval = register_filesystem(&debug_fs_type); if (retval) sysfs_remove_mount_point(kernel_kobj, "debug"); else debugfs_registered = true; return retval; } core_initcall(debugfs_init);
./CrossVul/dataset_final_sorted/CWE-362/c/good_3275_1
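As a rough usage sketch (not taken from the dataset sample above), the debugfs helpers that the record documents in its kernel-doc comments — debugfs_create_dir(), debugfs_create_file() and debugfs_remove_recursive() — are typically paired along the following lines in a kernel module. The "example" directory, the counter variable, the 0444 mode and the file name are all assumed here purely for illustration.

/*
 * Hypothetical consumer of the debugfs API from the record above.
 * Creates /sys/kernel/debug/example/counter and tears it down on exit.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/debugfs.h>

static struct dentry *example_dir;
static u32 example_counter;

static ssize_t counter_read(struct file *file, char __user *buf,
			    size_t count, loff_t *ppos)
{
	/* i_private was set to the data pointer by debugfs_create_file(). */
	u32 *val = file_inode(file)->i_private;
	char tmp[16];
	int len = scnprintf(tmp, sizeof(tmp), "%u\n", *val);

	return simple_read_from_buffer(buf, count, ppos, tmp, len);
}

static const struct file_operations counter_fops = {
	.owner  = THIS_MODULE,
	.read   = counter_read,
	.llseek = default_llseek,
};

static int __init example_init(void)
{
	example_dir = debugfs_create_dir("example", NULL);
	if (!example_dir)
		return -ENODEV;

	/* Proxied fops: the debugfs core guards ->read against removal. */
	debugfs_create_file("counter", 0444, example_dir,
			    &example_counter, &counter_fops);
	return 0;
}

static void __exit example_exit(void)
{
	/* Removes the directory and every file created beneath it. */
	debugfs_remove_recursive(example_dir);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");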
crossvul-cpp_data_good_1757_1
/* * linux/ipc/shm.c * Copyright (C) 1992, 1993 Krishna Balasubramanian * Many improvements/fixes by Bruno Haible. * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994. * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli. * * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com> * BIGMEM support, Andrea Arcangeli <andrea@suse.de> * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr> * HIGHMEM support, Ingo Molnar <mingo@redhat.com> * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com> * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com> * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com> * * support for audit of ipc object properties and permission changes * Dustin Kirkland <dustin.kirkland@us.ibm.com> * * namespaces support * OpenVZ, SWsoft Inc. * Pavel Emelianov <xemul@openvz.org> * * Better ipc lock (kern_ipc_perm.lock) handling * Davidlohr Bueso <davidlohr.bueso@hp.com>, June 2013. */ #include <linux/slab.h> #include <linux/mm.h> #include <linux/hugetlb.h> #include <linux/shm.h> #include <linux/init.h> #include <linux/file.h> #include <linux/mman.h> #include <linux/shmem_fs.h> #include <linux/security.h> #include <linux/syscalls.h> #include <linux/audit.h> #include <linux/capability.h> #include <linux/ptrace.h> #include <linux/seq_file.h> #include <linux/rwsem.h> #include <linux/nsproxy.h> #include <linux/mount.h> #include <linux/ipc_namespace.h> #include <linux/uaccess.h> #include "util.h" struct shm_file_data { int id; struct ipc_namespace *ns; struct file *file; const struct vm_operations_struct *vm_ops; }; #define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data)) static const struct file_operations shm_file_operations; static const struct vm_operations_struct shm_vm_ops; #define shm_ids(ns) ((ns)->ids[IPC_SHM_IDS]) #define shm_unlock(shp) \ ipc_unlock(&(shp)->shm_perm) static int newseg(struct ipc_namespace *, struct ipc_params *); static void shm_open(struct vm_area_struct *vma); static void shm_close(struct vm_area_struct *vma); static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp); #ifdef CONFIG_PROC_FS static int sysvipc_shm_proc_show(struct seq_file *s, void *it); #endif void shm_init_ns(struct ipc_namespace *ns) { ns->shm_ctlmax = SHMMAX; ns->shm_ctlall = SHMALL; ns->shm_ctlmni = SHMMNI; ns->shm_rmid_forced = 0; ns->shm_tot = 0; ipc_init_ids(&shm_ids(ns)); } /* * Called with shm_ids.rwsem (writer) and the shp structure locked. * Only shm_ids.rwsem remains locked on exit. 
*/ static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp) { struct shmid_kernel *shp; shp = container_of(ipcp, struct shmid_kernel, shm_perm); if (shp->shm_nattch) { shp->shm_perm.mode |= SHM_DEST; /* Do not find it any more */ shp->shm_perm.key = IPC_PRIVATE; shm_unlock(shp); } else shm_destroy(ns, shp); } #ifdef CONFIG_IPC_NS void shm_exit_ns(struct ipc_namespace *ns) { free_ipcs(ns, &shm_ids(ns), do_shm_rmid); idr_destroy(&ns->ids[IPC_SHM_IDS].ipcs_idr); } #endif static int __init ipc_ns_init(void) { shm_init_ns(&init_ipc_ns); return 0; } pure_initcall(ipc_ns_init); void __init shm_init(void) { ipc_init_proc_interface("sysvipc/shm", #if BITS_PER_LONG <= 32 " key shmid perms size cpid lpid nattch uid gid cuid cgid atime dtime ctime rss swap\n", #else " key shmid perms size cpid lpid nattch uid gid cuid cgid atime dtime ctime rss swap\n", #endif IPC_SHM_IDS, sysvipc_shm_proc_show); } static inline struct shmid_kernel *shm_obtain_object(struct ipc_namespace *ns, int id) { struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&shm_ids(ns), id); if (IS_ERR(ipcp)) return ERR_CAST(ipcp); return container_of(ipcp, struct shmid_kernel, shm_perm); } static inline struct shmid_kernel *shm_obtain_object_check(struct ipc_namespace *ns, int id) { struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&shm_ids(ns), id); if (IS_ERR(ipcp)) return ERR_CAST(ipcp); return container_of(ipcp, struct shmid_kernel, shm_perm); } /* * shm_lock_(check_) routines are called in the paths where the rwsem * is not necessarily held. */ static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id) { struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id); /* * We raced in the idr lookup or with shm_destroy(). Either way, the * ID is busted. */ WARN_ON(IS_ERR(ipcp)); return container_of(ipcp, struct shmid_kernel, shm_perm); } static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp) { rcu_read_lock(); ipc_lock_object(&ipcp->shm_perm); } static void shm_rcu_free(struct rcu_head *head) { struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu); struct shmid_kernel *shp = ipc_rcu_to_struct(p); security_shm_free(shp); ipc_rcu_free(head); } static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s) { list_del(&s->shm_clist); ipc_rmid(&shm_ids(ns), &s->shm_perm); } /* This is called by fork, once for every shm attach. */ static void shm_open(struct vm_area_struct *vma) { struct file *file = vma->vm_file; struct shm_file_data *sfd = shm_file_data(file); struct shmid_kernel *shp; shp = shm_lock(sfd->ns, sfd->id); shp->shm_atim = get_seconds(); shp->shm_lprid = task_tgid_vnr(current); shp->shm_nattch++; shm_unlock(shp); } /* * shm_destroy - free the struct shmid_kernel * * @ns: namespace * @shp: struct to free * * It has to be called with shp and shm_ids.rwsem (writer) locked, * but returns with shp unlocked and freed. 
*/ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp) { struct file *shm_file; shm_file = shp->shm_file; shp->shm_file = NULL; ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT; shm_rmid(ns, shp); shm_unlock(shp); if (!is_file_hugepages(shm_file)) shmem_lock(shm_file, 0, shp->mlock_user); else if (shp->mlock_user) user_shm_unlock(i_size_read(file_inode(shm_file)), shp->mlock_user); fput(shm_file); ipc_rcu_putref(shp, shm_rcu_free); } /* * shm_may_destroy - identifies whether shm segment should be destroyed now * * Returns true if and only if there are no active users of the segment and * one of the following is true: * * 1) shmctl(id, IPC_RMID, NULL) was called for this shp * * 2) sysctl kernel.shm_rmid_forced is set to 1. */ static bool shm_may_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp) { return (shp->shm_nattch == 0) && (ns->shm_rmid_forced || (shp->shm_perm.mode & SHM_DEST)); } /* * remove the attach descriptor vma. * free memory for segment if it is marked destroyed. * The descriptor has already been removed from the current->mm->mmap list * and will later be kfree()d. */ static void shm_close(struct vm_area_struct *vma) { struct file *file = vma->vm_file; struct shm_file_data *sfd = shm_file_data(file); struct shmid_kernel *shp; struct ipc_namespace *ns = sfd->ns; down_write(&shm_ids(ns).rwsem); /* remove from the list of attaches of the shm segment */ shp = shm_lock(ns, sfd->id); shp->shm_lprid = task_tgid_vnr(current); shp->shm_dtim = get_seconds(); shp->shm_nattch--; if (shm_may_destroy(ns, shp)) shm_destroy(ns, shp); else shm_unlock(shp); up_write(&shm_ids(ns).rwsem); } /* Called with ns->shm_ids(ns).rwsem locked */ static int shm_try_destroy_orphaned(int id, void *p, void *data) { struct ipc_namespace *ns = data; struct kern_ipc_perm *ipcp = p; struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm); /* * We want to destroy segments without users and with already * exit'ed originating process. * * As shp->* are changed under rwsem, it's safe to skip shp locking. */ if (shp->shm_creator != NULL) return 0; if (shm_may_destroy(ns, shp)) { shm_lock_by_ptr(shp); shm_destroy(ns, shp); } return 0; } void shm_destroy_orphaned(struct ipc_namespace *ns) { down_write(&shm_ids(ns).rwsem); if (shm_ids(ns).in_use) idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns); up_write(&shm_ids(ns).rwsem); } /* Locking assumes this will only be called with task == current */ void exit_shm(struct task_struct *task) { struct ipc_namespace *ns = task->nsproxy->ipc_ns; struct shmid_kernel *shp, *n; if (list_empty(&task->sysvshm.shm_clist)) return; /* * If kernel.shm_rmid_forced is not set then only keep track of * which shmids are orphaned, so that a later set of the sysctl * can clean them up. */ if (!ns->shm_rmid_forced) { down_read(&shm_ids(ns).rwsem); list_for_each_entry(shp, &task->sysvshm.shm_clist, shm_clist) shp->shm_creator = NULL; /* * Only under read lock but we are only called on current * so no entry on the list will be shared. */ list_del(&task->sysvshm.shm_clist); up_read(&shm_ids(ns).rwsem); return; } /* * Destroy all already created segments, that were not yet mapped, * and mark any mapped as orphan to cover the sysctl toggling. * Destroy is skipped if shm_may_destroy() returns false. 
*/ down_write(&shm_ids(ns).rwsem); list_for_each_entry_safe(shp, n, &task->sysvshm.shm_clist, shm_clist) { shp->shm_creator = NULL; if (shm_may_destroy(ns, shp)) { shm_lock_by_ptr(shp); shm_destroy(ns, shp); } } /* Remove the list head from any segments still attached. */ list_del(&task->sysvshm.shm_clist); up_write(&shm_ids(ns).rwsem); } static int shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { struct file *file = vma->vm_file; struct shm_file_data *sfd = shm_file_data(file); return sfd->vm_ops->fault(vma, vmf); } #ifdef CONFIG_NUMA static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new) { struct file *file = vma->vm_file; struct shm_file_data *sfd = shm_file_data(file); int err = 0; if (sfd->vm_ops->set_policy) err = sfd->vm_ops->set_policy(vma, new); return err; } static struct mempolicy *shm_get_policy(struct vm_area_struct *vma, unsigned long addr) { struct file *file = vma->vm_file; struct shm_file_data *sfd = shm_file_data(file); struct mempolicy *pol = NULL; if (sfd->vm_ops->get_policy) pol = sfd->vm_ops->get_policy(vma, addr); else if (vma->vm_policy) pol = vma->vm_policy; return pol; } #endif static int shm_mmap(struct file *file, struct vm_area_struct *vma) { struct shm_file_data *sfd = shm_file_data(file); int ret; ret = sfd->file->f_op->mmap(sfd->file, vma); if (ret != 0) return ret; sfd->vm_ops = vma->vm_ops; #ifdef CONFIG_MMU WARN_ON(!sfd->vm_ops->fault); #endif vma->vm_ops = &shm_vm_ops; shm_open(vma); return ret; } static int shm_release(struct inode *ino, struct file *file) { struct shm_file_data *sfd = shm_file_data(file); put_ipc_ns(sfd->ns); shm_file_data(file) = NULL; kfree(sfd); return 0; } static int shm_fsync(struct file *file, loff_t start, loff_t end, int datasync) { struct shm_file_data *sfd = shm_file_data(file); if (!sfd->file->f_op->fsync) return -EINVAL; return sfd->file->f_op->fsync(sfd->file, start, end, datasync); } static long shm_fallocate(struct file *file, int mode, loff_t offset, loff_t len) { struct shm_file_data *sfd = shm_file_data(file); if (!sfd->file->f_op->fallocate) return -EOPNOTSUPP; return sfd->file->f_op->fallocate(file, mode, offset, len); } static unsigned long shm_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { struct shm_file_data *sfd = shm_file_data(file); return sfd->file->f_op->get_unmapped_area(sfd->file, addr, len, pgoff, flags); } static const struct file_operations shm_file_operations = { .mmap = shm_mmap, .fsync = shm_fsync, .release = shm_release, #ifndef CONFIG_MMU .get_unmapped_area = shm_get_unmapped_area, #endif .llseek = noop_llseek, .fallocate = shm_fallocate, }; static const struct file_operations shm_file_operations_huge = { .mmap = shm_mmap, .fsync = shm_fsync, .release = shm_release, .get_unmapped_area = shm_get_unmapped_area, .llseek = noop_llseek, .fallocate = shm_fallocate, }; int is_file_shm_hugepages(struct file *file) { return file->f_op == &shm_file_operations_huge; } static const struct vm_operations_struct shm_vm_ops = { .open = shm_open, /* callback for a new vm-area open */ .close = shm_close, /* callback for when the vm-area is released */ .fault = shm_fault, #if defined(CONFIG_NUMA) .set_policy = shm_set_policy, .get_policy = shm_get_policy, #endif }; /** * newseg - Create a new shared memory segment * @ns: namespace * @params: ptr to the structure that contains key, size and shmflg * * Called with shm_ids.rwsem held as a writer. 
*/ static int newseg(struct ipc_namespace *ns, struct ipc_params *params) { key_t key = params->key; int shmflg = params->flg; size_t size = params->u.size; int error; struct shmid_kernel *shp; size_t numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; struct file *file; char name[13]; int id; vm_flags_t acctflag = 0; if (size < SHMMIN || size > ns->shm_ctlmax) return -EINVAL; if (numpages << PAGE_SHIFT < size) return -ENOSPC; if (ns->shm_tot + numpages < ns->shm_tot || ns->shm_tot + numpages > ns->shm_ctlall) return -ENOSPC; shp = ipc_rcu_alloc(sizeof(*shp)); if (!shp) return -ENOMEM; shp->shm_perm.key = key; shp->shm_perm.mode = (shmflg & S_IRWXUGO); shp->mlock_user = NULL; shp->shm_perm.security = NULL; error = security_shm_alloc(shp); if (error) { ipc_rcu_putref(shp, ipc_rcu_free); return error; } sprintf(name, "SYSV%08x", key); if (shmflg & SHM_HUGETLB) { struct hstate *hs; size_t hugesize; hs = hstate_sizelog((shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK); if (!hs) { error = -EINVAL; goto no_file; } hugesize = ALIGN(size, huge_page_size(hs)); /* hugetlb_file_setup applies strict accounting */ if (shmflg & SHM_NORESERVE) acctflag = VM_NORESERVE; file = hugetlb_file_setup(name, hugesize, acctflag, &shp->mlock_user, HUGETLB_SHMFS_INODE, (shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK); } else { /* * Do not allow no accounting for OVERCOMMIT_NEVER, even * if it's asked for. */ if ((shmflg & SHM_NORESERVE) && sysctl_overcommit_memory != OVERCOMMIT_NEVER) acctflag = VM_NORESERVE; file = shmem_kernel_file_setup(name, size, acctflag); } error = PTR_ERR(file); if (IS_ERR(file)) goto no_file; shp->shm_cprid = task_tgid_vnr(current); shp->shm_lprid = 0; shp->shm_atim = shp->shm_dtim = 0; shp->shm_ctim = get_seconds(); shp->shm_segsz = size; shp->shm_nattch = 0; shp->shm_file = file; shp->shm_creator = current; id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni); if (id < 0) { error = id; goto no_id; } list_add(&shp->shm_clist, &current->sysvshm.shm_clist); /* * shmid gets reported as "inode#" in /proc/pid/maps. * proc-ps tools use this. Changing this will break them. */ file_inode(file)->i_ino = shp->shm_perm.id; ns->shm_tot += numpages; error = shp->shm_perm.id; ipc_unlock_object(&shp->shm_perm); rcu_read_unlock(); return error; no_id: if (is_file_hugepages(file) && shp->mlock_user) user_shm_unlock(size, shp->mlock_user); fput(file); no_file: ipc_rcu_putref(shp, shm_rcu_free); return error; } /* * Called with shm_ids.rwsem and ipcp locked. */ static inline int shm_security(struct kern_ipc_perm *ipcp, int shmflg) { struct shmid_kernel *shp; shp = container_of(ipcp, struct shmid_kernel, shm_perm); return security_shm_associate(shp, shmflg); } /* * Called with shm_ids.rwsem and ipcp locked. 
*/ static inline int shm_more_checks(struct kern_ipc_perm *ipcp, struct ipc_params *params) { struct shmid_kernel *shp; shp = container_of(ipcp, struct shmid_kernel, shm_perm); if (shp->shm_segsz < params->u.size) return -EINVAL; return 0; } SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg) { struct ipc_namespace *ns; static const struct ipc_ops shm_ops = { .getnew = newseg, .associate = shm_security, .more_checks = shm_more_checks, }; struct ipc_params shm_params; ns = current->nsproxy->ipc_ns; shm_params.key = key; shm_params.flg = shmflg; shm_params.u.size = size; return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params); } static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version) { switch (version) { case IPC_64: return copy_to_user(buf, in, sizeof(*in)); case IPC_OLD: { struct shmid_ds out; memset(&out, 0, sizeof(out)); ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm); out.shm_segsz = in->shm_segsz; out.shm_atime = in->shm_atime; out.shm_dtime = in->shm_dtime; out.shm_ctime = in->shm_ctime; out.shm_cpid = in->shm_cpid; out.shm_lpid = in->shm_lpid; out.shm_nattch = in->shm_nattch; return copy_to_user(buf, &out, sizeof(out)); } default: return -EINVAL; } } static inline unsigned long copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version) { switch (version) { case IPC_64: if (copy_from_user(out, buf, sizeof(*out))) return -EFAULT; return 0; case IPC_OLD: { struct shmid_ds tbuf_old; if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old))) return -EFAULT; out->shm_perm.uid = tbuf_old.shm_perm.uid; out->shm_perm.gid = tbuf_old.shm_perm.gid; out->shm_perm.mode = tbuf_old.shm_perm.mode; return 0; } default: return -EINVAL; } } static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version) { switch (version) { case IPC_64: return copy_to_user(buf, in, sizeof(*in)); case IPC_OLD: { struct shminfo out; if (in->shmmax > INT_MAX) out.shmmax = INT_MAX; else out.shmmax = (int)in->shmmax; out.shmmin = in->shmmin; out.shmmni = in->shmmni; out.shmseg = in->shmseg; out.shmall = in->shmall; return copy_to_user(buf, &out, sizeof(out)); } default: return -EINVAL; } } /* * Calculate and add used RSS and swap pages of a shm. * Called with shm_ids.rwsem held as a reader */ static void shm_add_rss_swap(struct shmid_kernel *shp, unsigned long *rss_add, unsigned long *swp_add) { struct inode *inode; inode = file_inode(shp->shm_file); if (is_file_hugepages(shp->shm_file)) { struct address_space *mapping = inode->i_mapping; struct hstate *h = hstate_file(shp->shm_file); *rss_add += pages_per_huge_page(h) * mapping->nrpages; } else { #ifdef CONFIG_SHMEM struct shmem_inode_info *info = SHMEM_I(inode); spin_lock(&info->lock); *rss_add += inode->i_mapping->nrpages; *swp_add += info->swapped; spin_unlock(&info->lock); #else *rss_add += inode->i_mapping->nrpages; #endif } } /* * Called with shm_ids.rwsem held as a reader */ static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss, unsigned long *swp) { int next_id; int total, in_use; *rss = 0; *swp = 0; in_use = shm_ids(ns).in_use; for (total = 0, next_id = 0; total < in_use; next_id++) { struct kern_ipc_perm *ipc; struct shmid_kernel *shp; ipc = idr_find(&shm_ids(ns).ipcs_idr, next_id); if (ipc == NULL) continue; shp = container_of(ipc, struct shmid_kernel, shm_perm); shm_add_rss_swap(shp, rss, swp); total++; } } /* * This function handles some shmctl commands which require the rwsem * to be held in write mode. 
* NOTE: no locks must be held, the rwsem is taken inside this function. */ static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd, struct shmid_ds __user *buf, int version) { struct kern_ipc_perm *ipcp; struct shmid64_ds shmid64; struct shmid_kernel *shp; int err; if (cmd == IPC_SET) { if (copy_shmid_from_user(&shmid64, buf, version)) return -EFAULT; } down_write(&shm_ids(ns).rwsem); rcu_read_lock(); ipcp = ipcctl_pre_down_nolock(ns, &shm_ids(ns), shmid, cmd, &shmid64.shm_perm, 0); if (IS_ERR(ipcp)) { err = PTR_ERR(ipcp); goto out_unlock1; } shp = container_of(ipcp, struct shmid_kernel, shm_perm); err = security_shm_shmctl(shp, cmd); if (err) goto out_unlock1; switch (cmd) { case IPC_RMID: ipc_lock_object(&shp->shm_perm); /* do_shm_rmid unlocks the ipc object and rcu */ do_shm_rmid(ns, ipcp); goto out_up; case IPC_SET: ipc_lock_object(&shp->shm_perm); err = ipc_update_perm(&shmid64.shm_perm, ipcp); if (err) goto out_unlock0; shp->shm_ctim = get_seconds(); break; default: err = -EINVAL; goto out_unlock1; } out_unlock0: ipc_unlock_object(&shp->shm_perm); out_unlock1: rcu_read_unlock(); out_up: up_write(&shm_ids(ns).rwsem); return err; } static int shmctl_nolock(struct ipc_namespace *ns, int shmid, int cmd, int version, void __user *buf) { int err; struct shmid_kernel *shp; /* preliminary security checks for *_INFO */ if (cmd == IPC_INFO || cmd == SHM_INFO) { err = security_shm_shmctl(NULL, cmd); if (err) return err; } switch (cmd) { case IPC_INFO: { struct shminfo64 shminfo; memset(&shminfo, 0, sizeof(shminfo)); shminfo.shmmni = shminfo.shmseg = ns->shm_ctlmni; shminfo.shmmax = ns->shm_ctlmax; shminfo.shmall = ns->shm_ctlall; shminfo.shmmin = SHMMIN; if (copy_shminfo_to_user(buf, &shminfo, version)) return -EFAULT; down_read(&shm_ids(ns).rwsem); err = ipc_get_maxid(&shm_ids(ns)); up_read(&shm_ids(ns).rwsem); if (err < 0) err = 0; goto out; } case SHM_INFO: { struct shm_info shm_info; memset(&shm_info, 0, sizeof(shm_info)); down_read(&shm_ids(ns).rwsem); shm_info.used_ids = shm_ids(ns).in_use; shm_get_stat(ns, &shm_info.shm_rss, &shm_info.shm_swp); shm_info.shm_tot = ns->shm_tot; shm_info.swap_attempts = 0; shm_info.swap_successes = 0; err = ipc_get_maxid(&shm_ids(ns)); up_read(&shm_ids(ns).rwsem); if (copy_to_user(buf, &shm_info, sizeof(shm_info))) { err = -EFAULT; goto out; } err = err < 0 ? 
0 : err; goto out; } case SHM_STAT: case IPC_STAT: { struct shmid64_ds tbuf; int result; rcu_read_lock(); if (cmd == SHM_STAT) { shp = shm_obtain_object(ns, shmid); if (IS_ERR(shp)) { err = PTR_ERR(shp); goto out_unlock; } result = shp->shm_perm.id; } else { shp = shm_obtain_object_check(ns, shmid); if (IS_ERR(shp)) { err = PTR_ERR(shp); goto out_unlock; } result = 0; } err = -EACCES; if (ipcperms(ns, &shp->shm_perm, S_IRUGO)) goto out_unlock; err = security_shm_shmctl(shp, cmd); if (err) goto out_unlock; memset(&tbuf, 0, sizeof(tbuf)); kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm); tbuf.shm_segsz = shp->shm_segsz; tbuf.shm_atime = shp->shm_atim; tbuf.shm_dtime = shp->shm_dtim; tbuf.shm_ctime = shp->shm_ctim; tbuf.shm_cpid = shp->shm_cprid; tbuf.shm_lpid = shp->shm_lprid; tbuf.shm_nattch = shp->shm_nattch; rcu_read_unlock(); if (copy_shmid_to_user(buf, &tbuf, version)) err = -EFAULT; else err = result; goto out; } default: return -EINVAL; } out_unlock: rcu_read_unlock(); out: return err; } SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf) { struct shmid_kernel *shp; int err, version; struct ipc_namespace *ns; if (cmd < 0 || shmid < 0) return -EINVAL; version = ipc_parse_version(&cmd); ns = current->nsproxy->ipc_ns; switch (cmd) { case IPC_INFO: case SHM_INFO: case SHM_STAT: case IPC_STAT: return shmctl_nolock(ns, shmid, cmd, version, buf); case IPC_RMID: case IPC_SET: return shmctl_down(ns, shmid, cmd, buf, version); case SHM_LOCK: case SHM_UNLOCK: { struct file *shm_file; rcu_read_lock(); shp = shm_obtain_object_check(ns, shmid); if (IS_ERR(shp)) { err = PTR_ERR(shp); goto out_unlock1; } audit_ipc_obj(&(shp->shm_perm)); err = security_shm_shmctl(shp, cmd); if (err) goto out_unlock1; ipc_lock_object(&shp->shm_perm); /* check if shm_destroy() is tearing down shp */ if (!ipc_valid_object(&shp->shm_perm)) { err = -EIDRM; goto out_unlock0; } if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) { kuid_t euid = current_euid(); if (!uid_eq(euid, shp->shm_perm.uid) && !uid_eq(euid, shp->shm_perm.cuid)) { err = -EPERM; goto out_unlock0; } if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK)) { err = -EPERM; goto out_unlock0; } } shm_file = shp->shm_file; if (is_file_hugepages(shm_file)) goto out_unlock0; if (cmd == SHM_LOCK) { struct user_struct *user = current_user(); err = shmem_lock(shm_file, 1, user); if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) { shp->shm_perm.mode |= SHM_LOCKED; shp->mlock_user = user; } goto out_unlock0; } /* SHM_UNLOCK */ if (!(shp->shm_perm.mode & SHM_LOCKED)) goto out_unlock0; shmem_lock(shm_file, 0, shp->mlock_user); shp->shm_perm.mode &= ~SHM_LOCKED; shp->mlock_user = NULL; get_file(shm_file); ipc_unlock_object(&shp->shm_perm); rcu_read_unlock(); shmem_unlock_mapping(shm_file->f_mapping); fput(shm_file); return err; } default: return -EINVAL; } out_unlock0: ipc_unlock_object(&shp->shm_perm); out_unlock1: rcu_read_unlock(); return err; } /* * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists. * * NOTE! Despite the name, this is NOT a direct system call entrypoint. The * "raddr" thing points to kernel space, and there has to be a wrapper around * this. 
*/ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr, unsigned long shmlba) { struct shmid_kernel *shp; unsigned long addr; unsigned long size; struct file *file; int err; unsigned long flags; unsigned long prot; int acc_mode; struct ipc_namespace *ns; struct shm_file_data *sfd; struct path path; fmode_t f_mode; unsigned long populate = 0; err = -EINVAL; if (shmid < 0) goto out; else if ((addr = (ulong)shmaddr)) { if (addr & (shmlba - 1)) { if (shmflg & SHM_RND) addr &= ~(shmlba - 1); /* round down */ else #ifndef __ARCH_FORCE_SHMLBA if (addr & ~PAGE_MASK) #endif goto out; } flags = MAP_SHARED | MAP_FIXED; } else { if ((shmflg & SHM_REMAP)) goto out; flags = MAP_SHARED; } if (shmflg & SHM_RDONLY) { prot = PROT_READ; acc_mode = S_IRUGO; f_mode = FMODE_READ; } else { prot = PROT_READ | PROT_WRITE; acc_mode = S_IRUGO | S_IWUGO; f_mode = FMODE_READ | FMODE_WRITE; } if (shmflg & SHM_EXEC) { prot |= PROT_EXEC; acc_mode |= S_IXUGO; } /* * We cannot rely on the fs check since SYSV IPC does have an * additional creator id... */ ns = current->nsproxy->ipc_ns; rcu_read_lock(); shp = shm_obtain_object_check(ns, shmid); if (IS_ERR(shp)) { err = PTR_ERR(shp); goto out_unlock; } err = -EACCES; if (ipcperms(ns, &shp->shm_perm, acc_mode)) goto out_unlock; err = security_shm_shmat(shp, shmaddr, shmflg); if (err) goto out_unlock; ipc_lock_object(&shp->shm_perm); /* check if shm_destroy() is tearing down shp */ if (!ipc_valid_object(&shp->shm_perm)) { ipc_unlock_object(&shp->shm_perm); err = -EIDRM; goto out_unlock; } path = shp->shm_file->f_path; path_get(&path); shp->shm_nattch++; size = i_size_read(d_inode(path.dentry)); ipc_unlock_object(&shp->shm_perm); rcu_read_unlock(); err = -ENOMEM; sfd = kzalloc(sizeof(*sfd), GFP_KERNEL); if (!sfd) { path_put(&path); goto out_nattch; } file = alloc_file(&path, f_mode, is_file_hugepages(shp->shm_file) ? &shm_file_operations_huge : &shm_file_operations); err = PTR_ERR(file); if (IS_ERR(file)) { kfree(sfd); path_put(&path); goto out_nattch; } file->private_data = sfd; file->f_mapping = shp->shm_file->f_mapping; sfd->id = shp->shm_perm.id; sfd->ns = get_ipc_ns(ns); sfd->file = shp->shm_file; sfd->vm_ops = NULL; err = security_mmap_file(file, prot, flags); if (err) goto out_fput; down_write(&current->mm->mmap_sem); if (addr && !(shmflg & SHM_REMAP)) { err = -EINVAL; if (addr + size < addr) goto invalid; if (find_vma_intersection(current->mm, addr, addr + size)) goto invalid; } addr = do_mmap_pgoff(file, addr, size, prot, flags, 0, &populate); *raddr = addr; err = 0; if (IS_ERR_VALUE(addr)) err = (long)addr; invalid: up_write(&current->mm->mmap_sem); if (populate) mm_populate(addr, populate); out_fput: fput(file); out_nattch: down_write(&shm_ids(ns).rwsem); shp = shm_lock(ns, shmid); shp->shm_nattch--; if (shm_may_destroy(ns, shp)) shm_destroy(ns, shp); else shm_unlock(shp); up_write(&shm_ids(ns).rwsem); return err; out_unlock: rcu_read_unlock(); out: return err; } SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg) { unsigned long ret; long err; err = do_shmat(shmid, shmaddr, shmflg, &ret, SHMLBA); if (err) return err; force_successful_syscall_return(); return (long)ret; } /* * detach and kill segment if marked destroyed. * The work is done in shm_close. 
*/ SYSCALL_DEFINE1(shmdt, char __user *, shmaddr) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma; unsigned long addr = (unsigned long)shmaddr; int retval = -EINVAL; #ifdef CONFIG_MMU loff_t size = 0; struct file *file; struct vm_area_struct *next; #endif if (addr & ~PAGE_MASK) return retval; down_write(&mm->mmap_sem); /* * This function tries to be smart and unmap shm segments that * were modified by partial mlock or munmap calls: * - It first determines the size of the shm segment that should be * unmapped: It searches for a vma that is backed by shm and that * started at address shmaddr. It records it's size and then unmaps * it. * - Then it unmaps all shm vmas that started at shmaddr and that * are within the initially determined size and that are from the * same shm segment from which we determined the size. * Errors from do_munmap are ignored: the function only fails if * it's called with invalid parameters or if it's called to unmap * a part of a vma. Both calls in this function are for full vmas, * the parameters are directly copied from the vma itself and always * valid - therefore do_munmap cannot fail. (famous last words?) */ /* * If it had been mremap()'d, the starting address would not * match the usual checks anyway. So assume all vma's are * above the starting address given. */ vma = find_vma(mm, addr); #ifdef CONFIG_MMU while (vma) { next = vma->vm_next; /* * Check if the starting address would match, i.e. it's * a fragment created by mprotect() and/or munmap(), or it * otherwise it starts at this address with no hassles. */ if ((vma->vm_ops == &shm_vm_ops) && (vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) { /* * Record the file of the shm segment being * unmapped. With mremap(), someone could place * page from another segment but with equal offsets * in the range we are unmapping. */ file = vma->vm_file; size = i_size_read(file_inode(vma->vm_file)); do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start); /* * We discovered the size of the shm segment, so * break out of here and fall through to the next * loop that uses the size information to stop * searching for matching vma's. */ retval = 0; vma = next; break; } vma = next; } /* * We need look no further than the maximum address a fragment * could possibly have landed at. Also cast things to loff_t to * prevent overflows and make comparisons vs. equal-width types. 
*/ size = PAGE_ALIGN(size); while (vma && (loff_t)(vma->vm_end - addr) <= size) { next = vma->vm_next; /* finding a matching vma now does not alter retval */ if ((vma->vm_ops == &shm_vm_ops) && ((vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) && (vma->vm_file == file)) do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start); vma = next; } #else /* CONFIG_MMU */ /* under NOMMU conditions, the exact address to be destroyed must be * given */ if (vma && vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) { do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start); retval = 0; } #endif up_write(&mm->mmap_sem); return retval; } #ifdef CONFIG_PROC_FS static int sysvipc_shm_proc_show(struct seq_file *s, void *it) { struct user_namespace *user_ns = seq_user_ns(s); struct shmid_kernel *shp = it; unsigned long rss = 0, swp = 0; shm_add_rss_swap(shp, &rss, &swp); #if BITS_PER_LONG <= 32 #define SIZE_SPEC "%10lu" #else #define SIZE_SPEC "%21lu" #endif seq_printf(s, "%10d %10d %4o " SIZE_SPEC " %5u %5u " "%5lu %5u %5u %5u %5u %10lu %10lu %10lu " SIZE_SPEC " " SIZE_SPEC "\n", shp->shm_perm.key, shp->shm_perm.id, shp->shm_perm.mode, shp->shm_segsz, shp->shm_cprid, shp->shm_lprid, shp->shm_nattch, from_kuid_munged(user_ns, shp->shm_perm.uid), from_kgid_munged(user_ns, shp->shm_perm.gid), from_kuid_munged(user_ns, shp->shm_perm.cuid), from_kgid_munged(user_ns, shp->shm_perm.cgid), shp->shm_atim, shp->shm_dtim, shp->shm_ctim, rss * PAGE_SIZE, swp * PAGE_SIZE); return 0; } #endif
./CrossVul/dataset_final_sorted/CWE-362/c/good_1757_1
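For orientation, the kernel entry points in the shm.c record above (newseg reached via shmget, do_shmat via shmat, shm_close via shmdt, and do_shm_rmid via shmctl) serve a user-space call sequence along these lines. This is a self-contained sketch, not part of the dataset sample; the segment size and message text are arbitrary.

/*
 * Minimal SysV shared-memory round trip: create, attach, write, detach,
 * then mark the segment for destruction.
 */
#include <stdio.h>
#include <string.h>
#include <sys/ipc.h>
#include <sys/shm.h>

int main(void)
{
	/* Create a private 4 KiB segment, owner read/write (newseg). */
	int shmid = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
	if (shmid < 0) {
		perror("shmget");
		return 1;
	}

	/* Attach at a kernel-chosen address (do_shmat -> shm_mmap). */
	char *addr = shmat(shmid, NULL, 0);
	if (addr == (void *) -1) {
		perror("shmat");
		return 1;
	}

	strcpy(addr, "hello from SysV shm");
	printf("%s\n", addr);

	/* Detach: shm_close() drops shm_nattch for this mapping. */
	shmdt(addr);

	/* Mark for destruction: do_shm_rmid() frees it once unattached. */
	shmctl(shmid, IPC_RMID, NULL);
	return 0;
}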
crossvul-cpp_data_bad_1664_4
/* BEGIN_ICS_COPYRIGHT5 **************************************** Copyright (c) 2015, Intel Corporation Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ** END_ICS_COPYRIGHT5 ****************************************/ #include <stdio.h> #include <stdlib.h> #include <sys/types.h> #include <sys/socket.h> #include <sys/un.h> #include <sys/stat.h> #include <signal.h> #include <unistd.h> #include "hsm_com_client_api.h" #include "hsm_config_client_api.h" #include "hsm_config_client_data.h" #include "iba/public/ibyteswap.h" #include "ssapi_internal.h" fm_mgr_config_errno_t fm_mgr_config_mgr_connect ( fm_config_conx_hdl *hdl, fm_mgr_type_t mgr ) { char s_path[256]; char c_path[256]; char *mgr_prefix; p_hsm_com_client_hdl_t *mgr_hdl; pid_t pid; memset(s_path,0,sizeof(s_path)); memset(c_path,0,sizeof(c_path)); pid = getpid(); switch ( mgr ) { case FM_MGR_SM: mgr_prefix = HSM_FM_SCK_SM; mgr_hdl = &hdl->sm_hdl; break; case FM_MGR_PM: mgr_prefix = HSM_FM_SCK_PM; mgr_hdl = &hdl->pm_hdl; break; case FM_MGR_FE: mgr_prefix = HSM_FM_SCK_FE; mgr_hdl = &hdl->fe_hdl; break; default: return FM_CONF_INIT_ERR; } // Fill in the paths for the server and client sockets. sprintf(s_path,"%s%s%d",HSM_FM_SCK_PREFIX,mgr_prefix,hdl->instance); sprintf(c_path,"%s%s%d_C_%lu",HSM_FM_SCK_PREFIX,mgr_prefix, hdl->instance, (long unsigned)pid); if ( *mgr_hdl == NULL ) { if ( hcom_client_init(mgr_hdl,s_path,c_path,32768) != HSM_COM_OK ) { return FM_CONF_INIT_ERR; } } if ( hcom_client_connect(*mgr_hdl) == HSM_COM_OK ) { hdl->conx_mask |= mgr; return FM_CONF_OK; } return FM_CONF_CONX_ERR; } // init fm_mgr_config_errno_t fm_mgr_config_init ( OUT p_fm_config_conx_hdlt *p_hdl, IN int instance, OPTIONAL IN char *rem_address, OPTIONAL IN char *community ) { fm_config_conx_hdl *hdl; fm_mgr_config_errno_t res = FM_CONF_OK; if ( (hdl = calloc(1,sizeof(fm_config_conx_hdl))) == NULL ) { res = FM_CONF_NO_MEM; goto cleanup; } hdl->instance = instance; *p_hdl = hdl; // connect to the snmp agent via localhost? 
if(!rem_address || (strcmp(rem_address,"localhost") == 0)) { if ( fm_mgr_config_mgr_connect(hdl, FM_MGR_SM) == FM_CONF_INIT_ERR ) { res = FM_CONF_INIT_ERR; goto cleanup; } if ( fm_mgr_config_mgr_connect(hdl, FM_MGR_PM) == FM_CONF_INIT_ERR ) { res = FM_CONF_INIT_ERR; goto cleanup; } if ( fm_mgr_config_mgr_connect(hdl, FM_MGR_FE) == FM_CONF_INIT_ERR ) { res = FM_CONF_INIT_ERR; goto cleanup; } } return res; cleanup: if ( hdl ) { free(hdl); hdl = NULL; } return res; } // connect fm_mgr_config_errno_t fm_mgr_config_connect ( IN p_fm_config_conx_hdlt p_hdl ) { fm_config_conx_hdl *hdl = p_hdl; fm_mgr_config_errno_t res = FM_CONF_OK; int fail_count = 0; if ( (res = fm_mgr_config_mgr_connect(hdl, FM_MGR_SM)) == FM_CONF_INIT_ERR ) { res = FM_CONF_INIT_ERR; goto cleanup; } if(res != FM_CONF_OK) fail_count++; if ( (res = fm_mgr_config_mgr_connect(hdl, FM_MGR_PM)) == FM_CONF_INIT_ERR ) { res = FM_CONF_INIT_ERR; goto cleanup; } if(res != FM_CONF_OK) fail_count++; if ( (res = fm_mgr_config_mgr_connect(hdl, FM_MGR_FE)) == FM_CONF_INIT_ERR ) { res = FM_CONF_INIT_ERR; goto cleanup; } if(res != FM_CONF_OK) fail_count++; if(fail_count < 4){ res = FM_CONF_OK; } cleanup: return res; } fm_mgr_config_errno_t fm_mgr_general_query ( IN p_hsm_com_client_hdl_t client_hdl, IN fm_mgr_action_t action, IN fm_datatype_t data_type_id, IN int data_len, OUT void *data, OUT fm_msg_ret_code_t *ret_code ) { fm_config_datagram_t *dg; hsm_com_datagram_t com_dg; int len; hsm_com_errno_t com_res; len = data_len + (sizeof(fm_config_datagram_header_t)); if ( (dg = malloc(len)) == NULL ) { return FM_CONF_NO_MEM; } dg->header.action = action; dg->header.data_id = data_type_id; dg->header.data_len = data_len; memcpy(&dg->data[0],data,data_len); com_dg.buf = (char*)dg; com_dg.buf_size = len; com_dg.data_len = len; if ( (com_res = hcom_client_send_data(client_hdl,60,&com_dg,&com_dg)) != HSM_COM_OK ) { free(dg); if(com_res == HSM_COM_NOT_CONNECTED) { return FM_CONF_ERR_DISC; } return FM_CONF_NO_RESP; } if ( ret_code ) *ret_code = dg->header.ret_code; if ( dg->header.ret_code != FM_RET_OK ) { free(dg); return FM_CONF_ERROR; } else { if ( dg->header.data_len != data_len ) { free(dg); if ( ret_code ) *ret_code = FM_RET_BAD_RET_LEN; return FM_CONF_ERR_LEN; } memcpy(data,&dg->data[0],data_len); } free(dg); return FM_CONF_OK; } fm_mgr_config_errno_t fm_mgr_status_query ( IN p_hsm_com_client_hdl_t client_hdl, IN fm_mgr_action_t action, IN fm_datatype_t data_type_id, IN int data_len, OUT void *data, OUT fm_msg_ret_code_t *ret_code ) { fm_config_datagram_t *dg; hsm_com_datagram_t com_dg; int len; hsm_com_errno_t com_res; len = data_len + (sizeof(fm_config_datagram_header_t)); if ( (dg = malloc(len)) == NULL ) { return FM_CONF_NO_MEM; } dg->header.action = action; dg->header.data_id = data_type_id; dg->header.data_len = data_len; memcpy(&dg->data[0],data,data_len); com_dg.buf = (char*)dg; com_dg.buf_size = len; com_dg.data_len = len; if ( (com_res = hcom_client_send_data(client_hdl,10,&com_dg,&com_dg)) != HSM_COM_OK ) { free(dg); if(com_res == HSM_COM_NOT_CONNECTED) { return FM_CONF_ERR_DISC; } return FM_CONF_NO_RESP; } if ( ret_code ) *ret_code = dg->header.ret_code; if ( dg->header.ret_code != FM_RET_OK ) { free(dg); return FM_CONF_ERROR; } else { if ( dg->header.data_len != data_len ) { free(dg); if ( ret_code ) *ret_code = FM_RET_BAD_RET_LEN; return FM_CONF_ERR_LEN; } memcpy(data,&dg->data[0],data_len); } free(dg); return FM_CONF_OK; } p_hsm_com_client_hdl_t get_mgr_hdl ( IN p_fm_config_conx_hdlt hdl, IN fm_mgr_type_t mgr ) { switch ( mgr ) { 
case FM_MGR_SM: return hdl->sm_hdl; case FM_MGR_PM: return hdl->pm_hdl; case FM_MGR_FE: return hdl->fe_hdl; default: return NULL; } return NULL; } /* * simple local instance only queries */ fm_mgr_config_errno_t fm_mgr_simple_query ( IN p_fm_config_conx_hdlt hdl, IN fm_mgr_action_t action, IN fm_datatype_t data_type_id, IN fm_mgr_type_t mgr, IN int data_len, OUT void *data, OUT fm_msg_ret_code_t *ret_code ) { p_hsm_com_client_hdl_t client_hdl; if ( (client_hdl = get_mgr_hdl(hdl,mgr)) != NULL ) { return fm_mgr_general_query(client_hdl,action,data_type_id,data_len,data,ret_code); } return FM_CONF_ERROR; } fm_mgr_config_errno_t fm_mgr_commong_cfg_query ( IN p_fm_config_conx_hdlt hdl, IN fm_mgr_type_t mgr, IN fm_mgr_action_t action, OUT fm_config_common_t *info, OUT fm_msg_ret_code_t *ret_code ) { p_hsm_com_client_hdl_t client_hdl; if ( (client_hdl = get_mgr_hdl(hdl,mgr)) != NULL ) { return fm_mgr_general_query(client_hdl,action,FM_DT_COMMON,sizeof(fm_config_common_t),info,ret_code); } return FM_CONF_ERROR; } fm_mgr_config_errno_t fm_mgr_fe_cfg_query ( IN p_fm_config_conx_hdlt hdl, IN fm_mgr_action_t action, OUT fe_config_t *info, OUT fm_msg_ret_code_t *ret_code ) { p_hsm_com_client_hdl_t client_hdl; if ( (client_hdl = get_mgr_hdl(hdl,FM_MGR_FE)) != NULL ) { return fm_mgr_general_query(client_hdl,action,FM_DT_FE_CFG,sizeof(fe_config_t),info,ret_code); } return FM_CONF_ERROR; } fm_mgr_config_errno_t fm_mgr_pm_cfg_query ( IN p_fm_config_conx_hdlt hdl, IN fm_mgr_action_t action, OUT pm_config_t *info, OUT fm_msg_ret_code_t *ret_code ) { p_hsm_com_client_hdl_t client_hdl; if ( (client_hdl = get_mgr_hdl(hdl,FM_MGR_PM)) != NULL ) { return fm_mgr_general_query(client_hdl,action,FM_DT_PM_CFG,sizeof(pm_config_t),info,ret_code); } return FM_CONF_ERROR; } fm_mgr_config_errno_t fm_mgr_sm_cfg_query ( IN p_fm_config_conx_hdlt hdl, IN fm_mgr_action_t action, OUT sm_config_t *info, OUT fm_msg_ret_code_t *ret_code ) { p_hsm_com_client_hdl_t client_hdl; if ( (client_hdl = get_mgr_hdl(hdl,FM_MGR_SM)) != NULL ) { return fm_mgr_general_query(client_hdl,action,FM_DT_SM_CFG,sizeof(sm_config_t),info,ret_code); } return FM_CONF_ERROR; } fm_mgr_config_errno_t fm_sm_status_query ( IN p_fm_config_conx_hdlt hdl, IN fm_mgr_action_t action, OUT fm_sm_status_t *info, OUT fm_msg_ret_code_t *ret_code ) { p_hsm_com_client_hdl_t client_hdl; if ( (client_hdl = get_mgr_hdl(hdl,FM_MGR_SM)) != NULL ) { return fm_mgr_general_query(client_hdl,action,FM_DT_SM_STATUS,sizeof(*info),info,ret_code); } return FM_CONF_ERROR; } fm_mgr_config_errno_t fm_pm_status_query ( IN p_fm_config_conx_hdlt hdl, IN fm_mgr_action_t action, OUT fm_pm_status_t *info, OUT fm_msg_ret_code_t *ret_code ) { p_hsm_com_client_hdl_t client_hdl; if ( (client_hdl = get_mgr_hdl(hdl,FM_MGR_PM)) != NULL ) { return fm_mgr_general_query(client_hdl,action,FM_DT_PM_STATUS,sizeof(*info),info,ret_code); } return FM_CONF_ERROR; } fm_mgr_config_errno_t fm_fe_status_query ( IN p_fm_config_conx_hdlt hdl, IN fm_mgr_action_t action, OUT fm_fe_status_t *info, OUT fm_msg_ret_code_t *ret_code ) { p_hsm_com_client_hdl_t client_hdl; if ( (client_hdl = get_mgr_hdl(hdl,FM_MGR_FE)) != NULL ) { return fm_mgr_general_query(client_hdl,action,FM_DT_FE_STATUS,sizeof(*info),info,ret_code); } return FM_CONF_ERROR; } const char* fm_mgr_get_error_str ( IN fm_mgr_config_errno_t err ) { switch(err){ case FM_CONF_ERR_LEN: return "Response data legth invalid"; case FM_CONF_ERR_VERSION: return "Client/Server version mismatch"; case FM_CONF_ERR_DISC: return "Not connected"; case FM_CONF_TEST: return "Test 
message"; case FM_CONF_OK: return "Ok"; case FM_CONF_ERROR: return "General error"; case FM_CONF_NO_RESOURCES: return "Out of resources"; case FM_CONF_NO_MEM: return "No memory"; case FM_CONF_PATH_ERR: return "Invalid path"; case FM_CONF_BAD: return "Bad argument"; case FM_CONF_BIND_ERR: return "Could not bind socket"; case FM_CONF_SOCK_ERR: return "Could not create socket"; case FM_CONF_CHMOD_ERR: return "Invalid permissions on socket"; case FM_CONF_CONX_ERR: return "Connection Error"; case FM_CONF_SEND_ERR: return "Send error"; case FM_CONF_INIT_ERR: return "Could not initalize resource"; case FM_CONF_NO_RESP: return "No Response"; case FM_CONF_MAX_ERROR_NUM: default: return "Unknown error"; } return "Unknown error"; } const char* fm_mgr_get_resp_error_str ( IN fm_msg_ret_code_t err ) { switch(err){ case FM_RET_BAD_RET_LEN: return "Return data length invalid"; case FM_RET_OK: return "Ok"; case FM_RET_DT_NOT_SUPPORTED: return "Data type not supported"; case FM_RET_ACT_NOT_SUPPORTED: return "Action not supported"; case FM_RET_INVALID: return "Invalid"; case FM_RET_BAD_LEN: return "Send data length invalid"; case FM_RET_BUSY: return "Busy"; case FM_RET_NOT_FOUND: return "Record not found"; case FM_RET_NO_NEXT: return "No next instance"; case FM_RET_NOT_MASTER: return "SM is not master"; case FM_RET_NOSUCHOBJECT: return "SNMP Err: No such object"; case FM_RET_NOSUCHINSTANCE: return "SNMP Err: No such instance"; case FM_RET_ENDOFMIBVIEW: return "SNMP Err: End of view"; case FM_RET_ERR_NOERROR: return "SNMP Err: No error"; case FM_RET_ERR_TOOBIG: return "SNMP Err: Object too big"; case FM_RET_ERR_NOSUCHNAME: return "SNMP Err: no such name"; case FM_RET_ERR_BADVALUE: return "SNMP Err: Bad value"; case FM_RET_ERR_READONLY: return "SNMP Err: Read only"; case FM_RET_ERR_GENERR: return "SNMP Err: General Error"; case FM_RET_ERR_NOACCESS: return "SNMP Err: No Access"; case FM_RET_ERR_WRONGTYPE: return "SNMP Err: Wrong Type"; case FM_RET_ERR_WRONGLENGTH: return "SNMP Err: Wrong length"; case FM_RET_ERR_WRONGENCODING: return "SNMP Err: Wrong encoding"; case FM_RET_ERR_WRONGVALUE: return "SNMP Err: Wrong Value"; case FM_RET_ERR_NOCREATION: return "SNMP Err: No Creation"; case FM_RET_ERR_INCONSISTENTVALUE: return "SNMP Err: Inconsistent value"; case FM_RET_ERR_RESOURCEUNAVAILABLE: return "SNMP Err: Resource Unavailable"; case FM_RET_ERR_COMMITFAILED: return "SNMP Err: Commit failed"; case FM_RET_ERR_UNDOFAILED: return "SNMP Err: Undo failed"; case FM_RET_ERR_AUTHORIZATIONERROR: return "SNMP Err: Authorization error"; case FM_RET_ERR_NOTWRITABLE: return "SNMP Err: Not Writable"; case FM_RET_TIMEOUT: return "SNMP Err: Timeout"; case FM_RET_UNKNOWN_DT: return "Unknown Datatype"; case FM_RET_END_OF_TABLE: return "End of Table"; case FM_RET_INTERNAL_ERR: return "Internal Error"; case FM_RET_CONX_CLOSED: return "Connection Closed"; } return "Unknown code"; } /* The current error map is copied to the pointer provided */ fm_mgr_config_errno_t fm_mgr_config_get_error_map ( IN p_fm_config_conx_hdlt hdl, OUT fm_error_map_t *error_map ) { if(error_map){ memcpy(error_map,&hdl->error_map,sizeof(fm_error_map_t)); return FM_CONF_OK; } return FM_CONF_ERROR; } fm_mgr_config_errno_t fm_mgr_config_clear_error_map ( IN p_fm_config_conx_hdlt hdl ) { if(hdl->error_map.err_set) memset(&hdl->error_map,0,sizeof(hdl->error_map)); return FM_CONF_OK; } /* The current error map is copied to the pointer provided */ fm_mgr_config_errno_t fm_mgr_config_get_error_map_entry ( IN p_fm_config_conx_hdlt hdl, IN uint64_t mask, OUT 
fm_mgr_config_errno_t *error_code ) { int i; for(i=0;i<64;i++){ if(mask & 0x01){ if((mask >> 1)){ return FM_CONF_ERROR; } *error_code = hdl->error_map.map[i]; return FM_CONF_OK; } mask >>= 1; } return FM_CONF_ERROR; } /* The supplied error code is stored in the error map entry selected by the single-bit mask */ fm_mgr_config_errno_t fm_mgr_config_set_error_map_entry ( IN p_fm_config_conx_hdlt hdl, IN uint64_t mask, IN fm_mgr_config_errno_t error_code ) { int i; for(i=0;i<64;i++){ if(mask & 0x01){ if((mask >> 1)){ return FM_CONF_ERROR; } hdl->error_map.err_set = 1; hdl->error_map.map[i] = error_code; return FM_CONF_OK; } mask >>= 1; } return FM_CONF_ERROR; }
./CrossVul/dataset_final_sorted/CWE-362/c/bad_1664_4
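Editor's note: the entry above defines a client-side API that talks to the SM/PM/FE managers over per-instance Unix-domain sockets (paths built from HSM_FM_SCK_PREFIX, the manager prefix and the instance number in fm_mgr_config_mgr_connect). The fragment below is an illustrative usage sketch only, not part of the original source: FM_ACT_GET is assumed here as a representative fm_mgr_action_t value, since the real action enumerators live in the hsm_config_client headers and are not shown in this file.

#include <stdio.h>
#include "hsm_config_client_api.h"

/* Hypothetical caller: query the local SM configuration for one FM instance. */
static int example_query_sm_config(int instance)
{
	p_fm_config_conx_hdlt hdl = NULL;
	sm_config_t sm_cfg;
	fm_msg_ret_code_t ret_code = FM_RET_OK;
	fm_mgr_config_errno_t res;

	/* For a NULL rem_address, fm_mgr_config_init() already opens the
	 * local SM/PM/FE sockets for this instance. */
	res = fm_mgr_config_init(&hdl, instance, NULL, NULL);
	if (res != FM_CONF_OK) {
		fprintf(stderr, "init failed: %s\n", fm_mgr_get_error_str(res));
		return -1;
	}

	/* FM_ACT_GET is a placeholder action value for this sketch. */
	res = fm_mgr_sm_cfg_query(hdl, FM_ACT_GET, &sm_cfg, &ret_code);
	if (res != FM_CONF_OK) {
		fprintf(stderr, "query failed: %s (server: %s)\n",
			fm_mgr_get_error_str(res),
			fm_mgr_get_resp_error_str(ret_code));
		return -1;
	}
	return 0;
}

The query path mirrors fm_mgr_general_query() above: the request and reply share one datagram buffer, and a reply whose data_len does not match the request length is reported as FM_RET_BAD_RET_LEN.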
crossvul-cpp_data_good_1625_0
/* * Ldisc rw semaphore * * The ldisc semaphore is semantically a rw_semaphore but which enforces * an alternate policy, namely: * 1) Supports lock wait timeouts * 2) Write waiter has priority * 3) Downgrading is not supported * * Implementation notes: * 1) Upper half of semaphore count is a wait count (differs from rwsem * in that rwsem normalizes the upper half to the wait bias) * 2) Lacks overflow checking * * The generic counting was copied and modified from include/asm-generic/rwsem.h * by Paul Mackerras <paulus@samba.org>. * * The scheduling policy was copied and modified from lib/rwsem.c * Written by David Howells (dhowells@redhat.com). * * This implementation incorporates the write lock stealing work of * Michel Lespinasse <walken@google.com>. * * Copyright (C) 2013 Peter Hurley <peter@hurleysoftware.com> * * This file may be redistributed under the terms of the GNU General Public * License v2. */ #include <linux/list.h> #include <linux/spinlock.h> #include <linux/atomic.h> #include <linux/tty.h> #include <linux/sched.h> #ifdef CONFIG_DEBUG_LOCK_ALLOC # define __acq(l, s, t, r, c, n, i) \ lock_acquire(&(l)->dep_map, s, t, r, c, n, i) # define __rel(l, n, i) \ lock_release(&(l)->dep_map, n, i) # ifdef CONFIG_PROVE_LOCKING # define lockdep_acquire(l, s, t, i) __acq(l, s, t, 0, 2, NULL, i) # define lockdep_acquire_nest(l, s, t, n, i) __acq(l, s, t, 0, 2, n, i) # define lockdep_acquire_read(l, s, t, i) __acq(l, s, t, 1, 2, NULL, i) # define lockdep_release(l, n, i) __rel(l, n, i) # else # define lockdep_acquire(l, s, t, i) __acq(l, s, t, 0, 1, NULL, i) # define lockdep_acquire_nest(l, s, t, n, i) __acq(l, s, t, 0, 1, n, i) # define lockdep_acquire_read(l, s, t, i) __acq(l, s, t, 1, 1, NULL, i) # define lockdep_release(l, n, i) __rel(l, n, i) # endif #else # define lockdep_acquire(l, s, t, i) do { } while (0) # define lockdep_acquire_nest(l, s, t, n, i) do { } while (0) # define lockdep_acquire_read(l, s, t, i) do { } while (0) # define lockdep_release(l, n, i) do { } while (0) #endif #ifdef CONFIG_LOCK_STAT # define lock_stat(_lock, stat) lock_##stat(&(_lock)->dep_map, _RET_IP_) #else # define lock_stat(_lock, stat) do { } while (0) #endif #if BITS_PER_LONG == 64 # define LDSEM_ACTIVE_MASK 0xffffffffL #else # define LDSEM_ACTIVE_MASK 0x0000ffffL #endif #define LDSEM_UNLOCKED 0L #define LDSEM_ACTIVE_BIAS 1L #define LDSEM_WAIT_BIAS (-LDSEM_ACTIVE_MASK-1) #define LDSEM_READ_BIAS LDSEM_ACTIVE_BIAS #define LDSEM_WRITE_BIAS (LDSEM_WAIT_BIAS + LDSEM_ACTIVE_BIAS) struct ldsem_waiter { struct list_head list; struct task_struct *task; }; static inline long ldsem_atomic_update(long delta, struct ld_semaphore *sem) { return atomic_long_add_return(delta, (atomic_long_t *)&sem->count); } /* * ldsem_cmpxchg() updates @*old with the last-known sem->count value. * Returns 1 if count was successfully changed; @*old will have @new value. 
* Returns 0 if count was not changed; @*old will have most recent sem->count */ static inline int ldsem_cmpxchg(long *old, long new, struct ld_semaphore *sem) { long tmp = atomic_long_cmpxchg(&sem->count, *old, new); if (tmp == *old) { *old = new; return 1; } else { *old = tmp; return 0; } } /* * Initialize an ldsem: */ void __init_ldsem(struct ld_semaphore *sem, const char *name, struct lock_class_key *key) { #ifdef CONFIG_DEBUG_LOCK_ALLOC /* * Make sure we are not reinitializing a held semaphore: */ debug_check_no_locks_freed((void *)sem, sizeof(*sem)); lockdep_init_map(&sem->dep_map, name, key, 0); #endif sem->count = LDSEM_UNLOCKED; sem->wait_readers = 0; raw_spin_lock_init(&sem->wait_lock); INIT_LIST_HEAD(&sem->read_wait); INIT_LIST_HEAD(&sem->write_wait); } static void __ldsem_wake_readers(struct ld_semaphore *sem) { struct ldsem_waiter *waiter, *next; struct task_struct *tsk; long adjust, count; /* Try to grant read locks to all readers on the read wait list. * Note the 'active part' of the count is incremented by * the number of readers before waking any processes up. */ adjust = sem->wait_readers * (LDSEM_ACTIVE_BIAS - LDSEM_WAIT_BIAS); count = ldsem_atomic_update(adjust, sem); do { if (count > 0) break; if (ldsem_cmpxchg(&count, count - adjust, sem)) return; } while (1); list_for_each_entry_safe(waiter, next, &sem->read_wait, list) { tsk = waiter->task; smp_mb(); waiter->task = NULL; wake_up_process(tsk); put_task_struct(tsk); } INIT_LIST_HEAD(&sem->read_wait); sem->wait_readers = 0; } static inline int writer_trylock(struct ld_semaphore *sem) { /* only wake this writer if the active part of the count can be * transitioned from 0 -> 1 */ long count = ldsem_atomic_update(LDSEM_ACTIVE_BIAS, sem); do { if ((count & LDSEM_ACTIVE_MASK) == LDSEM_ACTIVE_BIAS) return 1; if (ldsem_cmpxchg(&count, count - LDSEM_ACTIVE_BIAS, sem)) return 0; } while (1); } static void __ldsem_wake_writer(struct ld_semaphore *sem) { struct ldsem_waiter *waiter; waiter = list_entry(sem->write_wait.next, struct ldsem_waiter, list); wake_up_process(waiter->task); } /* * handle the lock release when processes blocked on it that can now run * - if we come here from up_xxxx(), then: * - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed) * - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so) * - the spinlock must be held by the caller * - woken process blocks are discarded from the list after having task zeroed */ static void __ldsem_wake(struct ld_semaphore *sem) { if (!list_empty(&sem->write_wait)) __ldsem_wake_writer(sem); else if (!list_empty(&sem->read_wait)) __ldsem_wake_readers(sem); } static void ldsem_wake(struct ld_semaphore *sem) { unsigned long flags; raw_spin_lock_irqsave(&sem->wait_lock, flags); __ldsem_wake(sem); raw_spin_unlock_irqrestore(&sem->wait_lock, flags); } /* * wait for the read lock to be granted */ static struct ld_semaphore __sched * down_read_failed(struct ld_semaphore *sem, long count, long timeout) { struct ldsem_waiter waiter; struct task_struct *tsk = current; long adjust = -LDSEM_ACTIVE_BIAS + LDSEM_WAIT_BIAS; /* set up my own style of waitqueue */ raw_spin_lock_irq(&sem->wait_lock); /* Try to reverse the lock attempt but if the count has changed * so that reversing fails, check if there are are no waiters, * and early-out if not */ do { if (ldsem_cmpxchg(&count, count + adjust, sem)) break; if (count > 0) { raw_spin_unlock_irq(&sem->wait_lock); return sem; } } while (1); list_add_tail(&waiter.list, &sem->read_wait); sem->wait_readers++; 
waiter.task = tsk; get_task_struct(tsk); /* if there are no active locks, wake the new lock owner(s) */ if ((count & LDSEM_ACTIVE_MASK) == 0) __ldsem_wake(sem); raw_spin_unlock_irq(&sem->wait_lock); /* wait to be given the lock */ for (;;) { set_task_state(tsk, TASK_UNINTERRUPTIBLE); if (!waiter.task) break; if (!timeout) break; timeout = schedule_timeout(timeout); } __set_task_state(tsk, TASK_RUNNING); if (!timeout) { /* lock timed out but check if this task was just * granted lock ownership - if so, pretend there * was no timeout; otherwise, cleanup lock wait */ raw_spin_lock_irq(&sem->wait_lock); if (waiter.task) { ldsem_atomic_update(-LDSEM_WAIT_BIAS, sem); list_del(&waiter.list); raw_spin_unlock_irq(&sem->wait_lock); put_task_struct(waiter.task); return NULL; } raw_spin_unlock_irq(&sem->wait_lock); } return sem; } /* * wait for the write lock to be granted */ static struct ld_semaphore __sched * down_write_failed(struct ld_semaphore *sem, long count, long timeout) { struct ldsem_waiter waiter; struct task_struct *tsk = current; long adjust = -LDSEM_ACTIVE_BIAS; int locked = 0; /* set up my own style of waitqueue */ raw_spin_lock_irq(&sem->wait_lock); /* Try to reverse the lock attempt but if the count has changed * so that reversing fails, check if the lock is now owned, * and early-out if so */ do { if (ldsem_cmpxchg(&count, count + adjust, sem)) break; if ((count & LDSEM_ACTIVE_MASK) == LDSEM_ACTIVE_BIAS) { raw_spin_unlock_irq(&sem->wait_lock); return sem; } } while (1); list_add_tail(&waiter.list, &sem->write_wait); waiter.task = tsk; set_task_state(tsk, TASK_UNINTERRUPTIBLE); for (;;) { if (!timeout) break; raw_spin_unlock_irq(&sem->wait_lock); timeout = schedule_timeout(timeout); raw_spin_lock_irq(&sem->wait_lock); set_task_state(tsk, TASK_UNINTERRUPTIBLE); if ((locked = writer_trylock(sem))) break; } if (!locked) ldsem_atomic_update(-LDSEM_WAIT_BIAS, sem); list_del(&waiter.list); raw_spin_unlock_irq(&sem->wait_lock); __set_task_state(tsk, TASK_RUNNING); /* lock wait may have timed out */ if (!locked) return NULL; return sem; } static inline int __ldsem_down_read_nested(struct ld_semaphore *sem, int subclass, long timeout) { long count; lockdep_acquire_read(sem, subclass, 0, _RET_IP_); count = ldsem_atomic_update(LDSEM_READ_BIAS, sem); if (count <= 0) { lock_stat(sem, contended); if (!down_read_failed(sem, count, timeout)) { lockdep_release(sem, 1, _RET_IP_); return 0; } } lock_stat(sem, acquired); return 1; } static inline int __ldsem_down_write_nested(struct ld_semaphore *sem, int subclass, long timeout) { long count; lockdep_acquire(sem, subclass, 0, _RET_IP_); count = ldsem_atomic_update(LDSEM_WRITE_BIAS, sem); if ((count & LDSEM_ACTIVE_MASK) != LDSEM_ACTIVE_BIAS) { lock_stat(sem, contended); if (!down_write_failed(sem, count, timeout)) { lockdep_release(sem, 1, _RET_IP_); return 0; } } lock_stat(sem, acquired); return 1; } /* * lock for reading -- returns 1 if successful, 0 if timed out */ int __sched ldsem_down_read(struct ld_semaphore *sem, long timeout) { might_sleep(); return __ldsem_down_read_nested(sem, 0, timeout); } /* * trylock for reading -- returns 1 if successful, 0 if contention */ int ldsem_down_read_trylock(struct ld_semaphore *sem) { long count = sem->count; while (count >= 0) { if (ldsem_cmpxchg(&count, count + LDSEM_READ_BIAS, sem)) { lockdep_acquire_read(sem, 0, 1, _RET_IP_); lock_stat(sem, acquired); return 1; } } return 0; } /* * lock for writing -- returns 1 if successful, 0 if timed out */ int __sched ldsem_down_write(struct ld_semaphore *sem, long 
timeout) { might_sleep(); return __ldsem_down_write_nested(sem, 0, timeout); } /* * trylock for writing -- returns 1 if successful, 0 if contention */ int ldsem_down_write_trylock(struct ld_semaphore *sem) { long count = sem->count; while ((count & LDSEM_ACTIVE_MASK) == 0) { if (ldsem_cmpxchg(&count, count + LDSEM_WRITE_BIAS, sem)) { lockdep_acquire(sem, 0, 1, _RET_IP_); lock_stat(sem, acquired); return 1; } } return 0; } /* * release a read lock */ void ldsem_up_read(struct ld_semaphore *sem) { long count; lockdep_release(sem, 1, _RET_IP_); count = ldsem_atomic_update(-LDSEM_READ_BIAS, sem); if (count < 0 && (count & LDSEM_ACTIVE_MASK) == 0) ldsem_wake(sem); } /* * release a write lock */ void ldsem_up_write(struct ld_semaphore *sem) { long count; lockdep_release(sem, 1, _RET_IP_); count = ldsem_atomic_update(-LDSEM_WRITE_BIAS, sem); if (count < 0) ldsem_wake(sem); } #ifdef CONFIG_DEBUG_LOCK_ALLOC int ldsem_down_read_nested(struct ld_semaphore *sem, int subclass, long timeout) { might_sleep(); return __ldsem_down_read_nested(sem, subclass, timeout); } int ldsem_down_write_nested(struct ld_semaphore *sem, int subclass, long timeout) { might_sleep(); return __ldsem_down_write_nested(sem, subclass, timeout); } #endif
./CrossVul/dataset_final_sorted/CWE-362/c/good_1625_0
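Editor's note: as a reading aid only (not part of the file above), the sketch below shows the calling pattern this semaphore is built for: a read-lock attempt with a timeout in jiffies, where a return of 0 means the wait timed out rather than a hard error. The function name here is made up for illustration; the real callers live in the tty line-discipline code, which embeds a struct ld_semaphore in struct tty_struct as ldisc_sem.

#include <linux/tty.h>

/* Illustrative only: take and release the ldisc lock for reading. */
static int example_ldisc_ref(struct tty_struct *tty)
{
	/* Wait up to 5 seconds for a read lock on the line discipline. */
	if (!ldsem_down_read(&tty->ldisc_sem, 5 * HZ))
		return -EBUSY;	/* timed out: a writer holds or is waiting */

	/* ... safe to dereference the current line discipline here ... */

	ldsem_up_read(&tty->ldisc_sem);
	return 0;
}

The write side (ldsem_down_write()/ldsem_up_write()) follows the same pattern; per the policy comment at the top of the file, a waiting writer is woken in preference to queued readers, and downgrading a write lock to a read lock is not supported.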
crossvul-cpp_data_bad_5829_0
/* * linux/ipc/shm.c * Copyright (C) 1992, 1993 Krishna Balasubramanian * Many improvements/fixes by Bruno Haible. * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994. * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli. * * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com> * BIGMEM support, Andrea Arcangeli <andrea@suse.de> * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr> * HIGHMEM support, Ingo Molnar <mingo@redhat.com> * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com> * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com> * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com> * * support for audit of ipc object properties and permission changes * Dustin Kirkland <dustin.kirkland@us.ibm.com> * * namespaces support * OpenVZ, SWsoft Inc. * Pavel Emelianov <xemul@openvz.org> * * Better ipc lock (kern_ipc_perm.lock) handling * Davidlohr Bueso <davidlohr.bueso@hp.com>, June 2013. */ #include <linux/slab.h> #include <linux/mm.h> #include <linux/hugetlb.h> #include <linux/shm.h> #include <linux/init.h> #include <linux/file.h> #include <linux/mman.h> #include <linux/shmem_fs.h> #include <linux/security.h> #include <linux/syscalls.h> #include <linux/audit.h> #include <linux/capability.h> #include <linux/ptrace.h> #include <linux/seq_file.h> #include <linux/rwsem.h> #include <linux/nsproxy.h> #include <linux/mount.h> #include <linux/ipc_namespace.h> #include <asm/uaccess.h> #include "util.h" struct shm_file_data { int id; struct ipc_namespace *ns; struct file *file; const struct vm_operations_struct *vm_ops; }; #define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data)) static const struct file_operations shm_file_operations; static const struct vm_operations_struct shm_vm_ops; #define shm_ids(ns) ((ns)->ids[IPC_SHM_IDS]) #define shm_unlock(shp) \ ipc_unlock(&(shp)->shm_perm) static int newseg(struct ipc_namespace *, struct ipc_params *); static void shm_open(struct vm_area_struct *vma); static void shm_close(struct vm_area_struct *vma); static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp); #ifdef CONFIG_PROC_FS static int sysvipc_shm_proc_show(struct seq_file *s, void *it); #endif void shm_init_ns(struct ipc_namespace *ns) { ns->shm_ctlmax = SHMMAX; ns->shm_ctlall = SHMALL; ns->shm_ctlmni = SHMMNI; ns->shm_rmid_forced = 0; ns->shm_tot = 0; ipc_init_ids(&shm_ids(ns)); } /* * Called with shm_ids.rwsem (writer) and the shp structure locked. * Only shm_ids.rwsem remains locked on exit. 
*/ static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp) { struct shmid_kernel *shp; shp = container_of(ipcp, struct shmid_kernel, shm_perm); if (shp->shm_nattch){ shp->shm_perm.mode |= SHM_DEST; /* Do not find it any more */ shp->shm_perm.key = IPC_PRIVATE; shm_unlock(shp); } else shm_destroy(ns, shp); } #ifdef CONFIG_IPC_NS void shm_exit_ns(struct ipc_namespace *ns) { free_ipcs(ns, &shm_ids(ns), do_shm_rmid); idr_destroy(&ns->ids[IPC_SHM_IDS].ipcs_idr); } #endif static int __init ipc_ns_init(void) { shm_init_ns(&init_ipc_ns); return 0; } pure_initcall(ipc_ns_init); void __init shm_init (void) { ipc_init_proc_interface("sysvipc/shm", #if BITS_PER_LONG <= 32 " key shmid perms size cpid lpid nattch uid gid cuid cgid atime dtime ctime rss swap\n", #else " key shmid perms size cpid lpid nattch uid gid cuid cgid atime dtime ctime rss swap\n", #endif IPC_SHM_IDS, sysvipc_shm_proc_show); } static inline struct shmid_kernel *shm_obtain_object(struct ipc_namespace *ns, int id) { struct kern_ipc_perm *ipcp = ipc_obtain_object(&shm_ids(ns), id); if (IS_ERR(ipcp)) return ERR_CAST(ipcp); return container_of(ipcp, struct shmid_kernel, shm_perm); } static inline struct shmid_kernel *shm_obtain_object_check(struct ipc_namespace *ns, int id) { struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&shm_ids(ns), id); if (IS_ERR(ipcp)) return ERR_CAST(ipcp); return container_of(ipcp, struct shmid_kernel, shm_perm); } /* * shm_lock_(check_) routines are called in the paths where the rwsem * is not necessarily held. */ static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id) { struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id); if (IS_ERR(ipcp)) return (struct shmid_kernel *)ipcp; return container_of(ipcp, struct shmid_kernel, shm_perm); } static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp) { rcu_read_lock(); ipc_lock_object(&ipcp->shm_perm); } static void shm_rcu_free(struct rcu_head *head) { struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu); struct shmid_kernel *shp = ipc_rcu_to_struct(p); security_shm_free(shp); ipc_rcu_free(head); } static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s) { ipc_rmid(&shm_ids(ns), &s->shm_perm); } /* This is called by fork, once for every shm attach. */ static void shm_open(struct vm_area_struct *vma) { struct file *file = vma->vm_file; struct shm_file_data *sfd = shm_file_data(file); struct shmid_kernel *shp; shp = shm_lock(sfd->ns, sfd->id); BUG_ON(IS_ERR(shp)); shp->shm_atim = get_seconds(); shp->shm_lprid = task_tgid_vnr(current); shp->shm_nattch++; shm_unlock(shp); } /* * shm_destroy - free the struct shmid_kernel * * @ns: namespace * @shp: struct to free * * It has to be called with shp and shm_ids.rwsem (writer) locked, * but returns with shp unlocked and freed. */ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp) { ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT; shm_rmid(ns, shp); shm_unlock(shp); if (!is_file_hugepages(shp->shm_file)) shmem_lock(shp->shm_file, 0, shp->mlock_user); else if (shp->mlock_user) user_shm_unlock(file_inode(shp->shm_file)->i_size, shp->mlock_user); fput (shp->shm_file); ipc_rcu_putref(shp, shm_rcu_free); } /* * shm_may_destroy - identifies whether shm segment should be destroyed now * * Returns true if and only if there are no active users of the segment and * one of the following is true: * * 1) shmctl(id, IPC_RMID, NULL) was called for this shp * * 2) sysctl kernel.shm_rmid_forced is set to 1. 
*/ static bool shm_may_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp) { return (shp->shm_nattch == 0) && (ns->shm_rmid_forced || (shp->shm_perm.mode & SHM_DEST)); } /* * remove the attach descriptor vma. * free memory for segment if it is marked destroyed. * The descriptor has already been removed from the current->mm->mmap list * and will later be kfree()d. */ static void shm_close(struct vm_area_struct *vma) { struct file * file = vma->vm_file; struct shm_file_data *sfd = shm_file_data(file); struct shmid_kernel *shp; struct ipc_namespace *ns = sfd->ns; down_write(&shm_ids(ns).rwsem); /* remove from the list of attaches of the shm segment */ shp = shm_lock(ns, sfd->id); BUG_ON(IS_ERR(shp)); shp->shm_lprid = task_tgid_vnr(current); shp->shm_dtim = get_seconds(); shp->shm_nattch--; if (shm_may_destroy(ns, shp)) shm_destroy(ns, shp); else shm_unlock(shp); up_write(&shm_ids(ns).rwsem); } /* Called with ns->shm_ids(ns).rwsem locked */ static int shm_try_destroy_current(int id, void *p, void *data) { struct ipc_namespace *ns = data; struct kern_ipc_perm *ipcp = p; struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm); if (shp->shm_creator != current) return 0; /* * Mark it as orphaned to destroy the segment when * kernel.shm_rmid_forced is changed. * It is noop if the following shm_may_destroy() returns true. */ shp->shm_creator = NULL; /* * Don't even try to destroy it. If shm_rmid_forced=0 and IPC_RMID * is not set, it shouldn't be deleted here. */ if (!ns->shm_rmid_forced) return 0; if (shm_may_destroy(ns, shp)) { shm_lock_by_ptr(shp); shm_destroy(ns, shp); } return 0; } /* Called with ns->shm_ids(ns).rwsem locked */ static int shm_try_destroy_orphaned(int id, void *p, void *data) { struct ipc_namespace *ns = data; struct kern_ipc_perm *ipcp = p; struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm); /* * We want to destroy segments without users and with already * exit'ed originating process. * * As shp->* are changed under rwsem, it's safe to skip shp locking. 
*/ if (shp->shm_creator != NULL) return 0; if (shm_may_destroy(ns, shp)) { shm_lock_by_ptr(shp); shm_destroy(ns, shp); } return 0; } void shm_destroy_orphaned(struct ipc_namespace *ns) { down_write(&shm_ids(ns).rwsem); if (shm_ids(ns).in_use) idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns); up_write(&shm_ids(ns).rwsem); } void exit_shm(struct task_struct *task) { struct ipc_namespace *ns = task->nsproxy->ipc_ns; if (shm_ids(ns).in_use == 0) return; /* Destroy all already created segments, but not mapped yet */ down_write(&shm_ids(ns).rwsem); if (shm_ids(ns).in_use) idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_current, ns); up_write(&shm_ids(ns).rwsem); } static int shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { struct file *file = vma->vm_file; struct shm_file_data *sfd = shm_file_data(file); return sfd->vm_ops->fault(vma, vmf); } #ifdef CONFIG_NUMA static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new) { struct file *file = vma->vm_file; struct shm_file_data *sfd = shm_file_data(file); int err = 0; if (sfd->vm_ops->set_policy) err = sfd->vm_ops->set_policy(vma, new); return err; } static struct mempolicy *shm_get_policy(struct vm_area_struct *vma, unsigned long addr) { struct file *file = vma->vm_file; struct shm_file_data *sfd = shm_file_data(file); struct mempolicy *pol = NULL; if (sfd->vm_ops->get_policy) pol = sfd->vm_ops->get_policy(vma, addr); else if (vma->vm_policy) pol = vma->vm_policy; return pol; } #endif static int shm_mmap(struct file * file, struct vm_area_struct * vma) { struct shm_file_data *sfd = shm_file_data(file); int ret; ret = sfd->file->f_op->mmap(sfd->file, vma); if (ret != 0) return ret; sfd->vm_ops = vma->vm_ops; #ifdef CONFIG_MMU BUG_ON(!sfd->vm_ops->fault); #endif vma->vm_ops = &shm_vm_ops; shm_open(vma); return ret; } static int shm_release(struct inode *ino, struct file *file) { struct shm_file_data *sfd = shm_file_data(file); put_ipc_ns(sfd->ns); shm_file_data(file) = NULL; kfree(sfd); return 0; } static int shm_fsync(struct file *file, loff_t start, loff_t end, int datasync) { struct shm_file_data *sfd = shm_file_data(file); if (!sfd->file->f_op->fsync) return -EINVAL; return sfd->file->f_op->fsync(sfd->file, start, end, datasync); } static long shm_fallocate(struct file *file, int mode, loff_t offset, loff_t len) { struct shm_file_data *sfd = shm_file_data(file); if (!sfd->file->f_op->fallocate) return -EOPNOTSUPP; return sfd->file->f_op->fallocate(file, mode, offset, len); } static unsigned long shm_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { struct shm_file_data *sfd = shm_file_data(file); return sfd->file->f_op->get_unmapped_area(sfd->file, addr, len, pgoff, flags); } static const struct file_operations shm_file_operations = { .mmap = shm_mmap, .fsync = shm_fsync, .release = shm_release, #ifndef CONFIG_MMU .get_unmapped_area = shm_get_unmapped_area, #endif .llseek = noop_llseek, .fallocate = shm_fallocate, }; static const struct file_operations shm_file_operations_huge = { .mmap = shm_mmap, .fsync = shm_fsync, .release = shm_release, .get_unmapped_area = shm_get_unmapped_area, .llseek = noop_llseek, .fallocate = shm_fallocate, }; int is_file_shm_hugepages(struct file *file) { return file->f_op == &shm_file_operations_huge; } static const struct vm_operations_struct shm_vm_ops = { .open = shm_open, /* callback for a new vm-area open */ .close = shm_close, /* callback for when the vm-area is released */ .fault = 
shm_fault, #if defined(CONFIG_NUMA) .set_policy = shm_set_policy, .get_policy = shm_get_policy, #endif }; /** * newseg - Create a new shared memory segment * @ns: namespace * @params: ptr to the structure that contains key, size and shmflg * * Called with shm_ids.rwsem held as a writer. */ static int newseg(struct ipc_namespace *ns, struct ipc_params *params) { key_t key = params->key; int shmflg = params->flg; size_t size = params->u.size; int error; struct shmid_kernel *shp; size_t numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; struct file * file; char name[13]; int id; vm_flags_t acctflag = 0; if (size < SHMMIN || size > ns->shm_ctlmax) return -EINVAL; if (ns->shm_tot + numpages > ns->shm_ctlall) return -ENOSPC; shp = ipc_rcu_alloc(sizeof(*shp)); if (!shp) return -ENOMEM; shp->shm_perm.key = key; shp->shm_perm.mode = (shmflg & S_IRWXUGO); shp->mlock_user = NULL; shp->shm_perm.security = NULL; error = security_shm_alloc(shp); if (error) { ipc_rcu_putref(shp, ipc_rcu_free); return error; } sprintf (name, "SYSV%08x", key); if (shmflg & SHM_HUGETLB) { struct hstate *hs; size_t hugesize; hs = hstate_sizelog((shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK); if (!hs) { error = -EINVAL; goto no_file; } hugesize = ALIGN(size, huge_page_size(hs)); /* hugetlb_file_setup applies strict accounting */ if (shmflg & SHM_NORESERVE) acctflag = VM_NORESERVE; file = hugetlb_file_setup(name, hugesize, acctflag, &shp->mlock_user, HUGETLB_SHMFS_INODE, (shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK); } else { /* * Do not allow no accounting for OVERCOMMIT_NEVER, even * if it's asked for. */ if ((shmflg & SHM_NORESERVE) && sysctl_overcommit_memory != OVERCOMMIT_NEVER) acctflag = VM_NORESERVE; file = shmem_file_setup(name, size, acctflag); } error = PTR_ERR(file); if (IS_ERR(file)) goto no_file; id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni); if (id < 0) { error = id; goto no_id; } shp->shm_cprid = task_tgid_vnr(current); shp->shm_lprid = 0; shp->shm_atim = shp->shm_dtim = 0; shp->shm_ctim = get_seconds(); shp->shm_segsz = size; shp->shm_nattch = 0; shp->shm_file = file; shp->shm_creator = current; /* * shmid gets reported as "inode#" in /proc/pid/maps. * proc-ps tools use this. Changing this will break them. */ file_inode(file)->i_ino = shp->shm_perm.id; ns->shm_tot += numpages; error = shp->shm_perm.id; ipc_unlock_object(&shp->shm_perm); rcu_read_unlock(); return error; no_id: if (is_file_hugepages(file) && shp->mlock_user) user_shm_unlock(size, shp->mlock_user); fput(file); no_file: ipc_rcu_putref(shp, shm_rcu_free); return error; } /* * Called with shm_ids.rwsem and ipcp locked. */ static inline int shm_security(struct kern_ipc_perm *ipcp, int shmflg) { struct shmid_kernel *shp; shp = container_of(ipcp, struct shmid_kernel, shm_perm); return security_shm_associate(shp, shmflg); } /* * Called with shm_ids.rwsem and ipcp locked. 
*/ static inline int shm_more_checks(struct kern_ipc_perm *ipcp, struct ipc_params *params) { struct shmid_kernel *shp; shp = container_of(ipcp, struct shmid_kernel, shm_perm); if (shp->shm_segsz < params->u.size) return -EINVAL; return 0; } SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg) { struct ipc_namespace *ns; struct ipc_ops shm_ops; struct ipc_params shm_params; ns = current->nsproxy->ipc_ns; shm_ops.getnew = newseg; shm_ops.associate = shm_security; shm_ops.more_checks = shm_more_checks; shm_params.key = key; shm_params.flg = shmflg; shm_params.u.size = size; return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params); } static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version) { switch(version) { case IPC_64: return copy_to_user(buf, in, sizeof(*in)); case IPC_OLD: { struct shmid_ds out; memset(&out, 0, sizeof(out)); ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm); out.shm_segsz = in->shm_segsz; out.shm_atime = in->shm_atime; out.shm_dtime = in->shm_dtime; out.shm_ctime = in->shm_ctime; out.shm_cpid = in->shm_cpid; out.shm_lpid = in->shm_lpid; out.shm_nattch = in->shm_nattch; return copy_to_user(buf, &out, sizeof(out)); } default: return -EINVAL; } } static inline unsigned long copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version) { switch(version) { case IPC_64: if (copy_from_user(out, buf, sizeof(*out))) return -EFAULT; return 0; case IPC_OLD: { struct shmid_ds tbuf_old; if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old))) return -EFAULT; out->shm_perm.uid = tbuf_old.shm_perm.uid; out->shm_perm.gid = tbuf_old.shm_perm.gid; out->shm_perm.mode = tbuf_old.shm_perm.mode; return 0; } default: return -EINVAL; } } static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version) { switch(version) { case IPC_64: return copy_to_user(buf, in, sizeof(*in)); case IPC_OLD: { struct shminfo out; if(in->shmmax > INT_MAX) out.shmmax = INT_MAX; else out.shmmax = (int)in->shmmax; out.shmmin = in->shmmin; out.shmmni = in->shmmni; out.shmseg = in->shmseg; out.shmall = in->shmall; return copy_to_user(buf, &out, sizeof(out)); } default: return -EINVAL; } } /* * Calculate and add used RSS and swap pages of a shm. * Called with shm_ids.rwsem held as a reader */ static void shm_add_rss_swap(struct shmid_kernel *shp, unsigned long *rss_add, unsigned long *swp_add) { struct inode *inode; inode = file_inode(shp->shm_file); if (is_file_hugepages(shp->shm_file)) { struct address_space *mapping = inode->i_mapping; struct hstate *h = hstate_file(shp->shm_file); *rss_add += pages_per_huge_page(h) * mapping->nrpages; } else { #ifdef CONFIG_SHMEM struct shmem_inode_info *info = SHMEM_I(inode); spin_lock(&info->lock); *rss_add += inode->i_mapping->nrpages; *swp_add += info->swapped; spin_unlock(&info->lock); #else *rss_add += inode->i_mapping->nrpages; #endif } } /* * Called with shm_ids.rwsem held as a reader */ static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss, unsigned long *swp) { int next_id; int total, in_use; *rss = 0; *swp = 0; in_use = shm_ids(ns).in_use; for (total = 0, next_id = 0; total < in_use; next_id++) { struct kern_ipc_perm *ipc; struct shmid_kernel *shp; ipc = idr_find(&shm_ids(ns).ipcs_idr, next_id); if (ipc == NULL) continue; shp = container_of(ipc, struct shmid_kernel, shm_perm); shm_add_rss_swap(shp, rss, swp); total++; } } /* * This function handles some shmctl commands which require the rwsem * to be held in write mode. 
* NOTE: no locks must be held, the rwsem is taken inside this function. */ static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd, struct shmid_ds __user *buf, int version) { struct kern_ipc_perm *ipcp; struct shmid64_ds shmid64; struct shmid_kernel *shp; int err; if (cmd == IPC_SET) { if (copy_shmid_from_user(&shmid64, buf, version)) return -EFAULT; } down_write(&shm_ids(ns).rwsem); rcu_read_lock(); ipcp = ipcctl_pre_down_nolock(ns, &shm_ids(ns), shmid, cmd, &shmid64.shm_perm, 0); if (IS_ERR(ipcp)) { err = PTR_ERR(ipcp); goto out_unlock1; } shp = container_of(ipcp, struct shmid_kernel, shm_perm); err = security_shm_shmctl(shp, cmd); if (err) goto out_unlock1; switch (cmd) { case IPC_RMID: ipc_lock_object(&shp->shm_perm); /* do_shm_rmid unlocks the ipc object and rcu */ do_shm_rmid(ns, ipcp); goto out_up; case IPC_SET: ipc_lock_object(&shp->shm_perm); err = ipc_update_perm(&shmid64.shm_perm, ipcp); if (err) goto out_unlock0; shp->shm_ctim = get_seconds(); break; default: err = -EINVAL; goto out_unlock1; } out_unlock0: ipc_unlock_object(&shp->shm_perm); out_unlock1: rcu_read_unlock(); out_up: up_write(&shm_ids(ns).rwsem); return err; } static int shmctl_nolock(struct ipc_namespace *ns, int shmid, int cmd, int version, void __user *buf) { int err; struct shmid_kernel *shp; /* preliminary security checks for *_INFO */ if (cmd == IPC_INFO || cmd == SHM_INFO) { err = security_shm_shmctl(NULL, cmd); if (err) return err; } switch (cmd) { case IPC_INFO: { struct shminfo64 shminfo; memset(&shminfo, 0, sizeof(shminfo)); shminfo.shmmni = shminfo.shmseg = ns->shm_ctlmni; shminfo.shmmax = ns->shm_ctlmax; shminfo.shmall = ns->shm_ctlall; shminfo.shmmin = SHMMIN; if(copy_shminfo_to_user (buf, &shminfo, version)) return -EFAULT; down_read(&shm_ids(ns).rwsem); err = ipc_get_maxid(&shm_ids(ns)); up_read(&shm_ids(ns).rwsem); if(err<0) err = 0; goto out; } case SHM_INFO: { struct shm_info shm_info; memset(&shm_info, 0, sizeof(shm_info)); down_read(&shm_ids(ns).rwsem); shm_info.used_ids = shm_ids(ns).in_use; shm_get_stat (ns, &shm_info.shm_rss, &shm_info.shm_swp); shm_info.shm_tot = ns->shm_tot; shm_info.swap_attempts = 0; shm_info.swap_successes = 0; err = ipc_get_maxid(&shm_ids(ns)); up_read(&shm_ids(ns).rwsem); if (copy_to_user(buf, &shm_info, sizeof(shm_info))) { err = -EFAULT; goto out; } err = err < 0 ? 
0 : err; goto out; } case SHM_STAT: case IPC_STAT: { struct shmid64_ds tbuf; int result; rcu_read_lock(); if (cmd == SHM_STAT) { shp = shm_obtain_object(ns, shmid); if (IS_ERR(shp)) { err = PTR_ERR(shp); goto out_unlock; } result = shp->shm_perm.id; } else { shp = shm_obtain_object_check(ns, shmid); if (IS_ERR(shp)) { err = PTR_ERR(shp); goto out_unlock; } result = 0; } err = -EACCES; if (ipcperms(ns, &shp->shm_perm, S_IRUGO)) goto out_unlock; err = security_shm_shmctl(shp, cmd); if (err) goto out_unlock; memset(&tbuf, 0, sizeof(tbuf)); kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm); tbuf.shm_segsz = shp->shm_segsz; tbuf.shm_atime = shp->shm_atim; tbuf.shm_dtime = shp->shm_dtim; tbuf.shm_ctime = shp->shm_ctim; tbuf.shm_cpid = shp->shm_cprid; tbuf.shm_lpid = shp->shm_lprid; tbuf.shm_nattch = shp->shm_nattch; rcu_read_unlock(); if (copy_shmid_to_user(buf, &tbuf, version)) err = -EFAULT; else err = result; goto out; } default: return -EINVAL; } out_unlock: rcu_read_unlock(); out: return err; } SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf) { struct shmid_kernel *shp; int err, version; struct ipc_namespace *ns; if (cmd < 0 || shmid < 0) return -EINVAL; version = ipc_parse_version(&cmd); ns = current->nsproxy->ipc_ns; switch (cmd) { case IPC_INFO: case SHM_INFO: case SHM_STAT: case IPC_STAT: return shmctl_nolock(ns, shmid, cmd, version, buf); case IPC_RMID: case IPC_SET: return shmctl_down(ns, shmid, cmd, buf, version); case SHM_LOCK: case SHM_UNLOCK: { struct file *shm_file; rcu_read_lock(); shp = shm_obtain_object_check(ns, shmid); if (IS_ERR(shp)) { err = PTR_ERR(shp); goto out_unlock1; } audit_ipc_obj(&(shp->shm_perm)); err = security_shm_shmctl(shp, cmd); if (err) goto out_unlock1; ipc_lock_object(&shp->shm_perm); if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) { kuid_t euid = current_euid(); err = -EPERM; if (!uid_eq(euid, shp->shm_perm.uid) && !uid_eq(euid, shp->shm_perm.cuid)) goto out_unlock0; if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK)) goto out_unlock0; } shm_file = shp->shm_file; if (is_file_hugepages(shm_file)) goto out_unlock0; if (cmd == SHM_LOCK) { struct user_struct *user = current_user(); err = shmem_lock(shm_file, 1, user); if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) { shp->shm_perm.mode |= SHM_LOCKED; shp->mlock_user = user; } goto out_unlock0; } /* SHM_UNLOCK */ if (!(shp->shm_perm.mode & SHM_LOCKED)) goto out_unlock0; shmem_lock(shm_file, 0, shp->mlock_user); shp->shm_perm.mode &= ~SHM_LOCKED; shp->mlock_user = NULL; get_file(shm_file); ipc_unlock_object(&shp->shm_perm); rcu_read_unlock(); shmem_unlock_mapping(shm_file->f_mapping); fput(shm_file); return err; } default: return -EINVAL; } out_unlock0: ipc_unlock_object(&shp->shm_perm); out_unlock1: rcu_read_unlock(); return err; } /* * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists. * * NOTE! Despite the name, this is NOT a direct system call entrypoint. The * "raddr" thing points to kernel space, and there has to be a wrapper around * this. 
*/ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr, unsigned long shmlba) { struct shmid_kernel *shp; unsigned long addr; unsigned long size; struct file * file; int err; unsigned long flags; unsigned long prot; int acc_mode; struct ipc_namespace *ns; struct shm_file_data *sfd; struct path path; fmode_t f_mode; unsigned long populate = 0; err = -EINVAL; if (shmid < 0) goto out; else if ((addr = (ulong)shmaddr)) { if (addr & (shmlba - 1)) { if (shmflg & SHM_RND) addr &= ~(shmlba - 1); /* round down */ else #ifndef __ARCH_FORCE_SHMLBA if (addr & ~PAGE_MASK) #endif goto out; } flags = MAP_SHARED | MAP_FIXED; } else { if ((shmflg & SHM_REMAP)) goto out; flags = MAP_SHARED; } if (shmflg & SHM_RDONLY) { prot = PROT_READ; acc_mode = S_IRUGO; f_mode = FMODE_READ; } else { prot = PROT_READ | PROT_WRITE; acc_mode = S_IRUGO | S_IWUGO; f_mode = FMODE_READ | FMODE_WRITE; } if (shmflg & SHM_EXEC) { prot |= PROT_EXEC; acc_mode |= S_IXUGO; } /* * We cannot rely on the fs check since SYSV IPC does have an * additional creator id... */ ns = current->nsproxy->ipc_ns; rcu_read_lock(); shp = shm_obtain_object_check(ns, shmid); if (IS_ERR(shp)) { err = PTR_ERR(shp); goto out_unlock; } err = -EACCES; if (ipcperms(ns, &shp->shm_perm, acc_mode)) goto out_unlock; err = security_shm_shmat(shp, shmaddr, shmflg); if (err) goto out_unlock; ipc_lock_object(&shp->shm_perm); path = shp->shm_file->f_path; path_get(&path); shp->shm_nattch++; size = i_size_read(path.dentry->d_inode); ipc_unlock_object(&shp->shm_perm); rcu_read_unlock(); err = -ENOMEM; sfd = kzalloc(sizeof(*sfd), GFP_KERNEL); if (!sfd) { path_put(&path); goto out_nattch; } file = alloc_file(&path, f_mode, is_file_hugepages(shp->shm_file) ? &shm_file_operations_huge : &shm_file_operations); err = PTR_ERR(file); if (IS_ERR(file)) { kfree(sfd); path_put(&path); goto out_nattch; } file->private_data = sfd; file->f_mapping = shp->shm_file->f_mapping; sfd->id = shp->shm_perm.id; sfd->ns = get_ipc_ns(ns); sfd->file = shp->shm_file; sfd->vm_ops = NULL; err = security_mmap_file(file, prot, flags); if (err) goto out_fput; down_write(&current->mm->mmap_sem); if (addr && !(shmflg & SHM_REMAP)) { err = -EINVAL; if (find_vma_intersection(current->mm, addr, addr + size)) goto invalid; /* * If shm segment goes below stack, make sure there is some * space left for the stack to grow (at least 4 pages). */ if (addr < current->mm->start_stack && addr > current->mm->start_stack - size - PAGE_SIZE * 5) goto invalid; } addr = do_mmap_pgoff(file, addr, size, prot, flags, 0, &populate); *raddr = addr; err = 0; if (IS_ERR_VALUE(addr)) err = (long)addr; invalid: up_write(&current->mm->mmap_sem); if (populate) mm_populate(addr, populate); out_fput: fput(file); out_nattch: down_write(&shm_ids(ns).rwsem); shp = shm_lock(ns, shmid); BUG_ON(IS_ERR(shp)); shp->shm_nattch--; if (shm_may_destroy(ns, shp)) shm_destroy(ns, shp); else shm_unlock(shp); up_write(&shm_ids(ns).rwsem); return err; out_unlock: rcu_read_unlock(); out: return err; } SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg) { unsigned long ret; long err; err = do_shmat(shmid, shmaddr, shmflg, &ret, SHMLBA); if (err) return err; force_successful_syscall_return(); return (long)ret; } /* * detach and kill segment if marked destroyed. * The work is done in shm_close. 
*/ SYSCALL_DEFINE1(shmdt, char __user *, shmaddr) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma; unsigned long addr = (unsigned long)shmaddr; int retval = -EINVAL; #ifdef CONFIG_MMU loff_t size = 0; struct vm_area_struct *next; #endif if (addr & ~PAGE_MASK) return retval; down_write(&mm->mmap_sem); /* * This function tries to be smart and unmap shm segments that * were modified by partial mlock or munmap calls: * - It first determines the size of the shm segment that should be * unmapped: It searches for a vma that is backed by shm and that * started at address shmaddr. It records it's size and then unmaps * it. * - Then it unmaps all shm vmas that started at shmaddr and that * are within the initially determined size. * Errors from do_munmap are ignored: the function only fails if * it's called with invalid parameters or if it's called to unmap * a part of a vma. Both calls in this function are for full vmas, * the parameters are directly copied from the vma itself and always * valid - therefore do_munmap cannot fail. (famous last words?) */ /* * If it had been mremap()'d, the starting address would not * match the usual checks anyway. So assume all vma's are * above the starting address given. */ vma = find_vma(mm, addr); #ifdef CONFIG_MMU while (vma) { next = vma->vm_next; /* * Check if the starting address would match, i.e. it's * a fragment created by mprotect() and/or munmap(), or it * otherwise it starts at this address with no hassles. */ if ((vma->vm_ops == &shm_vm_ops) && (vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) { size = file_inode(vma->vm_file)->i_size; do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start); /* * We discovered the size of the shm segment, so * break out of here and fall through to the next * loop that uses the size information to stop * searching for matching vma's. */ retval = 0; vma = next; break; } vma = next; } /* * We need look no further than the maximum address a fragment * could possibly have landed at. Also cast things to loff_t to * prevent overflows and make comparisons vs. equal-width types. 
*/ size = PAGE_ALIGN(size); while (vma && (loff_t)(vma->vm_end - addr) <= size) { next = vma->vm_next; /* finding a matching vma now does not alter retval */ if ((vma->vm_ops == &shm_vm_ops) && (vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start); vma = next; } #else /* CONFIG_MMU */ /* under NOMMU conditions, the exact address to be destroyed must be * given */ if (vma && vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) { do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start); retval = 0; } #endif up_write(&mm->mmap_sem); return retval; } #ifdef CONFIG_PROC_FS static int sysvipc_shm_proc_show(struct seq_file *s, void *it) { struct user_namespace *user_ns = seq_user_ns(s); struct shmid_kernel *shp = it; unsigned long rss = 0, swp = 0; shm_add_rss_swap(shp, &rss, &swp); #if BITS_PER_LONG <= 32 #define SIZE_SPEC "%10lu" #else #define SIZE_SPEC "%21lu" #endif return seq_printf(s, "%10d %10d %4o " SIZE_SPEC " %5u %5u " "%5lu %5u %5u %5u %5u %10lu %10lu %10lu " SIZE_SPEC " " SIZE_SPEC "\n", shp->shm_perm.key, shp->shm_perm.id, shp->shm_perm.mode, shp->shm_segsz, shp->shm_cprid, shp->shm_lprid, shp->shm_nattch, from_kuid_munged(user_ns, shp->shm_perm.uid), from_kgid_munged(user_ns, shp->shm_perm.gid), from_kuid_munged(user_ns, shp->shm_perm.cuid), from_kgid_munged(user_ns, shp->shm_perm.cgid), shp->shm_atim, shp->shm_dtim, shp->shm_ctim, rss * PAGE_SIZE, swp * PAGE_SIZE); } #endif
./CrossVul/dataset_final_sorted/CWE-362/c/bad_5829_0
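Editor's note: for orientation (this is not part of the kernel source above), the user-space lifecycle that newseg(), do_shmat(), the shmdt() handler and shmctl(IPC_RMID) implement looks like the sketch below. The dataset path places this revision in the CWE-362 (race condition) bucket; the sketch shows only the intended segment lifecycle, not the race itself.

#include <stdio.h>
#include <string.h>
#include <sys/ipc.h>
#include <sys/shm.h>

int main(void)
{
	/* newseg(): create a 1 MiB private segment. */
	int id = shmget(IPC_PRIVATE, 1 << 20, IPC_CREAT | 0600);
	if (id < 0) { perror("shmget"); return 1; }

	/* do_shmat(): map it into this process; shm_nattch becomes 1. */
	char *p = shmat(id, NULL, 0);
	if (p == (void *)-1) { perror("shmat"); return 1; }
	strcpy(p, "hello");

	/* shmctl(IPC_RMID): marks SHM_DEST; the segment stays alive while attached. */
	if (shmctl(id, IPC_RMID, NULL) < 0) { perror("shmctl"); return 1; }

	/* shmdt(): the last detach drops shm_nattch to 0 and shm_destroy() frees it. */
	if (shmdt(p) < 0) { perror("shmdt"); return 1; }
	return 0;
}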
crossvul-cpp_data_good_4726_0
/* * Kernel-based Virtual Machine driver for Linux * * derived from drivers/kvm/kvm_main.c * * Copyright (C) 2006 Qumranet, Inc. * Copyright (C) 2008 Qumranet, Inc. * Copyright IBM Corporation, 2008 * Copyright 2010 Red Hat, Inc. and/or its affiliates. * * Authors: * Avi Kivity <avi@qumranet.com> * Yaniv Kamay <yaniv@qumranet.com> * Amit Shah <amit.shah@qumranet.com> * Ben-Ami Yassour <benami@il.ibm.com> * * This work is licensed under the terms of the GNU GPL, version 2. See * the COPYING file in the top-level directory. * */ #include <linux/kvm_host.h> #include "irq.h" #include "mmu.h" #include "i8254.h" #include "tss.h" #include "kvm_cache_regs.h" #include "x86.h" #include <linux/clocksource.h> #include <linux/interrupt.h> #include <linux/kvm.h> #include <linux/fs.h> #include <linux/vmalloc.h> #include <linux/module.h> #include <linux/mman.h> #include <linux/highmem.h> #include <linux/iommu.h> #include <linux/intel-iommu.h> #include <linux/cpufreq.h> #include <linux/user-return-notifier.h> #include <linux/srcu.h> #include <linux/slab.h> #include <linux/perf_event.h> #include <linux/uaccess.h> #include <linux/hash.h> #include <trace/events/kvm.h> #define CREATE_TRACE_POINTS #include "trace.h" #include <asm/debugreg.h> #include <asm/msr.h> #include <asm/desc.h> #include <asm/mtrr.h> #include <asm/mce.h> #include <asm/i387.h> #include <asm/xcr.h> #include <asm/pvclock.h> #include <asm/div64.h> #define MAX_IO_MSRS 256 #define CR0_RESERVED_BITS \ (~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \ | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \ | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG)) #define CR4_RESERVED_BITS \ (~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\ | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE \ | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR \ | X86_CR4_OSXSAVE \ | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE)) #define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR) #define KVM_MAX_MCE_BANKS 32 #define KVM_MCE_CAP_SUPPORTED (MCG_CTL_P | MCG_SER_P) /* EFER defaults: * - enable syscall per default because its emulated by KVM * - enable LME and LMA per default on 64 bit KVM */ #ifdef CONFIG_X86_64 static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffafeULL; #else static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffffeULL; #endif #define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU static void update_cr8_intercept(struct kvm_vcpu *vcpu); static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid, struct kvm_cpuid_entry2 __user *entries); struct kvm_x86_ops *kvm_x86_ops; EXPORT_SYMBOL_GPL(kvm_x86_ops); int ignore_msrs = 0; module_param_named(ignore_msrs, ignore_msrs, bool, S_IRUGO | S_IWUSR); #define KVM_NR_SHARED_MSRS 16 struct kvm_shared_msrs_global { int nr; u32 msrs[KVM_NR_SHARED_MSRS]; }; struct kvm_shared_msrs { struct user_return_notifier urn; bool registered; struct kvm_shared_msr_values { u64 host; u64 curr; } values[KVM_NR_SHARED_MSRS]; }; static struct kvm_shared_msrs_global __read_mostly shared_msrs_global; static DEFINE_PER_CPU(struct kvm_shared_msrs, shared_msrs); struct kvm_stats_debugfs_item debugfs_entries[] = { { "pf_fixed", VCPU_STAT(pf_fixed) }, { "pf_guest", VCPU_STAT(pf_guest) }, { "tlb_flush", VCPU_STAT(tlb_flush) }, { "invlpg", VCPU_STAT(invlpg) }, { "exits", VCPU_STAT(exits) }, { "io_exits", VCPU_STAT(io_exits) }, { "mmio_exits", VCPU_STAT(mmio_exits) }, { "signal_exits", VCPU_STAT(signal_exits) }, { "irq_window", 
VCPU_STAT(irq_window_exits) }, { "nmi_window", VCPU_STAT(nmi_window_exits) }, { "halt_exits", VCPU_STAT(halt_exits) }, { "halt_wakeup", VCPU_STAT(halt_wakeup) }, { "hypercalls", VCPU_STAT(hypercalls) }, { "request_irq", VCPU_STAT(request_irq_exits) }, { "irq_exits", VCPU_STAT(irq_exits) }, { "host_state_reload", VCPU_STAT(host_state_reload) }, { "efer_reload", VCPU_STAT(efer_reload) }, { "fpu_reload", VCPU_STAT(fpu_reload) }, { "insn_emulation", VCPU_STAT(insn_emulation) }, { "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) }, { "irq_injections", VCPU_STAT(irq_injections) }, { "nmi_injections", VCPU_STAT(nmi_injections) }, { "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) }, { "mmu_pte_write", VM_STAT(mmu_pte_write) }, { "mmu_pte_updated", VM_STAT(mmu_pte_updated) }, { "mmu_pde_zapped", VM_STAT(mmu_pde_zapped) }, { "mmu_flooded", VM_STAT(mmu_flooded) }, { "mmu_recycled", VM_STAT(mmu_recycled) }, { "mmu_cache_miss", VM_STAT(mmu_cache_miss) }, { "mmu_unsync", VM_STAT(mmu_unsync) }, { "remote_tlb_flush", VM_STAT(remote_tlb_flush) }, { "largepages", VM_STAT(lpages) }, { NULL } }; u64 __read_mostly host_xcr0; static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu) { int i; for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU); i++) vcpu->arch.apf.gfns[i] = ~0; } static void kvm_on_user_return(struct user_return_notifier *urn) { unsigned slot; struct kvm_shared_msrs *locals = container_of(urn, struct kvm_shared_msrs, urn); struct kvm_shared_msr_values *values; for (slot = 0; slot < shared_msrs_global.nr; ++slot) { values = &locals->values[slot]; if (values->host != values->curr) { wrmsrl(shared_msrs_global.msrs[slot], values->host); values->curr = values->host; } } locals->registered = false; user_return_notifier_unregister(urn); } static void shared_msr_update(unsigned slot, u32 msr) { struct kvm_shared_msrs *smsr; u64 value; smsr = &__get_cpu_var(shared_msrs); /* only read, and nobody should modify it at this time, * so don't need lock */ if (slot >= shared_msrs_global.nr) { printk(KERN_ERR "kvm: invalid MSR slot!"); return; } rdmsrl_safe(msr, &value); smsr->values[slot].host = value; smsr->values[slot].curr = value; } void kvm_define_shared_msr(unsigned slot, u32 msr) { if (slot >= shared_msrs_global.nr) shared_msrs_global.nr = slot + 1; shared_msrs_global.msrs[slot] = msr; /* we need ensured the shared_msr_global have been updated */ smp_wmb(); } EXPORT_SYMBOL_GPL(kvm_define_shared_msr); static void kvm_shared_msr_cpu_online(void) { unsigned i; for (i = 0; i < shared_msrs_global.nr; ++i) shared_msr_update(i, shared_msrs_global.msrs[i]); } void kvm_set_shared_msr(unsigned slot, u64 value, u64 mask) { struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs); if (((value ^ smsr->values[slot].curr) & mask) == 0) return; smsr->values[slot].curr = value; wrmsrl(shared_msrs_global.msrs[slot], value); if (!smsr->registered) { smsr->urn.on_user_return = kvm_on_user_return; user_return_notifier_register(&smsr->urn); smsr->registered = true; } } EXPORT_SYMBOL_GPL(kvm_set_shared_msr); static void drop_user_return_notifiers(void *ignore) { struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs); if (smsr->registered) kvm_on_user_return(&smsr->urn); } u64 kvm_get_apic_base(struct kvm_vcpu *vcpu) { if (irqchip_in_kernel(vcpu->kvm)) return vcpu->arch.apic_base; else return vcpu->arch.apic_base; } EXPORT_SYMBOL_GPL(kvm_get_apic_base); void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data) { /* TODO: reserve bits check */ if (irqchip_in_kernel(vcpu->kvm)) kvm_lapic_set_base(vcpu, data); 
else vcpu->arch.apic_base = data; } EXPORT_SYMBOL_GPL(kvm_set_apic_base); #define EXCPT_BENIGN 0 #define EXCPT_CONTRIBUTORY 1 #define EXCPT_PF 2 static int exception_class(int vector) { switch (vector) { case PF_VECTOR: return EXCPT_PF; case DE_VECTOR: case TS_VECTOR: case NP_VECTOR: case SS_VECTOR: case GP_VECTOR: return EXCPT_CONTRIBUTORY; default: break; } return EXCPT_BENIGN; } static void kvm_multiple_exception(struct kvm_vcpu *vcpu, unsigned nr, bool has_error, u32 error_code, bool reinject) { u32 prev_nr; int class1, class2; kvm_make_request(KVM_REQ_EVENT, vcpu); if (!vcpu->arch.exception.pending) { queue: vcpu->arch.exception.pending = true; vcpu->arch.exception.has_error_code = has_error; vcpu->arch.exception.nr = nr; vcpu->arch.exception.error_code = error_code; vcpu->arch.exception.reinject = reinject; return; } /* to check exception */ prev_nr = vcpu->arch.exception.nr; if (prev_nr == DF_VECTOR) { /* triple fault -> shutdown */ kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); return; } class1 = exception_class(prev_nr); class2 = exception_class(nr); if ((class1 == EXCPT_CONTRIBUTORY && class2 == EXCPT_CONTRIBUTORY) || (class1 == EXCPT_PF && class2 != EXCPT_BENIGN)) { /* generate double fault per SDM Table 5-5 */ vcpu->arch.exception.pending = true; vcpu->arch.exception.has_error_code = true; vcpu->arch.exception.nr = DF_VECTOR; vcpu->arch.exception.error_code = 0; } else /* replace previous exception with a new one in a hope that instruction re-execution will regenerate lost exception */ goto queue; } void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr) { kvm_multiple_exception(vcpu, nr, false, 0, false); } EXPORT_SYMBOL_GPL(kvm_queue_exception); void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr) { kvm_multiple_exception(vcpu, nr, false, 0, true); } EXPORT_SYMBOL_GPL(kvm_requeue_exception); void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault) { ++vcpu->stat.pf_guest; vcpu->arch.cr2 = fault->address; kvm_queue_exception_e(vcpu, PF_VECTOR, fault->error_code); } void kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault) { if (mmu_is_nested(vcpu) && !fault->nested_page_fault) vcpu->arch.nested_mmu.inject_page_fault(vcpu, fault); else vcpu->arch.mmu.inject_page_fault(vcpu, fault); } void kvm_inject_nmi(struct kvm_vcpu *vcpu) { kvm_make_request(KVM_REQ_EVENT, vcpu); vcpu->arch.nmi_pending = 1; } EXPORT_SYMBOL_GPL(kvm_inject_nmi); void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code) { kvm_multiple_exception(vcpu, nr, true, error_code, false); } EXPORT_SYMBOL_GPL(kvm_queue_exception_e); void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code) { kvm_multiple_exception(vcpu, nr, true, error_code, true); } EXPORT_SYMBOL_GPL(kvm_requeue_exception_e); /* * Checks if cpl <= required_cpl; if true, return true. Otherwise queue * a #GP and return false. */ bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl) { if (kvm_x86_ops->get_cpl(vcpu) <= required_cpl) return true; kvm_queue_exception_e(vcpu, GP_VECTOR, 0); return false; } EXPORT_SYMBOL_GPL(kvm_require_cpl); /* * This function will be used to read from the physical memory of the currently * running guest. The difference to kvm_read_guest_page is that this function * can read from guest physical or from the guest's guest physical memory. 
*/ int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, gfn_t ngfn, void *data, int offset, int len, u32 access) { gfn_t real_gfn; gpa_t ngpa; ngpa = gfn_to_gpa(ngfn); real_gfn = mmu->translate_gpa(vcpu, ngpa, access); if (real_gfn == UNMAPPED_GVA) return -EFAULT; real_gfn = gpa_to_gfn(real_gfn); return kvm_read_guest_page(vcpu->kvm, real_gfn, data, offset, len); } EXPORT_SYMBOL_GPL(kvm_read_guest_page_mmu); int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset, int len, u32 access) { return kvm_read_guest_page_mmu(vcpu, vcpu->arch.walk_mmu, gfn, data, offset, len, access); } /* * Load the pae pdptrs. Return true is they are all valid. */ int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3) { gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT; unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2; int i; int ret; u64 pdpte[ARRAY_SIZE(mmu->pdptrs)]; ret = kvm_read_guest_page_mmu(vcpu, mmu, pdpt_gfn, pdpte, offset * sizeof(u64), sizeof(pdpte), PFERR_USER_MASK|PFERR_WRITE_MASK); if (ret < 0) { ret = 0; goto out; } for (i = 0; i < ARRAY_SIZE(pdpte); ++i) { if (is_present_gpte(pdpte[i]) && (pdpte[i] & vcpu->arch.mmu.rsvd_bits_mask[0][2])) { ret = 0; goto out; } } ret = 1; memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs)); __set_bit(VCPU_EXREG_PDPTR, (unsigned long *)&vcpu->arch.regs_avail); __set_bit(VCPU_EXREG_PDPTR, (unsigned long *)&vcpu->arch.regs_dirty); out: return ret; } EXPORT_SYMBOL_GPL(load_pdptrs); static bool pdptrs_changed(struct kvm_vcpu *vcpu) { u64 pdpte[ARRAY_SIZE(vcpu->arch.walk_mmu->pdptrs)]; bool changed = true; int offset; gfn_t gfn; int r; if (is_long_mode(vcpu) || !is_pae(vcpu)) return false; if (!test_bit(VCPU_EXREG_PDPTR, (unsigned long *)&vcpu->arch.regs_avail)) return true; gfn = (vcpu->arch.cr3 & ~31u) >> PAGE_SHIFT; offset = (vcpu->arch.cr3 & ~31u) & (PAGE_SIZE - 1); r = kvm_read_nested_guest_page(vcpu, gfn, pdpte, offset, sizeof(pdpte), PFERR_USER_MASK | PFERR_WRITE_MASK); if (r < 0) goto out; changed = memcmp(pdpte, vcpu->arch.walk_mmu->pdptrs, sizeof(pdpte)) != 0; out: return changed; } int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) { unsigned long old_cr0 = kvm_read_cr0(vcpu); unsigned long update_bits = X86_CR0_PG | X86_CR0_WP | X86_CR0_CD | X86_CR0_NW; cr0 |= X86_CR0_ET; #ifdef CONFIG_X86_64 if (cr0 & 0xffffffff00000000UL) return 1; #endif cr0 &= ~CR0_RESERVED_BITS; if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) return 1; if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) return 1; if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) { #ifdef CONFIG_X86_64 if ((vcpu->arch.efer & EFER_LME)) { int cs_db, cs_l; if (!is_pae(vcpu)) return 1; kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l); if (cs_l) return 1; } else #endif if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3)) return 1; } kvm_x86_ops->set_cr0(vcpu, cr0); if ((cr0 ^ old_cr0) & X86_CR0_PG) kvm_clear_async_pf_completion_queue(vcpu); if ((cr0 ^ old_cr0) & update_bits) kvm_mmu_reset_context(vcpu); return 0; } EXPORT_SYMBOL_GPL(kvm_set_cr0); void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw) { (void)kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0eul) | (msw & 0x0f)); } EXPORT_SYMBOL_GPL(kvm_lmsw); int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) { u64 xcr0; /* Only support XCR_XFEATURE_ENABLED_MASK(xcr0) now */ if (index != XCR_XFEATURE_ENABLED_MASK) return 1; xcr0 = xcr; if (kvm_x86_ops->get_cpl(vcpu) != 0) return 1; if (!(xcr0 & XSTATE_FP)) return 1; if ((xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE)) return 
1; if (xcr0 & ~host_xcr0) return 1; vcpu->arch.xcr0 = xcr0; vcpu->guest_xcr0_loaded = 0; return 0; } int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) { if (__kvm_set_xcr(vcpu, index, xcr)) { kvm_inject_gp(vcpu, 0); return 1; } return 0; } EXPORT_SYMBOL_GPL(kvm_set_xcr); static bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu) { struct kvm_cpuid_entry2 *best; best = kvm_find_cpuid_entry(vcpu, 1, 0); return best && (best->ecx & bit(X86_FEATURE_XSAVE)); } static void update_cpuid(struct kvm_vcpu *vcpu) { struct kvm_cpuid_entry2 *best; best = kvm_find_cpuid_entry(vcpu, 1, 0); if (!best) return; /* Update OSXSAVE bit */ if (cpu_has_xsave && best->function == 0x1) { best->ecx &= ~(bit(X86_FEATURE_OSXSAVE)); if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE)) best->ecx |= bit(X86_FEATURE_OSXSAVE); } } int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) { unsigned long old_cr4 = kvm_read_cr4(vcpu); unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE; if (cr4 & CR4_RESERVED_BITS) return 1; if (!guest_cpuid_has_xsave(vcpu) && (cr4 & X86_CR4_OSXSAVE)) return 1; if (is_long_mode(vcpu)) { if (!(cr4 & X86_CR4_PAE)) return 1; } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE) && ((cr4 ^ old_cr4) & pdptr_bits) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3)) return 1; if (cr4 & X86_CR4_VMXE) return 1; kvm_x86_ops->set_cr4(vcpu, cr4); if ((cr4 ^ old_cr4) & pdptr_bits) kvm_mmu_reset_context(vcpu); if ((cr4 ^ old_cr4) & X86_CR4_OSXSAVE) update_cpuid(vcpu); return 0; } EXPORT_SYMBOL_GPL(kvm_set_cr4); int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) { if (cr3 == vcpu->arch.cr3 && !pdptrs_changed(vcpu)) { kvm_mmu_sync_roots(vcpu); kvm_mmu_flush_tlb(vcpu); return 0; } if (is_long_mode(vcpu)) { if (cr3 & CR3_L_MODE_RESERVED_BITS) return 1; } else { if (is_pae(vcpu)) { if (cr3 & CR3_PAE_RESERVED_BITS) return 1; if (is_paging(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3)) return 1; } /* * We don't check reserved bits in nonpae mode, because * this isn't enforced, and VMware depends on this. */ } /* * Does the new cr3 value map to physical memory? (Note, we * catch an invalid cr3 even in real-mode, because it would * cause trouble later on when we turn on paging anyway.) * * A real CPU would silently accept an invalid cr3 and would * attempt to use it - with largely undefined (and often hard * to debug) behavior on the guest side. */ if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT))) return 1; vcpu->arch.cr3 = cr3; vcpu->arch.mmu.new_cr3(vcpu); return 0; } EXPORT_SYMBOL_GPL(kvm_set_cr3); int __kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8) { if (cr8 & CR8_RESERVED_BITS) return 1; if (irqchip_in_kernel(vcpu->kvm)) kvm_lapic_set_tpr(vcpu, cr8); else vcpu->arch.cr8 = cr8; return 0; } void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8) { if (__kvm_set_cr8(vcpu, cr8)) kvm_inject_gp(vcpu, 0); } EXPORT_SYMBOL_GPL(kvm_set_cr8); unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu) { if (irqchip_in_kernel(vcpu->kvm)) return kvm_lapic_get_cr8(vcpu); else return vcpu->arch.cr8; } EXPORT_SYMBOL_GPL(kvm_get_cr8); static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val) { switch (dr) { case 0 ... 
3: vcpu->arch.db[dr] = val; if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) vcpu->arch.eff_db[dr] = val; break; case 4: if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) return 1; /* #UD */ /* fall through */ case 6: if (val & 0xffffffff00000000ULL) return -1; /* #GP */ vcpu->arch.dr6 = (val & DR6_VOLATILE) | DR6_FIXED_1; break; case 5: if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) return 1; /* #UD */ /* fall through */ default: /* 7 */ if (val & 0xffffffff00000000ULL) return -1; /* #GP */ vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1; if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) { kvm_x86_ops->set_dr7(vcpu, vcpu->arch.dr7); vcpu->arch.switch_db_regs = (val & DR7_BP_EN_MASK); } break; } return 0; } int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val) { int res; res = __kvm_set_dr(vcpu, dr, val); if (res > 0) kvm_queue_exception(vcpu, UD_VECTOR); else if (res < 0) kvm_inject_gp(vcpu, 0); return res; } EXPORT_SYMBOL_GPL(kvm_set_dr); static int _kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val) { switch (dr) { case 0 ... 3: *val = vcpu->arch.db[dr]; break; case 4: if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) return 1; /* fall through */ case 6: *val = vcpu->arch.dr6; break; case 5: if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) return 1; /* fall through */ default: /* 7 */ *val = vcpu->arch.dr7; break; } return 0; } int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val) { if (_kvm_get_dr(vcpu, dr, val)) { kvm_queue_exception(vcpu, UD_VECTOR); return 1; } return 0; } EXPORT_SYMBOL_GPL(kvm_get_dr); /* * List of msr numbers which we expose to userspace through KVM_GET_MSRS * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST. * * This list is modified at module load time to reflect the * capabilities of the host cpu. This capabilities test skips MSRs that are * kvm-specific. Those are put in the beginning of the list. */ #define KVM_SAVE_MSRS_BEGIN 8 static u32 msrs_to_save[] = { MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK, MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW, HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL, HV_X64_MSR_APIC_ASSIST_PAGE, MSR_KVM_ASYNC_PF_EN, MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP, MSR_STAR, #ifdef CONFIG_X86_64 MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR, #endif MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA }; static unsigned num_msrs_to_save; static u32 emulated_msrs[] = { MSR_IA32_MISC_ENABLE, MSR_IA32_MCG_STATUS, MSR_IA32_MCG_CTL, }; static int set_efer(struct kvm_vcpu *vcpu, u64 efer) { u64 old_efer = vcpu->arch.efer; if (efer & efer_reserved_bits) return 1; if (is_paging(vcpu) && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME)) return 1; if (efer & EFER_FFXSR) { struct kvm_cpuid_entry2 *feat; feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0); if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT))) return 1; } if (efer & EFER_SVME) { struct kvm_cpuid_entry2 *feat; feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0); if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM))) return 1; } efer &= ~EFER_LMA; efer |= vcpu->arch.efer & EFER_LMA; kvm_x86_ops->set_efer(vcpu, efer); vcpu->arch.mmu.base_role.nxe = (efer & EFER_NX) && !tdp_enabled; /* Update reserved bits */ if ((efer ^ old_efer) & EFER_NX) kvm_mmu_reset_context(vcpu); return 0; } void kvm_enable_efer_bits(u64 mask) { efer_reserved_bits &= ~mask; } EXPORT_SYMBOL_GPL(kvm_enable_efer_bits); /* * Writes msr value into into the appropriate "register". * Returns 0 on success, non-0 otherwise. * Assumes vcpu_load() was already called. 
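 *
 * The write itself is delegated to the vendor module through
 * kvm_x86_ops->set_msr(); the MSRs shared by all vendors are handled in
 * kvm_set_msr_common() further down.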
*/ int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data) { return kvm_x86_ops->set_msr(vcpu, msr_index, data); } /* * Adapt set_msr() to msr_io()'s calling convention */ static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data) { return kvm_set_msr(vcpu, index, *data); } static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock) { int version; int r; struct pvclock_wall_clock wc; struct timespec boot; if (!wall_clock) return; r = kvm_read_guest(kvm, wall_clock, &version, sizeof(version)); if (r) return; if (version & 1) ++version; /* first time write, random junk */ ++version; kvm_write_guest(kvm, wall_clock, &version, sizeof(version)); /* * The guest calculates current wall clock time by adding * system time (updated by kvm_guest_time_update below) to the * wall clock specified here. guest system time equals host * system time for us, thus we must fill in host boot time here. */ getboottime(&boot); wc.sec = boot.tv_sec; wc.nsec = boot.tv_nsec; wc.version = version; kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc)); version++; kvm_write_guest(kvm, wall_clock, &version, sizeof(version)); } static uint32_t div_frac(uint32_t dividend, uint32_t divisor) { uint32_t quotient, remainder; /* Don't try to replace with do_div(), this one calculates * "(dividend << 32) / divisor" */ __asm__ ( "divl %4" : "=a" (quotient), "=d" (remainder) : "0" (0), "1" (dividend), "r" (divisor) ); return quotient; } static void kvm_get_time_scale(uint32_t scaled_khz, uint32_t base_khz, s8 *pshift, u32 *pmultiplier) { uint64_t scaled64; int32_t shift = 0; uint64_t tps64; uint32_t tps32; tps64 = base_khz * 1000LL; scaled64 = scaled_khz * 1000LL; while (tps64 > scaled64*2 || tps64 & 0xffffffff00000000ULL) { tps64 >>= 1; shift--; } tps32 = (uint32_t)tps64; while (tps32 <= scaled64 || scaled64 & 0xffffffff00000000ULL) { if (scaled64 & 0xffffffff00000000ULL || tps32 & 0x80000000) scaled64 >>= 1; else tps32 <<= 1; shift++; } *pshift = shift; *pmultiplier = div_frac(scaled64, tps32); pr_debug("%s: base_khz %u => %u, shift %d, mul %u\n", __func__, base_khz, scaled_khz, shift, *pmultiplier); } static inline u64 get_kernel_ns(void) { struct timespec ts; WARN_ON(preemptible()); ktime_get_ts(&ts); monotonic_to_bootbased(&ts); return timespec_to_ns(&ts); } static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz); unsigned long max_tsc_khz; static inline int kvm_tsc_changes_freq(void) { int cpu = get_cpu(); int ret = !boot_cpu_has(X86_FEATURE_CONSTANT_TSC) && cpufreq_quick_get(cpu) != 0; put_cpu(); return ret; } static inline u64 nsec_to_cycles(u64 nsec) { u64 ret; WARN_ON(preemptible()); if (kvm_tsc_changes_freq()) printk_once(KERN_WARNING "kvm: unreliable cycle conversion on adjustable rate TSC\n"); ret = nsec * __get_cpu_var(cpu_tsc_khz); do_div(ret, USEC_PER_SEC); return ret; } static void kvm_arch_set_tsc_khz(struct kvm *kvm, u32 this_tsc_khz) { /* Compute a scale to convert nanoseconds in TSC cycles */ kvm_get_time_scale(this_tsc_khz, NSEC_PER_SEC / 1000, &kvm->arch.virtual_tsc_shift, &kvm->arch.virtual_tsc_mult); kvm->arch.virtual_tsc_khz = this_tsc_khz; } static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns) { u64 tsc = pvclock_scale_delta(kernel_ns-vcpu->arch.last_tsc_nsec, vcpu->kvm->arch.virtual_tsc_mult, vcpu->kvm->arch.virtual_tsc_shift); tsc += vcpu->arch.last_tsc_write; return tsc; } void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data) { struct kvm *kvm = vcpu->kvm; u64 offset, ns, elapsed; unsigned long flags; s64 sdiff; spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags); 
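        /*
         * arch.tsc_write_lock is held across the update of the per-VM
         * last_tsc_nsec/last_tsc_write/last_tsc_offset bookkeeping below, so
         * that concurrent guest TSC writes on different VCPUs make a
         * consistent decision about reusing the previous offset.
         */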
        offset = data - native_read_tsc();
        ns = get_kernel_ns();
        elapsed = ns - kvm->arch.last_tsc_nsec;
        sdiff = data - kvm->arch.last_tsc_write;
        if (sdiff < 0)
                sdiff = -sdiff;

        /*
         * Special case: close write to TSC within 5 seconds of
         * another CPU is interpreted as an attempt to synchronize.
         * The 5 seconds is to accommodate host load / swapping as
         * well as any reset of TSC during the boot process.
         *
         * In that case, for a reliable TSC, we can match TSC offsets,
         * or make a best guess using elapsed value.
         */
        if (sdiff < nsec_to_cycles(5ULL * NSEC_PER_SEC) &&
            elapsed < 5ULL * NSEC_PER_SEC) {
                if (!check_tsc_unstable()) {
                        offset = kvm->arch.last_tsc_offset;
                        pr_debug("kvm: matched tsc offset for %llu\n", data);
                } else {
                        u64 delta = nsec_to_cycles(elapsed);
                        offset += delta;
                        pr_debug("kvm: adjusted tsc offset by %llu\n", delta);
                }
                ns = kvm->arch.last_tsc_nsec;
        }
        kvm->arch.last_tsc_nsec = ns;
        kvm->arch.last_tsc_write = data;
        kvm->arch.last_tsc_offset = offset;
        kvm_x86_ops->write_tsc_offset(vcpu, offset);
        spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);

        /* Reset of TSC must disable overshoot protection below */
        vcpu->arch.hv_clock.tsc_timestamp = 0;
        vcpu->arch.last_tsc_write = data;
        vcpu->arch.last_tsc_nsec = ns;
}
EXPORT_SYMBOL_GPL(kvm_write_tsc);

static int kvm_guest_time_update(struct kvm_vcpu *v)
{
        unsigned long flags;
        struct kvm_vcpu_arch *vcpu = &v->arch;
        void *shared_kaddr;
        unsigned long this_tsc_khz;
        s64 kernel_ns, max_kernel_ns;
        u64 tsc_timestamp;

        /* Keep irq disabled to prevent changes to the clock */
        local_irq_save(flags);
        kvm_get_msr(v, MSR_IA32_TSC, &tsc_timestamp);
        kernel_ns = get_kernel_ns();
        this_tsc_khz = __get_cpu_var(cpu_tsc_khz);
        if (unlikely(this_tsc_khz == 0)) {
                local_irq_restore(flags);
                kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
                return 1;
        }

        /*
         * We may have to catch up the TSC to match elapsed wall clock
         * time for two reasons, even if kvmclock is used.
         * 1) CPU could have been running below the maximum TSC rate
         * 2) Broken TSC compensation resets the base at each VCPU
         *    entry to avoid unknown leaps of TSC even when running
         *    again on the same CPU.  This may cause apparent elapsed
         *    time to disappear, and the guest to stand still or run
         *    very slowly.
         */
        if (vcpu->tsc_catchup) {
                u64 tsc = compute_guest_tsc(v, kernel_ns);
                if (tsc > tsc_timestamp) {
                        kvm_x86_ops->adjust_tsc_offset(v, tsc - tsc_timestamp);
                        tsc_timestamp = tsc;
                }
        }

        local_irq_restore(flags);

        if (!vcpu->time_page)
                return 0;

        /*
         * Time as measured by the TSC may go backwards when resetting the base
         * tsc_timestamp.  The reason for this is that the TSC resolution is
         * higher than the resolution of the other clock scales.  Thus, many
         * possible measurements of the TSC correspond to one measurement of any
         * other clock, and so a spread of values is possible.  This is not a
         * problem for the computation of the nanosecond clock; with TSC rates
         * around 1GHZ, there can only be a few cycles which correspond to one
         * nanosecond value, and any path through this code will inevitably
         * take longer than that.  However, with the kernel_ns value itself,
         * the precision may be much lower, down to HZ granularity.  If the
         * first sampling of TSC against kernel_ns ends in the low part of the
         * range, and the second in the high end of the range, we can get:
         *
         * (TSC - offset_low) * S + kns_old > (TSC - offset_high) * S + kns_new
         *
         * As the sampling errors potentially range in the thousands of cycles,
         * it is possible such a time value has already been observed by the
         * guest.
To protect against this, we must compute the system time as * observed by the guest and ensure the new system time is greater. */ max_kernel_ns = 0; if (vcpu->hv_clock.tsc_timestamp && vcpu->last_guest_tsc) { max_kernel_ns = vcpu->last_guest_tsc - vcpu->hv_clock.tsc_timestamp; max_kernel_ns = pvclock_scale_delta(max_kernel_ns, vcpu->hv_clock.tsc_to_system_mul, vcpu->hv_clock.tsc_shift); max_kernel_ns += vcpu->last_kernel_ns; } if (unlikely(vcpu->hw_tsc_khz != this_tsc_khz)) { kvm_get_time_scale(NSEC_PER_SEC / 1000, this_tsc_khz, &vcpu->hv_clock.tsc_shift, &vcpu->hv_clock.tsc_to_system_mul); vcpu->hw_tsc_khz = this_tsc_khz; } if (max_kernel_ns > kernel_ns) kernel_ns = max_kernel_ns; /* With all the info we got, fill in the values */ vcpu->hv_clock.tsc_timestamp = tsc_timestamp; vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset; vcpu->last_kernel_ns = kernel_ns; vcpu->last_guest_tsc = tsc_timestamp; vcpu->hv_clock.flags = 0; /* * The interface expects us to write an even number signaling that the * update is finished. Since the guest won't see the intermediate * state, we just increase by 2 at the end. */ vcpu->hv_clock.version += 2; shared_kaddr = kmap_atomic(vcpu->time_page, KM_USER0); memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock, sizeof(vcpu->hv_clock)); kunmap_atomic(shared_kaddr, KM_USER0); mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT); return 0; } static bool msr_mtrr_valid(unsigned msr) { switch (msr) { case 0x200 ... 0x200 + 2 * KVM_NR_VAR_MTRR - 1: case MSR_MTRRfix64K_00000: case MSR_MTRRfix16K_80000: case MSR_MTRRfix16K_A0000: case MSR_MTRRfix4K_C0000: case MSR_MTRRfix4K_C8000: case MSR_MTRRfix4K_D0000: case MSR_MTRRfix4K_D8000: case MSR_MTRRfix4K_E0000: case MSR_MTRRfix4K_E8000: case MSR_MTRRfix4K_F0000: case MSR_MTRRfix4K_F8000: case MSR_MTRRdefType: case MSR_IA32_CR_PAT: return true; case 0x2f8: return true; } return false; } static bool valid_pat_type(unsigned t) { return t < 8 && (1 << t) & 0xf3; /* 0, 1, 4, 5, 6, 7 */ } static bool valid_mtrr_type(unsigned t) { return t < 8 && (1 << t) & 0x73; /* 0, 1, 4, 5, 6 */ } static bool mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data) { int i; if (!msr_mtrr_valid(msr)) return false; if (msr == MSR_IA32_CR_PAT) { for (i = 0; i < 8; i++) if (!valid_pat_type((data >> (i * 8)) & 0xff)) return false; return true; } else if (msr == MSR_MTRRdefType) { if (data & ~0xcff) return false; return valid_mtrr_type(data & 0xff); } else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) { for (i = 0; i < 8 ; i++) if (!valid_mtrr_type((data >> (i * 8)) & 0xff)) return false; return true; } /* variable MTRRs */ return valid_mtrr_type(data & 0xff); } static int set_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 data) { u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges; if (!mtrr_valid(vcpu, msr, data)) return 1; if (msr == MSR_MTRRdefType) { vcpu->arch.mtrr_state.def_type = data; vcpu->arch.mtrr_state.enabled = (data & 0xc00) >> 10; } else if (msr == MSR_MTRRfix64K_00000) p[0] = data; else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000) p[1 + msr - MSR_MTRRfix16K_80000] = data; else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000) p[3 + msr - MSR_MTRRfix4K_C0000] = data; else if (msr == MSR_IA32_CR_PAT) vcpu->arch.pat = data; else { /* Variable MTRRs */ int idx, is_mtrr_mask; u64 *pt; idx = (msr - 0x200) / 2; is_mtrr_mask = msr - 0x200 - 2 * idx; if (!is_mtrr_mask) pt = (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo; else pt = (u64 
*)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo; *pt = data; } kvm_mmu_reset_context(vcpu); return 0; } static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data) { u64 mcg_cap = vcpu->arch.mcg_cap; unsigned bank_num = mcg_cap & 0xff; switch (msr) { case MSR_IA32_MCG_STATUS: vcpu->arch.mcg_status = data; break; case MSR_IA32_MCG_CTL: if (!(mcg_cap & MCG_CTL_P)) return 1; if (data != 0 && data != ~(u64)0) return -1; vcpu->arch.mcg_ctl = data; break; default: if (msr >= MSR_IA32_MC0_CTL && msr < MSR_IA32_MC0_CTL + 4 * bank_num) { u32 offset = msr - MSR_IA32_MC0_CTL; /* only 0 or all 1s can be written to IA32_MCi_CTL * some Linux kernels though clear bit 10 in bank 4 to * workaround a BIOS/GART TBL issue on AMD K8s, ignore * this to avoid an uncatched #GP in the guest */ if ((offset & 0x3) == 0 && data != 0 && (data | (1 << 10)) != ~(u64)0) return -1; vcpu->arch.mce_banks[offset] = data; break; } return 1; } return 0; } static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data) { struct kvm *kvm = vcpu->kvm; int lm = is_long_mode(vcpu); u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64 : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32; u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64 : kvm->arch.xen_hvm_config.blob_size_32; u32 page_num = data & ~PAGE_MASK; u64 page_addr = data & PAGE_MASK; u8 *page; int r; r = -E2BIG; if (page_num >= blob_size) goto out; r = -ENOMEM; page = kzalloc(PAGE_SIZE, GFP_KERNEL); if (!page) goto out; r = -EFAULT; if (copy_from_user(page, blob_addr + (page_num * PAGE_SIZE), PAGE_SIZE)) goto out_free; if (kvm_write_guest(kvm, page_addr, page, PAGE_SIZE)) goto out_free; r = 0; out_free: kfree(page); out: return r; } static bool kvm_hv_hypercall_enabled(struct kvm *kvm) { return kvm->arch.hv_hypercall & HV_X64_MSR_HYPERCALL_ENABLE; } static bool kvm_hv_msr_partition_wide(u32 msr) { bool r = false; switch (msr) { case HV_X64_MSR_GUEST_OS_ID: case HV_X64_MSR_HYPERCALL: r = true; break; } return r; } static int set_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data) { struct kvm *kvm = vcpu->kvm; switch (msr) { case HV_X64_MSR_GUEST_OS_ID: kvm->arch.hv_guest_os_id = data; /* setting guest os id to zero disables hypercall page */ if (!kvm->arch.hv_guest_os_id) kvm->arch.hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE; break; case HV_X64_MSR_HYPERCALL: { u64 gfn; unsigned long addr; u8 instructions[4]; /* if guest os id is not set hypercall should remain disabled */ if (!kvm->arch.hv_guest_os_id) break; if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) { kvm->arch.hv_hypercall = data; break; } gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT; addr = gfn_to_hva(kvm, gfn); if (kvm_is_error_hva(addr)) return 1; kvm_x86_ops->patch_hypercall(vcpu, instructions); ((unsigned char *)instructions)[3] = 0xc3; /* ret */ if (copy_to_user((void __user *)addr, instructions, 4)) return 1; kvm->arch.hv_hypercall = data; break; } default: pr_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x " "data 0x%llx\n", msr, data); return 1; } return 0; } static int set_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 data) { switch (msr) { case HV_X64_MSR_APIC_ASSIST_PAGE: { unsigned long addr; if (!(data & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE)) { vcpu->arch.hv_vapic = data; break; } addr = gfn_to_hva(vcpu->kvm, data >> HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT); if (kvm_is_error_hva(addr)) return 1; if (clear_user((void __user *)addr, PAGE_SIZE)) return 1; vcpu->arch.hv_vapic = data; break; } case HV_X64_MSR_EOI: return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data); 
case HV_X64_MSR_ICR: return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data); case HV_X64_MSR_TPR: return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data); default: pr_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x " "data 0x%llx\n", msr, data); return 1; } return 0; } static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data) { gpa_t gpa = data & ~0x3f; /* Bits 2:5 are resrved, Should be zero */ if (data & 0x3c) return 1; vcpu->arch.apf.msr_val = data; if (!(data & KVM_ASYNC_PF_ENABLED)) { kvm_clear_async_pf_completion_queue(vcpu); kvm_async_pf_hash_reset(vcpu); return 0; } if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa)) return 1; vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS); kvm_async_pf_wakeup_all(vcpu); return 0; } int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data) { switch (msr) { case MSR_EFER: return set_efer(vcpu, data); case MSR_K7_HWCR: data &= ~(u64)0x40; /* ignore flush filter disable */ data &= ~(u64)0x100; /* ignore ignne emulation enable */ if (data != 0) { pr_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n", data); return 1; } break; case MSR_FAM10H_MMIO_CONF_BASE: if (data != 0) { pr_unimpl(vcpu, "unimplemented MMIO_CONF_BASE wrmsr: " "0x%llx\n", data); return 1; } break; case MSR_AMD64_NB_CFG: break; case MSR_IA32_DEBUGCTLMSR: if (!data) { /* We support the non-activated case already */ break; } else if (data & ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_BTF)) { /* Values other than LBR and BTF are vendor-specific, thus reserved and should throw a #GP */ return 1; } pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n", __func__, data); break; case MSR_IA32_UCODE_REV: case MSR_IA32_UCODE_WRITE: case MSR_VM_HSAVE_PA: case MSR_AMD64_PATCH_LOADER: break; case 0x200 ... 0x2ff: return set_msr_mtrr(vcpu, msr, data); case MSR_IA32_APICBASE: kvm_set_apic_base(vcpu, data); break; case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff: return kvm_x2apic_msr_write(vcpu, msr, data); case MSR_IA32_MISC_ENABLE: vcpu->arch.ia32_misc_enable_msr = data; break; case MSR_KVM_WALL_CLOCK_NEW: case MSR_KVM_WALL_CLOCK: vcpu->kvm->arch.wall_clock = data; kvm_write_wall_clock(vcpu->kvm, data); break; case MSR_KVM_SYSTEM_TIME_NEW: case MSR_KVM_SYSTEM_TIME: { if (vcpu->arch.time_page) { kvm_release_page_dirty(vcpu->arch.time_page); vcpu->arch.time_page = NULL; } vcpu->arch.time = data; kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); /* we verify if the enable bit is set... */ if (!(data & 1)) break; /* ...but clean it before doing the actual write */ vcpu->arch.time_offset = data & ~(PAGE_MASK | 1); vcpu->arch.time_page = gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT); if (is_error_page(vcpu->arch.time_page)) { kvm_release_page_clean(vcpu->arch.time_page); vcpu->arch.time_page = NULL; } break; } case MSR_KVM_ASYNC_PF_EN: if (kvm_pv_enable_async_pf(vcpu, data)) return 1; break; case MSR_IA32_MCG_CTL: case MSR_IA32_MCG_STATUS: case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1: return set_msr_mce(vcpu, msr, data); /* Performance counters are not protected by a CPUID bit, * so we should check all of them in the generic path for the sake of * cross vendor migration. * Writing a zero into the event select MSRs disables them, * which we perfectly emulate ;-). Any other value should be at least * reported, some guests depend on them. 
*/ case MSR_P6_EVNTSEL0: case MSR_P6_EVNTSEL1: case MSR_K7_EVNTSEL0: case MSR_K7_EVNTSEL1: case MSR_K7_EVNTSEL2: case MSR_K7_EVNTSEL3: if (data != 0) pr_unimpl(vcpu, "unimplemented perfctr wrmsr: " "0x%x data 0x%llx\n", msr, data); break; /* at least RHEL 4 unconditionally writes to the perfctr registers, * so we ignore writes to make it happy. */ case MSR_P6_PERFCTR0: case MSR_P6_PERFCTR1: case MSR_K7_PERFCTR0: case MSR_K7_PERFCTR1: case MSR_K7_PERFCTR2: case MSR_K7_PERFCTR3: pr_unimpl(vcpu, "unimplemented perfctr wrmsr: " "0x%x data 0x%llx\n", msr, data); break; case MSR_K7_CLK_CTL: /* * Ignore all writes to this no longer documented MSR. * Writes are only relevant for old K7 processors, * all pre-dating SVM, but a recommended workaround from * AMD for these chips. It is possible to speicify the * affected processor models on the command line, hence * the need to ignore the workaround. */ break; case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15: if (kvm_hv_msr_partition_wide(msr)) { int r; mutex_lock(&vcpu->kvm->lock); r = set_msr_hyperv_pw(vcpu, msr, data); mutex_unlock(&vcpu->kvm->lock); return r; } else return set_msr_hyperv(vcpu, msr, data); break; default: if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr)) return xen_hvm_config(vcpu, data); if (!ignore_msrs) { pr_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n", msr, data); return 1; } else { pr_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n", msr, data); break; } } return 0; } EXPORT_SYMBOL_GPL(kvm_set_msr_common); /* * Reads an msr value (of 'msr_index') into 'pdata'. * Returns 0 on success, non-0 otherwise. * Assumes vcpu_load() was already called. */ int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata) { return kvm_x86_ops->get_msr(vcpu, msr_index, pdata); } static int get_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) { u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges; if (!msr_mtrr_valid(msr)) return 1; if (msr == MSR_MTRRdefType) *pdata = vcpu->arch.mtrr_state.def_type + (vcpu->arch.mtrr_state.enabled << 10); else if (msr == MSR_MTRRfix64K_00000) *pdata = p[0]; else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000) *pdata = p[1 + msr - MSR_MTRRfix16K_80000]; else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000) *pdata = p[3 + msr - MSR_MTRRfix4K_C0000]; else if (msr == MSR_IA32_CR_PAT) *pdata = vcpu->arch.pat; else { /* Variable MTRRs */ int idx, is_mtrr_mask; u64 *pt; idx = (msr - 0x200) / 2; is_mtrr_mask = msr - 0x200 - 2 * idx; if (!is_mtrr_mask) pt = (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo; else pt = (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo; *pdata = *pt; } return 0; } static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) { u64 data; u64 mcg_cap = vcpu->arch.mcg_cap; unsigned bank_num = mcg_cap & 0xff; switch (msr) { case MSR_IA32_P5_MC_ADDR: case MSR_IA32_P5_MC_TYPE: data = 0; break; case MSR_IA32_MCG_CAP: data = vcpu->arch.mcg_cap; break; case MSR_IA32_MCG_CTL: if (!(mcg_cap & MCG_CTL_P)) return 1; data = vcpu->arch.mcg_ctl; break; case MSR_IA32_MCG_STATUS: data = vcpu->arch.mcg_status; break; default: if (msr >= MSR_IA32_MC0_CTL && msr < MSR_IA32_MC0_CTL + 4 * bank_num) { u32 offset = msr - MSR_IA32_MC0_CTL; data = vcpu->arch.mce_banks[offset]; break; } return 1; } *pdata = data; return 0; } static int get_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) { u64 data = 0; struct kvm *kvm = vcpu->kvm; switch (msr) { case HV_X64_MSR_GUEST_OS_ID: data = kvm->arch.hv_guest_os_id; break; case HV_X64_MSR_HYPERCALL: data = 
kvm->arch.hv_hypercall; break; default: pr_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr); return 1; } *pdata = data; return 0; } static int get_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) { u64 data = 0; switch (msr) { case HV_X64_MSR_VP_INDEX: { int r; struct kvm_vcpu *v; kvm_for_each_vcpu(r, v, vcpu->kvm) if (v == vcpu) data = r; break; } case HV_X64_MSR_EOI: return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata); case HV_X64_MSR_ICR: return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata); case HV_X64_MSR_TPR: return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata); default: pr_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr); return 1; } *pdata = data; return 0; } int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) { u64 data; switch (msr) { case MSR_IA32_PLATFORM_ID: case MSR_IA32_UCODE_REV: case MSR_IA32_EBL_CR_POWERON: case MSR_IA32_DEBUGCTLMSR: case MSR_IA32_LASTBRANCHFROMIP: case MSR_IA32_LASTBRANCHTOIP: case MSR_IA32_LASTINTFROMIP: case MSR_IA32_LASTINTTOIP: case MSR_K8_SYSCFG: case MSR_K7_HWCR: case MSR_VM_HSAVE_PA: case MSR_P6_PERFCTR0: case MSR_P6_PERFCTR1: case MSR_P6_EVNTSEL0: case MSR_P6_EVNTSEL1: case MSR_K7_EVNTSEL0: case MSR_K7_PERFCTR0: case MSR_K8_INT_PENDING_MSG: case MSR_AMD64_NB_CFG: case MSR_FAM10H_MMIO_CONF_BASE: data = 0; break; case MSR_MTRRcap: data = 0x500 | KVM_NR_VAR_MTRR; break; case 0x200 ... 0x2ff: return get_msr_mtrr(vcpu, msr, pdata); case 0xcd: /* fsb frequency */ data = 3; break; /* * MSR_EBC_FREQUENCY_ID * Conservative value valid for even the basic CPU models. * Models 0,1: 000 in bits 23:21 indicating a bus speed of * 100MHz, model 2 000 in bits 18:16 indicating 100MHz, * and 266MHz for model 3, or 4. Set Core Clock * Frequency to System Bus Frequency Ratio to 1 (bits * 31:24) even though these are only valid for CPU * models > 2, however guests may end up dividing or * multiplying by zero otherwise. */ case MSR_EBC_FREQUENCY_ID: data = 1 << 24; break; case MSR_IA32_APICBASE: data = kvm_get_apic_base(vcpu); break; case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff: return kvm_x2apic_msr_read(vcpu, msr, pdata); break; case MSR_IA32_MISC_ENABLE: data = vcpu->arch.ia32_misc_enable_msr; break; case MSR_IA32_PERF_STATUS: /* TSC increment by tick */ data = 1000ULL; /* CPU multiplier */ data |= (((uint64_t)4ULL) << 40); break; case MSR_EFER: data = vcpu->arch.efer; break; case MSR_KVM_WALL_CLOCK: case MSR_KVM_WALL_CLOCK_NEW: data = vcpu->kvm->arch.wall_clock; break; case MSR_KVM_SYSTEM_TIME: case MSR_KVM_SYSTEM_TIME_NEW: data = vcpu->arch.time; break; case MSR_KVM_ASYNC_PF_EN: data = vcpu->arch.apf.msr_val; break; case MSR_IA32_P5_MC_ADDR: case MSR_IA32_P5_MC_TYPE: case MSR_IA32_MCG_CAP: case MSR_IA32_MCG_CTL: case MSR_IA32_MCG_STATUS: case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1: return get_msr_mce(vcpu, msr, pdata); case MSR_K7_CLK_CTL: /* * Provide expected ramp-up count for K7. All other * are set to zero, indicating minimum divisors for * every field. * * This prevents guest kernels on AMD host with CPU * type 6, model 8 and higher from exploding due to * the rdmsr failing. */ data = 0x20000000; break; case HV_X64_MSR_GUEST_OS_ID ... 
HV_X64_MSR_SINT15: if (kvm_hv_msr_partition_wide(msr)) { int r; mutex_lock(&vcpu->kvm->lock); r = get_msr_hyperv_pw(vcpu, msr, pdata); mutex_unlock(&vcpu->kvm->lock); return r; } else return get_msr_hyperv(vcpu, msr, pdata); break; default: if (!ignore_msrs) { pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr); return 1; } else { pr_unimpl(vcpu, "ignored rdmsr: 0x%x\n", msr); data = 0; } break; } *pdata = data; return 0; } EXPORT_SYMBOL_GPL(kvm_get_msr_common); /* * Read or write a bunch of msrs. All parameters are kernel addresses. * * @return number of msrs set successfully. */ static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs, struct kvm_msr_entry *entries, int (*do_msr)(struct kvm_vcpu *vcpu, unsigned index, u64 *data)) { int i, idx; idx = srcu_read_lock(&vcpu->kvm->srcu); for (i = 0; i < msrs->nmsrs; ++i) if (do_msr(vcpu, entries[i].index, &entries[i].data)) break; srcu_read_unlock(&vcpu->kvm->srcu, idx); return i; } /* * Read or write a bunch of msrs. Parameters are user addresses. * * @return number of msrs set successfully. */ static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs, int (*do_msr)(struct kvm_vcpu *vcpu, unsigned index, u64 *data), int writeback) { struct kvm_msrs msrs; struct kvm_msr_entry *entries; int r, n; unsigned size; r = -EFAULT; if (copy_from_user(&msrs, user_msrs, sizeof msrs)) goto out; r = -E2BIG; if (msrs.nmsrs >= MAX_IO_MSRS) goto out; r = -ENOMEM; size = sizeof(struct kvm_msr_entry) * msrs.nmsrs; entries = kmalloc(size, GFP_KERNEL); if (!entries) goto out; r = -EFAULT; if (copy_from_user(entries, user_msrs->entries, size)) goto out_free; r = n = __msr_io(vcpu, &msrs, entries, do_msr); if (r < 0) goto out_free; r = -EFAULT; if (writeback && copy_to_user(user_msrs->entries, entries, size)) goto out_free; r = n; out_free: kfree(entries); out: return r; } int kvm_dev_ioctl_check_extension(long ext) { int r; switch (ext) { case KVM_CAP_IRQCHIP: case KVM_CAP_HLT: case KVM_CAP_MMU_SHADOW_CACHE_CONTROL: case KVM_CAP_SET_TSS_ADDR: case KVM_CAP_EXT_CPUID: case KVM_CAP_CLOCKSOURCE: case KVM_CAP_PIT: case KVM_CAP_NOP_IO_DELAY: case KVM_CAP_MP_STATE: case KVM_CAP_SYNC_MMU: case KVM_CAP_REINJECT_CONTROL: case KVM_CAP_IRQ_INJECT_STATUS: case KVM_CAP_ASSIGN_DEV_IRQ: case KVM_CAP_IRQFD: case KVM_CAP_IOEVENTFD: case KVM_CAP_PIT2: case KVM_CAP_PIT_STATE2: case KVM_CAP_SET_IDENTITY_MAP_ADDR: case KVM_CAP_XEN_HVM: case KVM_CAP_ADJUST_CLOCK: case KVM_CAP_VCPU_EVENTS: case KVM_CAP_HYPERV: case KVM_CAP_HYPERV_VAPIC: case KVM_CAP_HYPERV_SPIN: case KVM_CAP_PCI_SEGMENT: case KVM_CAP_DEBUGREGS: case KVM_CAP_X86_ROBUST_SINGLESTEP: case KVM_CAP_XSAVE: case KVM_CAP_ASYNC_PF: r = 1; break; case KVM_CAP_COALESCED_MMIO: r = KVM_COALESCED_MMIO_PAGE_OFFSET; break; case KVM_CAP_VAPIC: r = !kvm_x86_ops->cpu_has_accelerated_tpr(); break; case KVM_CAP_NR_VCPUS: r = KVM_MAX_VCPUS; break; case KVM_CAP_NR_MEMSLOTS: r = KVM_MEMORY_SLOTS; break; case KVM_CAP_PV_MMU: /* obsolete */ r = 0; break; case KVM_CAP_IOMMU: r = iommu_found(); break; case KVM_CAP_MCE: r = KVM_MAX_MCE_BANKS; break; case KVM_CAP_XCRS: r = cpu_has_xsave; break; default: r = 0; break; } return r; } long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { void __user *argp = (void __user *)arg; long r; switch (ioctl) { case KVM_GET_MSR_INDEX_LIST: { struct kvm_msr_list __user *user_msr_list = argp; struct kvm_msr_list msr_list; unsigned n; r = -EFAULT; if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list)) goto out; n = msr_list.nmsrs; msr_list.nmsrs = 
num_msrs_to_save + ARRAY_SIZE(emulated_msrs); if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list)) goto out; r = -E2BIG; if (n < msr_list.nmsrs) goto out; r = -EFAULT; if (copy_to_user(user_msr_list->indices, &msrs_to_save, num_msrs_to_save * sizeof(u32))) goto out; if (copy_to_user(user_msr_list->indices + num_msrs_to_save, &emulated_msrs, ARRAY_SIZE(emulated_msrs) * sizeof(u32))) goto out; r = 0; break; } case KVM_GET_SUPPORTED_CPUID: { struct kvm_cpuid2 __user *cpuid_arg = argp; struct kvm_cpuid2 cpuid; r = -EFAULT; if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid)) goto out; r = kvm_dev_ioctl_get_supported_cpuid(&cpuid, cpuid_arg->entries); if (r) goto out; r = -EFAULT; if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid)) goto out; r = 0; break; } case KVM_X86_GET_MCE_CAP_SUPPORTED: { u64 mce_cap; mce_cap = KVM_MCE_CAP_SUPPORTED; r = -EFAULT; if (copy_to_user(argp, &mce_cap, sizeof mce_cap)) goto out; r = 0; break; } default: r = -EINVAL; } out: return r; } static void wbinvd_ipi(void *garbage) { wbinvd(); } static bool need_emulate_wbinvd(struct kvm_vcpu *vcpu) { return vcpu->kvm->arch.iommu_domain && !(vcpu->kvm->arch.iommu_flags & KVM_IOMMU_CACHE_COHERENCY); } void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) { /* Address WBINVD may be executed by guest */ if (need_emulate_wbinvd(vcpu)) { if (kvm_x86_ops->has_wbinvd_exit()) cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask); else if (vcpu->cpu != -1 && vcpu->cpu != cpu) smp_call_function_single(vcpu->cpu, wbinvd_ipi, NULL, 1); } kvm_x86_ops->vcpu_load(vcpu, cpu); if (unlikely(vcpu->cpu != cpu) || check_tsc_unstable()) { /* Make sure TSC doesn't go backwards */ s64 tsc_delta = !vcpu->arch.last_host_tsc ? 0 : native_read_tsc() - vcpu->arch.last_host_tsc; if (tsc_delta < 0) mark_tsc_unstable("KVM discovered backwards TSC"); if (check_tsc_unstable()) { kvm_x86_ops->adjust_tsc_offset(vcpu, -tsc_delta); vcpu->arch.tsc_catchup = 1; kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); } if (vcpu->cpu != cpu) kvm_migrate_timers(vcpu); vcpu->cpu = cpu; } } void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) { kvm_x86_ops->vcpu_put(vcpu); kvm_put_guest_fpu(vcpu); vcpu->arch.last_host_tsc = native_read_tsc(); } static int is_efer_nx(void) { unsigned long long efer = 0; rdmsrl_safe(MSR_EFER, &efer); return efer & EFER_NX; } static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu) { int i; struct kvm_cpuid_entry2 *e, *entry; entry = NULL; for (i = 0; i < vcpu->arch.cpuid_nent; ++i) { e = &vcpu->arch.cpuid_entries[i]; if (e->function == 0x80000001) { entry = e; break; } } if (entry && (entry->edx & (1 << 20)) && !is_efer_nx()) { entry->edx &= ~(1 << 20); printk(KERN_INFO "kvm: guest NX capability removed\n"); } } /* when an old userspace process fills a new kernel module */ static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid *cpuid, struct kvm_cpuid_entry __user *entries) { int r, i; struct kvm_cpuid_entry *cpuid_entries; r = -E2BIG; if (cpuid->nent > KVM_MAX_CPUID_ENTRIES) goto out; r = -ENOMEM; cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * cpuid->nent); if (!cpuid_entries) goto out; r = -EFAULT; if (copy_from_user(cpuid_entries, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry))) goto out_free; for (i = 0; i < cpuid->nent; i++) { vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function; vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax; vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx; vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx; vcpu->arch.cpuid_entries[i].edx = 
cpuid_entries[i].edx; vcpu->arch.cpuid_entries[i].index = 0; vcpu->arch.cpuid_entries[i].flags = 0; vcpu->arch.cpuid_entries[i].padding[0] = 0; vcpu->arch.cpuid_entries[i].padding[1] = 0; vcpu->arch.cpuid_entries[i].padding[2] = 0; } vcpu->arch.cpuid_nent = cpuid->nent; cpuid_fix_nx_cap(vcpu); r = 0; kvm_apic_set_version(vcpu); kvm_x86_ops->cpuid_update(vcpu); update_cpuid(vcpu); out_free: vfree(cpuid_entries); out: return r; } static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid, struct kvm_cpuid_entry2 __user *entries) { int r; r = -E2BIG; if (cpuid->nent > KVM_MAX_CPUID_ENTRIES) goto out; r = -EFAULT; if (copy_from_user(&vcpu->arch.cpuid_entries, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2))) goto out; vcpu->arch.cpuid_nent = cpuid->nent; kvm_apic_set_version(vcpu); kvm_x86_ops->cpuid_update(vcpu); update_cpuid(vcpu); return 0; out: return r; } static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid, struct kvm_cpuid_entry2 __user *entries) { int r; r = -E2BIG; if (cpuid->nent < vcpu->arch.cpuid_nent) goto out; r = -EFAULT; if (copy_to_user(entries, &vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2))) goto out; return 0; out: cpuid->nent = vcpu->arch.cpuid_nent; return r; } static void cpuid_mask(u32 *word, int wordnum) { *word &= boot_cpu_data.x86_capability[wordnum]; } static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function, u32 index) { entry->function = function; entry->index = index; cpuid_count(entry->function, entry->index, &entry->eax, &entry->ebx, &entry->ecx, &entry->edx); entry->flags = 0; } #define F(x) bit(X86_FEATURE_##x) static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, u32 index, int *nent, int maxnent) { unsigned f_nx = is_efer_nx() ? F(NX) : 0; #ifdef CONFIG_X86_64 unsigned f_gbpages = (kvm_x86_ops->get_lpage_level() == PT_PDPE_LEVEL) ? F(GBPAGES) : 0; unsigned f_lm = F(LM); #else unsigned f_gbpages = 0; unsigned f_lm = 0; #endif unsigned f_rdtscp = kvm_x86_ops->rdtscp_supported() ? 
F(RDTSCP) : 0; /* cpuid 1.edx */ const u32 kvm_supported_word0_x86_features = F(FPU) | F(VME) | F(DE) | F(PSE) | F(TSC) | F(MSR) | F(PAE) | F(MCE) | F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) | F(MTRR) | F(PGE) | F(MCA) | F(CMOV) | F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLSH) | 0 /* Reserved, DS, ACPI */ | F(MMX) | F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) | 0 /* HTT, TM, Reserved, PBE */; /* cpuid 0x80000001.edx */ const u32 kvm_supported_word1_x86_features = F(FPU) | F(VME) | F(DE) | F(PSE) | F(TSC) | F(MSR) | F(PAE) | F(MCE) | F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) | F(MTRR) | F(PGE) | F(MCA) | F(CMOV) | F(PAT) | F(PSE36) | 0 /* Reserved */ | f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) | F(FXSR) | F(FXSR_OPT) | f_gbpages | f_rdtscp | 0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW); /* cpuid 1.ecx */ const u32 kvm_supported_word4_x86_features = F(XMM3) | F(PCLMULQDQ) | 0 /* DTES64, MONITOR */ | 0 /* DS-CPL, VMX, SMX, EST */ | 0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ | 0 /* Reserved */ | F(CX16) | 0 /* xTPR Update, PDCM */ | 0 /* Reserved, DCA */ | F(XMM4_1) | F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) | 0 /* Reserved*/ | F(AES) | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX) | F(F16C); /* cpuid 0x80000001.ecx */ const u32 kvm_supported_word6_x86_features = F(LAHF_LM) | F(CMP_LEGACY) | 0 /*SVM*/ | 0 /* ExtApicSpace */ | F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) | F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(XOP) | 0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM); /* all calls to cpuid_count() should be made on the same cpu */ get_cpu(); do_cpuid_1_ent(entry, function, index); ++*nent; switch (function) { case 0: entry->eax = min(entry->eax, (u32)0xd); break; case 1: entry->edx &= kvm_supported_word0_x86_features; cpuid_mask(&entry->edx, 0); entry->ecx &= kvm_supported_word4_x86_features; cpuid_mask(&entry->ecx, 4); /* we support x2apic emulation even if host does not support * it since we emulate x2apic in software */ entry->ecx |= F(X2APIC); break; /* function 2 entries are STATEFUL. That is, repeated cpuid commands * may return different values. This forces us to get_cpu() before * issuing the first command, and also to emulate this annoying behavior * in kvm_emulate_cpuid() using KVM_CPUID_FLAG_STATE_READ_NEXT */ case 2: { int t, times = entry->eax & 0xff; entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC; entry->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT; for (t = 1; t < times && *nent < maxnent; ++t) { do_cpuid_1_ent(&entry[t], function, 0); entry[t].flags |= KVM_CPUID_FLAG_STATEFUL_FUNC; ++*nent; } break; } /* function 4 and 0xb have additional index. 
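 * They are enumerated below by bumping the index until the leaf reports a
 * terminating value: a cache type of 0 (EAX[4:0]) for leaf 4, a level type
 * of 0 (ECX[15:8]) for leaf 0xb, and EAX == 0 (except subleaf 2) for leaf 0xd.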
*/ case 4: { int i, cache_type; entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX; /* read more entries until cache_type is zero */ for (i = 1; *nent < maxnent; ++i) { cache_type = entry[i - 1].eax & 0x1f; if (!cache_type) break; do_cpuid_1_ent(&entry[i], function, i); entry[i].flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX; ++*nent; } break; } case 0xb: { int i, level_type; entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX; /* read more entries until level_type is zero */ for (i = 1; *nent < maxnent; ++i) { level_type = entry[i - 1].ecx & 0xff00; if (!level_type) break; do_cpuid_1_ent(&entry[i], function, i); entry[i].flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX; ++*nent; } break; } case 0xd: { int i; entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX; for (i = 1; *nent < maxnent; ++i) { if (entry[i - 1].eax == 0 && i != 2) break; do_cpuid_1_ent(&entry[i], function, i); entry[i].flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX; ++*nent; } break; } case KVM_CPUID_SIGNATURE: { char signature[12] = "KVMKVMKVM\0\0"; u32 *sigptr = (u32 *)signature; entry->eax = 0; entry->ebx = sigptr[0]; entry->ecx = sigptr[1]; entry->edx = sigptr[2]; break; } case KVM_CPUID_FEATURES: entry->eax = (1 << KVM_FEATURE_CLOCKSOURCE) | (1 << KVM_FEATURE_NOP_IO_DELAY) | (1 << KVM_FEATURE_CLOCKSOURCE2) | (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT); entry->ebx = 0; entry->ecx = 0; entry->edx = 0; break; case 0x80000000: entry->eax = min(entry->eax, 0x8000001a); break; case 0x80000001: entry->edx &= kvm_supported_word1_x86_features; cpuid_mask(&entry->edx, 1); entry->ecx &= kvm_supported_word6_x86_features; cpuid_mask(&entry->ecx, 6); break; } kvm_x86_ops->set_supported_cpuid(function, entry); put_cpu(); } #undef F static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid, struct kvm_cpuid_entry2 __user *entries) { struct kvm_cpuid_entry2 *cpuid_entries; int limit, nent = 0, r = -E2BIG; u32 func; if (cpuid->nent < 1) goto out; if (cpuid->nent > KVM_MAX_CPUID_ENTRIES) cpuid->nent = KVM_MAX_CPUID_ENTRIES; r = -ENOMEM; cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent); if (!cpuid_entries) goto out; do_cpuid_ent(&cpuid_entries[0], 0, 0, &nent, cpuid->nent); limit = cpuid_entries[0].eax; for (func = 1; func <= limit && nent < cpuid->nent; ++func) do_cpuid_ent(&cpuid_entries[nent], func, 0, &nent, cpuid->nent); r = -E2BIG; if (nent >= cpuid->nent) goto out_free; do_cpuid_ent(&cpuid_entries[nent], 0x80000000, 0, &nent, cpuid->nent); limit = cpuid_entries[nent - 1].eax; for (func = 0x80000001; func <= limit && nent < cpuid->nent; ++func) do_cpuid_ent(&cpuid_entries[nent], func, 0, &nent, cpuid->nent); r = -E2BIG; if (nent >= cpuid->nent) goto out_free; do_cpuid_ent(&cpuid_entries[nent], KVM_CPUID_SIGNATURE, 0, &nent, cpuid->nent); r = -E2BIG; if (nent >= cpuid->nent) goto out_free; do_cpuid_ent(&cpuid_entries[nent], KVM_CPUID_FEATURES, 0, &nent, cpuid->nent); r = -E2BIG; if (nent >= cpuid->nent) goto out_free; r = -EFAULT; if (copy_to_user(entries, cpuid_entries, nent * sizeof(struct kvm_cpuid_entry2))) goto out_free; cpuid->nent = nent; r = 0; out_free: vfree(cpuid_entries); out: return r; } static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s) { memcpy(s->regs, vcpu->arch.apic->regs, sizeof *s); return 0; } static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s) { memcpy(vcpu->arch.apic->regs, s->regs, sizeof *s); kvm_apic_post_state_restore(vcpu); update_cr8_intercept(vcpu); return 0; } static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct 
kvm_interrupt *irq) { if (irq->irq < 0 || irq->irq >= 256) return -EINVAL; if (irqchip_in_kernel(vcpu->kvm)) return -ENXIO; kvm_queue_interrupt(vcpu, irq->irq, false); kvm_make_request(KVM_REQ_EVENT, vcpu); return 0; } static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu) { kvm_inject_nmi(vcpu); return 0; } static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu, struct kvm_tpr_access_ctl *tac) { if (tac->flags) return -EINVAL; vcpu->arch.tpr_access_reporting = !!tac->enabled; return 0; } static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu, u64 mcg_cap) { int r; unsigned bank_num = mcg_cap & 0xff, bank; r = -EINVAL; if (!bank_num || bank_num >= KVM_MAX_MCE_BANKS) goto out; if (mcg_cap & ~(KVM_MCE_CAP_SUPPORTED | 0xff | 0xff0000)) goto out; r = 0; vcpu->arch.mcg_cap = mcg_cap; /* Init IA32_MCG_CTL to all 1s */ if (mcg_cap & MCG_CTL_P) vcpu->arch.mcg_ctl = ~(u64)0; /* Init IA32_MCi_CTL to all 1s */ for (bank = 0; bank < bank_num; bank++) vcpu->arch.mce_banks[bank*4] = ~(u64)0; out: return r; } static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu, struct kvm_x86_mce *mce) { u64 mcg_cap = vcpu->arch.mcg_cap; unsigned bank_num = mcg_cap & 0xff; u64 *banks = vcpu->arch.mce_banks; if (mce->bank >= bank_num || !(mce->status & MCI_STATUS_VAL)) return -EINVAL; /* * if IA32_MCG_CTL is not all 1s, the uncorrected error * reporting is disabled */ if ((mce->status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) && vcpu->arch.mcg_ctl != ~(u64)0) return 0; banks += 4 * mce->bank; /* * if IA32_MCi_CTL is not all 1s, the uncorrected error * reporting is disabled for the bank */ if ((mce->status & MCI_STATUS_UC) && banks[0] != ~(u64)0) return 0; if (mce->status & MCI_STATUS_UC) { if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) || !kvm_read_cr4_bits(vcpu, X86_CR4_MCE)) { printk(KERN_DEBUG "kvm: set_mce: " "injects mce exception while " "previous one is in progress!\n"); kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); return 0; } if (banks[1] & MCI_STATUS_VAL) mce->status |= MCI_STATUS_OVER; banks[2] = mce->addr; banks[3] = mce->misc; vcpu->arch.mcg_status = mce->mcg_status; banks[1] = mce->status; kvm_queue_exception(vcpu, MC_VECTOR); } else if (!(banks[1] & MCI_STATUS_VAL) || !(banks[1] & MCI_STATUS_UC)) { if (banks[1] & MCI_STATUS_VAL) mce->status |= MCI_STATUS_OVER; banks[2] = mce->addr; banks[3] = mce->misc; banks[1] = mce->status; } else banks[1] |= MCI_STATUS_OVER; return 0; } static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu, struct kvm_vcpu_events *events) { events->exception.injected = vcpu->arch.exception.pending && !kvm_exception_is_soft(vcpu->arch.exception.nr); events->exception.nr = vcpu->arch.exception.nr; events->exception.has_error_code = vcpu->arch.exception.has_error_code; events->exception.pad = 0; events->exception.error_code = vcpu->arch.exception.error_code; events->interrupt.injected = vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft; events->interrupt.nr = vcpu->arch.interrupt.nr; events->interrupt.soft = 0; events->interrupt.shadow = kvm_x86_ops->get_interrupt_shadow(vcpu, KVM_X86_SHADOW_INT_MOV_SS | KVM_X86_SHADOW_INT_STI); events->nmi.injected = vcpu->arch.nmi_injected; events->nmi.pending = vcpu->arch.nmi_pending; events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu); events->nmi.pad = 0; events->sipi_vector = vcpu->arch.sipi_vector; events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING | KVM_VCPUEVENT_VALID_SIPI_VECTOR | KVM_VCPUEVENT_VALID_SHADOW); memset(&events->reserved, 0, sizeof(events->reserved)); } static int 
kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu, struct kvm_vcpu_events *events) { if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING | KVM_VCPUEVENT_VALID_SIPI_VECTOR | KVM_VCPUEVENT_VALID_SHADOW)) return -EINVAL; vcpu->arch.exception.pending = events->exception.injected; vcpu->arch.exception.nr = events->exception.nr; vcpu->arch.exception.has_error_code = events->exception.has_error_code; vcpu->arch.exception.error_code = events->exception.error_code; vcpu->arch.interrupt.pending = events->interrupt.injected; vcpu->arch.interrupt.nr = events->interrupt.nr; vcpu->arch.interrupt.soft = events->interrupt.soft; if (vcpu->arch.interrupt.pending && irqchip_in_kernel(vcpu->kvm)) kvm_pic_clear_isr_ack(vcpu->kvm); if (events->flags & KVM_VCPUEVENT_VALID_SHADOW) kvm_x86_ops->set_interrupt_shadow(vcpu, events->interrupt.shadow); vcpu->arch.nmi_injected = events->nmi.injected; if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING) vcpu->arch.nmi_pending = events->nmi.pending; kvm_x86_ops->set_nmi_mask(vcpu, events->nmi.masked); if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR) vcpu->arch.sipi_vector = events->sipi_vector; kvm_make_request(KVM_REQ_EVENT, vcpu); return 0; } static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu, struct kvm_debugregs *dbgregs) { memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db)); dbgregs->dr6 = vcpu->arch.dr6; dbgregs->dr7 = vcpu->arch.dr7; dbgregs->flags = 0; memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved)); } static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu, struct kvm_debugregs *dbgregs) { if (dbgregs->flags) return -EINVAL; memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db)); vcpu->arch.dr6 = dbgregs->dr6; vcpu->arch.dr7 = dbgregs->dr7; return 0; } static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu, struct kvm_xsave *guest_xsave) { if (cpu_has_xsave) memcpy(guest_xsave->region, &vcpu->arch.guest_fpu.state->xsave, xstate_size); else { memcpy(guest_xsave->region, &vcpu->arch.guest_fpu.state->fxsave, sizeof(struct i387_fxsave_struct)); *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] = XSTATE_FPSSE; } } static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu, struct kvm_xsave *guest_xsave) { u64 xstate_bv = *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)]; if (cpu_has_xsave) memcpy(&vcpu->arch.guest_fpu.state->xsave, guest_xsave->region, xstate_size); else { if (xstate_bv & ~XSTATE_FPSSE) return -EINVAL; memcpy(&vcpu->arch.guest_fpu.state->fxsave, guest_xsave->region, sizeof(struct i387_fxsave_struct)); } return 0; } static void kvm_vcpu_ioctl_x86_get_xcrs(struct kvm_vcpu *vcpu, struct kvm_xcrs *guest_xcrs) { if (!cpu_has_xsave) { guest_xcrs->nr_xcrs = 0; return; } guest_xcrs->nr_xcrs = 1; guest_xcrs->flags = 0; guest_xcrs->xcrs[0].xcr = XCR_XFEATURE_ENABLED_MASK; guest_xcrs->xcrs[0].value = vcpu->arch.xcr0; } static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu, struct kvm_xcrs *guest_xcrs) { int i, r = 0; if (!cpu_has_xsave) return -EINVAL; if (guest_xcrs->nr_xcrs > KVM_MAX_XCRS || guest_xcrs->flags) return -EINVAL; for (i = 0; i < guest_xcrs->nr_xcrs; i++) /* Only support XCR0 currently */ if (guest_xcrs->xcrs[0].xcr == XCR_XFEATURE_ENABLED_MASK) { r = __kvm_set_xcr(vcpu, XCR_XFEATURE_ENABLED_MASK, guest_xcrs->xcrs[0].value); break; } if (r) r = -EINVAL; return r; } long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { struct kvm_vcpu *vcpu = filp->private_data; void __user *argp = (void __user *)arg; int r; union { struct 
kvm_lapic_state *lapic; struct kvm_xsave *xsave; struct kvm_xcrs *xcrs; void *buffer; } u; u.buffer = NULL; switch (ioctl) { case KVM_GET_LAPIC: { r = -EINVAL; if (!vcpu->arch.apic) goto out; u.lapic = kzalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL); r = -ENOMEM; if (!u.lapic) goto out; r = kvm_vcpu_ioctl_get_lapic(vcpu, u.lapic); if (r) goto out; r = -EFAULT; if (copy_to_user(argp, u.lapic, sizeof(struct kvm_lapic_state))) goto out; r = 0; break; } case KVM_SET_LAPIC: { r = -EINVAL; if (!vcpu->arch.apic) goto out; u.lapic = kmalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL); r = -ENOMEM; if (!u.lapic) goto out; r = -EFAULT; if (copy_from_user(u.lapic, argp, sizeof(struct kvm_lapic_state))) goto out; r = kvm_vcpu_ioctl_set_lapic(vcpu, u.lapic); if (r) goto out; r = 0; break; } case KVM_INTERRUPT: { struct kvm_interrupt irq; r = -EFAULT; if (copy_from_user(&irq, argp, sizeof irq)) goto out; r = kvm_vcpu_ioctl_interrupt(vcpu, &irq); if (r) goto out; r = 0; break; } case KVM_NMI: { r = kvm_vcpu_ioctl_nmi(vcpu); if (r) goto out; r = 0; break; } case KVM_SET_CPUID: { struct kvm_cpuid __user *cpuid_arg = argp; struct kvm_cpuid cpuid; r = -EFAULT; if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid)) goto out; r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries); if (r) goto out; break; } case KVM_SET_CPUID2: { struct kvm_cpuid2 __user *cpuid_arg = argp; struct kvm_cpuid2 cpuid; r = -EFAULT; if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid)) goto out; r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid, cpuid_arg->entries); if (r) goto out; break; } case KVM_GET_CPUID2: { struct kvm_cpuid2 __user *cpuid_arg = argp; struct kvm_cpuid2 cpuid; r = -EFAULT; if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid)) goto out; r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid, cpuid_arg->entries); if (r) goto out; r = -EFAULT; if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid)) goto out; r = 0; break; } case KVM_GET_MSRS: r = msr_io(vcpu, argp, kvm_get_msr, 1); break; case KVM_SET_MSRS: r = msr_io(vcpu, argp, do_set_msr, 0); break; case KVM_TPR_ACCESS_REPORTING: { struct kvm_tpr_access_ctl tac; r = -EFAULT; if (copy_from_user(&tac, argp, sizeof tac)) goto out; r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac); if (r) goto out; r = -EFAULT; if (copy_to_user(argp, &tac, sizeof tac)) goto out; r = 0; break; }; case KVM_SET_VAPIC_ADDR: { struct kvm_vapic_addr va; r = -EINVAL; if (!irqchip_in_kernel(vcpu->kvm)) goto out; r = -EFAULT; if (copy_from_user(&va, argp, sizeof va)) goto out; r = 0; kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); break; } case KVM_X86_SETUP_MCE: { u64 mcg_cap; r = -EFAULT; if (copy_from_user(&mcg_cap, argp, sizeof mcg_cap)) goto out; r = kvm_vcpu_ioctl_x86_setup_mce(vcpu, mcg_cap); break; } case KVM_X86_SET_MCE: { struct kvm_x86_mce mce; r = -EFAULT; if (copy_from_user(&mce, argp, sizeof mce)) goto out; r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce); break; } case KVM_GET_VCPU_EVENTS: { struct kvm_vcpu_events events; kvm_vcpu_ioctl_x86_get_vcpu_events(vcpu, &events); r = -EFAULT; if (copy_to_user(argp, &events, sizeof(struct kvm_vcpu_events))) break; r = 0; break; } case KVM_SET_VCPU_EVENTS: { struct kvm_vcpu_events events; r = -EFAULT; if (copy_from_user(&events, argp, sizeof(struct kvm_vcpu_events))) break; r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events); break; } case KVM_GET_DEBUGREGS: { struct kvm_debugregs dbgregs; kvm_vcpu_ioctl_x86_get_debugregs(vcpu, &dbgregs); r = -EFAULT; if (copy_to_user(argp, &dbgregs, sizeof(struct kvm_debugregs))) break; r = 0; break; } case 
KVM_SET_DEBUGREGS: { struct kvm_debugregs dbgregs; r = -EFAULT; if (copy_from_user(&dbgregs, argp, sizeof(struct kvm_debugregs))) break; r = kvm_vcpu_ioctl_x86_set_debugregs(vcpu, &dbgregs); break; } case KVM_GET_XSAVE: { u.xsave = kzalloc(sizeof(struct kvm_xsave), GFP_KERNEL); r = -ENOMEM; if (!u.xsave) break; kvm_vcpu_ioctl_x86_get_xsave(vcpu, u.xsave); r = -EFAULT; if (copy_to_user(argp, u.xsave, sizeof(struct kvm_xsave))) break; r = 0; break; } case KVM_SET_XSAVE: { u.xsave = kzalloc(sizeof(struct kvm_xsave), GFP_KERNEL); r = -ENOMEM; if (!u.xsave) break; r = -EFAULT; if (copy_from_user(u.xsave, argp, sizeof(struct kvm_xsave))) break; r = kvm_vcpu_ioctl_x86_set_xsave(vcpu, u.xsave); break; } case KVM_GET_XCRS: { u.xcrs = kzalloc(sizeof(struct kvm_xcrs), GFP_KERNEL); r = -ENOMEM; if (!u.xcrs) break; kvm_vcpu_ioctl_x86_get_xcrs(vcpu, u.xcrs); r = -EFAULT; if (copy_to_user(argp, u.xcrs, sizeof(struct kvm_xcrs))) break; r = 0; break; } case KVM_SET_XCRS: { u.xcrs = kzalloc(sizeof(struct kvm_xcrs), GFP_KERNEL); r = -ENOMEM; if (!u.xcrs) break; r = -EFAULT; if (copy_from_user(u.xcrs, argp, sizeof(struct kvm_xcrs))) break; r = kvm_vcpu_ioctl_x86_set_xcrs(vcpu, u.xcrs); break; } default: r = -EINVAL; } out: kfree(u.buffer); return r; } static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr) { int ret; if (addr > (unsigned int)(-3 * PAGE_SIZE)) return -1; ret = kvm_x86_ops->set_tss_addr(kvm, addr); return ret; } static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm, u64 ident_addr) { kvm->arch.ept_identity_map_addr = ident_addr; return 0; } static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm, u32 kvm_nr_mmu_pages) { if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES) return -EINVAL; mutex_lock(&kvm->slots_lock); spin_lock(&kvm->mmu_lock); kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages); kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages; spin_unlock(&kvm->mmu_lock); mutex_unlock(&kvm->slots_lock); return 0; } static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm) { return kvm->arch.n_max_mmu_pages; } static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip) { int r; r = 0; switch (chip->chip_id) { case KVM_IRQCHIP_PIC_MASTER: memcpy(&chip->chip.pic, &pic_irqchip(kvm)->pics[0], sizeof(struct kvm_pic_state)); break; case KVM_IRQCHIP_PIC_SLAVE: memcpy(&chip->chip.pic, &pic_irqchip(kvm)->pics[1], sizeof(struct kvm_pic_state)); break; case KVM_IRQCHIP_IOAPIC: r = kvm_get_ioapic(kvm, &chip->chip.ioapic); break; default: r = -EINVAL; break; } return r; } static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip) { int r; r = 0; switch (chip->chip_id) { case KVM_IRQCHIP_PIC_MASTER: spin_lock(&pic_irqchip(kvm)->lock); memcpy(&pic_irqchip(kvm)->pics[0], &chip->chip.pic, sizeof(struct kvm_pic_state)); spin_unlock(&pic_irqchip(kvm)->lock); break; case KVM_IRQCHIP_PIC_SLAVE: spin_lock(&pic_irqchip(kvm)->lock); memcpy(&pic_irqchip(kvm)->pics[1], &chip->chip.pic, sizeof(struct kvm_pic_state)); spin_unlock(&pic_irqchip(kvm)->lock); break; case KVM_IRQCHIP_IOAPIC: r = kvm_set_ioapic(kvm, &chip->chip.ioapic); break; default: r = -EINVAL; break; } kvm_pic_update_irq(pic_irqchip(kvm)); return r; } static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps) { int r = 0; mutex_lock(&kvm->arch.vpit->pit_state.lock); memcpy(ps, &kvm->arch.vpit->pit_state, sizeof(struct kvm_pit_state)); mutex_unlock(&kvm->arch.vpit->pit_state.lock); return r; } static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps) { int r = 0; 
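/* Restore the caller-supplied PIT state under the PIT lock, then reload channel 0's counter so the in-kernel PIT timer matches the restored state. */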
mutex_lock(&kvm->arch.vpit->pit_state.lock); memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state)); kvm_pit_load_count(kvm, 0, ps->channels[0].count, 0); mutex_unlock(&kvm->arch.vpit->pit_state.lock); return r; } static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps) { int r = 0; mutex_lock(&kvm->arch.vpit->pit_state.lock); memcpy(ps->channels, &kvm->arch.vpit->pit_state.channels, sizeof(ps->channels)); ps->flags = kvm->arch.vpit->pit_state.flags; mutex_unlock(&kvm->arch.vpit->pit_state.lock); memset(&ps->reserved, 0, sizeof(ps->reserved)); return r; } static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps) { int r = 0, start = 0; u32 prev_legacy, cur_legacy; mutex_lock(&kvm->arch.vpit->pit_state.lock); prev_legacy = kvm->arch.vpit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY; cur_legacy = ps->flags & KVM_PIT_FLAGS_HPET_LEGACY; if (!prev_legacy && cur_legacy) start = 1; memcpy(&kvm->arch.vpit->pit_state.channels, &ps->channels, sizeof(kvm->arch.vpit->pit_state.channels)); kvm->arch.vpit->pit_state.flags = ps->flags; kvm_pit_load_count(kvm, 0, kvm->arch.vpit->pit_state.channels[0].count, start); mutex_unlock(&kvm->arch.vpit->pit_state.lock); return r; } static int kvm_vm_ioctl_reinject(struct kvm *kvm, struct kvm_reinject_control *control) { if (!kvm->arch.vpit) return -ENXIO; mutex_lock(&kvm->arch.vpit->pit_state.lock); kvm->arch.vpit->pit_state.pit_timer.reinject = control->pit_reinject; mutex_unlock(&kvm->arch.vpit->pit_state.lock); return 0; } /* * Get (and clear) the dirty memory log for a memory slot. */ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) { int r, i; struct kvm_memory_slot *memslot; unsigned long n; unsigned long is_dirty = 0; mutex_lock(&kvm->slots_lock); r = -EINVAL; if (log->slot >= KVM_MEMORY_SLOTS) goto out; memslot = &kvm->memslots->memslots[log->slot]; r = -ENOENT; if (!memslot->dirty_bitmap) goto out; n = kvm_dirty_bitmap_bytes(memslot); for (i = 0; !is_dirty && i < n/sizeof(long); i++) is_dirty = memslot->dirty_bitmap[i]; /* If nothing is dirty, don't bother messing with page tables. */ if (is_dirty) { struct kvm_memslots *slots, *old_slots; unsigned long *dirty_bitmap; dirty_bitmap = memslot->dirty_bitmap_head; if (memslot->dirty_bitmap == dirty_bitmap) dirty_bitmap += n / sizeof(long); memset(dirty_bitmap, 0, n); r = -ENOMEM; slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL); if (!slots) goto out; memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots)); slots->memslots[log->slot].dirty_bitmap = dirty_bitmap; slots->generation++; old_slots = kvm->memslots; rcu_assign_pointer(kvm->memslots, slots); synchronize_srcu_expedited(&kvm->srcu); dirty_bitmap = old_slots->memslots[log->slot].dirty_bitmap; kfree(old_slots); spin_lock(&kvm->mmu_lock); kvm_mmu_slot_remove_write_access(kvm, log->slot); spin_unlock(&kvm->mmu_lock); r = -EFAULT; if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n)) goto out; } else { r = -EFAULT; if (clear_user(log->dirty_bitmap, n)) goto out; } r = 0; out: mutex_unlock(&kvm->slots_lock); return r; } long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { struct kvm *kvm = filp->private_data; void __user *argp = (void __user *)arg; int r = -ENOTTY; /* * This union makes it completely explicit to gcc-3.x * that these two variables' stack usage should be * combined, not added together. 
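* Only one member is in use for any given ioctl, so the frame needs no more room than the largest member.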
*/ union { struct kvm_pit_state ps; struct kvm_pit_state2 ps2; struct kvm_pit_config pit_config; } u; switch (ioctl) { case KVM_SET_TSS_ADDR: r = kvm_vm_ioctl_set_tss_addr(kvm, arg); if (r < 0) goto out; break; case KVM_SET_IDENTITY_MAP_ADDR: { u64 ident_addr; r = -EFAULT; if (copy_from_user(&ident_addr, argp, sizeof ident_addr)) goto out; r = kvm_vm_ioctl_set_identity_map_addr(kvm, ident_addr); if (r < 0) goto out; break; } case KVM_SET_NR_MMU_PAGES: r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg); if (r) goto out; break; case KVM_GET_NR_MMU_PAGES: r = kvm_vm_ioctl_get_nr_mmu_pages(kvm); break; case KVM_CREATE_IRQCHIP: { struct kvm_pic *vpic; mutex_lock(&kvm->lock); r = -EEXIST; if (kvm->arch.vpic) goto create_irqchip_unlock; r = -ENOMEM; vpic = kvm_create_pic(kvm); if (vpic) { r = kvm_ioapic_init(kvm); if (r) { kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &vpic->dev); kfree(vpic); goto create_irqchip_unlock; } } else goto create_irqchip_unlock; smp_wmb(); kvm->arch.vpic = vpic; smp_wmb(); r = kvm_setup_default_irq_routing(kvm); if (r) { mutex_lock(&kvm->irq_lock); kvm_ioapic_destroy(kvm); kvm_destroy_pic(kvm); mutex_unlock(&kvm->irq_lock); } create_irqchip_unlock: mutex_unlock(&kvm->lock); break; } case KVM_CREATE_PIT: u.pit_config.flags = KVM_PIT_SPEAKER_DUMMY; goto create_pit; case KVM_CREATE_PIT2: r = -EFAULT; if (copy_from_user(&u.pit_config, argp, sizeof(struct kvm_pit_config))) goto out; create_pit: mutex_lock(&kvm->slots_lock); r = -EEXIST; if (kvm->arch.vpit) goto create_pit_unlock; r = -ENOMEM; kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags); if (kvm->arch.vpit) r = 0; create_pit_unlock: mutex_unlock(&kvm->slots_lock); break; case KVM_IRQ_LINE_STATUS: case KVM_IRQ_LINE: { struct kvm_irq_level irq_event; r = -EFAULT; if (copy_from_user(&irq_event, argp, sizeof irq_event)) goto out; r = -ENXIO; if (irqchip_in_kernel(kvm)) { __s32 status; status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irq_event.irq, irq_event.level); if (ioctl == KVM_IRQ_LINE_STATUS) { r = -EFAULT; irq_event.status = status; if (copy_to_user(argp, &irq_event, sizeof irq_event)) goto out; } r = 0; } break; } case KVM_GET_IRQCHIP: { /* 0: PIC master, 1: PIC slave, 2: IOAPIC */ struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL); r = -ENOMEM; if (!chip) goto out; r = -EFAULT; if (copy_from_user(chip, argp, sizeof *chip)) goto get_irqchip_out; r = -ENXIO; if (!irqchip_in_kernel(kvm)) goto get_irqchip_out; r = kvm_vm_ioctl_get_irqchip(kvm, chip); if (r) goto get_irqchip_out; r = -EFAULT; if (copy_to_user(argp, chip, sizeof *chip)) goto get_irqchip_out; r = 0; get_irqchip_out: kfree(chip); if (r) goto out; break; } case KVM_SET_IRQCHIP: { /* 0: PIC master, 1: PIC slave, 2: IOAPIC */ struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL); r = -ENOMEM; if (!chip) goto out; r = -EFAULT; if (copy_from_user(chip, argp, sizeof *chip)) goto set_irqchip_out; r = -ENXIO; if (!irqchip_in_kernel(kvm)) goto set_irqchip_out; r = kvm_vm_ioctl_set_irqchip(kvm, chip); if (r) goto set_irqchip_out; r = 0; set_irqchip_out: kfree(chip); if (r) goto out; break; } case KVM_GET_PIT: { r = -EFAULT; if (copy_from_user(&u.ps, argp, sizeof(struct kvm_pit_state))) goto out; r = -ENXIO; if (!kvm->arch.vpit) goto out; r = kvm_vm_ioctl_get_pit(kvm, &u.ps); if (r) goto out; r = -EFAULT; if (copy_to_user(argp, &u.ps, sizeof(struct kvm_pit_state))) goto out; r = 0; break; } case KVM_SET_PIT: { r = -EFAULT; if (copy_from_user(&u.ps, argp, sizeof u.ps)) goto out; r = -ENXIO; if (!kvm->arch.vpit) goto out; r = 
kvm_vm_ioctl_set_pit(kvm, &u.ps); if (r) goto out; r = 0; break; } case KVM_GET_PIT2: { r = -ENXIO; if (!kvm->arch.vpit) goto out; r = kvm_vm_ioctl_get_pit2(kvm, &u.ps2); if (r) goto out; r = -EFAULT; if (copy_to_user(argp, &u.ps2, sizeof(u.ps2))) goto out; r = 0; break; } case KVM_SET_PIT2: { r = -EFAULT; if (copy_from_user(&u.ps2, argp, sizeof(u.ps2))) goto out; r = -ENXIO; if (!kvm->arch.vpit) goto out; r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2); if (r) goto out; r = 0; break; } case KVM_REINJECT_CONTROL: { struct kvm_reinject_control control; r = -EFAULT; if (copy_from_user(&control, argp, sizeof(control))) goto out; r = kvm_vm_ioctl_reinject(kvm, &control); if (r) goto out; r = 0; break; } case KVM_XEN_HVM_CONFIG: { r = -EFAULT; if (copy_from_user(&kvm->arch.xen_hvm_config, argp, sizeof(struct kvm_xen_hvm_config))) goto out; r = -EINVAL; if (kvm->arch.xen_hvm_config.flags) goto out; r = 0; break; } case KVM_SET_CLOCK: { struct kvm_clock_data user_ns; u64 now_ns; s64 delta; r = -EFAULT; if (copy_from_user(&user_ns, argp, sizeof(user_ns))) goto out; r = -EINVAL; if (user_ns.flags) goto out; r = 0; local_irq_disable(); now_ns = get_kernel_ns(); delta = user_ns.clock - now_ns; local_irq_enable(); kvm->arch.kvmclock_offset = delta; break; } case KVM_GET_CLOCK: { struct kvm_clock_data user_ns; u64 now_ns; local_irq_disable(); now_ns = get_kernel_ns(); user_ns.clock = kvm->arch.kvmclock_offset + now_ns; local_irq_enable(); user_ns.flags = 0; memset(&user_ns.pad, 0, sizeof(user_ns.pad)); r = -EFAULT; if (copy_to_user(argp, &user_ns, sizeof(user_ns))) goto out; r = 0; break; } default: ; } out: return r; } static void kvm_init_msr_list(void) { u32 dummy[2]; unsigned i, j; /* skip the first msrs in the list. KVM-specific */ for (i = j = KVM_SAVE_MSRS_BEGIN; i < ARRAY_SIZE(msrs_to_save); i++) { if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0) continue; if (j < i) msrs_to_save[j] = msrs_to_save[i]; j++; } num_msrs_to_save = j; } static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len, const void *v) { if (vcpu->arch.apic && !kvm_iodevice_write(&vcpu->arch.apic->dev, addr, len, v)) return 0; return kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, addr, len, v); } static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v) { if (vcpu->arch.apic && !kvm_iodevice_read(&vcpu->arch.apic->dev, addr, len, v)) return 0; return kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, addr, len, v); } static void kvm_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg) { kvm_x86_ops->set_segment(vcpu, var, seg); } void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg) { kvm_x86_ops->get_segment(vcpu, var, seg); } static gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access) { return gpa; } static gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access) { gpa_t t_gpa; struct x86_exception exception; BUG_ON(!mmu_is_nested(vcpu)); /* NPT walks are always user-walks */ access |= PFERR_USER_MASK; t_gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, &exception); return t_gpa; } gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, struct x86_exception *exception) { u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); } gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, struct x86_exception *exception) { u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? 
PFERR_USER_MASK : 0; access |= PFERR_FETCH_MASK; return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); } gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, struct x86_exception *exception) { u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; access |= PFERR_WRITE_MASK; return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); } /* uses this to access any guest's mapped memory without checking CPL */ gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, struct x86_exception *exception) { return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, exception); } static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes, struct kvm_vcpu *vcpu, u32 access, struct x86_exception *exception) { void *data = val; int r = X86EMUL_CONTINUE; while (bytes) { gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access, exception); unsigned offset = addr & (PAGE_SIZE-1); unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset); int ret; if (gpa == UNMAPPED_GVA) return X86EMUL_PROPAGATE_FAULT; ret = kvm_read_guest(vcpu->kvm, gpa, data, toread); if (ret < 0) { r = X86EMUL_IO_NEEDED; goto out; } bytes -= toread; data += toread; addr += toread; } out: return r; } /* used for instruction fetching */ static int kvm_fetch_guest_virt(gva_t addr, void *val, unsigned int bytes, struct kvm_vcpu *vcpu, struct x86_exception *exception) { u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access | PFERR_FETCH_MASK, exception); } static int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes, struct kvm_vcpu *vcpu, struct x86_exception *exception) { u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, exception); } static int kvm_read_guest_virt_system(gva_t addr, void *val, unsigned int bytes, struct kvm_vcpu *vcpu, struct x86_exception *exception) { return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, exception); } static int kvm_write_guest_virt_system(gva_t addr, void *val, unsigned int bytes, struct kvm_vcpu *vcpu, struct x86_exception *exception) { void *data = val; int r = X86EMUL_CONTINUE; while (bytes) { gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, PFERR_WRITE_MASK, exception); unsigned offset = addr & (PAGE_SIZE-1); unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset); int ret; if (gpa == UNMAPPED_GVA) return X86EMUL_PROPAGATE_FAULT; ret = kvm_write_guest(vcpu->kvm, gpa, data, towrite); if (ret < 0) { r = X86EMUL_IO_NEEDED; goto out; } bytes -= towrite; data += towrite; addr += towrite; } out: return r; } static int emulator_read_emulated(unsigned long addr, void *val, unsigned int bytes, struct x86_exception *exception, struct kvm_vcpu *vcpu) { gpa_t gpa; if (vcpu->mmio_read_completed) { memcpy(val, vcpu->mmio_data, bytes); trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes, vcpu->mmio_phys_addr, *(u64 *)val); vcpu->mmio_read_completed = 0; return X86EMUL_CONTINUE; } gpa = kvm_mmu_gva_to_gpa_read(vcpu, addr, exception); if (gpa == UNMAPPED_GVA) return X86EMUL_PROPAGATE_FAULT; /* For APIC access vmexit */ if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE) goto mmio; if (kvm_read_guest_virt(addr, val, bytes, vcpu, exception) == X86EMUL_CONTINUE) return X86EMUL_CONTINUE; mmio: /* * Is this MMIO handled locally? 
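* The in-kernel APIC and any device registered on the MMIO bus get the first chance to satisfy the read; only if none of them claims the address do we fall through and exit to userspace with KVM_EXIT_MMIO.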
*/ if (!vcpu_mmio_read(vcpu, gpa, bytes, val)) { trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes, gpa, *(u64 *)val); return X86EMUL_CONTINUE; } trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, 0); vcpu->mmio_needed = 1; vcpu->run->exit_reason = KVM_EXIT_MMIO; vcpu->run->mmio.phys_addr = vcpu->mmio_phys_addr = gpa; vcpu->run->mmio.len = vcpu->mmio_size = bytes; vcpu->run->mmio.is_write = vcpu->mmio_is_write = 0; return X86EMUL_IO_NEEDED; } int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa, const void *val, int bytes) { int ret; ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes); if (ret < 0) return 0; kvm_mmu_pte_write(vcpu, gpa, val, bytes, 1); return 1; } static int emulator_write_emulated_onepage(unsigned long addr, const void *val, unsigned int bytes, struct x86_exception *exception, struct kvm_vcpu *vcpu) { gpa_t gpa; gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, exception); if (gpa == UNMAPPED_GVA) return X86EMUL_PROPAGATE_FAULT; /* For APIC access vmexit */ if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE) goto mmio; if (emulator_write_phys(vcpu, gpa, val, bytes)) return X86EMUL_CONTINUE; mmio: trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, *(u64 *)val); /* * Is this MMIO handled locally? */ if (!vcpu_mmio_write(vcpu, gpa, bytes, val)) return X86EMUL_CONTINUE; vcpu->mmio_needed = 1; vcpu->run->exit_reason = KVM_EXIT_MMIO; vcpu->run->mmio.phys_addr = vcpu->mmio_phys_addr = gpa; vcpu->run->mmio.len = vcpu->mmio_size = bytes; vcpu->run->mmio.is_write = vcpu->mmio_is_write = 1; memcpy(vcpu->run->mmio.data, val, bytes); return X86EMUL_CONTINUE; } int emulator_write_emulated(unsigned long addr, const void *val, unsigned int bytes, struct x86_exception *exception, struct kvm_vcpu *vcpu) { /* Crossing a page boundary? */ if (((addr + bytes - 1) ^ addr) & PAGE_MASK) { int rc, now; now = -addr & ~PAGE_MASK; rc = emulator_write_emulated_onepage(addr, val, now, exception, vcpu); if (rc != X86EMUL_CONTINUE) return rc; addr += now; val += now; bytes -= now; } return emulator_write_emulated_onepage(addr, val, bytes, exception, vcpu); } #define CMPXCHG_TYPE(t, ptr, old, new) \ (cmpxchg((t *)(ptr), *(t *)(old), *(t *)(new)) == *(t *)(old)) #ifdef CONFIG_X86_64 # define CMPXCHG64(ptr, old, new) CMPXCHG_TYPE(u64, ptr, old, new) #else # define CMPXCHG64(ptr, old, new) \ (cmpxchg64((u64 *)(ptr), *(u64 *)(old), *(u64 *)(new)) == *(u64 *)(old)) #endif static int emulator_cmpxchg_emulated(unsigned long addr, const void *old, const void *new, unsigned int bytes, struct x86_exception *exception, struct kvm_vcpu *vcpu) { gpa_t gpa; struct page *page; char *kaddr; bool exchanged; /* guests cmpxchg8b have to be emulated atomically */ if (bytes > 8 || (bytes & (bytes - 1))) goto emul_write; gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL); if (gpa == UNMAPPED_GVA || (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE) goto emul_write; if (((gpa + bytes - 1) & PAGE_MASK) != (gpa & PAGE_MASK)) goto emul_write; page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT); if (is_error_page(page)) { kvm_release_page_clean(page); goto emul_write; } kaddr = kmap_atomic(page, KM_USER0); kaddr += offset_in_page(gpa); switch (bytes) { case 1: exchanged = CMPXCHG_TYPE(u8, kaddr, old, new); break; case 2: exchanged = CMPXCHG_TYPE(u16, kaddr, old, new); break; case 4: exchanged = CMPXCHG_TYPE(u32, kaddr, old, new); break; case 8: exchanged = CMPXCHG64(kaddr, old, new); break; default: BUG(); } kunmap_atomic(kaddr, KM_USER0); kvm_release_page_dirty(page); if (!exchanged) return X86EMUL_CMPXCHG_FAILED; kvm_mmu_pte_write(vcpu, gpa, new, 
bytes, 1); return X86EMUL_CONTINUE; emul_write: printk_once(KERN_WARNING "kvm: emulating exchange as write\n"); return emulator_write_emulated(addr, new, bytes, exception, vcpu); } static int kernel_pio(struct kvm_vcpu *vcpu, void *pd) { /* TODO: String I/O for in kernel device */ int r; if (vcpu->arch.pio.in) r = kvm_io_bus_read(vcpu->kvm, KVM_PIO_BUS, vcpu->arch.pio.port, vcpu->arch.pio.size, pd); else r = kvm_io_bus_write(vcpu->kvm, KVM_PIO_BUS, vcpu->arch.pio.port, vcpu->arch.pio.size, pd); return r; } static int emulator_pio_in_emulated(int size, unsigned short port, void *val, unsigned int count, struct kvm_vcpu *vcpu) { if (vcpu->arch.pio.count) goto data_avail; trace_kvm_pio(0, port, size, 1); vcpu->arch.pio.port = port; vcpu->arch.pio.in = 1; vcpu->arch.pio.count = count; vcpu->arch.pio.size = size; if (!kernel_pio(vcpu, vcpu->arch.pio_data)) { data_avail: memcpy(val, vcpu->arch.pio_data, size * count); vcpu->arch.pio.count = 0; return 1; } vcpu->run->exit_reason = KVM_EXIT_IO; vcpu->run->io.direction = KVM_EXIT_IO_IN; vcpu->run->io.size = size; vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE; vcpu->run->io.count = count; vcpu->run->io.port = port; return 0; } static int emulator_pio_out_emulated(int size, unsigned short port, const void *val, unsigned int count, struct kvm_vcpu *vcpu) { trace_kvm_pio(1, port, size, 1); vcpu->arch.pio.port = port; vcpu->arch.pio.in = 0; vcpu->arch.pio.count = count; vcpu->arch.pio.size = size; memcpy(vcpu->arch.pio_data, val, size * count); if (!kernel_pio(vcpu, vcpu->arch.pio_data)) { vcpu->arch.pio.count = 0; return 1; } vcpu->run->exit_reason = KVM_EXIT_IO; vcpu->run->io.direction = KVM_EXIT_IO_OUT; vcpu->run->io.size = size; vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE; vcpu->run->io.count = count; vcpu->run->io.port = port; return 0; } static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg) { return kvm_x86_ops->get_segment_base(vcpu, seg); } int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address) { kvm_mmu_invlpg(vcpu, address); return X86EMUL_CONTINUE; } int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu) { if (!need_emulate_wbinvd(vcpu)) return X86EMUL_CONTINUE; if (kvm_x86_ops->has_wbinvd_exit()) { int cpu = get_cpu(); cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask); smp_call_function_many(vcpu->arch.wbinvd_dirty_mask, wbinvd_ipi, NULL, 1); put_cpu(); cpumask_clear(vcpu->arch.wbinvd_dirty_mask); } else wbinvd(); return X86EMUL_CONTINUE; } EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd); int emulate_clts(struct kvm_vcpu *vcpu) { kvm_x86_ops->set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS)); kvm_x86_ops->fpu_activate(vcpu); return X86EMUL_CONTINUE; } int emulator_get_dr(int dr, unsigned long *dest, struct kvm_vcpu *vcpu) { return _kvm_get_dr(vcpu, dr, dest); } int emulator_set_dr(int dr, unsigned long value, struct kvm_vcpu *vcpu) { return __kvm_set_dr(vcpu, dr, value); } static u64 mk_cr_64(u64 curr_cr, u32 new_val) { return (curr_cr & ~((1ULL << 32) - 1)) | new_val; } static unsigned long emulator_get_cr(int cr, struct kvm_vcpu *vcpu) { unsigned long value; switch (cr) { case 0: value = kvm_read_cr0(vcpu); break; case 2: value = vcpu->arch.cr2; break; case 3: value = vcpu->arch.cr3; break; case 4: value = kvm_read_cr4(vcpu); break; case 8: value = kvm_get_cr8(vcpu); break; default: vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr); return 0; } return value; } static int emulator_set_cr(int cr, unsigned long val, struct kvm_vcpu *vcpu) { int res = 0; switch (cr) { case 0: res = kvm_set_cr0(vcpu, 
mk_cr_64(kvm_read_cr0(vcpu), val)); break; case 2: vcpu->arch.cr2 = val; break; case 3: res = kvm_set_cr3(vcpu, val); break; case 4: res = kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val)); break; case 8: res = __kvm_set_cr8(vcpu, val & 0xfUL); break; default: vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr); res = -1; } return res; } static int emulator_get_cpl(struct kvm_vcpu *vcpu) { return kvm_x86_ops->get_cpl(vcpu); } static void emulator_get_gdt(struct desc_ptr *dt, struct kvm_vcpu *vcpu) { kvm_x86_ops->get_gdt(vcpu, dt); } static void emulator_get_idt(struct desc_ptr *dt, struct kvm_vcpu *vcpu) { kvm_x86_ops->get_idt(vcpu, dt); } static unsigned long emulator_get_cached_segment_base(int seg, struct kvm_vcpu *vcpu) { return get_segment_base(vcpu, seg); } static bool emulator_get_cached_descriptor(struct desc_struct *desc, int seg, struct kvm_vcpu *vcpu) { struct kvm_segment var; kvm_get_segment(vcpu, &var, seg); if (var.unusable) return false; if (var.g) var.limit >>= 12; set_desc_limit(desc, var.limit); set_desc_base(desc, (unsigned long)var.base); desc->type = var.type; desc->s = var.s; desc->dpl = var.dpl; desc->p = var.present; desc->avl = var.avl; desc->l = var.l; desc->d = var.db; desc->g = var.g; return true; } static void emulator_set_cached_descriptor(struct desc_struct *desc, int seg, struct kvm_vcpu *vcpu) { struct kvm_segment var; /* needed to preserve selector */ kvm_get_segment(vcpu, &var, seg); var.base = get_desc_base(desc); var.limit = get_desc_limit(desc); if (desc->g) var.limit = (var.limit << 12) | 0xfff; var.type = desc->type; var.present = desc->p; var.dpl = desc->dpl; var.db = desc->d; var.s = desc->s; var.l = desc->l; var.g = desc->g; var.avl = desc->avl; var.present = desc->p; var.unusable = !var.present; var.padding = 0; kvm_set_segment(vcpu, &var, seg); return; } static u16 emulator_get_segment_selector(int seg, struct kvm_vcpu *vcpu) { struct kvm_segment kvm_seg; kvm_get_segment(vcpu, &kvm_seg, seg); return kvm_seg.selector; } static void emulator_set_segment_selector(u16 sel, int seg, struct kvm_vcpu *vcpu) { struct kvm_segment kvm_seg; kvm_get_segment(vcpu, &kvm_seg, seg); kvm_seg.selector = sel; kvm_set_segment(vcpu, &kvm_seg, seg); } static struct x86_emulate_ops emulate_ops = { .read_std = kvm_read_guest_virt_system, .write_std = kvm_write_guest_virt_system, .fetch = kvm_fetch_guest_virt, .read_emulated = emulator_read_emulated, .write_emulated = emulator_write_emulated, .cmpxchg_emulated = emulator_cmpxchg_emulated, .pio_in_emulated = emulator_pio_in_emulated, .pio_out_emulated = emulator_pio_out_emulated, .get_cached_descriptor = emulator_get_cached_descriptor, .set_cached_descriptor = emulator_set_cached_descriptor, .get_segment_selector = emulator_get_segment_selector, .set_segment_selector = emulator_set_segment_selector, .get_cached_segment_base = emulator_get_cached_segment_base, .get_gdt = emulator_get_gdt, .get_idt = emulator_get_idt, .get_cr = emulator_get_cr, .set_cr = emulator_set_cr, .cpl = emulator_get_cpl, .get_dr = emulator_get_dr, .set_dr = emulator_set_dr, .set_msr = kvm_set_msr, .get_msr = kvm_get_msr, }; static void cache_all_regs(struct kvm_vcpu *vcpu) { kvm_register_read(vcpu, VCPU_REGS_RAX); kvm_register_read(vcpu, VCPU_REGS_RSP); kvm_register_read(vcpu, VCPU_REGS_RIP); vcpu->arch.regs_dirty = ~0; } static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask) { u32 int_shadow = kvm_x86_ops->get_interrupt_shadow(vcpu, mask); /* * an sti; sti; sequence only disable interrupts for the first * instruction. 
So, if the last instruction, be it emulated or * not, left the system with the INT_STI flag enabled, it * means that the last instruction is an sti. We should not * leave the flag on in this case. The same goes for mov ss */ if (!(int_shadow & mask)) kvm_x86_ops->set_interrupt_shadow(vcpu, mask); } static void inject_emulated_exception(struct kvm_vcpu *vcpu) { struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; if (ctxt->exception.vector == PF_VECTOR) kvm_propagate_fault(vcpu, &ctxt->exception); else if (ctxt->exception.error_code_valid) kvm_queue_exception_e(vcpu, ctxt->exception.vector, ctxt->exception.error_code); else kvm_queue_exception(vcpu, ctxt->exception.vector); } static void init_emulate_ctxt(struct kvm_vcpu *vcpu) { struct decode_cache *c = &vcpu->arch.emulate_ctxt.decode; int cs_db, cs_l; cache_all_regs(vcpu); kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l); vcpu->arch.emulate_ctxt.vcpu = vcpu; vcpu->arch.emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu); vcpu->arch.emulate_ctxt.eip = kvm_rip_read(vcpu); vcpu->arch.emulate_ctxt.mode = (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL : (vcpu->arch.emulate_ctxt.eflags & X86_EFLAGS_VM) ? X86EMUL_MODE_VM86 : cs_l ? X86EMUL_MODE_PROT64 : cs_db ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16; memset(c, 0, sizeof(struct decode_cache)); memcpy(c->regs, vcpu->arch.regs, sizeof c->regs); } int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq) { struct decode_cache *c = &vcpu->arch.emulate_ctxt.decode; int ret; init_emulate_ctxt(vcpu); vcpu->arch.emulate_ctxt.decode.op_bytes = 2; vcpu->arch.emulate_ctxt.decode.ad_bytes = 2; vcpu->arch.emulate_ctxt.decode.eip = vcpu->arch.emulate_ctxt.eip; ret = emulate_int_real(&vcpu->arch.emulate_ctxt, &emulate_ops, irq); if (ret != X86EMUL_CONTINUE) return EMULATE_FAIL; vcpu->arch.emulate_ctxt.eip = c->eip; memcpy(vcpu->arch.regs, c->regs, sizeof c->regs); kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.eip); kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags); if (irq == NMI_VECTOR) vcpu->arch.nmi_pending = false; else vcpu->arch.interrupt.pending = false; return EMULATE_DONE; } EXPORT_SYMBOL_GPL(kvm_inject_realmode_interrupt); static int handle_emulation_failure(struct kvm_vcpu *vcpu) { int r = EMULATE_DONE; ++vcpu->stat.insn_emulation_fail; trace_kvm_emulate_insn_failed(vcpu); if (!is_guest_mode(vcpu)) { vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; vcpu->run->internal.ndata = 0; r = EMULATE_FAIL; } kvm_queue_exception(vcpu, UD_VECTOR); return r; } static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t gva) { gpa_t gpa; if (tdp_enabled) return false; /* * if emulation was due to access to shadowed page table * and it failed try to unshadow page and re-entetr the * guest to let CPU execute the instruction. */ if (kvm_mmu_unprotect_page_virt(vcpu, gva)) return true; gpa = kvm_mmu_gva_to_gpa_system(vcpu, gva, NULL); if (gpa == UNMAPPED_GVA) return true; /* let cpu generate fault */ if (!kvm_is_error_hva(gfn_to_hva(vcpu->kvm, gpa >> PAGE_SHIFT))) return true; return false; } int emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2, u16 error_code, int emulation_type) { int r; struct decode_cache *c = &vcpu->arch.emulate_ctxt.decode; kvm_clear_exception_queue(vcpu); vcpu->arch.mmio_fault_cr2 = cr2; /* * TODO: fix emulate.c to use guest_read/write_register * instead of direct ->regs accesses, can save hundred cycles * on Intel for instructions that don't read/change RSP, for * for example. 
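* Until then, cache_all_regs() below pulls RAX, RSP and RIP into vcpu->arch.regs and marks every register dirty, so the emulator's direct ->regs accesses read current values and get written back afterwards.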
*/ cache_all_regs(vcpu); if (!(emulation_type & EMULTYPE_NO_DECODE)) { init_emulate_ctxt(vcpu); vcpu->arch.emulate_ctxt.interruptibility = 0; vcpu->arch.emulate_ctxt.have_exception = false; vcpu->arch.emulate_ctxt.perm_ok = false; r = x86_decode_insn(&vcpu->arch.emulate_ctxt); if (r == X86EMUL_PROPAGATE_FAULT) goto done; trace_kvm_emulate_insn_start(vcpu); /* Only allow emulation of specific instructions on #UD * (namely VMMCALL, sysenter, sysexit, syscall)*/ if (emulation_type & EMULTYPE_TRAP_UD) { if (!c->twobyte) return EMULATE_FAIL; switch (c->b) { case 0x01: /* VMMCALL */ if (c->modrm_mod != 3 || c->modrm_rm != 1) return EMULATE_FAIL; break; case 0x34: /* sysenter */ case 0x35: /* sysexit */ if (c->modrm_mod != 0 || c->modrm_rm != 0) return EMULATE_FAIL; break; case 0x05: /* syscall */ if (c->modrm_mod != 0 || c->modrm_rm != 0) return EMULATE_FAIL; break; default: return EMULATE_FAIL; } if (!(c->modrm_reg == 0 || c->modrm_reg == 3)) return EMULATE_FAIL; } ++vcpu->stat.insn_emulation; if (r) { if (reexecute_instruction(vcpu, cr2)) return EMULATE_DONE; if (emulation_type & EMULTYPE_SKIP) return EMULATE_FAIL; return handle_emulation_failure(vcpu); } } if (emulation_type & EMULTYPE_SKIP) { kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.decode.eip); return EMULATE_DONE; } /* this is needed for vmware backdor interface to work since it changes registers values during IO operation */ memcpy(c->regs, vcpu->arch.regs, sizeof c->regs); restart: r = x86_emulate_insn(&vcpu->arch.emulate_ctxt); if (r == EMULATION_FAILED) { if (reexecute_instruction(vcpu, cr2)) return EMULATE_DONE; return handle_emulation_failure(vcpu); } done: if (vcpu->arch.emulate_ctxt.have_exception) { inject_emulated_exception(vcpu); r = EMULATE_DONE; } else if (vcpu->arch.pio.count) { if (!vcpu->arch.pio.in) vcpu->arch.pio.count = 0; r = EMULATE_DO_MMIO; } else if (vcpu->mmio_needed) { if (vcpu->mmio_is_write) vcpu->mmio_needed = 0; r = EMULATE_DO_MMIO; } else if (r == EMULATION_RESTART) goto restart; else r = EMULATE_DONE; toggle_interruptibility(vcpu, vcpu->arch.emulate_ctxt.interruptibility); kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags); kvm_make_request(KVM_REQ_EVENT, vcpu); memcpy(vcpu->arch.regs, c->regs, sizeof c->regs); kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.eip); return r; } EXPORT_SYMBOL_GPL(emulate_instruction); int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port) { unsigned long val = kvm_register_read(vcpu, VCPU_REGS_RAX); int ret = emulator_pio_out_emulated(size, port, &val, 1, vcpu); /* do not return to emulator after return from userspace */ vcpu->arch.pio.count = 0; return ret; } EXPORT_SYMBOL_GPL(kvm_fast_pio_out); static void tsc_bad(void *info) { __get_cpu_var(cpu_tsc_khz) = 0; } static void tsc_khz_changed(void *data) { struct cpufreq_freqs *freq = data; unsigned long khz = 0; if (data) khz = freq->new; else if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) khz = cpufreq_quick_get(raw_smp_processor_id()); if (!khz) khz = tsc_khz; __get_cpu_var(cpu_tsc_khz) = khz; } static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data) { struct cpufreq_freqs *freq = data; struct kvm *kvm; struct kvm_vcpu *vcpu; int i, send_ipi = 0; /* * We allow guests to temporarily run on slowing clocks, * provided we notify them after, or to run on accelerating * clocks, provided we notify them before. Thus time never * goes backwards. * * However, we have a problem. 
We can't atomically update * the frequency of a given CPU from this function; it is * merely a notifier, which can be called from any CPU. * Changing the TSC frequency at arbitrary points in time * requires a recomputation of local variables related to * the TSC for each VCPU. We must flag these local variables * to be updated and be sure the update takes place with the * new frequency before any guests proceed. * *
Unfortunately, the combination of hotplug CPU and frequency * change creates an intractable locking scenario; the order * of when these callouts happen is undefined with respect to * CPU hotplug, and they can race with each other. As such, * merely setting per_cpu(cpu_tsc_khz) = X during a hotadd is * undefined; you can actually have a CPU frequency change take * place in between the computation of X and the setting of the * variable. To protect against this problem, all updates of * the per_cpu tsc_khz variable are done in an interrupt * protected IPI, and all callers wishing to update the value * must wait for a synchronous IPI to complete (which is trivial * if the caller is on the CPU already). This establishes the * necessary total order on variable updates. * *
Note that because a guest time update may take place * anytime after the setting of the VCPU's request bit, the * correct TSC value must be set before the request. However, * to ensure the update actually makes it to any guest which * starts running in hardware virtualization between the set * and the acquisition of the spinlock, we must also ping the * CPU after setting the request bit. * */
if (val == CPUFREQ_PRECHANGE && freq->old > freq->new) return 0; if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new) return 0; smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1); spin_lock(&kvm_lock); list_for_each_entry(kvm, &vm_list, vm_list) { kvm_for_each_vcpu(i, vcpu, kvm) { if (vcpu->cpu != freq->cpu) continue; kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); if (vcpu->cpu != smp_processor_id()) send_ipi = 1; } } spin_unlock(&kvm_lock);
if (freq->old < freq->new && send_ipi) { /* * We upscale the frequency. Must make sure the guest * doesn't see old kvmclock values while running with * the new frequency, otherwise we risk that the guest sees * time go backwards. * * If we update the frequency for another cpu * (which might be in guest context) send an interrupt * to kick the cpu out of guest context. Next time * guest context is entered kvmclock will be updated, * so the guest will not see stale values.
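* (The guest-visible kvmclock parameters are recomputed from the new tsc_khz on the next guest entry, driven by the KVM_REQ_CLOCK_UPDATE request raised above.)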
*/ smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1); } return 0; } static struct notifier_block kvmclock_cpufreq_notifier_block = { .notifier_call = kvmclock_cpufreq_notifier }; static int kvmclock_cpu_notifier(struct notifier_block *nfb, unsigned long action, void *hcpu) { unsigned int cpu = (unsigned long)hcpu; switch (action) { case CPU_ONLINE: case CPU_DOWN_FAILED: smp_call_function_single(cpu, tsc_khz_changed, NULL, 1); break; case CPU_DOWN_PREPARE: smp_call_function_single(cpu, tsc_bad, NULL, 1); break; } return NOTIFY_OK; } static struct notifier_block kvmclock_cpu_notifier_block = { .notifier_call = kvmclock_cpu_notifier, .priority = -INT_MAX }; static void kvm_timer_init(void) { int cpu; max_tsc_khz = tsc_khz; register_hotcpu_notifier(&kvmclock_cpu_notifier_block); if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) { #ifdef CONFIG_CPU_FREQ struct cpufreq_policy policy; memset(&policy, 0, sizeof(policy)); cpu = get_cpu(); cpufreq_get_policy(&policy, cpu); if (policy.cpuinfo.max_freq) max_tsc_khz = policy.cpuinfo.max_freq; put_cpu(); #endif cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER); } pr_debug("kvm: max_tsc_khz = %ld\n", max_tsc_khz); for_each_online_cpu(cpu) smp_call_function_single(cpu, tsc_khz_changed, NULL, 1); } static DEFINE_PER_CPU(struct kvm_vcpu *, current_vcpu); static int kvm_is_in_guest(void) { return percpu_read(current_vcpu) != NULL; } static int kvm_is_user_mode(void) { int user_mode = 3; if (percpu_read(current_vcpu)) user_mode = kvm_x86_ops->get_cpl(percpu_read(current_vcpu)); return user_mode != 0; } static unsigned long kvm_get_guest_ip(void) { unsigned long ip = 0; if (percpu_read(current_vcpu)) ip = kvm_rip_read(percpu_read(current_vcpu)); return ip; } static struct perf_guest_info_callbacks kvm_guest_cbs = { .is_in_guest = kvm_is_in_guest, .is_user_mode = kvm_is_user_mode, .get_guest_ip = kvm_get_guest_ip, }; void kvm_before_handle_nmi(struct kvm_vcpu *vcpu) { percpu_write(current_vcpu, vcpu); } EXPORT_SYMBOL_GPL(kvm_before_handle_nmi); void kvm_after_handle_nmi(struct kvm_vcpu *vcpu) { percpu_write(current_vcpu, NULL); } EXPORT_SYMBOL_GPL(kvm_after_handle_nmi); int kvm_arch_init(void *opaque) { int r; struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque; if (kvm_x86_ops) { printk(KERN_ERR "kvm: already loaded the other module\n"); r = -EEXIST; goto out; } if (!ops->cpu_has_kvm_support()) { printk(KERN_ERR "kvm: no hardware support\n"); r = -EOPNOTSUPP; goto out; } if (ops->disabled_by_bios()) { printk(KERN_ERR "kvm: disabled by bios\n"); r = -EOPNOTSUPP; goto out; } r = kvm_mmu_module_init(); if (r) goto out; kvm_init_msr_list(); kvm_x86_ops = ops; kvm_mmu_set_nonpresent_ptes(0ull, 0ull); kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK, PT_DIRTY_MASK, PT64_NX_MASK, 0); kvm_timer_init(); perf_register_guest_info_callbacks(&kvm_guest_cbs); if (cpu_has_xsave) host_xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK); return 0; out: return r; } void kvm_arch_exit(void) { perf_unregister_guest_info_callbacks(&kvm_guest_cbs); if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER); unregister_hotcpu_notifier(&kvmclock_cpu_notifier_block); kvm_x86_ops = NULL; kvm_mmu_module_exit(); } int kvm_emulate_halt(struct kvm_vcpu *vcpu) { ++vcpu->stat.halt_exits; if (irqchip_in_kernel(vcpu->kvm)) { vcpu->arch.mp_state = KVM_MP_STATE_HALTED; return 1; } else { vcpu->run->exit_reason = KVM_EXIT_HLT; return 0; } } EXPORT_SYMBOL_GPL(kvm_emulate_halt); 
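/*
 * hc_gpa() below reassembles a guest physical address that arrives either
 * whole (long mode) or split across two 32-bit registers, as a0 | (a1 << 32).
 * Worked example: a1 = 0x2, a0 = 0x1000 yields the gpa 0x200001000.
 *
 * kvm_emulate_hypercall() further down takes the hypercall number in RAX and
 * up to four arguments in RBX, RCX, RDX and RSI, and returns the result in
 * RAX.  A minimal, illustrative guest-side sketch (assuming an Intel CPU,
 * hence vmcall rather than AMD's vmmcall):
 *
 *	mov	$KVM_HC_VAPIC_POLL_IRQ, %rax	# hypercall nr (1)
 *	vmcall					# on return, RAX holds 0
 */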
static inline gpa_t hc_gpa(struct kvm_vcpu *vcpu, unsigned long a0, unsigned long a1) { if (is_long_mode(vcpu)) return a0; else return a0 | ((gpa_t)a1 << 32); } int kvm_hv_hypercall(struct kvm_vcpu *vcpu) { u64 param, ingpa, outgpa, ret; uint16_t code, rep_idx, rep_cnt, res = HV_STATUS_SUCCESS, rep_done = 0; bool fast, longmode; int cs_db, cs_l; /* * hypercall generates UD from non zero cpl and real mode * per HYPER-V spec */ if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) { kvm_queue_exception(vcpu, UD_VECTOR); return 0; } kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l); longmode = is_long_mode(vcpu) && cs_l == 1; if (!longmode) { param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) | (kvm_register_read(vcpu, VCPU_REGS_RAX) & 0xffffffff); ingpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RBX) << 32) | (kvm_register_read(vcpu, VCPU_REGS_RCX) & 0xffffffff); outgpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDI) << 32) | (kvm_register_read(vcpu, VCPU_REGS_RSI) & 0xffffffff); } #ifdef CONFIG_X86_64 else { param = kvm_register_read(vcpu, VCPU_REGS_RCX); ingpa = kvm_register_read(vcpu, VCPU_REGS_RDX); outgpa = kvm_register_read(vcpu, VCPU_REGS_R8); } #endif code = param & 0xffff; fast = (param >> 16) & 0x1; rep_cnt = (param >> 32) & 0xfff; rep_idx = (param >> 48) & 0xfff; trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa); switch (code) { case HV_X64_HV_NOTIFY_LONG_SPIN_WAIT: kvm_vcpu_on_spin(vcpu); break; default: res = HV_STATUS_INVALID_HYPERCALL_CODE; break; } ret = res | (((u64)rep_done & 0xfff) << 32); if (longmode) { kvm_register_write(vcpu, VCPU_REGS_RAX, ret); } else { kvm_register_write(vcpu, VCPU_REGS_RDX, ret >> 32); kvm_register_write(vcpu, VCPU_REGS_RAX, ret & 0xffffffff); } return 1; } int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) { unsigned long nr, a0, a1, a2, a3, ret; int r = 1; if (kvm_hv_hypercall_enabled(vcpu->kvm)) return kvm_hv_hypercall(vcpu); nr = kvm_register_read(vcpu, VCPU_REGS_RAX); a0 = kvm_register_read(vcpu, VCPU_REGS_RBX); a1 = kvm_register_read(vcpu, VCPU_REGS_RCX); a2 = kvm_register_read(vcpu, VCPU_REGS_RDX); a3 = kvm_register_read(vcpu, VCPU_REGS_RSI); trace_kvm_hypercall(nr, a0, a1, a2, a3); if (!is_long_mode(vcpu)) { nr &= 0xFFFFFFFF; a0 &= 0xFFFFFFFF; a1 &= 0xFFFFFFFF; a2 &= 0xFFFFFFFF; a3 &= 0xFFFFFFFF; } if (kvm_x86_ops->get_cpl(vcpu) != 0) { ret = -KVM_EPERM; goto out; } switch (nr) { case KVM_HC_VAPIC_POLL_IRQ: ret = 0; break; case KVM_HC_MMU_OP: r = kvm_pv_mmu_op(vcpu, a0, hc_gpa(vcpu, a1, a2), &ret); break; default: ret = -KVM_ENOSYS; break; } out: kvm_register_write(vcpu, VCPU_REGS_RAX, ret); ++vcpu->stat.hypercalls; return r; } EXPORT_SYMBOL_GPL(kvm_emulate_hypercall); int kvm_fix_hypercall(struct kvm_vcpu *vcpu) { char instruction[3]; unsigned long rip = kvm_rip_read(vcpu); /* * Blow out the MMU to ensure that no other VCPU has an active mapping * to ensure that the updated hypercall appears atomically across all * VCPUs. 
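* Zapping every shadow page below forces all VCPUs to re-fault their translations, so none of them can keep executing through a stale mapping of the old instruction bytes.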
*/ kvm_mmu_zap_all(vcpu->kvm); kvm_x86_ops->patch_hypercall(vcpu, instruction); return emulator_write_emulated(rip, instruction, 3, NULL, vcpu); } void realmode_lgdt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base) { struct desc_ptr dt = { limit, base }; kvm_x86_ops->set_gdt(vcpu, &dt); } void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base) { struct desc_ptr dt = { limit, base }; kvm_x86_ops->set_idt(vcpu, &dt); } static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i) { struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i]; int j, nent = vcpu->arch.cpuid_nent; e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT; /* when no next entry is found, the current entry[i] is reselected */ for (j = i + 1; ; j = (j + 1) % nent) { struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j]; if (ej->function == e->function) { ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT; return j; } } return 0; /* silence gcc, even though control never reaches here */ } /* find an entry with matching function, matching index (if needed), and that * should be read next (if it's stateful) */ static int is_matching_cpuid_entry(struct kvm_cpuid_entry2 *e, u32 function, u32 index) { if (e->function != function) return 0; if ((e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) && e->index != index) return 0; if ((e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) && !(e->flags & KVM_CPUID_FLAG_STATE_READ_NEXT)) return 0; return 1; } struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu, u32 function, u32 index) { int i; struct kvm_cpuid_entry2 *best = NULL; for (i = 0; i < vcpu->arch.cpuid_nent; ++i) { struct kvm_cpuid_entry2 *e; e = &vcpu->arch.cpuid_entries[i]; if (is_matching_cpuid_entry(e, function, index)) { if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) move_to_next_stateful_cpuid_entry(vcpu, i); best = e; break; } /* * Both basic or both extended? */ if (((e->function ^ function) & 0x80000000) == 0) if (!best || e->function > best->function) best = e; } return best; } EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry); int cpuid_maxphyaddr(struct kvm_vcpu *vcpu) { struct kvm_cpuid_entry2 *best; best = kvm_find_cpuid_entry(vcpu, 0x80000000, 0); if (!best || best->eax < 0x80000008) goto not_found; best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0); if (best) return best->eax & 0xff; not_found: return 36; } void kvm_emulate_cpuid(struct kvm_vcpu *vcpu) { u32 function, index; struct kvm_cpuid_entry2 *best; function = kvm_register_read(vcpu, VCPU_REGS_RAX); index = kvm_register_read(vcpu, VCPU_REGS_RCX); kvm_register_write(vcpu, VCPU_REGS_RAX, 0); kvm_register_write(vcpu, VCPU_REGS_RBX, 0); kvm_register_write(vcpu, VCPU_REGS_RCX, 0); kvm_register_write(vcpu, VCPU_REGS_RDX, 0); best = kvm_find_cpuid_entry(vcpu, function, index); if (best) { kvm_register_write(vcpu, VCPU_REGS_RAX, best->eax); kvm_register_write(vcpu, VCPU_REGS_RBX, best->ebx); kvm_register_write(vcpu, VCPU_REGS_RCX, best->ecx); kvm_register_write(vcpu, VCPU_REGS_RDX, best->edx); } kvm_x86_ops->skip_emulated_instruction(vcpu); trace_kvm_cpuid(function, kvm_register_read(vcpu, VCPU_REGS_RAX), kvm_register_read(vcpu, VCPU_REGS_RBX), kvm_register_read(vcpu, VCPU_REGS_RCX), kvm_register_read(vcpu, VCPU_REGS_RDX)); } EXPORT_SYMBOL_GPL(kvm_emulate_cpuid); /* * Check if userspace requested an interrupt window, and that the * interrupt window is open. * * No need to exit to userspace if we already have an interrupt queued. 
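* This is only relevant with a userspace irqchip; with the in-kernel irqchip userspace never injects interrupts through the run loop, so we never need to exit for an open interrupt window.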
*/ static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu) { return (!irqchip_in_kernel(vcpu->kvm) && !kvm_cpu_has_interrupt(vcpu) && vcpu->run->request_interrupt_window && kvm_arch_interrupt_allowed(vcpu)); } static void post_kvm_run_save(struct kvm_vcpu *vcpu) { struct kvm_run *kvm_run = vcpu->run; kvm_run->if_flag = (kvm_get_rflags(vcpu) & X86_EFLAGS_IF) != 0; kvm_run->cr8 = kvm_get_cr8(vcpu); kvm_run->apic_base = kvm_get_apic_base(vcpu); if (irqchip_in_kernel(vcpu->kvm)) kvm_run->ready_for_interrupt_injection = 1; else kvm_run->ready_for_interrupt_injection = kvm_arch_interrupt_allowed(vcpu) && !kvm_cpu_has_interrupt(vcpu) && !kvm_event_needs_reinjection(vcpu); } static void vapic_enter(struct kvm_vcpu *vcpu) { struct kvm_lapic *apic = vcpu->arch.apic; struct page *page; if (!apic || !apic->vapic_addr) return; page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); vcpu->arch.apic->vapic_page = page; } static void vapic_exit(struct kvm_vcpu *vcpu) { struct kvm_lapic *apic = vcpu->arch.apic; int idx; if (!apic || !apic->vapic_addr) return; idx = srcu_read_lock(&vcpu->kvm->srcu); kvm_release_page_dirty(apic->vapic_page); mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); srcu_read_unlock(&vcpu->kvm->srcu, idx); } static void update_cr8_intercept(struct kvm_vcpu *vcpu) { int max_irr, tpr; if (!kvm_x86_ops->update_cr8_intercept) return; if (!vcpu->arch.apic) return; if (!vcpu->arch.apic->vapic_addr) max_irr = kvm_lapic_find_highest_irr(vcpu); else max_irr = -1; if (max_irr != -1) max_irr >>= 4; tpr = kvm_lapic_get_cr8(vcpu); kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr); } static void inject_pending_event(struct kvm_vcpu *vcpu) { /* try to reinject previous events if any */ if (vcpu->arch.exception.pending) { trace_kvm_inj_exception(vcpu->arch.exception.nr, vcpu->arch.exception.has_error_code, vcpu->arch.exception.error_code); kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr, vcpu->arch.exception.has_error_code, vcpu->arch.exception.error_code, vcpu->arch.exception.reinject); return; } if (vcpu->arch.nmi_injected) { kvm_x86_ops->set_nmi(vcpu); return; } if (vcpu->arch.interrupt.pending) { kvm_x86_ops->set_irq(vcpu); return; } /* try to inject new event if pending */ if (vcpu->arch.nmi_pending) { if (kvm_x86_ops->nmi_allowed(vcpu)) { vcpu->arch.nmi_pending = false; vcpu->arch.nmi_injected = true; kvm_x86_ops->set_nmi(vcpu); } } else if (kvm_cpu_has_interrupt(vcpu)) { if (kvm_x86_ops->interrupt_allowed(vcpu)) { kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu), false); kvm_x86_ops->set_irq(vcpu); } } } static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu) { if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) && !vcpu->guest_xcr0_loaded) { /* kvm_set_xcr() also depends on this */ xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0); vcpu->guest_xcr0_loaded = 1; } } static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu) { if (vcpu->guest_xcr0_loaded) { if (vcpu->arch.xcr0 != host_xcr0) xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0); vcpu->guest_xcr0_loaded = 0; } } static int vcpu_enter_guest(struct kvm_vcpu *vcpu) { int r; bool req_int_win = !irqchip_in_kernel(vcpu->kvm) && vcpu->run->request_interrupt_window; if (vcpu->requests) { if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) kvm_mmu_unload(vcpu); if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu)) __kvm_migrate_timers(vcpu); if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu)) { r = kvm_guest_time_update(vcpu); if (unlikely(r)) goto out; } if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu)) 
kvm_mmu_sync_roots(vcpu); if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) kvm_x86_ops->tlb_flush(vcpu); if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) { vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS; r = 0; goto out; } if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) { vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN; r = 0; goto out; } if (kvm_check_request(KVM_REQ_DEACTIVATE_FPU, vcpu)) { vcpu->fpu_active = 0; kvm_x86_ops->fpu_deactivate(vcpu); } if (kvm_check_request(KVM_REQ_APF_HALT, vcpu)) { /* Page is swapped out. Do synthetic halt */ vcpu->arch.apf.halted = true; r = 1; goto out; } } r = kvm_mmu_reload(vcpu); if (unlikely(r)) goto out; if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) { inject_pending_event(vcpu); /* enable NMI/IRQ window open exits if needed */ if (vcpu->arch.nmi_pending) kvm_x86_ops->enable_nmi_window(vcpu); else if (kvm_cpu_has_interrupt(vcpu) || req_int_win) kvm_x86_ops->enable_irq_window(vcpu); if (kvm_lapic_enabled(vcpu)) { update_cr8_intercept(vcpu); kvm_lapic_sync_to_vapic(vcpu); } } preempt_disable(); kvm_x86_ops->prepare_guest_switch(vcpu); if (vcpu->fpu_active) kvm_load_guest_fpu(vcpu); kvm_load_guest_xcr0(vcpu); atomic_set(&vcpu->guest_mode, 1); smp_wmb(); local_irq_disable(); if (!atomic_read(&vcpu->guest_mode) || vcpu->requests || need_resched() || signal_pending(current)) { atomic_set(&vcpu->guest_mode, 0); smp_wmb(); local_irq_enable(); preempt_enable(); kvm_x86_ops->cancel_injection(vcpu); r = 1; goto out; } srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); kvm_guest_enter(); if (unlikely(vcpu->arch.switch_db_regs)) { set_debugreg(0, 7); set_debugreg(vcpu->arch.eff_db[0], 0); set_debugreg(vcpu->arch.eff_db[1], 1); set_debugreg(vcpu->arch.eff_db[2], 2); set_debugreg(vcpu->arch.eff_db[3], 3); } trace_kvm_entry(vcpu->vcpu_id); kvm_x86_ops->run(vcpu); /* * If the guest has used debug registers, at least dr7 * will be disabled while returning to the host. * If we don't have active breakpoints in the host, we don't * care about the messed up debug address registers. But if * we have some of them active, restore the old state. */ if (hw_breakpoint_active()) hw_breakpoint_restore(); kvm_get_msr(vcpu, MSR_IA32_TSC, &vcpu->arch.last_guest_tsc); atomic_set(&vcpu->guest_mode, 0); smp_wmb(); local_irq_enable(); ++vcpu->stat.exits; /* * We must have an instruction between local_irq_enable() and * kvm_guest_exit(), so the timer interrupt isn't delayed by * the interrupt shadow. The stat.exits increment will do nicely. 
* But we need to prevent reordering, hence this barrier(): */ barrier(); kvm_guest_exit(); preempt_enable(); vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); /* * Profile KVM exit RIPs: */ if (unlikely(prof_on == KVM_PROFILING)) { unsigned long rip = kvm_rip_read(vcpu); profile_hit(KVM_PROFILING, (void *)rip); } kvm_lapic_sync_from_vapic(vcpu); r = kvm_x86_ops->handle_exit(vcpu); out: return r; } static int __vcpu_run(struct kvm_vcpu *vcpu) { int r; struct kvm *kvm = vcpu->kvm; if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED)) { pr_debug("vcpu %d received sipi with vector # %x\n", vcpu->vcpu_id, vcpu->arch.sipi_vector); kvm_lapic_reset(vcpu); r = kvm_arch_vcpu_reset(vcpu); if (r) return r; vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; } vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); vapic_enter(vcpu); r = 1; while (r > 0) { if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE && !vcpu->arch.apf.halted) r = vcpu_enter_guest(vcpu); else { srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); kvm_vcpu_block(vcpu); vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) { switch(vcpu->arch.mp_state) { case KVM_MP_STATE_HALTED: vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; case KVM_MP_STATE_RUNNABLE: vcpu->arch.apf.halted = false; break; case KVM_MP_STATE_SIPI_RECEIVED: default: r = -EINTR; break; } } } if (r <= 0) break; clear_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests); if (kvm_cpu_has_pending_timer(vcpu)) kvm_inject_pending_timer_irqs(vcpu); if (dm_request_for_irq_injection(vcpu)) { r = -EINTR; vcpu->run->exit_reason = KVM_EXIT_INTR; ++vcpu->stat.request_irq_exits; } kvm_check_async_pf_completion(vcpu); if (signal_pending(current)) { r = -EINTR; vcpu->run->exit_reason = KVM_EXIT_INTR; ++vcpu->stat.signal_exits; } if (need_resched()) { srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); kvm_resched(vcpu); vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); } } srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); vapic_exit(vcpu); return r; } int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { int r; sigset_t sigsaved; if (vcpu->sigset_active) sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) { kvm_vcpu_block(vcpu); clear_bit(KVM_REQ_UNHALT, &vcpu->requests); r = -EAGAIN; goto out; } /* re-sync apic's tpr */ if (!irqchip_in_kernel(vcpu->kvm)) kvm_set_cr8(vcpu, kvm_run->cr8); if (vcpu->arch.pio.count || vcpu->mmio_needed) { if (vcpu->mmio_needed) { memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8); vcpu->mmio_read_completed = 1; vcpu->mmio_needed = 0; } vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); r = emulate_instruction(vcpu, 0, 0, EMULTYPE_NO_DECODE); srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); if (r != EMULATE_DONE) { r = 0; goto out; } } if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL) kvm_register_write(vcpu, VCPU_REGS_RAX, kvm_run->hypercall.ret); r = __vcpu_run(vcpu); out: post_kvm_run_save(vcpu); if (vcpu->sigset_active) sigprocmask(SIG_SETMASK, &sigsaved, NULL); return r; } int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) { regs->rax = kvm_register_read(vcpu, VCPU_REGS_RAX); regs->rbx = kvm_register_read(vcpu, VCPU_REGS_RBX); regs->rcx = kvm_register_read(vcpu, VCPU_REGS_RCX); regs->rdx = kvm_register_read(vcpu, VCPU_REGS_RDX); regs->rsi = kvm_register_read(vcpu, VCPU_REGS_RSI); regs->rdi = kvm_register_read(vcpu, VCPU_REGS_RDI); regs->rsp = kvm_register_read(vcpu, VCPU_REGS_RSP); regs->rbp = kvm_register_read(vcpu, VCPU_REGS_RBP); #ifdef 
CONFIG_X86_64 regs->r8 = kvm_register_read(vcpu, VCPU_REGS_R8); regs->r9 = kvm_register_read(vcpu, VCPU_REGS_R9); regs->r10 = kvm_register_read(vcpu, VCPU_REGS_R10); regs->r11 = kvm_register_read(vcpu, VCPU_REGS_R11); regs->r12 = kvm_register_read(vcpu, VCPU_REGS_R12); regs->r13 = kvm_register_read(vcpu, VCPU_REGS_R13); regs->r14 = kvm_register_read(vcpu, VCPU_REGS_R14); regs->r15 = kvm_register_read(vcpu, VCPU_REGS_R15); #endif regs->rip = kvm_rip_read(vcpu); regs->rflags = kvm_get_rflags(vcpu); return 0; } int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) { kvm_register_write(vcpu, VCPU_REGS_RAX, regs->rax); kvm_register_write(vcpu, VCPU_REGS_RBX, regs->rbx); kvm_register_write(vcpu, VCPU_REGS_RCX, regs->rcx); kvm_register_write(vcpu, VCPU_REGS_RDX, regs->rdx); kvm_register_write(vcpu, VCPU_REGS_RSI, regs->rsi); kvm_register_write(vcpu, VCPU_REGS_RDI, regs->rdi); kvm_register_write(vcpu, VCPU_REGS_RSP, regs->rsp); kvm_register_write(vcpu, VCPU_REGS_RBP, regs->rbp); #ifdef CONFIG_X86_64 kvm_register_write(vcpu, VCPU_REGS_R8, regs->r8); kvm_register_write(vcpu, VCPU_REGS_R9, regs->r9); kvm_register_write(vcpu, VCPU_REGS_R10, regs->r10); kvm_register_write(vcpu, VCPU_REGS_R11, regs->r11); kvm_register_write(vcpu, VCPU_REGS_R12, regs->r12); kvm_register_write(vcpu, VCPU_REGS_R13, regs->r13); kvm_register_write(vcpu, VCPU_REGS_R14, regs->r14); kvm_register_write(vcpu, VCPU_REGS_R15, regs->r15); #endif kvm_rip_write(vcpu, regs->rip); kvm_set_rflags(vcpu, regs->rflags); vcpu->arch.exception.pending = false; kvm_make_request(KVM_REQ_EVENT, vcpu); return 0; } void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l) { struct kvm_segment cs; kvm_get_segment(vcpu, &cs, VCPU_SREG_CS); *db = cs.db; *l = cs.l; } EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits); int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { struct desc_ptr dt; kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS); kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS); kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES); kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS); kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS); kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS); kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR); kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR); kvm_x86_ops->get_idt(vcpu, &dt); sregs->idt.limit = dt.size; sregs->idt.base = dt.address; kvm_x86_ops->get_gdt(vcpu, &dt); sregs->gdt.limit = dt.size; sregs->gdt.base = dt.address; sregs->cr0 = kvm_read_cr0(vcpu); sregs->cr2 = vcpu->arch.cr2; sregs->cr3 = vcpu->arch.cr3; sregs->cr4 = kvm_read_cr4(vcpu); sregs->cr8 = kvm_get_cr8(vcpu); sregs->efer = vcpu->arch.efer; sregs->apic_base = kvm_get_apic_base(vcpu); memset(sregs->interrupt_bitmap, 0, sizeof sregs->interrupt_bitmap); if (vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft) set_bit(vcpu->arch.interrupt.nr, (unsigned long *)sregs->interrupt_bitmap); return 0; } int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, struct kvm_mp_state *mp_state) { mp_state->mp_state = vcpu->arch.mp_state; return 0; } int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, struct kvm_mp_state *mp_state) { vcpu->arch.mp_state = mp_state->mp_state; kvm_make_request(KVM_REQ_EVENT, vcpu); return 0; } int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason, bool has_error_code, u32 error_code) { struct decode_cache *c = &vcpu->arch.emulate_ctxt.decode; int ret; init_emulate_ctxt(vcpu); ret = emulator_task_switch(&vcpu->arch.emulate_ctxt, tss_selector, reason, 
has_error_code, error_code); if (ret) return EMULATE_FAIL; memcpy(vcpu->arch.regs, c->regs, sizeof c->regs); kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.eip); kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags); kvm_make_request(KVM_REQ_EVENT, vcpu); return EMULATE_DONE; } EXPORT_SYMBOL_GPL(kvm_task_switch); int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { int mmu_reset_needed = 0; int pending_vec, max_bits; struct desc_ptr dt; dt.size = sregs->idt.limit; dt.address = sregs->idt.base; kvm_x86_ops->set_idt(vcpu, &dt); dt.size = sregs->gdt.limit; dt.address = sregs->gdt.base; kvm_x86_ops->set_gdt(vcpu, &dt); vcpu->arch.cr2 = sregs->cr2; mmu_reset_needed |= vcpu->arch.cr3 != sregs->cr3; vcpu->arch.cr3 = sregs->cr3; kvm_set_cr8(vcpu, sregs->cr8); mmu_reset_needed |= vcpu->arch.efer != sregs->efer; kvm_x86_ops->set_efer(vcpu, sregs->efer); kvm_set_apic_base(vcpu, sregs->apic_base); mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0; kvm_x86_ops->set_cr0(vcpu, sregs->cr0); vcpu->arch.cr0 = sregs->cr0; mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4; kvm_x86_ops->set_cr4(vcpu, sregs->cr4); if (sregs->cr4 & X86_CR4_OSXSAVE) update_cpuid(vcpu); if (!is_long_mode(vcpu) && is_pae(vcpu)) { load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3); mmu_reset_needed = 1; } if (mmu_reset_needed) kvm_mmu_reset_context(vcpu); max_bits = (sizeof sregs->interrupt_bitmap) << 3; pending_vec = find_first_bit( (const unsigned long *)sregs->interrupt_bitmap, max_bits); if (pending_vec < max_bits) { kvm_queue_interrupt(vcpu, pending_vec, false); pr_debug("Set back pending irq %d\n", pending_vec); if (irqchip_in_kernel(vcpu->kvm)) kvm_pic_clear_isr_ack(vcpu->kvm); } kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS); kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS); kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES); kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS); kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS); kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS); kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR); kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR); update_cr8_intercept(vcpu); /* Older userspace won't unhalt the vcpu on reset. */ if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 && sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 && !is_protmode(vcpu)) vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; kvm_make_request(KVM_REQ_EVENT, vcpu); return 0; } int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg) { unsigned long rflags; int i, r; if (dbg->control & (KVM_GUESTDBG_INJECT_DB | KVM_GUESTDBG_INJECT_BP)) { r = -EBUSY; if (vcpu->arch.exception.pending) goto out; if (dbg->control & KVM_GUESTDBG_INJECT_DB) kvm_queue_exception(vcpu, DB_VECTOR); else kvm_queue_exception(vcpu, BP_VECTOR); } /* * Read rflags as long as potentially injected trace flags are still * filtered out. 
*/ rflags = kvm_get_rflags(vcpu); vcpu->guest_debug = dbg->control; if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE)) vcpu->guest_debug = 0; if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) { for (i = 0; i < KVM_NR_DB_REGS; ++i) vcpu->arch.eff_db[i] = dbg->arch.debugreg[i]; vcpu->arch.switch_db_regs = (dbg->arch.debugreg[7] & DR7_BP_EN_MASK); } else { for (i = 0; i < KVM_NR_DB_REGS; i++) vcpu->arch.eff_db[i] = vcpu->arch.db[i]; vcpu->arch.switch_db_regs = (vcpu->arch.dr7 & DR7_BP_EN_MASK); } if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) vcpu->arch.singlestep_rip = kvm_rip_read(vcpu) + get_segment_base(vcpu, VCPU_SREG_CS); /* * Trigger an rflags update that will inject or remove the trace * flags. */ kvm_set_rflags(vcpu, rflags); kvm_x86_ops->set_guest_debug(vcpu, dbg); r = 0; out: return r; } /* * Translate a guest virtual address to a guest physical address. */ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, struct kvm_translation *tr) { unsigned long vaddr = tr->linear_address; gpa_t gpa; int idx; idx = srcu_read_lock(&vcpu->kvm->srcu); gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL); srcu_read_unlock(&vcpu->kvm->srcu, idx); tr->physical_address = gpa; tr->valid = gpa != UNMAPPED_GVA; tr->writeable = 1; tr->usermode = 0; return 0; } int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) { struct i387_fxsave_struct *fxsave = &vcpu->arch.guest_fpu.state->fxsave; memcpy(fpu->fpr, fxsave->st_space, 128); fpu->fcw = fxsave->cwd; fpu->fsw = fxsave->swd; fpu->ftwx = fxsave->twd; fpu->last_opcode = fxsave->fop; fpu->last_ip = fxsave->rip; fpu->last_dp = fxsave->rdp; memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space); return 0; } int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) { struct i387_fxsave_struct *fxsave = &vcpu->arch.guest_fpu.state->fxsave; memcpy(fxsave->st_space, fpu->fpr, 128); fxsave->cwd = fpu->fcw; fxsave->swd = fpu->fsw; fxsave->twd = fpu->ftwx; fxsave->fop = fpu->last_opcode; fxsave->rip = fpu->last_ip; fxsave->rdp = fpu->last_dp; memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space); return 0; } int fx_init(struct kvm_vcpu *vcpu) { int err; err = fpu_alloc(&vcpu->arch.guest_fpu); if (err) return err; fpu_finit(&vcpu->arch.guest_fpu); /* * Ensure guest xcr0 is valid for loading */ vcpu->arch.xcr0 = XSTATE_FP; vcpu->arch.cr0 |= X86_CR0_ET; return 0; } EXPORT_SYMBOL_GPL(fx_init); static void fx_free(struct kvm_vcpu *vcpu) { fpu_free(&vcpu->arch.guest_fpu); } void kvm_load_guest_fpu(struct kvm_vcpu *vcpu) { if (vcpu->guest_fpu_loaded) return; /* * Restore all possible states in the guest, * and assume host would use all available bits. * Guest xcr0 would be loaded later. 
*/ kvm_put_guest_xcr0(vcpu); vcpu->guest_fpu_loaded = 1; unlazy_fpu(current); fpu_restore_checking(&vcpu->arch.guest_fpu); trace_kvm_fpu(1); } void kvm_put_guest_fpu(struct kvm_vcpu *vcpu) { kvm_put_guest_xcr0(vcpu); if (!vcpu->guest_fpu_loaded) return; vcpu->guest_fpu_loaded = 0; fpu_save_init(&vcpu->arch.guest_fpu); ++vcpu->stat.fpu_reload; kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu); trace_kvm_fpu(0); } void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) { if (vcpu->arch.time_page) { kvm_release_page_dirty(vcpu->arch.time_page); vcpu->arch.time_page = NULL; } free_cpumask_var(vcpu->arch.wbinvd_dirty_mask); fx_free(vcpu); kvm_x86_ops->vcpu_free(vcpu); } struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) { if (check_tsc_unstable() && atomic_read(&kvm->online_vcpus) != 0) printk_once(KERN_WARNING "kvm: SMP vm created on host with unstable TSC; " "guest TSC will not be reliable\n"); return kvm_x86_ops->vcpu_create(kvm, id); } int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) { int r; vcpu->arch.mtrr_state.have_fixed = 1; vcpu_load(vcpu); r = kvm_arch_vcpu_reset(vcpu); if (r == 0) r = kvm_mmu_setup(vcpu); vcpu_put(vcpu); if (r < 0) goto free_vcpu; return 0; free_vcpu: kvm_x86_ops->vcpu_free(vcpu); return r; } void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) { vcpu->arch.apf.msr_val = 0; vcpu_load(vcpu); kvm_mmu_unload(vcpu); vcpu_put(vcpu); fx_free(vcpu); kvm_x86_ops->vcpu_free(vcpu); } int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu) { vcpu->arch.nmi_pending = false; vcpu->arch.nmi_injected = false; vcpu->arch.switch_db_regs = 0; memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db)); vcpu->arch.dr6 = DR6_FIXED_1; vcpu->arch.dr7 = DR7_FIXED_1; kvm_make_request(KVM_REQ_EVENT, vcpu); vcpu->arch.apf.msr_val = 0; kvm_clear_async_pf_completion_queue(vcpu); kvm_async_pf_hash_reset(vcpu); vcpu->arch.apf.halted = false; return kvm_x86_ops->vcpu_reset(vcpu); } int kvm_arch_hardware_enable(void *garbage) { struct kvm *kvm; struct kvm_vcpu *vcpu; int i; kvm_shared_msr_cpu_online(); list_for_each_entry(kvm, &vm_list, vm_list) kvm_for_each_vcpu(i, vcpu, kvm) if (vcpu->cpu == smp_processor_id()) kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); return kvm_x86_ops->hardware_enable(garbage); } void kvm_arch_hardware_disable(void *garbage) { kvm_x86_ops->hardware_disable(garbage); drop_user_return_notifiers(garbage); } int kvm_arch_hardware_setup(void) { return kvm_x86_ops->hardware_setup(); } void kvm_arch_hardware_unsetup(void) { kvm_x86_ops->hardware_unsetup(); } void kvm_arch_check_processor_compat(void *rtn) { kvm_x86_ops->check_processor_compatibility(rtn); } int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) { struct page *page; struct kvm *kvm; int r; BUG_ON(vcpu->kvm == NULL); kvm = vcpu->kvm; vcpu->arch.emulate_ctxt.ops = &emulate_ops; vcpu->arch.walk_mmu = &vcpu->arch.mmu; vcpu->arch.mmu.root_hpa = INVALID_PAGE; vcpu->arch.mmu.translate_gpa = translate_gpa; vcpu->arch.nested_mmu.translate_gpa = translate_nested_gpa; if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_bsp(vcpu)) vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; else vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED; page = alloc_page(GFP_KERNEL | __GFP_ZERO); if (!page) { r = -ENOMEM; goto fail; } vcpu->arch.pio_data = page_address(page); if (!kvm->arch.virtual_tsc_khz) kvm_arch_set_tsc_khz(kvm, max_tsc_khz); r = kvm_mmu_create(vcpu); if (r < 0) goto fail_free_pio_data; if (irqchip_in_kernel(kvm)) { r = kvm_create_lapic(vcpu); if (r < 0) goto fail_mmu_destroy; } vcpu->arch.mce_banks = kzalloc(KVM_MAX_MCE_BANKS * sizeof(u64) * 4, GFP_KERNEL); if 
(!vcpu->arch.mce_banks) { r = -ENOMEM; goto fail_free_lapic; } vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS; if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, GFP_KERNEL)) goto fail_free_mce_banks; kvm_async_pf_hash_reset(vcpu); return 0; fail_free_mce_banks: kfree(vcpu->arch.mce_banks); fail_free_lapic: kvm_free_lapic(vcpu); fail_mmu_destroy: kvm_mmu_destroy(vcpu); fail_free_pio_data: free_page((unsigned long)vcpu->arch.pio_data); fail: return r; } void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) { int idx; kfree(vcpu->arch.mce_banks); kvm_free_lapic(vcpu); idx = srcu_read_lock(&vcpu->kvm->srcu); kvm_mmu_destroy(vcpu); srcu_read_unlock(&vcpu->kvm->srcu, idx); free_page((unsigned long)vcpu->arch.pio_data); } int kvm_arch_init_vm(struct kvm *kvm) { INIT_LIST_HEAD(&kvm->arch.active_mmu_pages); INIT_LIST_HEAD(&kvm->arch.assigned_dev_head); /* Reserve bit 0 of irq_sources_bitmap for userspace irq source */ set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap); spin_lock_init(&kvm->arch.tsc_write_lock); return 0; } static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu) { vcpu_load(vcpu); kvm_mmu_unload(vcpu); vcpu_put(vcpu); } static void kvm_free_vcpus(struct kvm *kvm) { unsigned int i; struct kvm_vcpu *vcpu; /* * Unpin any mmu pages first. */ kvm_for_each_vcpu(i, vcpu, kvm) { kvm_clear_async_pf_completion_queue(vcpu); kvm_unload_vcpu_mmu(vcpu); } kvm_for_each_vcpu(i, vcpu, kvm) kvm_arch_vcpu_free(vcpu); mutex_lock(&kvm->lock); for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) kvm->vcpus[i] = NULL; atomic_set(&kvm->online_vcpus, 0); mutex_unlock(&kvm->lock); } void kvm_arch_sync_events(struct kvm *kvm) { kvm_free_all_assigned_devices(kvm); kvm_free_pit(kvm); } void kvm_arch_destroy_vm(struct kvm *kvm) { kvm_iommu_unmap_guest(kvm); kfree(kvm->arch.vpic); kfree(kvm->arch.vioapic); kvm_free_vcpus(kvm); if (kvm->arch.apic_access_page) put_page(kvm->arch.apic_access_page); if (kvm->arch.ept_identity_pagetable) put_page(kvm->arch.ept_identity_pagetable); } int kvm_arch_prepare_memory_region(struct kvm *kvm, struct kvm_memory_slot *memslot, struct kvm_memory_slot old, struct kvm_userspace_memory_region *mem, int user_alloc) { int npages = memslot->npages; int map_flags = MAP_PRIVATE | MAP_ANONYMOUS; /* Prevent internal slot pages from being moved by fork()/COW. */ if (memslot->id >= KVM_MEMORY_SLOTS) map_flags = MAP_SHARED | MAP_ANONYMOUS; /*To keep backward compatibility with older userspace, *x86 needs to hanlde !user_alloc case. 
*/ if (!user_alloc) { if (npages && !old.rmap) { unsigned long userspace_addr; down_write(&current->mm->mmap_sem); userspace_addr = do_mmap(NULL, 0, npages * PAGE_SIZE, PROT_READ | PROT_WRITE, map_flags, 0); up_write(&current->mm->mmap_sem); if (IS_ERR((void *)userspace_addr)) return PTR_ERR((void *)userspace_addr); memslot->userspace_addr = userspace_addr; } } return 0; } void kvm_arch_commit_memory_region(struct kvm *kvm, struct kvm_userspace_memory_region *mem, struct kvm_memory_slot old, int user_alloc) { int npages = mem->memory_size >> PAGE_SHIFT; if (!user_alloc && !old.user_alloc && old.rmap && !npages) { int ret; down_write(&current->mm->mmap_sem); ret = do_munmap(current->mm, old.userspace_addr, old.npages * PAGE_SIZE); up_write(&current->mm->mmap_sem); if (ret < 0) printk(KERN_WARNING "kvm_vm_ioctl_set_memory_region: " "failed to munmap memory\n"); } spin_lock(&kvm->mmu_lock); if (!kvm->arch.n_requested_mmu_pages) { unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm); kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages); } kvm_mmu_slot_remove_write_access(kvm, mem->slot); spin_unlock(&kvm->mmu_lock); } void kvm_arch_flush_shadow(struct kvm *kvm) { kvm_mmu_zap_all(kvm); kvm_reload_remote_mmus(kvm); } int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) { return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE && !vcpu->arch.apf.halted) || !list_empty_careful(&vcpu->async_pf.done) || vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED || vcpu->arch.nmi_pending || (kvm_arch_interrupt_allowed(vcpu) && kvm_cpu_has_interrupt(vcpu)); } void kvm_vcpu_kick(struct kvm_vcpu *vcpu) { int me; int cpu = vcpu->cpu; if (waitqueue_active(&vcpu->wq)) { wake_up_interruptible(&vcpu->wq); ++vcpu->stat.halt_wakeup; } me = get_cpu(); if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) if (atomic_xchg(&vcpu->guest_mode, 0)) smp_send_reschedule(cpu); put_cpu(); } int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu) { return kvm_x86_ops->interrupt_allowed(vcpu); } bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip) { unsigned long current_rip = kvm_rip_read(vcpu) + get_segment_base(vcpu, VCPU_SREG_CS); return current_rip == linear_rip; } EXPORT_SYMBOL_GPL(kvm_is_linear_rip); unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu) { unsigned long rflags; rflags = kvm_x86_ops->get_rflags(vcpu); if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) rflags &= ~X86_EFLAGS_TF; return rflags; } EXPORT_SYMBOL_GPL(kvm_get_rflags); void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) { if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP && kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip)) rflags |= X86_EFLAGS_TF; kvm_x86_ops->set_rflags(vcpu, rflags); kvm_make_request(KVM_REQ_EVENT, vcpu); } EXPORT_SYMBOL_GPL(kvm_set_rflags); void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work) { int r; if (!vcpu->arch.mmu.direct_map || !work->arch.direct_map || is_error_page(work->page)) return; r = kvm_mmu_reload(vcpu); if (unlikely(r)) return; vcpu->arch.mmu.page_fault(vcpu, work->gva, 0, true); } static inline u32 kvm_async_pf_hash_fn(gfn_t gfn) { return hash_32(gfn & 0xffffffff, order_base_2(ASYNC_PF_PER_VCPU)); } static inline u32 kvm_async_pf_next_probe(u32 key) { return (key + 1) & (roundup_pow_of_two(ASYNC_PF_PER_VCPU) - 1); } static void kvm_add_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) { u32 key = kvm_async_pf_hash_fn(gfn); while (vcpu->arch.apf.gfns[key] != ~0) key = kvm_async_pf_next_probe(key); vcpu->arch.apf.gfns[key] = gfn; } static u32 
kvm_async_pf_gfn_slot(struct kvm_vcpu *vcpu, gfn_t gfn) { int i; u32 key = kvm_async_pf_hash_fn(gfn); for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU) && (vcpu->arch.apf.gfns[key] != gfn && vcpu->arch.apf.gfns[key] != ~0); i++) key = kvm_async_pf_next_probe(key); return key; } bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) { return vcpu->arch.apf.gfns[kvm_async_pf_gfn_slot(vcpu, gfn)] == gfn; } static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) { u32 i, j, k; i = j = kvm_async_pf_gfn_slot(vcpu, gfn); while (true) { vcpu->arch.apf.gfns[i] = ~0; do { j = kvm_async_pf_next_probe(j); if (vcpu->arch.apf.gfns[j] == ~0) return; k = kvm_async_pf_hash_fn(vcpu->arch.apf.gfns[j]); /* * k lies cyclically in ]i,j] * | i.k.j | * |....j i.k.| or |.k..j i...| */ } while ((i <= j) ? (i < k && k <= j) : (i < k || k <= j)); vcpu->arch.apf.gfns[i] = vcpu->arch.apf.gfns[j]; i = j; } } static int apf_put_user(struct kvm_vcpu *vcpu, u32 val) { return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &val, sizeof(val)); } void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu, struct kvm_async_pf *work) { struct x86_exception fault; trace_kvm_async_pf_not_present(work->arch.token, work->gva); kvm_add_async_pf_gfn(vcpu, work->arch.gfn); if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) || (vcpu->arch.apf.send_user_only && kvm_x86_ops->get_cpl(vcpu) == 0)) kvm_make_request(KVM_REQ_APF_HALT, vcpu); else if (!apf_put_user(vcpu, KVM_PV_REASON_PAGE_NOT_PRESENT)) { fault.vector = PF_VECTOR; fault.error_code_valid = true; fault.error_code = 0; fault.nested_page_fault = false; fault.address = work->arch.token; kvm_inject_page_fault(vcpu, &fault); } } void kvm_arch_async_page_present(struct kvm_vcpu *vcpu, struct kvm_async_pf *work) { struct x86_exception fault; trace_kvm_async_pf_ready(work->arch.token, work->gva); if (is_error_page(work->page)) work->arch.token = ~0; /* broadcast wakeup */ else kvm_del_async_pf_gfn(vcpu, work->arch.gfn); if ((vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) && !apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) { fault.vector = PF_VECTOR; fault.error_code_valid = true; fault.error_code = 0; fault.nested_page_fault = false; fault.address = work->arch.token; kvm_inject_page_fault(vcpu, &fault); } vcpu->arch.apf.halted = false; } bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu) { if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED)) return true; else return !kvm_event_needs_reinjection(vcpu) && kvm_x86_ops->interrupt_allowed(vcpu); } EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_msr); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_cr); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmrun); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit_inject); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intr_vmexit); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_invlpga); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_skinit); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intercepts);
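/*
 * Illustrative addendum (not part of the original file above): the
 * kvm_add_async_pf_gfn()/kvm_del_async_pf_gfn() helpers at the end of this
 * file implement a small open-addressing hash table with linear probing,
 * where deletion back-shifts later entries so probe chains stay unbroken
 * (the "k lies cyclically in ]i,j]" comment).  The stand-alone userspace
 * sketch below reproduces that scheme under stated assumptions: the table
 * size of 64 mirrors ASYNC_PF_PER_VCPU, EMPTY plays the role of the ~0
 * sentinel, and a simple multiplicative hash stands in for hash_32().
 */
#include <stdint.h>
#include <stdio.h>

#define TABLE_SIZE 64              /* assumed: ASYNC_PF_PER_VCPU == 64 */
#define EMPTY      UINT64_MAX      /* same sentinel role as ~0 in the kernel */

static uint64_t table[TABLE_SIZE];

static uint32_t hash_fn(uint64_t gfn)
{
        /* stand-in for hash_32(); any reasonable hash works for the demo */
        return (uint32_t)((gfn * 2654435761u) & (TABLE_SIZE - 1));
}

static uint32_t next_probe(uint32_t key)
{
        return (key + 1) & (TABLE_SIZE - 1);
}

static void add_gfn(uint64_t gfn)
{
        uint32_t key = hash_fn(gfn);

        while (table[key] != EMPTY)
                key = next_probe(key);
        table[key] = gfn;
}

static uint32_t gfn_slot(uint64_t gfn)
{
        uint32_t key = hash_fn(gfn);
        int i;

        for (i = 0; i < TABLE_SIZE && table[key] != gfn && table[key] != EMPTY; i++)
                key = next_probe(key);
        return key;
}

static void del_gfn(uint64_t gfn)
{
        uint32_t i, j, k;

        i = j = gfn_slot(gfn);
        for (;;) {
                table[i] = EMPTY;
                do {
                        j = next_probe(j);
                        if (table[j] == EMPTY)
                                return;
                        k = hash_fn(table[j]);
                        /*
                         * Keep probing past entries whose home slot k lies
                         * cyclically in ]i, j]; those do not need to move.
                         * The first entry whose home slot falls outside that
                         * range is shifted back into the freed slot i.
                         */
                } while ((i <= j) ? (i < k && k <= j) : (i < k || k <= j));
                table[i] = table[j];
                i = j;
        }
}

int main(void)
{
        int i;

        for (i = 0; i < TABLE_SIZE; i++)
                table[i] = EMPTY;
        add_gfn(0x1000);
        add_gfn(0x1040);        /* collides with 0x1000 under this demo hash */
        del_gfn(0x1000);
        printf("0x1040 still found after delete: %d\n",
               table[gfn_slot(0x1040)] == 0x1040);
        return 0;
}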
./CrossVul/dataset_final_sorted/CWE-362/c/good_4726_0
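/*
 * Illustrative addendum (not part of the dataset entry above): the file that
 * ends here implements the arch-specific side of ioctls such as KVM_RUN,
 * KVM_GET/SET_REGS, KVM_SET_SREGS and memory-slot setup
 * (kvm_arch_vcpu_ioctl_run(), kvm_arch_prepare_memory_region(), ...).  The
 * minimal userspace sketch below shows how those paths are typically driven
 * through /dev/kvm; the guest code bytes, load address 0x1000 and port 0x3f8
 * are illustrative assumptions, and most error handling is omitted for
 * brevity.
 */
#include <fcntl.h>
#include <linux/kvm.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        /* Real-mode guest: computes 2 + 2, prints the digit on port 0x3f8, halts. */
        const uint8_t code[] = {
                0xba, 0xf8, 0x03,       /* mov $0x3f8, %dx */
                0x00, 0xd8,             /* add %bl, %al    */
                0x04, '0',              /* add $'0', %al   */
                0xee,                   /* out %al, (%dx)  */
                0xf4,                   /* hlt             */
        };
        int kvm = open("/dev/kvm", O_RDWR | O_CLOEXEC);
        int vmfd = ioctl(kvm, KVM_CREATE_VM, 0);

        /* Back guest physical 0x1000 with one anonymous page holding the code. */
        uint8_t *mem = mmap(NULL, 0x1000, PROT_READ | PROT_WRITE,
                            MAP_SHARED | MAP_ANONYMOUS, -1, 0);
        memcpy(mem, code, sizeof(code));
        struct kvm_userspace_memory_region region = {
                .slot = 0,
                .guest_phys_addr = 0x1000,
                .memory_size = 0x1000,
                .userspace_addr = (uintptr_t)mem,
        };
        ioctl(vmfd, KVM_SET_USER_MEMORY_REGION, &region);

        int vcpufd = ioctl(vmfd, KVM_CREATE_VCPU, 0);
        long mmap_size = ioctl(kvm, KVM_GET_VCPU_MMAP_SIZE, NULL);
        struct kvm_run *run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
                                   MAP_SHARED, vcpufd, 0);

        /* Flat real mode starting at guest physical 0x1000, with 2 and 2 in al/bl. */
        struct kvm_sregs sregs;
        ioctl(vcpufd, KVM_GET_SREGS, &sregs);
        sregs.cs.base = 0;
        sregs.cs.selector = 0;
        ioctl(vcpufd, KVM_SET_SREGS, &sregs);
        struct kvm_regs regs = { .rip = 0x1000, .rax = 2, .rbx = 2, .rflags = 0x2 };
        ioctl(vcpufd, KVM_SET_REGS, &regs);

        for (;;) {
                ioctl(vcpufd, KVM_RUN, NULL);   /* ends up in kvm_arch_vcpu_ioctl_run() */
                if (run->exit_reason == KVM_EXIT_HLT)
                        break;
                if (run->exit_reason == KVM_EXIT_IO &&
                    run->io.direction == KVM_EXIT_IO_OUT && run->io.port == 0x3f8)
                        putchar(*((char *)run + run->io.data_offset));
        }
        putchar('\n');
        return 0;
}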
crossvul-cpp_data_good_829_2
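/*
 * Illustrative addendum (not part of the dataset entry that follows): the
 * fs/userfaultfd.c implementation below serves the userfaultfd(2) file
 * descriptor, i.e. handle_userfault() parks faulting tasks,
 * userfaultfd_read() delivers uffd_msg events and UFFDIO_COPY resolves them
 * and wakes the waiter.  The stand-alone userspace sketch below shows the
 * usual flow that exercises those paths; it is a minimal sketch in the
 * spirit of the userfaultfd(2) man page, with error handling kept to a bare
 * minimum (link with -lpthread).
 */
#include <fcntl.h>
#include <linux/userfaultfd.h>
#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

static int uffd;
static long page_size;

/* Resolve exactly one missing-page fault with UFFDIO_COPY. */
static void *fault_handler(void *arg)
{
        struct uffd_msg msg;
        char *src = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        memset(src, 'A', page_size);
        /* Blocking read(): returns once handle_userfault() has queued a fault. */
        if (read(uffd, &msg, sizeof(msg)) != (ssize_t)sizeof(msg) ||
            msg.event != UFFD_EVENT_PAGEFAULT)
                return NULL;

        struct uffdio_copy copy = {
                .dst = msg.arg.pagefault.address & ~(page_size - 1),
                .src = (unsigned long)src,
                .len = page_size,
                .mode = 0,
        };
        ioctl(uffd, UFFDIO_COPY, &copy);        /* wakes the parked faulting thread */
        return NULL;
}

int main(void)
{
        pthread_t thr;

        page_size = sysconf(_SC_PAGESIZE);
        uffd = syscall(__NR_userfaultfd, O_CLOEXEC);

        struct uffdio_api api = { .api = UFFD_API };
        ioctl(uffd, UFFDIO_API, &api);

        char *region = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
                            MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        struct uffdio_register reg = {
                .range = { .start = (unsigned long)region, .len = page_size },
                .mode = UFFDIO_REGISTER_MODE_MISSING,
        };
        ioctl(uffd, UFFDIO_REGISTER, &reg);

        pthread_create(&thr, NULL, fault_handler, NULL);
        /* First touch faults, blocks in handle_userfault(), resumes after COPY. */
        printf("first byte after resolution: %c\n", region[0]);
        pthread_join(thr, NULL);
        return 0;
}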
/* * fs/userfaultfd.c * * Copyright (C) 2007 Davide Libenzi <davidel@xmailserver.org> * Copyright (C) 2008-2009 Red Hat, Inc. * Copyright (C) 2015 Red Hat, Inc. * * This work is licensed under the terms of the GNU GPL, version 2. See * the COPYING file in the top-level directory. * * Some part derived from fs/eventfd.c (anon inode setup) and * mm/ksm.c (mm hashing). */ #include <linux/list.h> #include <linux/hashtable.h> #include <linux/sched/signal.h> #include <linux/sched/mm.h> #include <linux/mm.h> #include <linux/poll.h> #include <linux/slab.h> #include <linux/seq_file.h> #include <linux/file.h> #include <linux/bug.h> #include <linux/anon_inodes.h> #include <linux/syscalls.h> #include <linux/userfaultfd_k.h> #include <linux/mempolicy.h> #include <linux/ioctl.h> #include <linux/security.h> #include <linux/hugetlb.h> static struct kmem_cache *userfaultfd_ctx_cachep __read_mostly; enum userfaultfd_state { UFFD_STATE_WAIT_API, UFFD_STATE_RUNNING, }; /* * Start with fault_pending_wqh and fault_wqh so they're more likely * to be in the same cacheline. */ struct userfaultfd_ctx { /* waitqueue head for the pending (i.e. not read) userfaults */ wait_queue_head_t fault_pending_wqh; /* waitqueue head for the userfaults */ wait_queue_head_t fault_wqh; /* waitqueue head for the pseudo fd to wakeup poll/read */ wait_queue_head_t fd_wqh; /* waitqueue head for events */ wait_queue_head_t event_wqh; /* a refile sequence protected by fault_pending_wqh lock */ struct seqcount refile_seq; /* pseudo fd refcounting */ refcount_t refcount; /* userfaultfd syscall flags */ unsigned int flags; /* features requested from the userspace */ unsigned int features; /* state machine */ enum userfaultfd_state state; /* released */ bool released; /* memory mappings are changing because of non-cooperative event */ bool mmap_changing; /* mm with one ore more vmas attached to this userfaultfd_ctx */ struct mm_struct *mm; }; struct userfaultfd_fork_ctx { struct userfaultfd_ctx *orig; struct userfaultfd_ctx *new; struct list_head list; }; struct userfaultfd_unmap_ctx { struct userfaultfd_ctx *ctx; unsigned long start; unsigned long end; struct list_head list; }; struct userfaultfd_wait_queue { struct uffd_msg msg; wait_queue_entry_t wq; struct userfaultfd_ctx *ctx; bool waken; }; struct userfaultfd_wake_range { unsigned long start; unsigned long len; }; static int userfaultfd_wake_function(wait_queue_entry_t *wq, unsigned mode, int wake_flags, void *key) { struct userfaultfd_wake_range *range = key; int ret; struct userfaultfd_wait_queue *uwq; unsigned long start, len; uwq = container_of(wq, struct userfaultfd_wait_queue, wq); ret = 0; /* len == 0 means wake all */ start = range->start; len = range->len; if (len && (start > uwq->msg.arg.pagefault.address || start + len <= uwq->msg.arg.pagefault.address)) goto out; WRITE_ONCE(uwq->waken, true); /* * The Program-Order guarantees provided by the scheduler * ensure uwq->waken is visible before the task is woken. */ ret = wake_up_state(wq->private, mode); if (ret) { /* * Wake only once, autoremove behavior. * * After the effect of list_del_init is visible to the other * CPUs, the waitqueue may disappear from under us, see the * !list_empty_careful() in handle_userfault(). * * try_to_wake_up() has an implicit smp_mb(), and the * wq->private is read before calling the extern function * "wake_up_state" (which in turns calls try_to_wake_up). */ list_del_init(&wq->entry); } out: return ret; } /** * userfaultfd_ctx_get - Acquires a reference to the internal userfaultfd * context. 
* @ctx: [in] Pointer to the userfaultfd context. */ static void userfaultfd_ctx_get(struct userfaultfd_ctx *ctx) { refcount_inc(&ctx->refcount); } /** * userfaultfd_ctx_put - Releases a reference to the internal userfaultfd * context. * @ctx: [in] Pointer to userfaultfd context. * * The userfaultfd context reference must have been previously acquired either * with userfaultfd_ctx_get() or userfaultfd_ctx_fdget(). */ static void userfaultfd_ctx_put(struct userfaultfd_ctx *ctx) { if (refcount_dec_and_test(&ctx->refcount)) { VM_BUG_ON(spin_is_locked(&ctx->fault_pending_wqh.lock)); VM_BUG_ON(waitqueue_active(&ctx->fault_pending_wqh)); VM_BUG_ON(spin_is_locked(&ctx->fault_wqh.lock)); VM_BUG_ON(waitqueue_active(&ctx->fault_wqh)); VM_BUG_ON(spin_is_locked(&ctx->event_wqh.lock)); VM_BUG_ON(waitqueue_active(&ctx->event_wqh)); VM_BUG_ON(spin_is_locked(&ctx->fd_wqh.lock)); VM_BUG_ON(waitqueue_active(&ctx->fd_wqh)); mmdrop(ctx->mm); kmem_cache_free(userfaultfd_ctx_cachep, ctx); } } static inline void msg_init(struct uffd_msg *msg) { BUILD_BUG_ON(sizeof(struct uffd_msg) != 32); /* * Must use memset to zero out the paddings or kernel data is * leaked to userland. */ memset(msg, 0, sizeof(struct uffd_msg)); } static inline struct uffd_msg userfault_msg(unsigned long address, unsigned int flags, unsigned long reason, unsigned int features) { struct uffd_msg msg; msg_init(&msg); msg.event = UFFD_EVENT_PAGEFAULT; msg.arg.pagefault.address = address; if (flags & FAULT_FLAG_WRITE) /* * If UFFD_FEATURE_PAGEFAULT_FLAG_WP was set in the * uffdio_api.features and UFFD_PAGEFAULT_FLAG_WRITE * was not set in a UFFD_EVENT_PAGEFAULT, it means it * was a read fault, otherwise if set it means it's * a write fault. */ msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_WRITE; if (reason & VM_UFFD_WP) /* * If UFFD_FEATURE_PAGEFAULT_FLAG_WP was set in the * uffdio_api.features and UFFD_PAGEFAULT_FLAG_WP was * not set in a UFFD_EVENT_PAGEFAULT, it means it was * a missing fault, otherwise if set it means it's a * write protect fault. */ msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_WP; if (features & UFFD_FEATURE_THREAD_ID) msg.arg.pagefault.feat.ptid = task_pid_vnr(current); return msg; } #ifdef CONFIG_HUGETLB_PAGE /* * Same functionality as userfaultfd_must_wait below with modifications for * hugepmd ranges. */ static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx, struct vm_area_struct *vma, unsigned long address, unsigned long flags, unsigned long reason) { struct mm_struct *mm = ctx->mm; pte_t *ptep, pte; bool ret = true; VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem)); ptep = huge_pte_offset(mm, address, vma_mmu_pagesize(vma)); if (!ptep) goto out; ret = false; pte = huge_ptep_get(ptep); /* * Lockless access: we're in a wait_event so it's ok if it * changes under us. */ if (huge_pte_none(pte)) ret = true; if (!huge_pte_write(pte) && (reason & VM_UFFD_WP)) ret = true; out: return ret; } #else static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx, struct vm_area_struct *vma, unsigned long address, unsigned long flags, unsigned long reason) { return false; /* should never get here */ } #endif /* CONFIG_HUGETLB_PAGE */ /* * Verify the pagetables are still not ok after having reigstered into * the fault_pending_wqh to avoid userland having to UFFDIO_WAKE any * userfault that has already been resolved, if userfaultfd_read and * UFFDIO_COPY|ZEROPAGE are being run simultaneously on two different * threads. 
*/ static inline bool userfaultfd_must_wait(struct userfaultfd_ctx *ctx, unsigned long address, unsigned long flags, unsigned long reason) { struct mm_struct *mm = ctx->mm; pgd_t *pgd; p4d_t *p4d; pud_t *pud; pmd_t *pmd, _pmd; pte_t *pte; bool ret = true; VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem)); pgd = pgd_offset(mm, address); if (!pgd_present(*pgd)) goto out; p4d = p4d_offset(pgd, address); if (!p4d_present(*p4d)) goto out; pud = pud_offset(p4d, address); if (!pud_present(*pud)) goto out; pmd = pmd_offset(pud, address); /* * READ_ONCE must function as a barrier with narrower scope * and it must be equivalent to: * _pmd = *pmd; barrier(); * * This is to deal with the instability (as in * pmd_trans_unstable) of the pmd. */ _pmd = READ_ONCE(*pmd); if (pmd_none(_pmd)) goto out; ret = false; if (!pmd_present(_pmd)) goto out; if (pmd_trans_huge(_pmd)) goto out; /* * the pmd is stable (as in !pmd_trans_unstable) so we can re-read it * and use the standard pte_offset_map() instead of parsing _pmd. */ pte = pte_offset_map(pmd, address); /* * Lockless access: we're in a wait_event so it's ok if it * changes under us. */ if (pte_none(*pte)) ret = true; pte_unmap(pte); out: return ret; } /* * The locking rules involved in returning VM_FAULT_RETRY depending on * FAULT_FLAG_ALLOW_RETRY, FAULT_FLAG_RETRY_NOWAIT and * FAULT_FLAG_KILLABLE are not straightforward. The "Caution" * recommendation in __lock_page_or_retry is not an understatement. * * If FAULT_FLAG_ALLOW_RETRY is set, the mmap_sem must be released * before returning VM_FAULT_RETRY only if FAULT_FLAG_RETRY_NOWAIT is * not set. * * If FAULT_FLAG_ALLOW_RETRY is set but FAULT_FLAG_KILLABLE is not * set, VM_FAULT_RETRY can still be returned if and only if there are * fatal_signal_pending()s, and the mmap_sem must be released before * returning it. */ vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason) { struct mm_struct *mm = vmf->vma->vm_mm; struct userfaultfd_ctx *ctx; struct userfaultfd_wait_queue uwq; vm_fault_t ret = VM_FAULT_SIGBUS; bool must_wait, return_to_userland; long blocking_state; /* * We don't do userfault handling for the final child pid update. * * We also don't do userfault handling during * coredumping. hugetlbfs has the special * follow_hugetlb_page() to skip missing pages in the * FOLL_DUMP case, anon memory also checks for FOLL_DUMP with * the no_page_table() helper in follow_page_mask(), but the * shmem_vm_ops->fault method is invoked even during * coredumping without mmap_sem and it ends up here. */ if (current->flags & (PF_EXITING|PF_DUMPCORE)) goto out; /* * Coredumping runs without mmap_sem so we can only check that * the mmap_sem is held, if PF_DUMPCORE was not set. */ WARN_ON_ONCE(!rwsem_is_locked(&mm->mmap_sem)); ctx = vmf->vma->vm_userfaultfd_ctx.ctx; if (!ctx) goto out; BUG_ON(ctx->mm != mm); VM_BUG_ON(reason & ~(VM_UFFD_MISSING|VM_UFFD_WP)); VM_BUG_ON(!(reason & VM_UFFD_MISSING) ^ !!(reason & VM_UFFD_WP)); if (ctx->features & UFFD_FEATURE_SIGBUS) goto out; /* * If it's already released don't get it. This avoids to loop * in __get_user_pages if userfaultfd_release waits on the * caller of handle_userfault to release the mmap_sem. */ if (unlikely(READ_ONCE(ctx->released))) { /* * Don't return VM_FAULT_SIGBUS in this case, so a non * cooperative manager can close the uffd after the * last UFFDIO_COPY, without risking to trigger an * involuntary SIGBUS if the process was starting the * userfaultfd while the userfaultfd was still armed * (but after the last UFFDIO_COPY). 
If the uffd * wasn't already closed when the userfault reached * this point, that would normally be solved by * userfaultfd_must_wait returning 'false'. * * If we were to return VM_FAULT_SIGBUS here, the non * cooperative manager would be instead forced to * always call UFFDIO_UNREGISTER before it can safely * close the uffd. */ ret = VM_FAULT_NOPAGE; goto out; } /* * Check that we can return VM_FAULT_RETRY. * * NOTE: it should become possible to return VM_FAULT_RETRY * even if FAULT_FLAG_TRIED is set without leading to gup() * -EBUSY failures, if the userfaultfd is to be extended for * VM_UFFD_WP tracking and we intend to arm the userfault * without first stopping userland access to the memory. For * VM_UFFD_MISSING userfaults this is enough for now. */ if (unlikely(!(vmf->flags & FAULT_FLAG_ALLOW_RETRY))) { /* * Validate the invariant that nowait must allow retry * to be sure not to return SIGBUS erroneously on * nowait invocations. */ BUG_ON(vmf->flags & FAULT_FLAG_RETRY_NOWAIT); #ifdef CONFIG_DEBUG_VM if (printk_ratelimit()) { printk(KERN_WARNING "FAULT_FLAG_ALLOW_RETRY missing %x\n", vmf->flags); dump_stack(); } #endif goto out; } /* * Handle nowait, not much to do other than tell it to retry * and wait. */ ret = VM_FAULT_RETRY; if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT) goto out; /* take the reference before dropping the mmap_sem */ userfaultfd_ctx_get(ctx); init_waitqueue_func_entry(&uwq.wq, userfaultfd_wake_function); uwq.wq.private = current; uwq.msg = userfault_msg(vmf->address, vmf->flags, reason, ctx->features); uwq.ctx = ctx; uwq.waken = false; return_to_userland = (vmf->flags & (FAULT_FLAG_USER|FAULT_FLAG_KILLABLE)) == (FAULT_FLAG_USER|FAULT_FLAG_KILLABLE); blocking_state = return_to_userland ? TASK_INTERRUPTIBLE : TASK_KILLABLE; spin_lock(&ctx->fault_pending_wqh.lock); /* * After the __add_wait_queue the uwq is visible to userland * through poll/read(). */ __add_wait_queue(&ctx->fault_pending_wqh, &uwq.wq); /* * The smp_mb() after __set_current_state prevents the reads * following the spin_unlock to happen before the list_add in * __add_wait_queue. */ set_current_state(blocking_state); spin_unlock(&ctx->fault_pending_wqh.lock); if (!is_vm_hugetlb_page(vmf->vma)) must_wait = userfaultfd_must_wait(ctx, vmf->address, vmf->flags, reason); else must_wait = userfaultfd_huge_must_wait(ctx, vmf->vma, vmf->address, vmf->flags, reason); up_read(&mm->mmap_sem); if (likely(must_wait && !READ_ONCE(ctx->released) && (return_to_userland ? !signal_pending(current) : !fatal_signal_pending(current)))) { wake_up_poll(&ctx->fd_wqh, EPOLLIN); schedule(); ret |= VM_FAULT_MAJOR; /* * False wakeups can orginate even from rwsem before * up_read() however userfaults will wait either for a * targeted wakeup on the specific uwq waitqueue from * wake_userfault() or for signals or for uffd * release. */ while (!READ_ONCE(uwq.waken)) { /* * This needs the full smp_store_mb() * guarantee as the state write must be * visible to other CPUs before reading * uwq.waken from other CPUs. */ set_current_state(blocking_state); if (READ_ONCE(uwq.waken) || READ_ONCE(ctx->released) || (return_to_userland ? signal_pending(current) : fatal_signal_pending(current))) break; schedule(); } } __set_current_state(TASK_RUNNING); if (return_to_userland) { if (signal_pending(current) && !fatal_signal_pending(current)) { /* * If we got a SIGSTOP or SIGCONT and this is * a normal userland page fault, just let * userland return so the signal will be * handled and gdb debugging works. 
The page * fault code immediately after we return from * this function is going to release the * mmap_sem and it's not depending on it * (unlike gup would if we were not to return * VM_FAULT_RETRY). * * If a fatal signal is pending we still take * the streamlined VM_FAULT_RETRY failure path * and there's no need to retake the mmap_sem * in such case. */ down_read(&mm->mmap_sem); ret = VM_FAULT_NOPAGE; } } /* * Here we race with the list_del; list_add in * userfaultfd_ctx_read(), however because we don't ever run * list_del_init() to refile across the two lists, the prev * and next pointers will never point to self. list_add also * would never let any of the two pointers to point to * self. So list_empty_careful won't risk to see both pointers * pointing to self at any time during the list refile. The * only case where list_del_init() is called is the full * removal in the wake function and there we don't re-list_add * and it's fine not to block on the spinlock. The uwq on this * kernel stack can be released after the list_del_init. */ if (!list_empty_careful(&uwq.wq.entry)) { spin_lock(&ctx->fault_pending_wqh.lock); /* * No need of list_del_init(), the uwq on the stack * will be freed shortly anyway. */ list_del(&uwq.wq.entry); spin_unlock(&ctx->fault_pending_wqh.lock); } /* * ctx may go away after this if the userfault pseudo fd is * already released. */ userfaultfd_ctx_put(ctx); out: return ret; } static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx, struct userfaultfd_wait_queue *ewq) { struct userfaultfd_ctx *release_new_ctx; if (WARN_ON_ONCE(current->flags & PF_EXITING)) goto out; ewq->ctx = ctx; init_waitqueue_entry(&ewq->wq, current); release_new_ctx = NULL; spin_lock(&ctx->event_wqh.lock); /* * After the __add_wait_queue the uwq is visible to userland * through poll/read(). */ __add_wait_queue(&ctx->event_wqh, &ewq->wq); for (;;) { set_current_state(TASK_KILLABLE); if (ewq->msg.event == 0) break; if (READ_ONCE(ctx->released) || fatal_signal_pending(current)) { /* * &ewq->wq may be queued in fork_event, but * __remove_wait_queue ignores the head * parameter. It would be a problem if it * didn't. */ __remove_wait_queue(&ctx->event_wqh, &ewq->wq); if (ewq->msg.event == UFFD_EVENT_FORK) { struct userfaultfd_ctx *new; new = (struct userfaultfd_ctx *) (unsigned long) ewq->msg.arg.reserved.reserved1; release_new_ctx = new; } break; } spin_unlock(&ctx->event_wqh.lock); wake_up_poll(&ctx->fd_wqh, EPOLLIN); schedule(); spin_lock(&ctx->event_wqh.lock); } __set_current_state(TASK_RUNNING); spin_unlock(&ctx->event_wqh.lock); if (release_new_ctx) { struct vm_area_struct *vma; struct mm_struct *mm = release_new_ctx->mm; /* the various vma->vm_userfaultfd_ctx still points to it */ down_write(&mm->mmap_sem); /* no task can run (and in turn coredump) yet */ VM_WARN_ON(!mmget_still_valid(mm)); for (vma = mm->mmap; vma; vma = vma->vm_next) if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx) { vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX; vma->vm_flags &= ~(VM_UFFD_WP | VM_UFFD_MISSING); } up_write(&mm->mmap_sem); userfaultfd_ctx_put(release_new_ctx); } /* * ctx may go away after this if the userfault pseudo fd is * already released. 
*/ out: WRITE_ONCE(ctx->mmap_changing, false); userfaultfd_ctx_put(ctx); } static void userfaultfd_event_complete(struct userfaultfd_ctx *ctx, struct userfaultfd_wait_queue *ewq) { ewq->msg.event = 0; wake_up_locked(&ctx->event_wqh); __remove_wait_queue(&ctx->event_wqh, &ewq->wq); } int dup_userfaultfd(struct vm_area_struct *vma, struct list_head *fcs) { struct userfaultfd_ctx *ctx = NULL, *octx; struct userfaultfd_fork_ctx *fctx; octx = vma->vm_userfaultfd_ctx.ctx; if (!octx || !(octx->features & UFFD_FEATURE_EVENT_FORK)) { vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX; vma->vm_flags &= ~(VM_UFFD_WP | VM_UFFD_MISSING); return 0; } list_for_each_entry(fctx, fcs, list) if (fctx->orig == octx) { ctx = fctx->new; break; } if (!ctx) { fctx = kmalloc(sizeof(*fctx), GFP_KERNEL); if (!fctx) return -ENOMEM; ctx = kmem_cache_alloc(userfaultfd_ctx_cachep, GFP_KERNEL); if (!ctx) { kfree(fctx); return -ENOMEM; } refcount_set(&ctx->refcount, 1); ctx->flags = octx->flags; ctx->state = UFFD_STATE_RUNNING; ctx->features = octx->features; ctx->released = false; ctx->mmap_changing = false; ctx->mm = vma->vm_mm; mmgrab(ctx->mm); userfaultfd_ctx_get(octx); WRITE_ONCE(octx->mmap_changing, true); fctx->orig = octx; fctx->new = ctx; list_add_tail(&fctx->list, fcs); } vma->vm_userfaultfd_ctx.ctx = ctx; return 0; } static void dup_fctx(struct userfaultfd_fork_ctx *fctx) { struct userfaultfd_ctx *ctx = fctx->orig; struct userfaultfd_wait_queue ewq; msg_init(&ewq.msg); ewq.msg.event = UFFD_EVENT_FORK; ewq.msg.arg.reserved.reserved1 = (unsigned long)fctx->new; userfaultfd_event_wait_completion(ctx, &ewq); } void dup_userfaultfd_complete(struct list_head *fcs) { struct userfaultfd_fork_ctx *fctx, *n; list_for_each_entry_safe(fctx, n, fcs, list) { dup_fctx(fctx); list_del(&fctx->list); kfree(fctx); } } void mremap_userfaultfd_prep(struct vm_area_struct *vma, struct vm_userfaultfd_ctx *vm_ctx) { struct userfaultfd_ctx *ctx; ctx = vma->vm_userfaultfd_ctx.ctx; if (!ctx) return; if (ctx->features & UFFD_FEATURE_EVENT_REMAP) { vm_ctx->ctx = ctx; userfaultfd_ctx_get(ctx); WRITE_ONCE(ctx->mmap_changing, true); } else { /* Drop uffd context if remap feature not enabled */ vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX; vma->vm_flags &= ~(VM_UFFD_WP | VM_UFFD_MISSING); } } void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *vm_ctx, unsigned long from, unsigned long to, unsigned long len) { struct userfaultfd_ctx *ctx = vm_ctx->ctx; struct userfaultfd_wait_queue ewq; if (!ctx) return; if (to & ~PAGE_MASK) { userfaultfd_ctx_put(ctx); return; } msg_init(&ewq.msg); ewq.msg.event = UFFD_EVENT_REMAP; ewq.msg.arg.remap.from = from; ewq.msg.arg.remap.to = to; ewq.msg.arg.remap.len = len; userfaultfd_event_wait_completion(ctx, &ewq); } bool userfaultfd_remove(struct vm_area_struct *vma, unsigned long start, unsigned long end) { struct mm_struct *mm = vma->vm_mm; struct userfaultfd_ctx *ctx; struct userfaultfd_wait_queue ewq; ctx = vma->vm_userfaultfd_ctx.ctx; if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_REMOVE)) return true; userfaultfd_ctx_get(ctx); WRITE_ONCE(ctx->mmap_changing, true); up_read(&mm->mmap_sem); msg_init(&ewq.msg); ewq.msg.event = UFFD_EVENT_REMOVE; ewq.msg.arg.remove.start = start; ewq.msg.arg.remove.end = end; userfaultfd_event_wait_completion(ctx, &ewq); return false; } static bool has_unmap_ctx(struct userfaultfd_ctx *ctx, struct list_head *unmaps, unsigned long start, unsigned long end) { struct userfaultfd_unmap_ctx *unmap_ctx; list_for_each_entry(unmap_ctx, unmaps, list) if (unmap_ctx->ctx == ctx && 
unmap_ctx->start == start && unmap_ctx->end == end) return true; return false; } int userfaultfd_unmap_prep(struct vm_area_struct *vma, unsigned long start, unsigned long end, struct list_head *unmaps) { for ( ; vma && vma->vm_start < end; vma = vma->vm_next) { struct userfaultfd_unmap_ctx *unmap_ctx; struct userfaultfd_ctx *ctx = vma->vm_userfaultfd_ctx.ctx; if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_UNMAP) || has_unmap_ctx(ctx, unmaps, start, end)) continue; unmap_ctx = kzalloc(sizeof(*unmap_ctx), GFP_KERNEL); if (!unmap_ctx) return -ENOMEM; userfaultfd_ctx_get(ctx); WRITE_ONCE(ctx->mmap_changing, true); unmap_ctx->ctx = ctx; unmap_ctx->start = start; unmap_ctx->end = end; list_add_tail(&unmap_ctx->list, unmaps); } return 0; } void userfaultfd_unmap_complete(struct mm_struct *mm, struct list_head *uf) { struct userfaultfd_unmap_ctx *ctx, *n; struct userfaultfd_wait_queue ewq; list_for_each_entry_safe(ctx, n, uf, list) { msg_init(&ewq.msg); ewq.msg.event = UFFD_EVENT_UNMAP; ewq.msg.arg.remove.start = ctx->start; ewq.msg.arg.remove.end = ctx->end; userfaultfd_event_wait_completion(ctx->ctx, &ewq); list_del(&ctx->list); kfree(ctx); } } static int userfaultfd_release(struct inode *inode, struct file *file) { struct userfaultfd_ctx *ctx = file->private_data; struct mm_struct *mm = ctx->mm; struct vm_area_struct *vma, *prev; /* len == 0 means wake all */ struct userfaultfd_wake_range range = { .len = 0, }; unsigned long new_flags; WRITE_ONCE(ctx->released, true); if (!mmget_not_zero(mm)) goto wakeup; /* * Flush page faults out of all CPUs. NOTE: all page faults * must be retried without returning VM_FAULT_SIGBUS if * userfaultfd_ctx_get() succeeds but vma->vma_userfault_ctx * changes while handle_userfault released the mmap_sem. So * it's critical that released is set to true (above), before * taking the mmap_sem for writing. */ down_write(&mm->mmap_sem); if (!mmget_still_valid(mm)) goto skip_mm; prev = NULL; for (vma = mm->mmap; vma; vma = vma->vm_next) { cond_resched(); BUG_ON(!!vma->vm_userfaultfd_ctx.ctx ^ !!(vma->vm_flags & (VM_UFFD_MISSING | VM_UFFD_WP))); if (vma->vm_userfaultfd_ctx.ctx != ctx) { prev = vma; continue; } new_flags = vma->vm_flags & ~(VM_UFFD_MISSING | VM_UFFD_WP); prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end, new_flags, vma->anon_vma, vma->vm_file, vma->vm_pgoff, vma_policy(vma), NULL_VM_UFFD_CTX); if (prev) vma = prev; else prev = vma; vma->vm_flags = new_flags; vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX; } skip_mm: up_write(&mm->mmap_sem); mmput(mm); wakeup: /* * After no new page faults can wait on this fault_*wqh, flush * the last page faults that may have been already waiting on * the fault_*wqh. 
*/ spin_lock(&ctx->fault_pending_wqh.lock); __wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, &range); __wake_up(&ctx->fault_wqh, TASK_NORMAL, 1, &range); spin_unlock(&ctx->fault_pending_wqh.lock); /* Flush pending events that may still wait on event_wqh */ wake_up_all(&ctx->event_wqh); wake_up_poll(&ctx->fd_wqh, EPOLLHUP); userfaultfd_ctx_put(ctx); return 0; } /* fault_pending_wqh.lock must be hold by the caller */ static inline struct userfaultfd_wait_queue *find_userfault_in( wait_queue_head_t *wqh) { wait_queue_entry_t *wq; struct userfaultfd_wait_queue *uwq; lockdep_assert_held(&wqh->lock); uwq = NULL; if (!waitqueue_active(wqh)) goto out; /* walk in reverse to provide FIFO behavior to read userfaults */ wq = list_last_entry(&wqh->head, typeof(*wq), entry); uwq = container_of(wq, struct userfaultfd_wait_queue, wq); out: return uwq; } static inline struct userfaultfd_wait_queue *find_userfault( struct userfaultfd_ctx *ctx) { return find_userfault_in(&ctx->fault_pending_wqh); } static inline struct userfaultfd_wait_queue *find_userfault_evt( struct userfaultfd_ctx *ctx) { return find_userfault_in(&ctx->event_wqh); } static __poll_t userfaultfd_poll(struct file *file, poll_table *wait) { struct userfaultfd_ctx *ctx = file->private_data; __poll_t ret; poll_wait(file, &ctx->fd_wqh, wait); switch (ctx->state) { case UFFD_STATE_WAIT_API: return EPOLLERR; case UFFD_STATE_RUNNING: /* * poll() never guarantees that read won't block. * userfaults can be waken before they're read(). */ if (unlikely(!(file->f_flags & O_NONBLOCK))) return EPOLLERR; /* * lockless access to see if there are pending faults * __pollwait last action is the add_wait_queue but * the spin_unlock would allow the waitqueue_active to * pass above the actual list_add inside * add_wait_queue critical section. So use a full * memory barrier to serialize the list_add write of * add_wait_queue() with the waitqueue_active read * below. */ ret = 0; smp_mb(); if (waitqueue_active(&ctx->fault_pending_wqh)) ret = EPOLLIN; else if (waitqueue_active(&ctx->event_wqh)) ret = EPOLLIN; return ret; default: WARN_ON_ONCE(1); return EPOLLERR; } } static const struct file_operations userfaultfd_fops; static int resolve_userfault_fork(struct userfaultfd_ctx *ctx, struct userfaultfd_ctx *new, struct uffd_msg *msg) { int fd; fd = anon_inode_getfd("[userfaultfd]", &userfaultfd_fops, new, O_RDWR | (new->flags & UFFD_SHARED_FCNTL_FLAGS)); if (fd < 0) return fd; msg->arg.reserved.reserved1 = 0; msg->arg.fork.ufd = fd; return 0; } static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait, struct uffd_msg *msg) { ssize_t ret; DECLARE_WAITQUEUE(wait, current); struct userfaultfd_wait_queue *uwq; /* * Handling fork event requires sleeping operations, so * we drop the event_wqh lock, then do these ops, then * lock it back and wake up the waiter. While the lock is * dropped the ewq may go away so we keep track of it * carefully. */ LIST_HEAD(fork_event); struct userfaultfd_ctx *fork_nctx = NULL; /* always take the fd_wqh lock before the fault_pending_wqh lock */ spin_lock_irq(&ctx->fd_wqh.lock); __add_wait_queue(&ctx->fd_wqh, &wait); for (;;) { set_current_state(TASK_INTERRUPTIBLE); spin_lock(&ctx->fault_pending_wqh.lock); uwq = find_userfault(ctx); if (uwq) { /* * Use a seqcount to repeat the lockless check * in wake_userfault() to avoid missing * wakeups because during the refile both * waitqueue could become empty if this is the * only userfault. 
*/ write_seqcount_begin(&ctx->refile_seq); /* * The fault_pending_wqh.lock prevents the uwq * to disappear from under us. * * Refile this userfault from * fault_pending_wqh to fault_wqh, it's not * pending anymore after we read it. * * Use list_del() by hand (as * userfaultfd_wake_function also uses * list_del_init() by hand) to be sure nobody * changes __remove_wait_queue() to use * list_del_init() in turn breaking the * !list_empty_careful() check in * handle_userfault(). The uwq->wq.head list * must never be empty at any time during the * refile, or the waitqueue could disappear * from under us. The "wait_queue_head_t" * parameter of __remove_wait_queue() is unused * anyway. */ list_del(&uwq->wq.entry); add_wait_queue(&ctx->fault_wqh, &uwq->wq); write_seqcount_end(&ctx->refile_seq); /* careful to always initialize msg if ret == 0 */ *msg = uwq->msg; spin_unlock(&ctx->fault_pending_wqh.lock); ret = 0; break; } spin_unlock(&ctx->fault_pending_wqh.lock); spin_lock(&ctx->event_wqh.lock); uwq = find_userfault_evt(ctx); if (uwq) { *msg = uwq->msg; if (uwq->msg.event == UFFD_EVENT_FORK) { fork_nctx = (struct userfaultfd_ctx *) (unsigned long) uwq->msg.arg.reserved.reserved1; list_move(&uwq->wq.entry, &fork_event); /* * fork_nctx can be freed as soon as * we drop the lock, unless we take a * reference on it. */ userfaultfd_ctx_get(fork_nctx); spin_unlock(&ctx->event_wqh.lock); ret = 0; break; } userfaultfd_event_complete(ctx, uwq); spin_unlock(&ctx->event_wqh.lock); ret = 0; break; } spin_unlock(&ctx->event_wqh.lock); if (signal_pending(current)) { ret = -ERESTARTSYS; break; } if (no_wait) { ret = -EAGAIN; break; } spin_unlock_irq(&ctx->fd_wqh.lock); schedule(); spin_lock_irq(&ctx->fd_wqh.lock); } __remove_wait_queue(&ctx->fd_wqh, &wait); __set_current_state(TASK_RUNNING); spin_unlock_irq(&ctx->fd_wqh.lock); if (!ret && msg->event == UFFD_EVENT_FORK) { ret = resolve_userfault_fork(ctx, fork_nctx, msg); spin_lock(&ctx->event_wqh.lock); if (!list_empty(&fork_event)) { /* * The fork thread didn't abort, so we can * drop the temporary refcount. */ userfaultfd_ctx_put(fork_nctx); uwq = list_first_entry(&fork_event, typeof(*uwq), wq.entry); /* * If fork_event list wasn't empty and in turn * the event wasn't already released by fork * (the event is allocated on fork kernel * stack), put the event back to its place in * the event_wq. fork_event head will be freed * as soon as we return so the event cannot * stay queued there no matter the current * "ret" value. */ list_del(&uwq->wq.entry); __add_wait_queue(&ctx->event_wqh, &uwq->wq); /* * Leave the event in the waitqueue and report * error to userland if we failed to resolve * the userfault fork. */ if (likely(!ret)) userfaultfd_event_complete(ctx, uwq); } else { /* * Here the fork thread aborted and the * refcount from the fork thread on fork_nctx * has already been released. We still hold * the reference we took before releasing the * lock above. If resolve_userfault_fork * failed we've to drop it because the * fork_nctx has to be freed in such case. If * it succeeded we'll hold it because the new * uffd references it. */ if (ret) userfaultfd_ctx_put(fork_nctx); } spin_unlock(&ctx->event_wqh.lock); } return ret; } static ssize_t userfaultfd_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { struct userfaultfd_ctx *ctx = file->private_data; ssize_t _ret, ret = 0; struct uffd_msg msg; int no_wait = file->f_flags & O_NONBLOCK; if (ctx->state == UFFD_STATE_WAIT_API) return -EINVAL; for (;;) { if (count < sizeof(msg)) return ret ? 
ret : -EINVAL; _ret = userfaultfd_ctx_read(ctx, no_wait, &msg); if (_ret < 0) return ret ? ret : _ret; if (copy_to_user((__u64 __user *) buf, &msg, sizeof(msg))) return ret ? ret : -EFAULT; ret += sizeof(msg); buf += sizeof(msg); count -= sizeof(msg); /* * Allow to read more than one fault at time but only * block if waiting for the very first one. */ no_wait = O_NONBLOCK; } } static void __wake_userfault(struct userfaultfd_ctx *ctx, struct userfaultfd_wake_range *range) { spin_lock(&ctx->fault_pending_wqh.lock); /* wake all in the range and autoremove */ if (waitqueue_active(&ctx->fault_pending_wqh)) __wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, range); if (waitqueue_active(&ctx->fault_wqh)) __wake_up(&ctx->fault_wqh, TASK_NORMAL, 1, range); spin_unlock(&ctx->fault_pending_wqh.lock); } static __always_inline void wake_userfault(struct userfaultfd_ctx *ctx, struct userfaultfd_wake_range *range) { unsigned seq; bool need_wakeup; /* * To be sure waitqueue_active() is not reordered by the CPU * before the pagetable update, use an explicit SMP memory * barrier here. PT lock release or up_read(mmap_sem) still * have release semantics that can allow the * waitqueue_active() to be reordered before the pte update. */ smp_mb(); /* * Use waitqueue_active because it's very frequent to * change the address space atomically even if there are no * userfaults yet. So we take the spinlock only when we're * sure we've userfaults to wake. */ do { seq = read_seqcount_begin(&ctx->refile_seq); need_wakeup = waitqueue_active(&ctx->fault_pending_wqh) || waitqueue_active(&ctx->fault_wqh); cond_resched(); } while (read_seqcount_retry(&ctx->refile_seq, seq)); if (need_wakeup) __wake_userfault(ctx, range); } static __always_inline int validate_range(struct mm_struct *mm, __u64 start, __u64 len) { __u64 task_size = mm->task_size; if (start & ~PAGE_MASK) return -EINVAL; if (len & ~PAGE_MASK) return -EINVAL; if (!len) return -EINVAL; if (start < mmap_min_addr) return -EINVAL; if (start >= task_size) return -EINVAL; if (len > task_size - start) return -EINVAL; return 0; } static inline bool vma_can_userfault(struct vm_area_struct *vma) { return vma_is_anonymous(vma) || is_vm_hugetlb_page(vma) || vma_is_shmem(vma); } static int userfaultfd_register(struct userfaultfd_ctx *ctx, unsigned long arg) { struct mm_struct *mm = ctx->mm; struct vm_area_struct *vma, *prev, *cur; int ret; struct uffdio_register uffdio_register; struct uffdio_register __user *user_uffdio_register; unsigned long vm_flags, new_flags; bool found; bool basic_ioctls; unsigned long start, end, vma_end; user_uffdio_register = (struct uffdio_register __user *) arg; ret = -EFAULT; if (copy_from_user(&uffdio_register, user_uffdio_register, sizeof(uffdio_register)-sizeof(__u64))) goto out; ret = -EINVAL; if (!uffdio_register.mode) goto out; if (uffdio_register.mode & ~(UFFDIO_REGISTER_MODE_MISSING| UFFDIO_REGISTER_MODE_WP)) goto out; vm_flags = 0; if (uffdio_register.mode & UFFDIO_REGISTER_MODE_MISSING) vm_flags |= VM_UFFD_MISSING; if (uffdio_register.mode & UFFDIO_REGISTER_MODE_WP) { vm_flags |= VM_UFFD_WP; /* * FIXME: remove the below error constraint by * implementing the wprotect tracking mode. 
*/ ret = -EINVAL; goto out; } ret = validate_range(mm, uffdio_register.range.start, uffdio_register.range.len); if (ret) goto out; start = uffdio_register.range.start; end = start + uffdio_register.range.len; ret = -ENOMEM; if (!mmget_not_zero(mm)) goto out; down_write(&mm->mmap_sem); if (!mmget_still_valid(mm)) goto out_unlock; vma = find_vma_prev(mm, start, &prev); if (!vma) goto out_unlock; /* check that there's at least one vma in the range */ ret = -EINVAL; if (vma->vm_start >= end) goto out_unlock; /* * If the first vma contains huge pages, make sure start address * is aligned to huge page size. */ if (is_vm_hugetlb_page(vma)) { unsigned long vma_hpagesize = vma_kernel_pagesize(vma); if (start & (vma_hpagesize - 1)) goto out_unlock; } /* * Search for not compatible vmas. */ found = false; basic_ioctls = false; for (cur = vma; cur && cur->vm_start < end; cur = cur->vm_next) { cond_resched(); BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^ !!(cur->vm_flags & (VM_UFFD_MISSING | VM_UFFD_WP))); /* check not compatible vmas */ ret = -EINVAL; if (!vma_can_userfault(cur)) goto out_unlock; /* * UFFDIO_COPY will fill file holes even without * PROT_WRITE. This check enforces that if this is a * MAP_SHARED, the process has write permission to the backing * file. If VM_MAYWRITE is set it also enforces that on a * MAP_SHARED vma: there is no F_WRITE_SEAL and no further * F_WRITE_SEAL can be taken until the vma is destroyed. */ ret = -EPERM; if (unlikely(!(cur->vm_flags & VM_MAYWRITE))) goto out_unlock; /* * If this vma contains ending address, and huge pages * check alignment. */ if (is_vm_hugetlb_page(cur) && end <= cur->vm_end && end > cur->vm_start) { unsigned long vma_hpagesize = vma_kernel_pagesize(cur); ret = -EINVAL; if (end & (vma_hpagesize - 1)) goto out_unlock; } /* * Check that this vma isn't already owned by a * different userfaultfd. We can't allow more than one * userfaultfd to own a single vma simultaneously or we * wouldn't know which one to deliver the userfaults to. */ ret = -EBUSY; if (cur->vm_userfaultfd_ctx.ctx && cur->vm_userfaultfd_ctx.ctx != ctx) goto out_unlock; /* * Note vmas containing huge pages */ if (is_vm_hugetlb_page(cur)) basic_ioctls = true; found = true; } BUG_ON(!found); if (vma->vm_start < start) prev = vma; ret = 0; do { cond_resched(); BUG_ON(!vma_can_userfault(vma)); BUG_ON(vma->vm_userfaultfd_ctx.ctx && vma->vm_userfaultfd_ctx.ctx != ctx); WARN_ON(!(vma->vm_flags & VM_MAYWRITE)); /* * Nothing to do: this vma is already registered into this * userfaultfd and with the right tracking mode too. */ if (vma->vm_userfaultfd_ctx.ctx == ctx && (vma->vm_flags & vm_flags) == vm_flags) goto skip; if (vma->vm_start > start) start = vma->vm_start; vma_end = min(end, vma->vm_end); new_flags = (vma->vm_flags & ~vm_flags) | vm_flags; prev = vma_merge(mm, prev, start, vma_end, new_flags, vma->anon_vma, vma->vm_file, vma->vm_pgoff, vma_policy(vma), ((struct vm_userfaultfd_ctx){ ctx })); if (prev) { vma = prev; goto next; } if (vma->vm_start < start) { ret = split_vma(mm, vma, start, 1); if (ret) break; } if (vma->vm_end > end) { ret = split_vma(mm, vma, end, 0); if (ret) break; } next: /* * In the vma_merge() successful mprotect-like case 8: * the next vma was merged into the current one and * the current one has not been updated yet. 
*/ vma->vm_flags = new_flags; vma->vm_userfaultfd_ctx.ctx = ctx; skip: prev = vma; start = vma->vm_end; vma = vma->vm_next; } while (vma && vma->vm_start < end); out_unlock: up_write(&mm->mmap_sem); mmput(mm); if (!ret) { /* * Now that we scanned all vmas we can already tell * userland which ioctls methods are guaranteed to * succeed on this range. */ if (put_user(basic_ioctls ? UFFD_API_RANGE_IOCTLS_BASIC : UFFD_API_RANGE_IOCTLS, &user_uffdio_register->ioctls)) ret = -EFAULT; } out: return ret; } static int userfaultfd_unregister(struct userfaultfd_ctx *ctx, unsigned long arg) { struct mm_struct *mm = ctx->mm; struct vm_area_struct *vma, *prev, *cur; int ret; struct uffdio_range uffdio_unregister; unsigned long new_flags; bool found; unsigned long start, end, vma_end; const void __user *buf = (void __user *)arg; ret = -EFAULT; if (copy_from_user(&uffdio_unregister, buf, sizeof(uffdio_unregister))) goto out; ret = validate_range(mm, uffdio_unregister.start, uffdio_unregister.len); if (ret) goto out; start = uffdio_unregister.start; end = start + uffdio_unregister.len; ret = -ENOMEM; if (!mmget_not_zero(mm)) goto out; down_write(&mm->mmap_sem); if (!mmget_still_valid(mm)) goto out_unlock; vma = find_vma_prev(mm, start, &prev); if (!vma) goto out_unlock; /* check that there's at least one vma in the range */ ret = -EINVAL; if (vma->vm_start >= end) goto out_unlock; /* * If the first vma contains huge pages, make sure start address * is aligned to huge page size. */ if (is_vm_hugetlb_page(vma)) { unsigned long vma_hpagesize = vma_kernel_pagesize(vma); if (start & (vma_hpagesize - 1)) goto out_unlock; } /* * Search for not compatible vmas. */ found = false; ret = -EINVAL; for (cur = vma; cur && cur->vm_start < end; cur = cur->vm_next) { cond_resched(); BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^ !!(cur->vm_flags & (VM_UFFD_MISSING | VM_UFFD_WP))); /* * Check not compatible vmas, not strictly required * here as not compatible vmas cannot have an * userfaultfd_ctx registered on them, but this * provides for more strict behavior to notice * unregistration errors. */ if (!vma_can_userfault(cur)) goto out_unlock; found = true; } BUG_ON(!found); if (vma->vm_start < start) prev = vma; ret = 0; do { cond_resched(); BUG_ON(!vma_can_userfault(vma)); /* * Nothing to do: this vma is already registered into this * userfaultfd and with the right tracking mode too. */ if (!vma->vm_userfaultfd_ctx.ctx) goto skip; WARN_ON(!(vma->vm_flags & VM_MAYWRITE)); if (vma->vm_start > start) start = vma->vm_start; vma_end = min(end, vma->vm_end); if (userfaultfd_missing(vma)) { /* * Wake any concurrent pending userfault while * we unregister, so they will not hang * permanently and it avoids userland to call * UFFDIO_WAKE explicitly. */ struct userfaultfd_wake_range range; range.start = start; range.len = vma_end - start; wake_userfault(vma->vm_userfaultfd_ctx.ctx, &range); } new_flags = vma->vm_flags & ~(VM_UFFD_MISSING | VM_UFFD_WP); prev = vma_merge(mm, prev, start, vma_end, new_flags, vma->anon_vma, vma->vm_file, vma->vm_pgoff, vma_policy(vma), NULL_VM_UFFD_CTX); if (prev) { vma = prev; goto next; } if (vma->vm_start < start) { ret = split_vma(mm, vma, start, 1); if (ret) break; } if (vma->vm_end > end) { ret = split_vma(mm, vma, end, 0); if (ret) break; } next: /* * In the vma_merge() successful mprotect-like case 8: * the next vma was merged into the current one and * the current one has not been updated yet. 
*/ vma->vm_flags = new_flags; vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX; skip: prev = vma; start = vma->vm_end; vma = vma->vm_next; } while (vma && vma->vm_start < end); out_unlock: up_write(&mm->mmap_sem); mmput(mm); out: return ret; } /* * userfaultfd_wake may be used in combination with the * UFFDIO_*_MODE_DONTWAKE to wakeup userfaults in batches. */ static int userfaultfd_wake(struct userfaultfd_ctx *ctx, unsigned long arg) { int ret; struct uffdio_range uffdio_wake; struct userfaultfd_wake_range range; const void __user *buf = (void __user *)arg; ret = -EFAULT; if (copy_from_user(&uffdio_wake, buf, sizeof(uffdio_wake))) goto out; ret = validate_range(ctx->mm, uffdio_wake.start, uffdio_wake.len); if (ret) goto out; range.start = uffdio_wake.start; range.len = uffdio_wake.len; /* * len == 0 means wake all and we don't want to wake all here, * so check it again to be sure. */ VM_BUG_ON(!range.len); wake_userfault(ctx, &range); ret = 0; out: return ret; } static int userfaultfd_copy(struct userfaultfd_ctx *ctx, unsigned long arg) { __s64 ret; struct uffdio_copy uffdio_copy; struct uffdio_copy __user *user_uffdio_copy; struct userfaultfd_wake_range range; user_uffdio_copy = (struct uffdio_copy __user *) arg; ret = -EAGAIN; if (READ_ONCE(ctx->mmap_changing)) goto out; ret = -EFAULT; if (copy_from_user(&uffdio_copy, user_uffdio_copy, /* don't copy "copy" last field */ sizeof(uffdio_copy)-sizeof(__s64))) goto out; ret = validate_range(ctx->mm, uffdio_copy.dst, uffdio_copy.len); if (ret) goto out; /* * double check for wraparound just in case. copy_from_user() * will later check uffdio_copy.src + uffdio_copy.len to fit * in the userland range. */ ret = -EINVAL; if (uffdio_copy.src + uffdio_copy.len <= uffdio_copy.src) goto out; if (uffdio_copy.mode & ~UFFDIO_COPY_MODE_DONTWAKE) goto out; if (mmget_not_zero(ctx->mm)) { ret = mcopy_atomic(ctx->mm, uffdio_copy.dst, uffdio_copy.src, uffdio_copy.len, &ctx->mmap_changing); mmput(ctx->mm); } else { return -ESRCH; } if (unlikely(put_user(ret, &user_uffdio_copy->copy))) return -EFAULT; if (ret < 0) goto out; BUG_ON(!ret); /* len == 0 would wake all */ range.len = ret; if (!(uffdio_copy.mode & UFFDIO_COPY_MODE_DONTWAKE)) { range.start = uffdio_copy.dst; wake_userfault(ctx, &range); } ret = range.len == uffdio_copy.len ? 0 : -EAGAIN; out: return ret; } static int userfaultfd_zeropage(struct userfaultfd_ctx *ctx, unsigned long arg) { __s64 ret; struct uffdio_zeropage uffdio_zeropage; struct uffdio_zeropage __user *user_uffdio_zeropage; struct userfaultfd_wake_range range; user_uffdio_zeropage = (struct uffdio_zeropage __user *) arg; ret = -EAGAIN; if (READ_ONCE(ctx->mmap_changing)) goto out; ret = -EFAULT; if (copy_from_user(&uffdio_zeropage, user_uffdio_zeropage, /* don't copy "zeropage" last field */ sizeof(uffdio_zeropage)-sizeof(__s64))) goto out; ret = validate_range(ctx->mm, uffdio_zeropage.range.start, uffdio_zeropage.range.len); if (ret) goto out; ret = -EINVAL; if (uffdio_zeropage.mode & ~UFFDIO_ZEROPAGE_MODE_DONTWAKE) goto out; if (mmget_not_zero(ctx->mm)) { ret = mfill_zeropage(ctx->mm, uffdio_zeropage.range.start, uffdio_zeropage.range.len, &ctx->mmap_changing); mmput(ctx->mm); } else { return -ESRCH; } if (unlikely(put_user(ret, &user_uffdio_zeropage->zeropage))) return -EFAULT; if (ret < 0) goto out; /* len == 0 would wake all */ BUG_ON(!ret); range.len = ret; if (!(uffdio_zeropage.mode & UFFDIO_ZEROPAGE_MODE_DONTWAKE)) { range.start = uffdio_zeropage.range.start; wake_userfault(ctx, &range); } ret = range.len == uffdio_zeropage.range.len ? 
0 : -EAGAIN; out: return ret; } static inline unsigned int uffd_ctx_features(__u64 user_features) { /* * For the current set of features the bits just coincide */ return (unsigned int)user_features; } /* * userland asks for a certain API version and we return which bits * and ioctl commands are implemented in this kernel for such API * version or -EINVAL if unknown. */ static int userfaultfd_api(struct userfaultfd_ctx *ctx, unsigned long arg) { struct uffdio_api uffdio_api; void __user *buf = (void __user *)arg; int ret; __u64 features; ret = -EINVAL; if (ctx->state != UFFD_STATE_WAIT_API) goto out; ret = -EFAULT; if (copy_from_user(&uffdio_api, buf, sizeof(uffdio_api))) goto out; features = uffdio_api.features; if (uffdio_api.api != UFFD_API || (features & ~UFFD_API_FEATURES)) { memset(&uffdio_api, 0, sizeof(uffdio_api)); if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api))) goto out; ret = -EINVAL; goto out; } /* report all available features and ioctls to userland */ uffdio_api.features = UFFD_API_FEATURES; uffdio_api.ioctls = UFFD_API_IOCTLS; ret = -EFAULT; if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api))) goto out; ctx->state = UFFD_STATE_RUNNING; /* only enable the requested features for this uffd context */ ctx->features = uffd_ctx_features(features); ret = 0; out: return ret; } static long userfaultfd_ioctl(struct file *file, unsigned cmd, unsigned long arg) { int ret = -EINVAL; struct userfaultfd_ctx *ctx = file->private_data; if (cmd != UFFDIO_API && ctx->state == UFFD_STATE_WAIT_API) return -EINVAL; switch(cmd) { case UFFDIO_API: ret = userfaultfd_api(ctx, arg); break; case UFFDIO_REGISTER: ret = userfaultfd_register(ctx, arg); break; case UFFDIO_UNREGISTER: ret = userfaultfd_unregister(ctx, arg); break; case UFFDIO_WAKE: ret = userfaultfd_wake(ctx, arg); break; case UFFDIO_COPY: ret = userfaultfd_copy(ctx, arg); break; case UFFDIO_ZEROPAGE: ret = userfaultfd_zeropage(ctx, arg); break; } return ret; } #ifdef CONFIG_PROC_FS static void userfaultfd_show_fdinfo(struct seq_file *m, struct file *f) { struct userfaultfd_ctx *ctx = f->private_data; wait_queue_entry_t *wq; unsigned long pending = 0, total = 0; spin_lock(&ctx->fault_pending_wqh.lock); list_for_each_entry(wq, &ctx->fault_pending_wqh.head, entry) { pending++; total++; } list_for_each_entry(wq, &ctx->fault_wqh.head, entry) { total++; } spin_unlock(&ctx->fault_pending_wqh.lock); /* * If more protocols will be added, there will be all shown * separated by a space. Like this: * protocols: aa:... bb:... */ seq_printf(m, "pending:\t%lu\ntotal:\t%lu\nAPI:\t%Lx:%x:%Lx\n", pending, total, UFFD_API, ctx->features, UFFD_API_IOCTLS|UFFD_API_RANGE_IOCTLS); } #endif static const struct file_operations userfaultfd_fops = { #ifdef CONFIG_PROC_FS .show_fdinfo = userfaultfd_show_fdinfo, #endif .release = userfaultfd_release, .poll = userfaultfd_poll, .read = userfaultfd_read, .unlocked_ioctl = userfaultfd_ioctl, .compat_ioctl = userfaultfd_ioctl, .llseek = noop_llseek, }; static void init_once_userfaultfd_ctx(void *mem) { struct userfaultfd_ctx *ctx = (struct userfaultfd_ctx *) mem; init_waitqueue_head(&ctx->fault_pending_wqh); init_waitqueue_head(&ctx->fault_wqh); init_waitqueue_head(&ctx->event_wqh); init_waitqueue_head(&ctx->fd_wqh); seqcount_init(&ctx->refile_seq); } SYSCALL_DEFINE1(userfaultfd, int, flags) { struct userfaultfd_ctx *ctx; int fd; BUG_ON(!current->mm); /* Check the UFFD_* constants for consistency. 
*/ BUILD_BUG_ON(UFFD_CLOEXEC != O_CLOEXEC); BUILD_BUG_ON(UFFD_NONBLOCK != O_NONBLOCK); if (flags & ~UFFD_SHARED_FCNTL_FLAGS) return -EINVAL; ctx = kmem_cache_alloc(userfaultfd_ctx_cachep, GFP_KERNEL); if (!ctx) return -ENOMEM; refcount_set(&ctx->refcount, 1); ctx->flags = flags; ctx->features = 0; ctx->state = UFFD_STATE_WAIT_API; ctx->released = false; ctx->mmap_changing = false; ctx->mm = current->mm; /* prevent the mm struct to be freed */ mmgrab(ctx->mm); fd = anon_inode_getfd("[userfaultfd]", &userfaultfd_fops, ctx, O_RDWR | (flags & UFFD_SHARED_FCNTL_FLAGS)); if (fd < 0) { mmdrop(ctx->mm); kmem_cache_free(userfaultfd_ctx_cachep, ctx); } return fd; } static int __init userfaultfd_init(void) { userfaultfd_ctx_cachep = kmem_cache_create("userfaultfd_ctx_cache", sizeof(struct userfaultfd_ctx), 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, init_once_userfaultfd_ctx); return 0; } __initcall(userfaultfd_init);
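/*
 * Illustrative userland usage sketch (compiled out with #if 0): a minimal
 * example of how the UFFDIO_* ioctls implemented above are typically driven
 * by a monitor process -- create the fd, negotiate UFFD_API, register a
 * missing-page range, then resolve faults read from the fd with UFFDIO_COPY.
 * This is only a sketch and not part of the original file: it registers a
 * single anonymous page, fills it with an arbitrary 0xaa pattern, and omits
 * most error handling; the overall flow follows the userfaultfd(2) man page.
 */
#if 0
#include <linux/userfaultfd.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <pthread.h>
#include <unistd.h>
#include <fcntl.h>
#include <poll.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>

static int uffd;
static long page;

/* Fault handler: wait for one missing-page event and resolve it. */
static void *fault_handler(void *unused)
{
	struct pollfd pfd = { .fd = uffd, .events = POLLIN };
	struct uffd_msg msg;

	while (poll(&pfd, 1, -1) > 0) {
		if (read(uffd, &msg, sizeof(msg)) != sizeof(msg))
			continue;
		if (msg.event != UFFD_EVENT_PAGEFAULT)
			continue;
		/* Copy a prepared page into place; unless DONTWAKE is set,
		 * UFFDIO_COPY also wakes the thread blocked on the fault. */
		char *src = malloc(page);
		memset(src, 0xaa, page);
		struct uffdio_copy copy = {
			.dst  = msg.arg.pagefault.address &
				~(unsigned long long)(page - 1),
			.src  = (unsigned long)src,
			.len  = page,
			.mode = 0,
		};
		ioctl(uffd, UFFDIO_COPY, &copy);
		free(src);
		return NULL;
	}
	return NULL;
}

int main(void)
{
	page = sysconf(_SC_PAGESIZE);

	/* 1. Create the userfaultfd and negotiate the API version. */
	uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
	struct uffdio_api api = { .api = UFFD_API, .features = 0 };
	if (uffd < 0 || ioctl(uffd, UFFDIO_API, &api) < 0)
		return 1;

	/* 2. Register an anonymous mapping for missing-page tracking. */
	char *area = mmap(NULL, page, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	struct uffdio_register reg = {
		.range = { .start = (unsigned long)area, .len = page },
		.mode  = UFFDIO_REGISTER_MODE_MISSING,
	};
	if (area == MAP_FAILED || ioctl(uffd, UFFDIO_REGISTER, &reg) < 0)
		return 1;

	/* 3. The first touch of the registered page blocks until the
	 *    handler thread fills it with UFFDIO_COPY. */
	pthread_t thr;
	pthread_create(&thr, NULL, fault_handler, NULL);
	printf("first byte after fault resolution: 0x%02x\n",
	       (unsigned char)area[0]);
	pthread_join(thr, NULL);
	return 0;
}
#endif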
./CrossVul/dataset_final_sorted/CWE-362/c/good_829_2
crossvul-cpp_data_good_3971_0
/* * OpenVPN -- An application to securely tunnel IP networks * over a single TCP/UDP port, with support for SSL/TLS-based * session authentication and key exchange, * packet encryption, packet authentication, and * packet compression. * * Copyright (C) 2002-2018 OpenVPN Inc <sales@openvpn.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifdef HAVE_CONFIG_H #include "config.h" #elif defined(_MSC_VER) #include "config-msvc.h" #endif #ifdef HAVE_SYS_INOTIFY_H #include <sys/inotify.h> #define INOTIFY_EVENT_BUFFER_SIZE 16384 #endif #include "syshead.h" #if P2MP_SERVER #include "forward.h" #include "multi.h" #include "push.h" #include "run_command.h" #include "otime.h" #include "pf.h" #include "gremlin.h" #include "mstats.h" #include "ssl_verify.h" #include "ssl_ncp.h" #include "vlan.h" #include <inttypes.h> #include "memdbg.h" #include "crypto_backend.h" /*#define MULTI_DEBUG_EVENT_LOOP*/ #ifdef MULTI_DEBUG_EVENT_LOOP static const char * id(struct multi_instance *mi) { if (mi) { return tls_common_name(mi->context.c2.tls_multi, false); } else { return "NULL"; } } #endif #ifdef MANAGEMENT_DEF_AUTH static void set_cc_config(struct multi_instance *mi, struct buffer_list *cc_config) { if (mi->cc_config) { buffer_list_free(mi->cc_config); } mi->cc_config = cc_config; } #endif static inline void update_mstat_n_clients(const int n_clients) { #ifdef ENABLE_MEMSTATS if (mmap_stats) { mmap_stats->n_clients = n_clients; } #endif } static bool learn_address_script(const struct multi_context *m, const struct multi_instance *mi, const char *op, const struct mroute_addr *addr) { struct gc_arena gc = gc_new(); struct env_set *es; bool ret = true; struct plugin_list *plugins; /* get environmental variable source */ if (mi && mi->context.c2.es) { es = mi->context.c2.es; } else { es = env_set_create(&gc); } /* get plugin source */ if (mi) { plugins = mi->context.plugins; } else { plugins = m->top.plugins; } if (plugin_defined(plugins, OPENVPN_PLUGIN_LEARN_ADDRESS)) { struct argv argv = argv_new(); argv_printf(&argv, "%s %s", op, mroute_addr_print(addr, &gc)); if (mi) { argv_printf_cat(&argv, "%s", tls_common_name(mi->context.c2.tls_multi, false)); } if (plugin_call(plugins, OPENVPN_PLUGIN_LEARN_ADDRESS, &argv, NULL, es) != OPENVPN_PLUGIN_FUNC_SUCCESS) { msg(M_WARN, "WARNING: learn-address plugin call failed"); ret = false; } argv_free(&argv); } if (m->top.options.learn_address_script) { struct argv argv = argv_new(); setenv_str(es, "script_type", "learn-address"); argv_parse_cmd(&argv, m->top.options.learn_address_script); argv_printf_cat(&argv, "%s %s", op, mroute_addr_print(addr, &gc)); if (mi) { argv_printf_cat(&argv, "%s", tls_common_name(mi->context.c2.tls_multi, false)); } if (!openvpn_run_script(&argv, es, 0, "--learn-address")) { ret = false; } argv_free(&argv); } gc_free(&gc); return ret; } void multi_ifconfig_pool_persist(struct multi_context *m, bool force) { /* write pool data to file */ if (m->ifconfig_pool && 
m->top.c1.ifconfig_pool_persist && (force || ifconfig_pool_write_trigger(m->top.c1.ifconfig_pool_persist))) { ifconfig_pool_write(m->top.c1.ifconfig_pool_persist, m->ifconfig_pool); } } static void multi_reap_range(const struct multi_context *m, int start_bucket, int end_bucket) { struct gc_arena gc = gc_new(); struct hash_iterator hi; struct hash_element *he; if (start_bucket < 0) { start_bucket = 0; end_bucket = hash_n_buckets(m->vhash); } dmsg(D_MULTI_DEBUG, "MULTI: REAP range %d -> %d", start_bucket, end_bucket); hash_iterator_init_range(m->vhash, &hi, start_bucket, end_bucket); while ((he = hash_iterator_next(&hi)) != NULL) { struct multi_route *r = (struct multi_route *) he->value; if (!multi_route_defined(m, r)) { dmsg(D_MULTI_DEBUG, "MULTI: REAP DEL %s", mroute_addr_print(&r->addr, &gc)); learn_address_script(m, NULL, "delete", &r->addr); multi_route_del(r); hash_iterator_delete_element(&hi); } } hash_iterator_free(&hi); gc_free(&gc); } static void multi_reap_all(const struct multi_context *m) { multi_reap_range(m, -1, 0); } static struct multi_reap * multi_reap_new(int buckets_per_pass) { struct multi_reap *mr; ALLOC_OBJ(mr, struct multi_reap); mr->bucket_base = 0; mr->buckets_per_pass = buckets_per_pass; mr->last_call = now; return mr; } void multi_reap_process_dowork(const struct multi_context *m) { struct multi_reap *mr = m->reaper; if (mr->bucket_base >= hash_n_buckets(m->vhash)) { mr->bucket_base = 0; } multi_reap_range(m, mr->bucket_base, mr->bucket_base + mr->buckets_per_pass); mr->bucket_base += mr->buckets_per_pass; mr->last_call = now; } static void multi_reap_free(struct multi_reap *mr) { free(mr); } /* * How many buckets in vhash to reap per pass. */ static int reap_buckets_per_pass(int n_buckets) { return constrain_int(n_buckets / REAP_DIVISOR, REAP_MIN, REAP_MAX); } #ifdef MANAGEMENT_DEF_AUTH static uint32_t cid_hash_function(const void *key, uint32_t iv) { const unsigned long *k = (const unsigned long *)key; return (uint32_t) *k; } static bool cid_compare_function(const void *key1, const void *key2) { const unsigned long *k1 = (const unsigned long *)key1; const unsigned long *k2 = (const unsigned long *)key2; return *k1 == *k2; } #endif #ifdef ENABLE_ASYNC_PUSH static uint32_t /* * inotify watcher descriptors are used as hash value */ int_hash_function(const void *key, uint32_t iv) { return (unsigned long)key; } static bool int_compare_function(const void *key1, const void *key2) { return (unsigned long)key1 == (unsigned long)key2; } #endif /* * Main initialization function, init multi_context object. */ void multi_init(struct multi_context *m, struct context *t, bool tcp_mode, int thread_mode) { int dev = DEV_TYPE_UNDEF; msg(D_MULTI_LOW, "MULTI: multi_init called, r=%d v=%d", t->options.real_hash_size, t->options.virtual_hash_size); /* * Get tun/tap/null device type */ dev = dev_type_enum(t->options.dev, t->options.dev_type); /* * Init our multi_context object. */ CLEAR(*m); m->thread_mode = thread_mode; /* * Real address hash table (source port number is * considered to be part of the address). Used * to determine which client sent an incoming packet * which is seen on the TCP/UDP socket. */ m->hash = hash_init(t->options.real_hash_size, get_random(), mroute_addr_hash_function, mroute_addr_compare_function); /* * Virtual address hash table. Used to determine * which client to route a packet to. 
*/ m->vhash = hash_init(t->options.virtual_hash_size, get_random(), mroute_addr_hash_function, mroute_addr_compare_function); /* * This hash table is a clone of m->hash but with a * bucket size of one so that it can be used * for fast iteration through the list. */ m->iter = hash_init(1, get_random(), mroute_addr_hash_function, mroute_addr_compare_function); #ifdef MANAGEMENT_DEF_AUTH m->cid_hash = hash_init(t->options.real_hash_size, 0, cid_hash_function, cid_compare_function); #endif #ifdef ENABLE_ASYNC_PUSH /* * Mapping between inotify watch descriptors and * multi_instances. */ m->inotify_watchers = hash_init(t->options.real_hash_size, get_random(), int_hash_function, int_compare_function); #endif /* * This is our scheduler, for time-based wakeup * events. */ m->schedule = schedule_init(); /* * Limit frequency of incoming connections to control * DoS. */ m->new_connection_limiter = frequency_limit_init(t->options.cf_max, t->options.cf_per); /* * Allocate broadcast/multicast buffer list */ m->mbuf = mbuf_init(t->options.n_bcast_buf); /* * Different status file format options are available */ m->status_file_version = t->options.status_file_version; /* * Possibly allocate an ifconfig pool, do it * differently based on whether a tun or tap style * tunnel. */ if (t->options.ifconfig_pool_defined) { int pool_type = IFCONFIG_POOL_INDIV; if (dev == DEV_TYPE_TUN && t->options.topology == TOP_NET30) { pool_type = IFCONFIG_POOL_30NET; } m->ifconfig_pool = ifconfig_pool_init(pool_type, t->options.ifconfig_pool_start, t->options.ifconfig_pool_end, t->options.duplicate_cn, t->options.ifconfig_ipv6_pool_defined, t->options.ifconfig_ipv6_pool_base, t->options.ifconfig_ipv6_pool_netbits ); /* reload pool data from file */ if (t->c1.ifconfig_pool_persist) { ifconfig_pool_read(t->c1.ifconfig_pool_persist, m->ifconfig_pool); } } /* * Help us keep track of routing table. */ m->route_helper = mroute_helper_init(MULTI_CACHE_ROUTE_TTL); /* * Initialize route and instance reaper. */ m->reaper = multi_reap_new(reap_buckets_per_pass(t->options.virtual_hash_size)); /* * Get local ifconfig address */ CLEAR(m->local); ASSERT(t->c1.tuntap); mroute_extract_in_addr_t(&m->local, t->c1.tuntap->local); /* * Per-client limits */ m->max_clients = t->options.max_clients; m->instances = calloc(m->max_clients, sizeof(struct multi_instance *)); /* * Initialize multi-socket TCP I/O wait object */ if (tcp_mode) { m->mtcp = multi_tcp_init(t->options.max_clients, &m->max_clients); } m->tcp_queue_limit = t->options.tcp_queue_limit; /* * Allow client <-> client communication, without going through * tun/tap interface and network stack? 
*/ m->enable_c2c = t->options.enable_c2c; /* initialize stale routes check timer */ if (t->options.stale_routes_check_interval > 0) { msg(M_INFO, "Initializing stale route check timer to run every %i seconds and to removing routes with activity timeout older than %i seconds", t->options.stale_routes_check_interval, t->options.stale_routes_ageing_time); event_timeout_init(&m->stale_routes_check_et, t->options.stale_routes_check_interval, 0); } m->deferred_shutdown_signal.signal_received = 0; } const char * multi_instance_string(const struct multi_instance *mi, bool null, struct gc_arena *gc) { if (mi) { struct buffer out = alloc_buf_gc(MULTI_PREFIX_MAX_LENGTH, gc); const char *cn = tls_common_name(mi->context.c2.tls_multi, true); if (cn) { buf_printf(&out, "%s/", cn); } buf_printf(&out, "%s", mroute_addr_print(&mi->real, gc)); return BSTR(&out); } else if (null) { return NULL; } else { return "UNDEF"; } } static void generate_prefix(struct multi_instance *mi) { struct gc_arena gc = gc_new(); const char *prefix = multi_instance_string(mi, true, &gc); if (prefix) { strncpynt(mi->msg_prefix, prefix, sizeof(mi->msg_prefix)); } else { mi->msg_prefix[0] = '\0'; } set_prefix(mi); gc_free(&gc); } void ungenerate_prefix(struct multi_instance *mi) { mi->msg_prefix[0] = '\0'; set_prefix(mi); } static const char * mi_prefix(const struct multi_instance *mi) { if (mi && mi->msg_prefix[0]) { return mi->msg_prefix; } else { return "UNDEF_I"; } } /* * Tell the route helper about deleted iroutes so * that it can update its mask of currently used * CIDR netlengths. */ static void multi_del_iroutes(struct multi_context *m, struct multi_instance *mi) { const struct iroute *ir; const struct iroute_ipv6 *ir6; if (TUNNEL_TYPE(mi->context.c1.tuntap) == DEV_TYPE_TUN) { for (ir = mi->context.options.iroutes; ir != NULL; ir = ir->next) { mroute_helper_del_iroute46(m->route_helper, ir->netbits); } for (ir6 = mi->context.options.iroutes_ipv6; ir6 != NULL; ir6 = ir6->next) { mroute_helper_del_iroute46(m->route_helper, ir6->netbits); } } } static void setenv_stats(struct context *c) { setenv_counter(c->c2.es, "bytes_received", c->c2.link_read_bytes); setenv_counter(c->c2.es, "bytes_sent", c->c2.link_write_bytes); } static void multi_client_disconnect_setenv(struct multi_instance *mi) { /* setenv client real IP address */ setenv_trusted(mi->context.c2.es, get_link_socket_info(&mi->context)); /* setenv stats */ setenv_stats(&mi->context); /* setenv connection duration */ setenv_long_long(mi->context.c2.es, "time_duration", now - mi->created); } static void multi_client_disconnect_script(struct multi_instance *mi) { if ((mi->context.c2.context_auth == CAS_SUCCEEDED && mi->connection_established_flag) || mi->context.c2.context_auth == CAS_PARTIAL) { multi_client_disconnect_setenv(mi); if (plugin_defined(mi->context.plugins, OPENVPN_PLUGIN_CLIENT_DISCONNECT)) { if (plugin_call(mi->context.plugins, OPENVPN_PLUGIN_CLIENT_DISCONNECT, NULL, NULL, mi->context.c2.es) != OPENVPN_PLUGIN_FUNC_SUCCESS) { msg(M_WARN, "WARNING: client-disconnect plugin call failed"); } } if (mi->context.options.client_disconnect_script) { struct argv argv = argv_new(); setenv_str(mi->context.c2.es, "script_type", "client-disconnect"); argv_parse_cmd(&argv, mi->context.options.client_disconnect_script); openvpn_run_script(&argv, mi->context.c2.es, 0, "--client-disconnect"); argv_free(&argv); } #ifdef MANAGEMENT_DEF_AUTH if (management) { management_notify_client_close(management, &mi->context.c2.mda_context, mi->context.c2.es); } #endif } } void 
multi_close_instance(struct multi_context *m, struct multi_instance *mi, bool shutdown) { perf_push(PERF_MULTI_CLOSE_INSTANCE); ASSERT(!mi->halt); mi->halt = true; dmsg(D_MULTI_DEBUG, "MULTI: multi_close_instance called"); /* adjust current client connection count */ m->n_clients += mi->n_clients_delta; update_mstat_n_clients(m->n_clients); mi->n_clients_delta = 0; /* prevent dangling pointers */ if (m->pending == mi) { multi_set_pending(m, NULL); } if (m->earliest_wakeup == mi) { m->earliest_wakeup = NULL; } if (!shutdown) { if (mi->did_real_hash) { ASSERT(hash_remove(m->hash, &mi->real)); } if (mi->did_iter) { ASSERT(hash_remove(m->iter, &mi->real)); } #ifdef MANAGEMENT_DEF_AUTH if (mi->did_cid_hash) { ASSERT(hash_remove(m->cid_hash, &mi->context.c2.mda_context.cid)); } #endif #ifdef ENABLE_ASYNC_PUSH if (mi->inotify_watch != -1) { hash_remove(m->inotify_watchers, (void *) (unsigned long)mi->inotify_watch); mi->inotify_watch = -1; } #endif if (mi->context.c2.tls_multi->peer_id != MAX_PEER_ID) { m->instances[mi->context.c2.tls_multi->peer_id] = NULL; } schedule_remove_entry(m->schedule, (struct schedule_entry *) mi); ifconfig_pool_release(m->ifconfig_pool, mi->vaddr_handle, false); if (mi->did_iroutes) { multi_del_iroutes(m, mi); mi->did_iroutes = false; } if (m->mtcp) { multi_tcp_dereference_instance(m->mtcp, mi); } mbuf_dereference_instance(m->mbuf, mi); } #ifdef MANAGEMENT_DEF_AUTH set_cc_config(mi, NULL); #endif multi_client_disconnect_script(mi); if (mi->did_open_context) { close_context(&mi->context, SIGTERM, CC_GC_FREE); } multi_tcp_instance_specific_free(mi); ungenerate_prefix(mi); /* * Don't actually delete the instance memory allocation yet, * because virtual routes may still point to it. Let the * vhash reaper deal with it. */ multi_instance_dec_refcount(mi); perf_pop(); } /* * Called on shutdown or restart. */ void multi_uninit(struct multi_context *m) { if (m->thread_mode & MC_WORK_THREAD) { multi_top_free(m); m->thread_mode = MC_UNDEF; } else if (m->thread_mode) { if (m->hash) { struct hash_iterator hi; struct hash_element *he; hash_iterator_init(m->iter, &hi); while ((he = hash_iterator_next(&hi))) { struct multi_instance *mi = (struct multi_instance *) he->value; mi->did_iter = false; multi_close_instance(m, mi, true); } hash_iterator_free(&hi); multi_reap_all(m); hash_free(m->hash); hash_free(m->vhash); hash_free(m->iter); #ifdef MANAGEMENT_DEF_AUTH hash_free(m->cid_hash); #endif m->hash = NULL; free(m->instances); #ifdef ENABLE_ASYNC_PUSH hash_free(m->inotify_watchers); m->inotify_watchers = NULL; #endif schedule_free(m->schedule); mbuf_free(m->mbuf); ifconfig_pool_free(m->ifconfig_pool); frequency_limit_free(m->new_connection_limiter); multi_reap_free(m->reaper); mroute_helper_free(m->route_helper); multi_tcp_free(m->mtcp); m->thread_mode = MC_UNDEF; } } } /* * Create a client instance object for a newly connected client. 
*/ struct multi_instance * multi_create_instance(struct multi_context *m, const struct mroute_addr *real) { struct gc_arena gc = gc_new(); struct multi_instance *mi; perf_push(PERF_MULTI_CREATE_INSTANCE); msg(D_MULTI_MEDIUM, "MULTI: multi_create_instance called"); ALLOC_OBJ_CLEAR(mi, struct multi_instance); mi->gc = gc_new(); multi_instance_inc_refcount(mi); mi->vaddr_handle = -1; mi->created = now; mroute_addr_init(&mi->real); if (real) { mi->real = *real; generate_prefix(mi); } mi->did_open_context = true; inherit_context_child(&mi->context, &m->top); if (IS_SIG(&mi->context)) { goto err; } mi->context.c2.context_auth = CAS_PENDING; if (hash_n_elements(m->hash) >= m->max_clients) { msg(D_MULTI_ERRORS, "MULTI: new incoming connection would exceed maximum number of clients (%d)", m->max_clients); goto err; } if (!real) /* TCP mode? */ { if (!multi_tcp_instance_specific_init(m, mi)) { goto err; } generate_prefix(mi); } if (!hash_add(m->iter, &mi->real, mi, false)) { msg(D_MULTI_LOW, "MULTI: unable to add real address [%s] to iterator hash table", mroute_addr_print(&mi->real, &gc)); goto err; } mi->did_iter = true; #ifdef MANAGEMENT_DEF_AUTH do { mi->context.c2.mda_context.cid = m->cid_counter++; } while (!hash_add(m->cid_hash, &mi->context.c2.mda_context.cid, mi, false)); mi->did_cid_hash = true; #endif mi->context.c2.push_reply_deferred = true; #ifdef ENABLE_ASYNC_PUSH mi->context.c2.push_request_received = false; mi->inotify_watch = -1; #endif if (!multi_process_post(m, mi, MPP_PRE_SELECT)) { msg(D_MULTI_ERRORS, "MULTI: signal occurred during client instance initialization"); goto err; } perf_pop(); gc_free(&gc); return mi; err: multi_close_instance(m, mi, false); perf_pop(); gc_free(&gc); return NULL; } /* * Dump tables -- triggered by SIGUSR2. * If status file is defined, write to file. * If status file is NULL, write to syslog. 
*/ void multi_print_status(struct multi_context *m, struct status_output *so, const int version) { if (m->hash) { struct gc_arena gc_top = gc_new(); struct hash_iterator hi; const struct hash_element *he; status_reset(so); if (version == 1) /* WAS: m->status_file_version */ { /* * Status file version 1 */ status_printf(so, "OpenVPN CLIENT LIST"); status_printf(so, "Updated,%s", time_string(0, 0, false, &gc_top)); status_printf(so, "Common Name,Real Address,Bytes Received,Bytes Sent,Connected Since"); hash_iterator_init(m->hash, &hi); while ((he = hash_iterator_next(&hi))) { struct gc_arena gc = gc_new(); const struct multi_instance *mi = (struct multi_instance *) he->value; if (!mi->halt) { status_printf(so, "%s,%s," counter_format "," counter_format ",%s", tls_common_name(mi->context.c2.tls_multi, false), mroute_addr_print(&mi->real, &gc), mi->context.c2.link_read_bytes, mi->context.c2.link_write_bytes, time_string(mi->created, 0, false, &gc)); } gc_free(&gc); } hash_iterator_free(&hi); status_printf(so, "ROUTING TABLE"); status_printf(so, "Virtual Address,Common Name,Real Address,Last Ref"); hash_iterator_init(m->vhash, &hi); while ((he = hash_iterator_next(&hi))) { struct gc_arena gc = gc_new(); const struct multi_route *route = (struct multi_route *) he->value; if (multi_route_defined(m, route)) { const struct multi_instance *mi = route->instance; const struct mroute_addr *ma = &route->addr; char flags[2] = {0, 0}; if (route->flags & MULTI_ROUTE_CACHE) { flags[0] = 'C'; } status_printf(so, "%s%s,%s,%s,%s", mroute_addr_print(ma, &gc), flags, tls_common_name(mi->context.c2.tls_multi, false), mroute_addr_print(&mi->real, &gc), time_string(route->last_reference, 0, false, &gc)); } gc_free(&gc); } hash_iterator_free(&hi); status_printf(so, "GLOBAL STATS"); if (m->mbuf) { status_printf(so, "Max bcast/mcast queue length,%d", mbuf_maximum_queued(m->mbuf)); } status_printf(so, "END"); } else if (version == 2 || version == 3) { const char sep = (version == 3) ? '\t' : ','; /* * Status file version 2 and 3 */ status_printf(so, "TITLE%c%s", sep, title_string); status_printf(so, "TIME%c%s%c%u", sep, time_string(now, 0, false, &gc_top), sep, (unsigned int)now); status_printf(so, "HEADER%cCLIENT_LIST%cCommon Name%cReal Address%cVirtual Address%cVirtual IPv6 Address%cBytes Received%cBytes Sent%cConnected Since%cConnected Since (time_t)%cUsername%cClient ID%cPeer ID%cData Channel Cipher", sep, sep, sep, sep, sep, sep, sep, sep, sep, sep, sep, sep, sep); hash_iterator_init(m->hash, &hi); while ((he = hash_iterator_next(&hi))) { struct gc_arena gc = gc_new(); const struct multi_instance *mi = (struct multi_instance *) he->value; if (!mi->halt) { status_printf(so, "CLIENT_LIST%c%s%c%s%c%s%c%s%c" counter_format "%c" counter_format "%c%s%c%u%c%s%c" #ifdef MANAGEMENT_DEF_AUTH "%lu" #else "" #endif "%c%" PRIu32 "%c%s", sep, tls_common_name(mi->context.c2.tls_multi, false), sep, mroute_addr_print(&mi->real, &gc), sep, print_in_addr_t(mi->reporting_addr, IA_EMPTY_IF_UNDEF, &gc), sep, print_in6_addr(mi->reporting_addr_ipv6, IA_EMPTY_IF_UNDEF, &gc), sep, mi->context.c2.link_read_bytes, sep, mi->context.c2.link_write_bytes, sep, time_string(mi->created, 0, false, &gc), sep, (unsigned int)mi->created, sep, tls_username(mi->context.c2.tls_multi, false), #ifdef MANAGEMENT_DEF_AUTH sep, mi->context.c2.mda_context.cid, #else sep, #endif sep, mi->context.c2.tls_multi ? 
mi->context.c2.tls_multi->peer_id : UINT32_MAX, sep, translate_cipher_name_to_openvpn(mi->context.options.ciphername)); } gc_free(&gc); } hash_iterator_free(&hi); status_printf(so, "HEADER%cROUTING_TABLE%cVirtual Address%cCommon Name%cReal Address%cLast Ref%cLast Ref (time_t)", sep, sep, sep, sep, sep, sep); hash_iterator_init(m->vhash, &hi); while ((he = hash_iterator_next(&hi))) { struct gc_arena gc = gc_new(); const struct multi_route *route = (struct multi_route *) he->value; if (multi_route_defined(m, route)) { const struct multi_instance *mi = route->instance; const struct mroute_addr *ma = &route->addr; char flags[2] = {0, 0}; if (route->flags & MULTI_ROUTE_CACHE) { flags[0] = 'C'; } status_printf(so, "ROUTING_TABLE%c%s%s%c%s%c%s%c%s%c%u", sep, mroute_addr_print(ma, &gc), flags, sep, tls_common_name(mi->context.c2.tls_multi, false), sep, mroute_addr_print(&mi->real, &gc), sep, time_string(route->last_reference, 0, false, &gc), sep, (unsigned int)route->last_reference); } gc_free(&gc); } hash_iterator_free(&hi); if (m->mbuf) { status_printf(so, "GLOBAL_STATS%cMax bcast/mcast queue length%c%d", sep, sep, mbuf_maximum_queued(m->mbuf)); } status_printf(so, "END"); } else { status_printf(so, "ERROR: bad status format version number"); } #ifdef PACKET_TRUNCATION_CHECK { status_printf(so, "HEADER,ERRORS,Common Name,TUN Read Trunc,TUN Write Trunc,Pre-encrypt Trunc,Post-decrypt Trunc"); hash_iterator_init(m->hash, &hi); while ((he = hash_iterator_next(&hi))) { struct gc_arena gc = gc_new(); const struct multi_instance *mi = (struct multi_instance *) he->value; if (!mi->halt) { status_printf(so, "ERRORS,%s," counter_format "," counter_format "," counter_format "," counter_format, tls_common_name(mi->context.c2.tls_multi, false), m->top.c2.n_trunc_tun_read, mi->context.c2.n_trunc_tun_write, mi->context.c2.n_trunc_pre_encrypt, mi->context.c2.n_trunc_post_decrypt); } gc_free(&gc); } hash_iterator_free(&hi); } #endif /* ifdef PACKET_TRUNCATION_CHECK */ status_flush(so); gc_free(&gc_top); } #ifdef ENABLE_ASYNC_PUSH if (m->inotify_watchers) { msg(D_MULTI_DEBUG, "inotify watchers count: %d\n", hash_n_elements(m->inotify_watchers)); } #endif } /* * Learn a virtual address or route. * The learn will fail if the learn address * script/plugin fails. In this case the * return value may be != mi. * Return the instance which owns this route, * or NULL if none. */ static struct multi_instance * multi_learn_addr(struct multi_context *m, struct multi_instance *mi, const struct mroute_addr *addr, const unsigned int flags) { struct hash_element *he; const uint32_t hv = hash_value(m->vhash, addr); struct hash_bucket *bucket = hash_bucket(m->vhash, hv); struct multi_route *oldroute = NULL; struct multi_instance *owner = NULL; struct gc_arena gc = gc_new(); /* if route currently exists, get the instance which owns it */ he = hash_lookup_fast(m->vhash, bucket, addr, hv); if (he) { oldroute = (struct multi_route *) he->value; } if (oldroute && multi_route_defined(m, oldroute)) { owner = oldroute->instance; } /* do we need to add address to hash table? 
*/ if ((!owner || owner != mi) && mroute_learnable_address(addr, &gc) && !mroute_addr_equal(addr, &m->local)) { struct multi_route *newroute; bool learn_succeeded = false; ALLOC_OBJ(newroute, struct multi_route); newroute->addr = *addr; newroute->instance = mi; newroute->flags = flags; newroute->last_reference = now; newroute->cache_generation = 0; /* The cache is invalidated when cache_generation is incremented */ if (flags & MULTI_ROUTE_CACHE) { newroute->cache_generation = m->route_helper->cache_generation; } if (oldroute) /* route already exists? */ { if (route_quota_test(mi) && learn_address_script(m, mi, "update", &newroute->addr)) { learn_succeeded = true; owner = mi; multi_instance_inc_refcount(mi); route_quota_inc(mi); /* delete old route */ multi_route_del(oldroute); /* modify hash table entry, replacing old route */ he->key = &newroute->addr; he->value = newroute; } } else { if (route_quota_test(mi) && learn_address_script(m, mi, "add", &newroute->addr)) { learn_succeeded = true; owner = mi; multi_instance_inc_refcount(mi); route_quota_inc(mi); /* add new route */ hash_add_fast(m->vhash, bucket, &newroute->addr, hv, newroute); } } msg(D_MULTI_LOW, "MULTI: Learn%s: %s -> %s", learn_succeeded ? "" : " FAILED", mroute_addr_print(&newroute->addr, &gc), multi_instance_string(mi, false, &gc)); if (!learn_succeeded) { free(newroute); } } gc_free(&gc); return owner; } /* * Get client instance based on virtual address. */ static struct multi_instance * multi_get_instance_by_virtual_addr(struct multi_context *m, const struct mroute_addr *addr, bool cidr_routing) { struct multi_route *route; struct multi_instance *ret = NULL; /* check for local address */ if (mroute_addr_equal(addr, &m->local)) { return NULL; } route = (struct multi_route *) hash_lookup(m->vhash, addr); /* does host route (possible cached) exist? */ if (route && multi_route_defined(m, route)) { struct multi_instance *mi = route->instance; route->last_reference = now; ret = mi; } else if (cidr_routing) /* do we need to regenerate a host route cache entry? */ { struct mroute_helper *rh = m->route_helper; struct mroute_addr tryaddr; int i; /* cycle through each CIDR length */ for (i = 0; i < rh->n_net_len; ++i) { tryaddr = *addr; tryaddr.type |= MR_WITH_NETBITS; tryaddr.netbits = rh->net_len[i]; mroute_addr_mask_host_bits(&tryaddr); /* look up a possible route with netbits netmask */ route = (struct multi_route *) hash_lookup(m->vhash, &tryaddr); if (route && multi_route_defined(m, route)) { /* found an applicable route, cache host route */ struct multi_instance *mi = route->instance; multi_learn_addr(m, mi, addr, MULTI_ROUTE_CACHE|MULTI_ROUTE_AGEABLE); ret = mi; break; } } } #ifdef ENABLE_DEBUG if (check_debug_level(D_MULTI_DEBUG)) { struct gc_arena gc = gc_new(); const char *addr_text = mroute_addr_print(addr, &gc); if (ret) { dmsg(D_MULTI_DEBUG, "GET INST BY VIRT: %s -> %s via %s", addr_text, multi_instance_string(ret, false, &gc), mroute_addr_print(&route->addr, &gc)); } else { dmsg(D_MULTI_DEBUG, "GET INST BY VIRT: %s [failed]", addr_text); } gc_free(&gc); } #endif ASSERT(!(ret && ret->halt)); return ret; } /* * Helper function to multi_learn_addr(). 
*/ static struct multi_instance * multi_learn_in_addr_t(struct multi_context *m, struct multi_instance *mi, in_addr_t a, int netbits, /* -1 if host route, otherwise # of network bits in address */ bool primary) { struct openvpn_sockaddr remote_si; struct mroute_addr addr; CLEAR(remote_si); remote_si.addr.in4.sin_family = AF_INET; remote_si.addr.in4.sin_addr.s_addr = htonl(a); ASSERT(mroute_extract_openvpn_sockaddr(&addr, &remote_si, false)); if (netbits >= 0) { addr.type |= MR_WITH_NETBITS; addr.netbits = (uint8_t) netbits; } { struct multi_instance *owner = multi_learn_addr(m, mi, &addr, 0); #ifdef MANAGEMENT_DEF_AUTH if (management && owner) { management_learn_addr(management, &mi->context.c2.mda_context, &addr, primary); } #endif return owner; } } static struct multi_instance * multi_learn_in6_addr(struct multi_context *m, struct multi_instance *mi, struct in6_addr a6, int netbits, /* -1 if host route, otherwise # of network bits in address */ bool primary) { struct mroute_addr addr; addr.len = 16; addr.type = MR_ADDR_IPV6; addr.netbits = 0; addr.v6.addr = a6; if (netbits >= 0) { addr.type |= MR_WITH_NETBITS; addr.netbits = (uint8_t) netbits; mroute_addr_mask_host_bits( &addr ); } { struct multi_instance *owner = multi_learn_addr(m, mi, &addr, 0); #ifdef MANAGEMENT_DEF_AUTH if (management && owner) { management_learn_addr(management, &mi->context.c2.mda_context, &addr, primary); } #endif return owner; } } /* * A new client has connected, add routes (server -> client) * to internal routing table. */ static void multi_add_iroutes(struct multi_context *m, struct multi_instance *mi) { struct gc_arena gc = gc_new(); const struct iroute *ir; const struct iroute_ipv6 *ir6; if (TUNNEL_TYPE(mi->context.c1.tuntap) == DEV_TYPE_TUN) { mi->did_iroutes = true; for (ir = mi->context.options.iroutes; ir != NULL; ir = ir->next) { if (ir->netbits >= 0) { msg(D_MULTI_LOW, "MULTI: internal route %s/%d -> %s", print_in_addr_t(ir->network, 0, &gc), ir->netbits, multi_instance_string(mi, false, &gc)); } else { msg(D_MULTI_LOW, "MULTI: internal route %s -> %s", print_in_addr_t(ir->network, 0, &gc), multi_instance_string(mi, false, &gc)); } mroute_helper_add_iroute46(m->route_helper, ir->netbits); multi_learn_in_addr_t(m, mi, ir->network, ir->netbits, false); } for (ir6 = mi->context.options.iroutes_ipv6; ir6 != NULL; ir6 = ir6->next) { msg(D_MULTI_LOW, "MULTI: internal route %s/%d -> %s", print_in6_addr(ir6->network, 0, &gc), ir6->netbits, multi_instance_string(mi, false, &gc)); mroute_helper_add_iroute46(m->route_helper, ir6->netbits); multi_learn_in6_addr(m, mi, ir6->network, ir6->netbits, false); } } gc_free(&gc); } /* * Given an instance (new_mi), delete all other instances which use the * same common name. */ static void multi_delete_dup(struct multi_context *m, struct multi_instance *new_mi) { if (new_mi) { const char *new_cn = tls_common_name(new_mi->context.c2.tls_multi, true); if (new_cn) { struct hash_iterator hi; struct hash_element *he; int count = 0; hash_iterator_init(m->iter, &hi); while ((he = hash_iterator_next(&hi))) { struct multi_instance *mi = (struct multi_instance *) he->value; if (mi != new_mi && !mi->halt) { const char *cn = tls_common_name(mi->context.c2.tls_multi, true); if (cn && !strcmp(cn, new_cn)) { mi->did_iter = false; multi_close_instance(m, mi, false); hash_iterator_delete_element(&hi); ++count; } } } hash_iterator_free(&hi); if (count) { msg(D_MULTI_LOW, "MULTI: new connection by client '%s' will cause previous active sessions by this client to be dropped. 
Remember to use the --duplicate-cn option if you want multiple clients using the same certificate or username to concurrently connect.", new_cn); } } } } static void check_stale_routes(struct multi_context *m) { struct gc_arena gc = gc_new(); struct hash_iterator hi; struct hash_element *he; dmsg(D_MULTI_DEBUG, "MULTI: Checking stale routes"); hash_iterator_init_range(m->vhash, &hi, 0, hash_n_buckets(m->vhash)); while ((he = hash_iterator_next(&hi)) != NULL) { struct multi_route *r = (struct multi_route *) he->value; if (multi_route_defined(m, r) && difftime(now, r->last_reference) >= m->top.options.stale_routes_ageing_time) { dmsg(D_MULTI_DEBUG, "MULTI: Deleting stale route for address '%s'", mroute_addr_print(&r->addr, &gc)); learn_address_script(m, NULL, "delete", &r->addr); multi_route_del(r); hash_iterator_delete_element(&hi); } } hash_iterator_free(&hi); gc_free(&gc); } /* * Ensure that endpoint to be pushed to client * complies with --ifconfig-push-constraint directive. */ static bool ifconfig_push_constraint_satisfied(const struct context *c) { const struct options *o = &c->options; if (o->push_ifconfig_constraint_defined && c->c2.push_ifconfig_defined) { return (o->push_ifconfig_constraint_netmask & c->c2.push_ifconfig_local) == o->push_ifconfig_constraint_network; } else { return true; } } /* * Select a virtual address for a new client instance. * Use an --ifconfig-push directive, if given (static IP). * Otherwise use an --ifconfig-pool address (dynamic IP). */ static void multi_select_virtual_addr(struct multi_context *m, struct multi_instance *mi) { struct gc_arena gc = gc_new(); /* * If ifconfig addresses were set by dynamic config file, * release pool addresses, otherwise keep them. */ if (mi->context.options.push_ifconfig_defined) { /* ifconfig addresses were set statically, * release dynamic allocation */ if (mi->vaddr_handle >= 0) { ifconfig_pool_release(m->ifconfig_pool, mi->vaddr_handle, true); mi->vaddr_handle = -1; } mi->context.c2.push_ifconfig_defined = true; mi->context.c2.push_ifconfig_local = mi->context.options.push_ifconfig_local; mi->context.c2.push_ifconfig_remote_netmask = mi->context.options.push_ifconfig_remote_netmask; mi->context.c2.push_ifconfig_local_alias = mi->context.options.push_ifconfig_local_alias; /* the current implementation does not allow "static IPv4, pool IPv6", * (see below) so issue a warning if that happens - don't break the * session, though, as we don't even know if this client WANTS IPv6 */ if (mi->context.options.ifconfig_ipv6_pool_defined && !mi->context.options.push_ifconfig_ipv6_defined) { msg( M_INFO, "MULTI_sva: WARNING: if --ifconfig-push is used for IPv4, automatic IPv6 assignment from --ifconfig-ipv6-pool does not work. Use --ifconfig-ipv6-push for IPv6 then." ); } } else if (m->ifconfig_pool && mi->vaddr_handle < 0) /* otherwise, choose a pool address */ { in_addr_t local = 0, remote = 0; struct in6_addr remote_ipv6; const char *cn = NULL; if (!mi->context.options.duplicate_cn) { cn = tls_common_name(mi->context.c2.tls_multi, true); } CLEAR(remote_ipv6); mi->vaddr_handle = ifconfig_pool_acquire(m->ifconfig_pool, &local, &remote, &remote_ipv6, cn); if (mi->vaddr_handle >= 0) { const int tunnel_type = TUNNEL_TYPE(mi->context.c1.tuntap); const int tunnel_topology = TUNNEL_TOPOLOGY(mi->context.c1.tuntap); msg( M_INFO, "MULTI_sva: pool returned IPv4=%s, IPv6=%s", print_in_addr_t( remote, 0, &gc ), (mi->context.options.ifconfig_ipv6_pool_defined ? 
print_in6_addr( remote_ipv6, 0, &gc ) : "(Not enabled)") ); /* set push_ifconfig_remote_netmask from pool ifconfig address(es) */ mi->context.c2.push_ifconfig_local = remote; if (tunnel_type == DEV_TYPE_TAP || (tunnel_type == DEV_TYPE_TUN && tunnel_topology == TOP_SUBNET)) { mi->context.c2.push_ifconfig_remote_netmask = mi->context.options.ifconfig_pool_netmask; if (!mi->context.c2.push_ifconfig_remote_netmask) { mi->context.c2.push_ifconfig_remote_netmask = mi->context.c1.tuntap->remote_netmask; } } else if (tunnel_type == DEV_TYPE_TUN) { if (tunnel_topology == TOP_P2P) { mi->context.c2.push_ifconfig_remote_netmask = mi->context.c1.tuntap->local; } else if (tunnel_topology == TOP_NET30) { mi->context.c2.push_ifconfig_remote_netmask = local; } } if (mi->context.c2.push_ifconfig_remote_netmask) { mi->context.c2.push_ifconfig_defined = true; } else { msg(D_MULTI_ERRORS, "MULTI: no --ifconfig-pool netmask parameter is available to push to %s", multi_instance_string(mi, false, &gc)); } if (mi->context.options.ifconfig_ipv6_pool_defined) { mi->context.c2.push_ifconfig_ipv6_local = remote_ipv6; mi->context.c2.push_ifconfig_ipv6_remote = mi->context.c1.tuntap->local_ipv6; mi->context.c2.push_ifconfig_ipv6_netbits = mi->context.options.ifconfig_ipv6_netbits; mi->context.c2.push_ifconfig_ipv6_defined = true; } } else { msg(D_MULTI_ERRORS, "MULTI: no free --ifconfig-pool addresses are available"); } } /* IPv6 push_ifconfig is a bit problematic - since IPv6 shares the * pool handling with IPv4, the combination "static IPv4, dynamic IPv6" * will fail (because no pool will be allocated in this case). * OTOH, this doesn't make too much sense in reality - and the other * way round ("dynamic IPv4, static IPv6") or "both static" makes sense * -> and so it's implemented right now */ if (mi->context.options.push_ifconfig_ipv6_defined) { mi->context.c2.push_ifconfig_ipv6_local = mi->context.options.push_ifconfig_ipv6_local; mi->context.c2.push_ifconfig_ipv6_remote = mi->context.options.push_ifconfig_ipv6_remote; mi->context.c2.push_ifconfig_ipv6_netbits = mi->context.options.push_ifconfig_ipv6_netbits; mi->context.c2.push_ifconfig_ipv6_defined = true; msg( M_INFO, "MULTI_sva: push_ifconfig_ipv6 %s/%d", print_in6_addr( mi->context.c2.push_ifconfig_ipv6_local, 0, &gc ), mi->context.c2.push_ifconfig_ipv6_netbits ); } gc_free(&gc); } /* * Set virtual address environmental variables. 
*/ static void multi_set_virtual_addr_env(struct multi_instance *mi) { setenv_del(mi->context.c2.es, "ifconfig_pool_local_ip"); setenv_del(mi->context.c2.es, "ifconfig_pool_remote_ip"); setenv_del(mi->context.c2.es, "ifconfig_pool_netmask"); if (mi->context.c2.push_ifconfig_defined) { const int tunnel_type = TUNNEL_TYPE(mi->context.c1.tuntap); const int tunnel_topology = TUNNEL_TOPOLOGY(mi->context.c1.tuntap); setenv_in_addr_t(mi->context.c2.es, "ifconfig_pool_remote_ip", mi->context.c2.push_ifconfig_local, SA_SET_IF_NONZERO); if (tunnel_type == DEV_TYPE_TAP || (tunnel_type == DEV_TYPE_TUN && tunnel_topology == TOP_SUBNET)) { setenv_in_addr_t(mi->context.c2.es, "ifconfig_pool_netmask", mi->context.c2.push_ifconfig_remote_netmask, SA_SET_IF_NONZERO); } else if (tunnel_type == DEV_TYPE_TUN) { setenv_in_addr_t(mi->context.c2.es, "ifconfig_pool_local_ip", mi->context.c2.push_ifconfig_remote_netmask, SA_SET_IF_NONZERO); } } setenv_del(mi->context.c2.es, "ifconfig_pool_local_ip6"); setenv_del(mi->context.c2.es, "ifconfig_pool_remote_ip6"); setenv_del(mi->context.c2.es, "ifconfig_pool_ip6_netbits"); if (mi->context.c2.push_ifconfig_ipv6_defined) { setenv_in6_addr(mi->context.c2.es, "ifconfig_pool_remote", &mi->context.c2.push_ifconfig_ipv6_local, SA_SET_IF_NONZERO); setenv_in6_addr(mi->context.c2.es, "ifconfig_pool_local", &mi->context.c2.push_ifconfig_ipv6_remote, SA_SET_IF_NONZERO); setenv_int(mi->context.c2.es, "ifconfig_pool_ip6_netbits", mi->context.c2.push_ifconfig_ipv6_netbits); } } /* * Called after client-connect script is called */ static void multi_client_connect_post(struct multi_context *m, struct multi_instance *mi, const char *dc_file, unsigned int option_permissions_mask, unsigned int *option_types_found) { /* Did script generate a dynamic config file? */ if (platform_test_file(dc_file)) { options_server_import(&mi->context.options, dc_file, D_IMPORT_ERRORS|M_OPTERR, option_permissions_mask, option_types_found, mi->context.c2.es); /* * If the --client-connect script generates a config file * with an --ifconfig-push directive, it will override any * --ifconfig-push directive from the --client-config-dir * directory or any --ifconfig-pool dynamic address. */ multi_select_virtual_addr(m, mi); multi_set_virtual_addr_env(mi); } } #ifdef ENABLE_PLUGIN /* * Called after client-connect plug-in is called */ static void multi_client_connect_post_plugin(struct multi_context *m, struct multi_instance *mi, const struct plugin_return *pr, unsigned int option_permissions_mask, unsigned int *option_types_found) { struct plugin_return config; plugin_return_get_column(pr, &config, "config"); /* Did script generate a dynamic config file? */ if (plugin_return_defined(&config)) { int i; for (i = 0; i < config.n; ++i) { if (config.list[i] && config.list[i]->value) { options_string_import(&mi->context.options, config.list[i]->value, D_IMPORT_ERRORS|M_OPTERR, option_permissions_mask, option_types_found, mi->context.c2.es); } } /* * If the --client-connect script generates a config file * with an --ifconfig-push directive, it will override any * --ifconfig-push directive from the --client-config-dir * directory or any --ifconfig-pool dynamic address. 
*/ multi_select_virtual_addr(m, mi); multi_set_virtual_addr_env(mi); } } #endif /* ifdef ENABLE_PLUGIN */ #ifdef MANAGEMENT_DEF_AUTH /* * Called to load management-derived client-connect config */ static void multi_client_connect_mda(struct multi_context *m, struct multi_instance *mi, const struct buffer_list *config, unsigned int option_permissions_mask, unsigned int *option_types_found) { if (config) { struct buffer_entry *be; for (be = config->head; be != NULL; be = be->next) { const char *opt = BSTR(&be->buf); options_string_import(&mi->context.options, opt, D_IMPORT_ERRORS|M_OPTERR, option_permissions_mask, option_types_found, mi->context.c2.es); } /* * If the --client-connect script generates a config file * with an --ifconfig-push directive, it will override any * --ifconfig-push directive from the --client-config-dir * directory or any --ifconfig-pool dynamic address. */ multi_select_virtual_addr(m, mi); multi_set_virtual_addr_env(mi); } } #endif /* ifdef MANAGEMENT_DEF_AUTH */ static void multi_client_connect_setenv(struct multi_context *m, struct multi_instance *mi) { struct gc_arena gc = gc_new(); /* setenv incoming cert common name for script */ setenv_str(mi->context.c2.es, "common_name", tls_common_name(mi->context.c2.tls_multi, true)); /* setenv client real IP address */ setenv_trusted(mi->context.c2.es, get_link_socket_info(&mi->context)); /* setenv client virtual IP address */ multi_set_virtual_addr_env(mi); /* setenv connection time */ { const char *created_ascii = time_string(mi->created, 0, false, &gc); setenv_str(mi->context.c2.es, "time_ascii", created_ascii); setenv_long_long(mi->context.c2.es, "time_unix", mi->created); } gc_free(&gc); } /* * Called as soon as the SSL/TLS connection authenticates. * * Instance-specific directives to be processed: * * iroute start-ip end-ip * ifconfig-push local remote-netmask * push */ static void multi_connection_established(struct multi_context *m, struct multi_instance *mi) { if (tls_authentication_status(mi->context.c2.tls_multi, 0) == TLS_AUTHENTICATION_SUCCEEDED) { struct gc_arena gc = gc_new(); unsigned int option_types_found = 0; const unsigned int option_permissions_mask = OPT_P_INSTANCE | OPT_P_INHERIT | OPT_P_PUSH | OPT_P_TIMER | OPT_P_CONFIG | OPT_P_ECHO | OPT_P_COMP | OPT_P_SOCKFLAGS; int cc_succeeded = true; /* client connect script status */ int cc_succeeded_count = 0; ASSERT(mi->context.c1.tuntap); /* lock down the common name and cert hashes so they can't change during future TLS renegotiations */ tls_lock_common_name(mi->context.c2.tls_multi); tls_lock_cert_hash_set(mi->context.c2.tls_multi); /* generate a msg() prefix for this client instance */ generate_prefix(mi); /* delete instances of previous clients with same common-name */ if (!mi->context.options.duplicate_cn) { multi_delete_dup(m, mi); } /* reset pool handle to null */ mi->vaddr_handle = -1; /* * Try to source a dynamic config file from the * --client-config-dir directory. 
*/ if (mi->context.options.client_config_dir) { const char *ccd_file; ccd_file = platform_gen_path(mi->context.options.client_config_dir, tls_common_name(mi->context.c2.tls_multi, false), &gc); /* try common-name file */ if (platform_test_file(ccd_file)) { options_server_import(&mi->context.options, ccd_file, D_IMPORT_ERRORS|M_OPTERR, option_permissions_mask, &option_types_found, mi->context.c2.es); } else /* try default file */ { ccd_file = platform_gen_path(mi->context.options.client_config_dir, CCD_DEFAULT, &gc); if (platform_test_file(ccd_file)) { options_server_import(&mi->context.options, ccd_file, D_IMPORT_ERRORS|M_OPTERR, option_permissions_mask, &option_types_found, mi->context.c2.es); } } } /* * Select a virtual address from either --ifconfig-push in --client-config-dir file * or --ifconfig-pool. */ multi_select_virtual_addr(m, mi); /* do --client-connect setenvs */ multi_client_connect_setenv(m, mi); #ifdef ENABLE_PLUGIN /* * Call client-connect plug-in. */ /* deprecated callback, use a file for passing back return info */ if (plugin_defined(mi->context.plugins, OPENVPN_PLUGIN_CLIENT_CONNECT)) { struct argv argv = argv_new(); const char *dc_file = platform_create_temp_file(mi->context.options.tmp_dir, "cc", &gc); if (!dc_file) { cc_succeeded = false; goto script_depr_failed; } argv_printf(&argv, "%s", dc_file); if (plugin_call(mi->context.plugins, OPENVPN_PLUGIN_CLIENT_CONNECT, &argv, NULL, mi->context.c2.es) != OPENVPN_PLUGIN_FUNC_SUCCESS) { msg(M_WARN, "WARNING: client-connect plugin call failed"); cc_succeeded = false; } else { multi_client_connect_post(m, mi, dc_file, option_permissions_mask, &option_types_found); ++cc_succeeded_count; } if (!platform_unlink(dc_file)) { msg(D_MULTI_ERRORS, "MULTI: problem deleting temporary file: %s", dc_file); } script_depr_failed: argv_free(&argv); } /* V2 callback, use a plugin_return struct for passing back return info */ if (plugin_defined(mi->context.plugins, OPENVPN_PLUGIN_CLIENT_CONNECT_V2)) { struct plugin_return pr; plugin_return_init(&pr); if (plugin_call(mi->context.plugins, OPENVPN_PLUGIN_CLIENT_CONNECT_V2, NULL, &pr, mi->context.c2.es) != OPENVPN_PLUGIN_FUNC_SUCCESS) { msg(M_WARN, "WARNING: client-connect-v2 plugin call failed"); cc_succeeded = false; } else { multi_client_connect_post_plugin(m, mi, &pr, option_permissions_mask, &option_types_found); ++cc_succeeded_count; } plugin_return_free(&pr); } #endif /* ifdef ENABLE_PLUGIN */ /* * Run --client-connect script. 
*/ if (mi->context.options.client_connect_script && cc_succeeded) { struct argv argv = argv_new(); const char *dc_file = NULL; setenv_str(mi->context.c2.es, "script_type", "client-connect"); dc_file = platform_create_temp_file(mi->context.options.tmp_dir, "cc", &gc); if (!dc_file) { cc_succeeded = false; goto script_failed; } argv_parse_cmd(&argv, mi->context.options.client_connect_script); argv_printf_cat(&argv, "%s", dc_file); if (openvpn_run_script(&argv, mi->context.c2.es, 0, "--client-connect")) { multi_client_connect_post(m, mi, dc_file, option_permissions_mask, &option_types_found); ++cc_succeeded_count; } else { cc_succeeded = false; } if (!platform_unlink(dc_file)) { msg(D_MULTI_ERRORS, "MULTI: problem deleting temporary file: %s", dc_file); } script_failed: argv_free(&argv); } /* * Check for client-connect script left by management interface client */ #ifdef MANAGEMENT_DEF_AUTH if (cc_succeeded && mi->cc_config) { multi_client_connect_mda(m, mi, mi->cc_config, option_permissions_mask, &option_types_found); ++cc_succeeded_count; } #endif /* * Check for "disable" directive in client-config-dir file * or config file generated by --client-connect script. */ if (mi->context.options.disable) { msg(D_MULTI_ERRORS, "MULTI: client has been rejected due to 'disable' directive"); cc_succeeded = false; cc_succeeded_count = 0; } if (cc_succeeded) { /* * Process sourced options. */ do_deferred_options(&mi->context, option_types_found); /* * make sure we got ifconfig settings from somewhere */ if (!mi->context.c2.push_ifconfig_defined) { msg(D_MULTI_ERRORS, "MULTI: no dynamic or static remote --ifconfig address is available for %s", multi_instance_string(mi, false, &gc)); } /* * make sure that ifconfig settings comply with constraints */ if (!ifconfig_push_constraint_satisfied(&mi->context)) { /* JYFIXME -- this should cause the connection to fail */ msg(D_MULTI_ERRORS, "MULTI ERROR: primary virtual IP for %s (%s) violates tunnel network/netmask constraint (%s/%s)", multi_instance_string(mi, false, &gc), print_in_addr_t(mi->context.c2.push_ifconfig_local, 0, &gc), print_in_addr_t(mi->context.options.push_ifconfig_constraint_network, 0, &gc), print_in_addr_t(mi->context.options.push_ifconfig_constraint_netmask, 0, &gc)); } /* * For routed tunnels, set up internal route to endpoint * plus add all iroute routes. */ if (TUNNEL_TYPE(mi->context.c1.tuntap) == DEV_TYPE_TUN) { if (mi->context.c2.push_ifconfig_defined) { multi_learn_in_addr_t(m, mi, mi->context.c2.push_ifconfig_local, -1, true); msg(D_MULTI_LOW, "MULTI: primary virtual IP for %s: %s", multi_instance_string(mi, false, &gc), print_in_addr_t(mi->context.c2.push_ifconfig_local, 0, &gc)); } if (mi->context.c2.push_ifconfig_ipv6_defined) { multi_learn_in6_addr(m, mi, mi->context.c2.push_ifconfig_ipv6_local, -1, true); /* TODO: find out where addresses are "unlearned"!! */ msg(D_MULTI_LOW, "MULTI: primary virtual IPv6 for %s: %s", multi_instance_string(mi, false, &gc), print_in6_addr(mi->context.c2.push_ifconfig_ipv6_local, 0, &gc)); } /* add routes locally, pointing to new client, if * --iroute options have been specified */ multi_add_iroutes(m, mi); /* * iroutes represent subnets which are "owned" by a particular * client. Therefore, do not actually push a route to a client * if it matches one of the client's iroutes. 
*/ remove_iroutes_from_push_route_list(&mi->context.options); } else if (mi->context.options.iroutes) { msg(D_MULTI_ERRORS, "MULTI: --iroute options rejected for %s -- iroute only works with tun-style tunnels", multi_instance_string(mi, false, &gc)); } /* set our client's VPN endpoint for status reporting purposes */ mi->reporting_addr = mi->context.c2.push_ifconfig_local; mi->reporting_addr_ipv6 = mi->context.c2.push_ifconfig_ipv6_local; /* set context-level authentication flag */ mi->context.c2.context_auth = CAS_SUCCEEDED; #ifdef ENABLE_ASYNC_PUSH /* authentication complete, send push reply */ if (mi->context.c2.push_request_received) { process_incoming_push_request(&mi->context); } #endif } else { /* set context-level authentication flag */ mi->context.c2.context_auth = cc_succeeded_count ? CAS_PARTIAL : CAS_FAILED; } /* set flag so we don't get called again */ mi->connection_established_flag = true; /* increment number of current authenticated clients */ ++m->n_clients; update_mstat_n_clients(m->n_clients); --mi->n_clients_delta; #ifdef MANAGEMENT_DEF_AUTH if (management) { management_connection_established(management, &mi->context.c2.mda_context, mi->context.c2.es); } #endif gc_free(&gc); } /* * Reply now to client's PUSH_REQUEST query */ mi->context.c2.push_reply_deferred = false; } #ifdef ENABLE_ASYNC_PUSH /* * Called when inotify event is fired, which happens when acf file is closed or deleted. * Continues authentication and sends push_reply. */ void multi_process_file_closed(struct multi_context *m, const unsigned int mpp_flags) { char buffer[INOTIFY_EVENT_BUFFER_SIZE]; size_t buffer_i = 0; int r = read(m->top.c2.inotify_fd, buffer, INOTIFY_EVENT_BUFFER_SIZE); while (buffer_i < r) { /* parse inotify events */ struct inotify_event *pevent = (struct inotify_event *) &buffer[buffer_i]; size_t event_size = sizeof(struct inotify_event) + pevent->len; buffer_i += event_size; msg(D_MULTI_DEBUG, "MULTI: modified fd %d, mask %d", pevent->wd, pevent->mask); struct multi_instance *mi = hash_lookup(m->inotify_watchers, (void *) (unsigned long) pevent->wd); if (pevent->mask & IN_CLOSE_WRITE) { if (mi) { /* continue authentication, perform NCP negotiation and send push_reply */ multi_process_post(m, mi, mpp_flags); /* With NCP and deferred authentication, we perform cipher negotiation and * data channel keys generation on incoming push request, assuming that auth * succeeded. When auth succeeds in between push requests and async push is used, * we send push reply immediately. Above multi_process_post() call performs * NCP negotiation and here we do keys generation. */ struct context *c = &mi->context; struct frame *frame_fragment = NULL; #ifdef ENABLE_FRAGMENT if (c->options.ce.fragment) { frame_fragment = &c->c2.frame_fragment; } #endif struct tls_session *session = &c->c2.tls_multi->session[TM_ACTIVE]; if (!tls_session_update_crypto_params(session, &c->options, &c->c2.frame, frame_fragment)) { msg(D_TLS_ERRORS, "TLS Error: initializing data channel failed"); register_signal(c, SIGUSR1, "init-data-channel-failed"); } } else { msg(D_MULTI_ERRORS, "MULTI: multi_instance not found!"); } } else if (pevent->mask & IN_IGNORED) { /* this event is _always_ fired when watch is removed or file is deleted */ if (mi) { hash_remove(m->inotify_watchers, (void *) (unsigned long) pevent->wd); mi->inotify_watch = -1; } } else { msg(D_MULTI_ERRORS, "MULTI: unknown mask %d", pevent->mask); } } } #endif /* ifdef ENABLE_ASYNC_PUSH */ /* * Add a mbuf buffer to a particular * instance. 
*/ void multi_add_mbuf(struct multi_context *m, struct multi_instance *mi, struct mbuf_buffer *mb) { if (multi_output_queue_ready(m, mi)) { struct mbuf_item item; item.buffer = mb; item.instance = mi; mbuf_add_item(m->mbuf, &item); } else { msg(D_MULTI_DROPPED, "MULTI: packet dropped due to output saturation (multi_add_mbuf)"); } } /* * Add a packet to a client instance output queue. */ static inline void multi_unicast(struct multi_context *m, const struct buffer *buf, struct multi_instance *mi) { struct mbuf_buffer *mb; if (BLEN(buf) > 0) { mb = mbuf_alloc_buf(buf); mb->flags = MF_UNICAST; multi_add_mbuf(m, mi, mb); mbuf_free_buf(mb); } } /* * Broadcast a packet to all clients. */ static void multi_bcast(struct multi_context *m, const struct buffer *buf, const struct multi_instance *sender_instance, const struct mroute_addr *sender_addr, uint16_t vid) { struct hash_iterator hi; struct hash_element *he; struct multi_instance *mi; struct mbuf_buffer *mb; if (BLEN(buf) > 0) { perf_push(PERF_MULTI_BCAST); #ifdef MULTI_DEBUG_EVENT_LOOP printf("BCAST len=%d\n", BLEN(buf)); #endif mb = mbuf_alloc_buf(buf); hash_iterator_init(m->iter, &hi); while ((he = hash_iterator_next(&hi))) { mi = (struct multi_instance *) he->value; if (mi != sender_instance && !mi->halt) { #ifdef ENABLE_PF if (sender_instance) { if (!pf_c2c_test(&sender_instance->context.c2.pf, sender_instance->context.c2.tls_multi, &mi->context.c2.pf, mi->context.c2.tls_multi, "bcast_c2c")) { msg(D_PF_DROPPED_BCAST, "PF: client[%s] -> client[%s] packet dropped by BCAST packet filter", mi_prefix(sender_instance), mi_prefix(mi)); continue; } } if (sender_addr) { if (!pf_addr_test(&mi->context.c2.pf, &mi->context, sender_addr, "bcast_src_addr")) { struct gc_arena gc = gc_new(); msg(D_PF_DROPPED_BCAST, "PF: addr[%s] -> client[%s] packet dropped by BCAST packet filter", mroute_addr_print_ex(sender_addr, MAPF_SHOW_ARP, &gc), mi_prefix(mi)); gc_free(&gc); continue; } } #endif /* ifdef ENABLE_PF */ if (vid != 0 && vid != mi->context.options.vlan_pvid) { continue; } multi_add_mbuf(m, mi, mb); } } hash_iterator_free(&hi); mbuf_free_buf(mb); perf_pop(); } } /* * Given a time delta, indicating that we wish to be * awoken by the scheduler at time now + delta, figure * a sigma parameter (in microseconds) that represents * a sort of fuzz factor around delta, so that we're * really telling the scheduler to wake us up any time * between now + delta - sigma and now + delta + sigma. * * The sigma parameter helps the scheduler to run more efficiently. * Sigma should be no larger than TV_WITHIN_SIGMA_MAX_USEC */ static inline unsigned int compute_wakeup_sigma(const struct timeval *delta) { if (delta->tv_sec < 1) { /* if < 1 sec, fuzz = # of microseconds / 8 */ return delta->tv_usec >> 3; } else { /* if < 10 minutes, fuzz = 13.1% of timeout */ if (delta->tv_sec < 600) { return delta->tv_sec << 17; } else { return 120000000; /* if >= 10 minutes, fuzz = 2 minutes */ } } } static void multi_schedule_context_wakeup(struct multi_context *m, struct multi_instance *mi) { /* calculate an absolute wakeup time */ ASSERT(!openvpn_gettimeofday(&mi->wakeup, NULL)); tv_add(&mi->wakeup, &mi->context.c2.timeval); /* tell scheduler to wake us up at some point in the future */ schedule_add_entry(m->schedule, (struct schedule_entry *) mi, &mi->wakeup, compute_wakeup_sigma(&mi->context.c2.timeval)); } /* * Figure instance-specific timers, convert * earliest to absolute time in mi->wakeup, * call scheduler with our future wakeup time. * * Also close context on signal. 
*/ bool multi_process_post(struct multi_context *m, struct multi_instance *mi, const unsigned int flags) { bool ret = true; if (!IS_SIG(&mi->context) && ((flags & MPP_PRE_SELECT) || ((flags & MPP_CONDITIONAL_PRE_SELECT) && !ANY_OUT(&mi->context)))) { #if defined(ENABLE_ASYNC_PUSH) && defined(ENABLE_DEF_AUTH) bool was_authenticated = false; struct key_state *ks = NULL; if (mi->context.c2.tls_multi) { ks = &mi->context.c2.tls_multi->session[TM_ACTIVE].key[KS_PRIMARY]; was_authenticated = ks->authenticated; } #endif /* figure timeouts and fetch possible outgoing * to_link packets (such as ping or TLS control) */ pre_select(&mi->context); #if defined(ENABLE_ASYNC_PUSH) && defined(ENABLE_DEF_AUTH) if (ks && ks->auth_control_file && ks->auth_deferred && !was_authenticated) { /* watch acf file */ long watch_descriptor = inotify_add_watch(m->top.c2.inotify_fd, ks->auth_control_file, IN_CLOSE_WRITE | IN_ONESHOT); if (watch_descriptor >= 0) { if (mi->inotify_watch != -1) { hash_remove(m->inotify_watchers, (void *) (unsigned long)mi->inotify_watch); } hash_add(m->inotify_watchers, (const uintptr_t *)watch_descriptor, mi, true); mi->inotify_watch = watch_descriptor; } else { msg(M_NONFATAL | M_ERRNO, "MULTI: inotify_add_watch error"); } } #endif if (!IS_SIG(&mi->context)) { /* connection is "established" when SSL/TLS key negotiation succeeds * and (if specified) auth user/pass succeeds */ if (!mi->connection_established_flag && CONNECTION_ESTABLISHED(&mi->context)) { multi_connection_established(m, mi); } /* tell scheduler to wake us up at some point in the future */ multi_schedule_context_wakeup(m, mi); } } if (IS_SIG(&mi->context)) { if (flags & MPP_CLOSE_ON_SIGNAL) { multi_close_instance_on_signal(m, mi); ret = false; } } else { /* continue to pend on output? */ multi_set_pending(m, ANY_OUT(&mi->context) ? mi : NULL); #ifdef MULTI_DEBUG_EVENT_LOOP printf("POST %s[%d] to=%d lo=%d/%d w=%" PRIi64 "/%ld\n", id(mi), (int) (mi == m->pending), mi ? mi->context.c2.to_tun.len : -1, mi ? mi->context.c2.to_link.len : -1, (mi && mi->context.c2.fragment) ? 
mi->context.c2.fragment->outgoing.len : -1, (int64_t)mi->context.c2.timeval.tv_sec, (long)mi->context.c2.timeval.tv_usec); #endif } if ((flags & MPP_RECORD_TOUCH) && m->mpp_touched) { *m->mpp_touched = mi; } return ret; } void multi_process_float(struct multi_context *m, struct multi_instance *mi) { struct mroute_addr real; struct hash *hash = m->hash; struct gc_arena gc = gc_new(); if (!mroute_extract_openvpn_sockaddr(&real, &m->top.c2.from.dest, true)) { goto done; } const uint32_t hv = hash_value(hash, &real); struct hash_bucket *bucket = hash_bucket(hash, hv); /* make sure that we don't float to an address taken by another client */ struct hash_element *he = hash_lookup_fast(hash, bucket, &real, hv); if (he) { struct multi_instance *ex_mi = (struct multi_instance *) he->value; struct tls_multi *m1 = mi->context.c2.tls_multi; struct tls_multi *m2 = ex_mi->context.c2.tls_multi; /* do not float if target address is taken by client with another cert */ if (!cert_hash_compare(m1->locked_cert_hash_set, m2->locked_cert_hash_set)) { msg(D_MULTI_LOW, "Disallow float to an address taken by another client %s", multi_instance_string(ex_mi, false, &gc)); mi->context.c2.buf.len = 0; goto done; } msg(D_MULTI_MEDIUM, "closing instance %s", multi_instance_string(ex_mi, false, &gc)); multi_close_instance(m, ex_mi, false); } msg(D_MULTI_MEDIUM, "peer %" PRIu32 " (%s) floated from %s to %s", mi->context.c2.tls_multi->peer_id, tls_common_name(mi->context.c2.tls_multi, false), mroute_addr_print(&mi->real, &gc), print_link_socket_actual(&m->top.c2.from, &gc)); /* remove old address from hash table before changing address */ ASSERT(hash_remove(m->hash, &mi->real)); ASSERT(hash_remove(m->iter, &mi->real)); /* change external network address of the remote peer */ mi->real = real; generate_prefix(mi); mi->context.c2.from = m->top.c2.from; mi->context.c2.to_link_addr = &mi->context.c2.from; /* inherit parent link_socket and link_socket_info */ mi->context.c2.link_socket = m->top.c2.link_socket; mi->context.c2.link_socket_info->lsa->actual = m->top.c2.from; tls_update_remote_addr(mi->context.c2.tls_multi, &mi->context.c2.from); ASSERT(hash_add(m->hash, &mi->real, mi, false)); ASSERT(hash_add(m->iter, &mi->real, mi, false)); #ifdef MANAGEMENT_DEF_AUTH ASSERT(hash_add(m->cid_hash, &mi->context.c2.mda_context.cid, mi, true)); #endif done: gc_free(&gc); } /* * Process packets in the TCP/UDP socket -> TUN/TAP interface direction, * i.e. client -> server direction. 
*/ bool multi_process_incoming_link(struct multi_context *m, struct multi_instance *instance, const unsigned int mpp_flags) { struct gc_arena gc = gc_new(); struct context *c; struct mroute_addr src, dest; unsigned int mroute_flags; struct multi_instance *mi; bool ret = true; bool floated = false; if (m->pending) { return true; } if (!instance) { #ifdef MULTI_DEBUG_EVENT_LOOP printf("TCP/UDP -> TUN [%d]\n", BLEN(&m->top.c2.buf)); #endif multi_set_pending(m, multi_get_create_instance_udp(m, &floated)); } else { multi_set_pending(m, instance); } if (m->pending) { set_prefix(m->pending); /* get instance context */ c = &m->pending->context; if (!instance) { /* transfer packet pointer from top-level context buffer to instance */ c->c2.buf = m->top.c2.buf; /* transfer from-addr from top-level context buffer to instance */ if (!floated) { c->c2.from = m->top.c2.from; } } if (BLEN(&c->c2.buf) > 0) { struct link_socket_info *lsi; const uint8_t *orig_buf; /* decrypt in instance context */ perf_push(PERF_PROC_IN_LINK); lsi = get_link_socket_info(c); orig_buf = c->c2.buf.data; if (process_incoming_link_part1(c, lsi, floated)) { /* nonzero length means that we have a valid, decrypted packed */ if (floated && c->c2.buf.len > 0) { multi_process_float(m, m->pending); } process_incoming_link_part2(c, lsi, orig_buf); } perf_pop(); if (TUNNEL_TYPE(m->top.c1.tuntap) == DEV_TYPE_TUN) { /* extract packet source and dest addresses */ mroute_flags = mroute_extract_addr_from_packet(&src, &dest, NULL, NULL, 0, &c->c2.to_tun, DEV_TYPE_TUN); /* drop packet if extract failed */ if (!(mroute_flags & MROUTE_EXTRACT_SUCCEEDED)) { c->c2.to_tun.len = 0; } /* make sure that source address is associated with this client */ else if (multi_get_instance_by_virtual_addr(m, &src, true) != m->pending) { /* IPv6 link-local address (fe80::xxx)? */ if ( (src.type & MR_ADDR_MASK) == MR_ADDR_IPV6 && IN6_IS_ADDR_LINKLOCAL(&src.v6.addr) ) { /* do nothing, for now. TODO: add address learning */ } else { msg(D_MULTI_DROPPED, "MULTI: bad source address from client [%s], packet dropped", mroute_addr_print(&src, &gc)); } c->c2.to_tun.len = 0; } /* client-to-client communication enabled? */ else if (m->enable_c2c) { /* multicast? */ if (mroute_flags & MROUTE_EXTRACT_MCAST) { /* for now, treat multicast as broadcast */ multi_bcast(m, &c->c2.to_tun, m->pending, NULL, 0); } else /* possible client to client routing */ { ASSERT(!(mroute_flags & MROUTE_EXTRACT_BCAST)); mi = multi_get_instance_by_virtual_addr(m, &dest, true); /* if dest addr is a known client, route to it */ if (mi) { #ifdef ENABLE_PF if (!pf_c2c_test(&c->c2.pf, c->c2.tls_multi, &mi->context.c2.pf, mi->context.c2.tls_multi, "tun_c2c")) { msg(D_PF_DROPPED, "PF: client -> client[%s] packet dropped by TUN packet filter", mi_prefix(mi)); } else #endif { multi_unicast(m, &c->c2.to_tun, mi); register_activity(c, BLEN(&c->c2.to_tun)); } c->c2.to_tun.len = 0; } } } #ifdef ENABLE_PF if (c->c2.to_tun.len && !pf_addr_test(&c->c2.pf, c, &dest, "tun_dest_addr")) { msg(D_PF_DROPPED, "PF: client -> addr[%s] packet dropped by TUN packet filter", mroute_addr_print_ex(&dest, MAPF_SHOW_ARP, &gc)); c->c2.to_tun.len = 0; } #endif } else if (TUNNEL_TYPE(m->top.c1.tuntap) == DEV_TYPE_TAP) { uint16_t vid = 0; #ifdef ENABLE_PF struct mroute_addr edest; mroute_addr_reset(&edest); #endif if (m->top.options.vlan_tagging) { if (vlan_is_tagged(&c->c2.to_tun)) { /* Drop VLAN-tagged frame. 
*/ msg(D_VLAN_DEBUG, "dropping incoming VLAN-tagged frame"); c->c2.to_tun.len = 0; } else { vid = c->options.vlan_pvid; } } /* extract packet source and dest addresses */ mroute_flags = mroute_extract_addr_from_packet(&src, &dest, NULL, #ifdef ENABLE_PF &edest, #else NULL, #endif vid, &c->c2.to_tun, DEV_TYPE_TAP); if (mroute_flags & MROUTE_EXTRACT_SUCCEEDED) { if (multi_learn_addr(m, m->pending, &src, 0) == m->pending) { /* check for broadcast */ if (m->enable_c2c) { if (mroute_flags & (MROUTE_EXTRACT_BCAST|MROUTE_EXTRACT_MCAST)) { multi_bcast(m, &c->c2.to_tun, m->pending, NULL, vid); } else /* try client-to-client routing */ { mi = multi_get_instance_by_virtual_addr(m, &dest, false); /* if dest addr is a known client, route to it */ if (mi) { #ifdef ENABLE_PF if (!pf_c2c_test(&c->c2.pf, c->c2.tls_multi, &mi->context.c2.pf, mi->context.c2.tls_multi, "tap_c2c")) { msg(D_PF_DROPPED, "PF: client -> client[%s] packet dropped by TAP packet filter", mi_prefix(mi)); } else #endif { multi_unicast(m, &c->c2.to_tun, mi); register_activity(c, BLEN(&c->c2.to_tun)); } c->c2.to_tun.len = 0; } } } #ifdef ENABLE_PF if (c->c2.to_tun.len && !pf_addr_test(&c->c2.pf, c, &edest, "tap_dest_addr")) { msg(D_PF_DROPPED, "PF: client -> addr[%s] packet dropped by TAP packet filter", mroute_addr_print_ex(&edest, MAPF_SHOW_ARP, &gc)); c->c2.to_tun.len = 0; } #endif } else { msg(D_MULTI_DROPPED, "MULTI: bad source address from client [%s], packet dropped", mroute_addr_print(&src, &gc)); c->c2.to_tun.len = 0; } } else { c->c2.to_tun.len = 0; } } } /* postprocess and set wakeup */ ret = multi_process_post(m, m->pending, mpp_flags); clear_prefix(); } gc_free(&gc); return ret; } /* * Process packets in the TUN/TAP interface -> TCP/UDP socket direction, * i.e. server -> client direction. */ bool multi_process_incoming_tun(struct multi_context *m, const unsigned int mpp_flags) { struct gc_arena gc = gc_new(); bool ret = true; if (BLEN(&m->top.c2.buf) > 0) { unsigned int mroute_flags; struct mroute_addr src, dest; const int dev_type = TUNNEL_TYPE(m->top.c1.tuntap); int16_t vid = 0; #ifdef ENABLE_PF struct mroute_addr esrc, *e1, *e2; if (dev_type == DEV_TYPE_TUN) { e1 = NULL; e2 = &src; } else { e1 = e2 = &esrc; mroute_addr_reset(&esrc); } #endif #ifdef MULTI_DEBUG_EVENT_LOOP printf("TUN -> TCP/UDP [%d]\n", BLEN(&m->top.c2.buf)); #endif if (m->pending) { return true; } if (dev_type == DEV_TYPE_TAP && m->top.options.vlan_tagging) { vid = vlan_decapsulate(&m->top, &m->top.c2.buf); if (vid < 0) { return false; } } /* * Route an incoming tun/tap packet to * the appropriate multi_instance object. */ mroute_flags = mroute_extract_addr_from_packet(&src, &dest, #ifdef ENABLE_PF e1, #else NULL, #endif NULL, vid, &m->top.c2.buf, dev_type); if (mroute_flags & MROUTE_EXTRACT_SUCCEEDED) { struct context *c; /* broadcast or multicast dest addr? 
*/ if (mroute_flags & (MROUTE_EXTRACT_BCAST|MROUTE_EXTRACT_MCAST)) { /* for now, treat multicast as broadcast */ #ifdef ENABLE_PF multi_bcast(m, &m->top.c2.buf, NULL, e2, vid); #else multi_bcast(m, &m->top.c2.buf, NULL, NULL, vid); #endif } else { multi_set_pending(m, multi_get_instance_by_virtual_addr(m, &dest, dev_type == DEV_TYPE_TUN)); if (m->pending) { /* get instance context */ c = &m->pending->context; set_prefix(m->pending); #ifdef ENABLE_PF if (!pf_addr_test(&c->c2.pf, c, e2, "tun_tap_src_addr")) { msg(D_PF_DROPPED, "PF: addr[%s] -> client packet dropped by packet filter", mroute_addr_print_ex(&src, MAPF_SHOW_ARP, &gc)); buf_reset_len(&c->c2.buf); } else #endif { if (multi_output_queue_ready(m, m->pending)) { /* transfer packet pointer from top-level context buffer to instance */ c->c2.buf = m->top.c2.buf; } else { /* drop packet */ msg(D_MULTI_DROPPED, "MULTI: packet dropped due to output saturation (multi_process_incoming_tun)"); buf_reset_len(&c->c2.buf); } } /* encrypt in instance context */ process_incoming_tun(c); /* postprocess and set wakeup */ ret = multi_process_post(m, m->pending, mpp_flags); clear_prefix(); } } } } gc_free(&gc); return ret; } /* * Process a possible client-to-client/bcast/mcast message in the * queue. */ struct multi_instance * multi_get_queue(struct mbuf_set *ms) { struct mbuf_item item; if (mbuf_extract_item(ms, &item)) /* cleartext IP packet */ { unsigned int pip_flags = PIPV4_PASSTOS | PIPV6_IMCP_NOHOST_SERVER; set_prefix(item.instance); item.instance->context.c2.buf = item.buffer->buf; if (item.buffer->flags & MF_UNICAST) /* --mssfix doesn't make sense for broadcast or multicast */ { pip_flags |= PIP_MSSFIX; } process_ip_header(&item.instance->context, pip_flags, &item.instance->context.c2.buf); encrypt_sign(&item.instance->context, true); mbuf_free_buf(item.buffer); dmsg(D_MULTI_DEBUG, "MULTI: C2C/MCAST/BCAST"); clear_prefix(); return item.instance; } else { return NULL; } } /* * Called when an I/O wait times out. Usually means that a particular * client instance object needs timer-based service. */ bool multi_process_timeout(struct multi_context *m, const unsigned int mpp_flags) { bool ret = true; #ifdef MULTI_DEBUG_EVENT_LOOP printf("%s -> TIMEOUT\n", id(m->earliest_wakeup)); #endif /* instance marked for wakeup? */ if (m->earliest_wakeup) { if (m->earliest_wakeup == (struct multi_instance *)&m->deferred_shutdown_signal) { schedule_remove_entry(m->schedule, (struct schedule_entry *) &m->deferred_shutdown_signal); throw_signal(m->deferred_shutdown_signal.signal_received); } else { set_prefix(m->earliest_wakeup); ret = multi_process_post(m, m->earliest_wakeup, mpp_flags); clear_prefix(); } m->earliest_wakeup = NULL; } return ret; } /* * Drop a TUN/TAP outgoing packet.. 
*/ void multi_process_drop_outgoing_tun(struct multi_context *m, const unsigned int mpp_flags) { struct multi_instance *mi = m->pending; ASSERT(mi); set_prefix(mi); msg(D_MULTI_ERRORS, "MULTI: Outgoing TUN queue full, dropped packet len=%d", mi->context.c2.to_tun.len); buf_reset(&mi->context.c2.to_tun); multi_process_post(m, mi, mpp_flags); clear_prefix(); } /* * Per-client route quota management */ void route_quota_exceeded(const struct multi_instance *mi) { struct gc_arena gc = gc_new(); msg(D_ROUTE_QUOTA, "MULTI ROUTE: route quota (%d) exceeded for %s (see --max-routes-per-client option)", mi->context.options.max_routes_per_client, multi_instance_string(mi, false, &gc)); gc_free(&gc); } #ifdef ENABLE_DEBUG /* * Flood clients with random packets */ static void gremlin_flood_clients(struct multi_context *m) { const int level = GREMLIN_PACKET_FLOOD_LEVEL(m->top.options.gremlin); if (level) { struct gc_arena gc = gc_new(); struct buffer buf = alloc_buf_gc(BUF_SIZE(&m->top.c2.frame), &gc); struct packet_flood_parms parm = get_packet_flood_parms(level); int i; ASSERT(buf_init(&buf, FRAME_HEADROOM(&m->top.c2.frame))); parm.packet_size = min_int(parm.packet_size, MAX_RW_SIZE_TUN(&m->top.c2.frame)); msg(D_GREMLIN, "GREMLIN_FLOOD_CLIENTS: flooding clients with %d packets of size %d", parm.n_packets, parm.packet_size); for (i = 0; i < parm.packet_size; ++i) { ASSERT(buf_write_u8(&buf, get_random() & 0xFF)); } for (i = 0; i < parm.n_packets; ++i) { multi_bcast(m, &buf, NULL, NULL, 0); } gc_free(&gc); } } #endif /* ifdef ENABLE_DEBUG */ static bool stale_route_check_trigger(struct multi_context *m) { struct timeval null; CLEAR(null); return event_timeout_trigger(&m->stale_routes_check_et, &null, ETT_DEFAULT); } /* * Process timers in the top-level context */ void multi_process_per_second_timers_dowork(struct multi_context *m) { /* possibly reap instances/routes in vhash */ multi_reap_process(m); /* possibly print to status log */ if (m->top.c1.status_output) { if (status_trigger(m->top.c1.status_output)) { multi_print_status(m, m->top.c1.status_output, m->status_file_version); } } /* possibly flush ifconfig-pool file */ multi_ifconfig_pool_persist(m, false); #ifdef ENABLE_DEBUG gremlin_flood_clients(m); #endif /* Should we check for stale routes? */ if (m->top.options.stale_routes_check_interval && stale_route_check_trigger(m)) { check_stale_routes(m); } } void multi_top_init(struct multi_context *m, const struct context *top) { inherit_context_top(&m->top, top); m->top.c2.buffers = init_context_buffers(&top->c2.frame); } void multi_top_free(struct multi_context *m) { close_context(&m->top, -1, CC_GC_FREE); free_context_buffers(m->top.c2.buffers); } static bool is_exit_restart(int sig) { return (sig == SIGUSR1 || sig == SIGTERM || sig == SIGHUP || sig == SIGINT); } static void multi_push_restart_schedule_exit(struct multi_context *m, bool next_server) { struct hash_iterator hi; struct hash_element *he; struct timeval tv; /* tell all clients to restart */ hash_iterator_init(m->iter, &hi); while ((he = hash_iterator_next(&hi))) { struct multi_instance *mi = (struct multi_instance *) he->value; if (!mi->halt) { send_control_channel_string(&mi->context, next_server ? 
"RESTART,[N]" : "RESTART", D_PUSH); multi_schedule_context_wakeup(m, mi); } } hash_iterator_free(&hi); /* reschedule signal */ ASSERT(!openvpn_gettimeofday(&m->deferred_shutdown_signal.wakeup, NULL)); tv.tv_sec = 2; tv.tv_usec = 0; tv_add(&m->deferred_shutdown_signal.wakeup, &tv); m->deferred_shutdown_signal.signal_received = m->top.sig->signal_received; schedule_add_entry(m->schedule, (struct schedule_entry *) &m->deferred_shutdown_signal, &m->deferred_shutdown_signal.wakeup, compute_wakeup_sigma(&m->deferred_shutdown_signal.wakeup)); m->top.sig->signal_received = 0; } /* * Return true if event loop should break, * false if it should continue. */ bool multi_process_signal(struct multi_context *m) { if (m->top.sig->signal_received == SIGUSR2) { struct status_output *so = status_open(NULL, 0, M_INFO, NULL, 0); multi_print_status(m, so, m->status_file_version); status_close(so); m->top.sig->signal_received = 0; return false; } else if (proto_is_dgram(m->top.options.ce.proto) && is_exit_restart(m->top.sig->signal_received) && (m->deferred_shutdown_signal.signal_received == 0) && m->top.options.ce.explicit_exit_notification != 0) { multi_push_restart_schedule_exit(m, m->top.options.ce.explicit_exit_notification == 2); return false; } return true; } /* * Called when an instance should be closed due to the * reception of a soft signal. */ void multi_close_instance_on_signal(struct multi_context *m, struct multi_instance *mi) { remap_signal(&mi->context); set_prefix(mi); print_signal(mi->context.sig, "client-instance", D_MULTI_LOW); clear_prefix(); multi_close_instance(m, mi, false); } static void multi_signal_instance(struct multi_context *m, struct multi_instance *mi, const int sig) { mi->context.sig->signal_received = sig; multi_close_instance_on_signal(m, mi); } /* * Management subsystem callbacks */ #ifdef ENABLE_MANAGEMENT static void management_callback_status(void *arg, const int version, struct status_output *so) { struct multi_context *m = (struct multi_context *) arg; if (!version) { multi_print_status(m, so, m->status_file_version); } else { multi_print_status(m, so, version); } } static int management_callback_n_clients(void *arg) { struct multi_context *m = (struct multi_context *) arg; return m->n_clients; } static int management_callback_kill_by_cn(void *arg, const char *del_cn) { struct multi_context *m = (struct multi_context *) arg; struct hash_iterator hi; struct hash_element *he; int count = 0; hash_iterator_init(m->iter, &hi); while ((he = hash_iterator_next(&hi))) { struct multi_instance *mi = (struct multi_instance *) he->value; if (!mi->halt) { const char *cn = tls_common_name(mi->context.c2.tls_multi, false); if (cn && !strcmp(cn, del_cn)) { multi_signal_instance(m, mi, SIGTERM); ++count; } } } hash_iterator_free(&hi); return count; } static int management_callback_kill_by_addr(void *arg, const in_addr_t addr, const int port) { struct multi_context *m = (struct multi_context *) arg; struct hash_iterator hi; struct hash_element *he; struct openvpn_sockaddr saddr; struct mroute_addr maddr; int count = 0; CLEAR(saddr); saddr.addr.in4.sin_family = AF_INET; saddr.addr.in4.sin_addr.s_addr = htonl(addr); saddr.addr.in4.sin_port = htons(port); if (mroute_extract_openvpn_sockaddr(&maddr, &saddr, true)) { hash_iterator_init(m->iter, &hi); while ((he = hash_iterator_next(&hi))) { struct multi_instance *mi = (struct multi_instance *) he->value; if (!mi->halt && mroute_addr_equal(&maddr, &mi->real)) { multi_signal_instance(m, mi, SIGTERM); ++count; } } hash_iterator_free(&hi); } 
return count; } static void management_delete_event(void *arg, event_t event) { struct multi_context *m = (struct multi_context *) arg; if (m->mtcp) { multi_tcp_delete_event(m->mtcp, event); } } #endif /* ifdef ENABLE_MANAGEMENT */ #ifdef MANAGEMENT_DEF_AUTH static struct multi_instance * lookup_by_cid(struct multi_context *m, const unsigned long cid) { if (m) { struct multi_instance *mi = (struct multi_instance *) hash_lookup(m->cid_hash, &cid); if (mi && !mi->halt) { return mi; } } return NULL; } static bool management_kill_by_cid(void *arg, const unsigned long cid, const char *kill_msg) { struct multi_context *m = (struct multi_context *) arg; struct multi_instance *mi = lookup_by_cid(m, cid); if (mi) { send_restart(&mi->context, kill_msg); /* was: multi_signal_instance (m, mi, SIGTERM); */ multi_schedule_context_wakeup(m, mi); return true; } else { return false; } } static bool management_client_auth(void *arg, const unsigned long cid, const unsigned int mda_key_id, const bool auth, const char *reason, const char *client_reason, struct buffer_list *cc_config) /* ownership transferred */ { struct multi_context *m = (struct multi_context *) arg; struct multi_instance *mi = lookup_by_cid(m, cid); bool cc_config_owned = true; bool ret = false; if (mi) { ret = tls_authenticate_key(mi->context.c2.tls_multi, mda_key_id, auth, client_reason); if (ret) { if (auth) { if (!mi->connection_established_flag) { set_cc_config(mi, cc_config); cc_config_owned = false; } } else { if (reason) { msg(D_MULTI_LOW, "MULTI: connection rejected: %s, CLI:%s", reason, np(client_reason)); } if (mi->connection_established_flag) { send_auth_failed(&mi->context, client_reason); /* mid-session reauth failed */ multi_schedule_context_wakeup(m, mi); } } } } if (cc_config_owned && cc_config) { buffer_list_free(cc_config); } return ret; } static char * management_get_peer_info(void *arg, const unsigned long cid) { struct multi_context *m = (struct multi_context *) arg; struct multi_instance *mi = lookup_by_cid(m, cid); char *ret = NULL; if (mi) { ret = tls_get_peer_info(mi->context.c2.tls_multi); } return ret; } #endif /* ifdef MANAGEMENT_DEF_AUTH */ #ifdef MANAGEMENT_PF static bool management_client_pf(void *arg, const unsigned long cid, struct buffer_list *pf_config) /* ownership transferred */ { struct multi_context *m = (struct multi_context *) arg; struct multi_instance *mi = lookup_by_cid(m, cid); bool ret = false; if (mi && pf_config) { ret = pf_load_from_buffer_list(&mi->context, pf_config); } if (pf_config) { buffer_list_free(pf_config); } return ret; } #endif /* ifdef MANAGEMENT_PF */ void init_management_callback_multi(struct multi_context *m) { #ifdef ENABLE_MANAGEMENT if (management) { struct management_callback cb; CLEAR(cb); cb.arg = m; cb.flags = MCF_SERVER; cb.status = management_callback_status; cb.show_net = management_show_net_callback; cb.kill_by_cn = management_callback_kill_by_cn; cb.kill_by_addr = management_callback_kill_by_addr; cb.delete_event = management_delete_event; cb.n_clients = management_callback_n_clients; #ifdef MANAGEMENT_DEF_AUTH cb.kill_by_cid = management_kill_by_cid; cb.client_auth = management_client_auth; cb.get_peer_info = management_get_peer_info; #endif #ifdef MANAGEMENT_PF cb.client_pf = management_client_pf; #endif management_set_callback(management, &cb); } #endif /* ifdef ENABLE_MANAGEMENT */ } /* * Top level event loop. 
*/ void tunnel_server(struct context *top) { ASSERT(top->options.mode == MODE_SERVER); if (proto_is_dgram(top->options.ce.proto)) { tunnel_server_udp(top); } else { tunnel_server_tcp(top); } } #else /* if P2MP_SERVER */ static void dummy(void) { } #endif /* P2MP_SERVER */
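/*
 * Editor's note: the standalone snippet below is appended by the editor and
 * is not part of the multi.c dump above.  It only reproduces, in plain C,
 * the wakeup-fuzz arithmetic documented above compute_wakeup_sigma(), so the
 * "13.1% of timeout" figure can be checked in isolation.  The struct and
 * function names prefixed with sketch_ are invented for this example and do
 * not exist in OpenVPN.
 */
#include <stdio.h>

struct sketch_timeval {
    long tv_sec;
    long tv_usec;
};

/* Same three-way policy as compute_wakeup_sigma(); result in microseconds. */
static unsigned int sketch_wakeup_sigma(const struct sketch_timeval *delta)
{
    if (delta->tv_sec < 1)
        return delta->tv_usec >> 3;   /* < 1 s: 1/8 of the microseconds */
    if (delta->tv_sec < 600)
        return delta->tv_sec << 17;   /* < 10 min: sec * 131072 us, i.e. ~13.1% */
    return 120000000;                 /* >= 10 min: fixed fuzz of 2 minutes */
}

int main(void)
{
    struct sketch_timeval half_sec = { 0, 500000 };
    struct sketch_timeval one_min  = { 60, 0 };
    struct sketch_timeval one_hour = { 3600, 0 };

    /* 62500 us, i.e. 1/8 of 0.5 s */
    printf("0.5 s  -> sigma %u us\n", sketch_wakeup_sigma(&half_sec));
    /* 7864320 us ~= 7.86 s, about 13.1% of 60 s (2^17 / 10^6 ~= 0.131) */
    printf("60 s   -> sigma %u us\n", sketch_wakeup_sigma(&one_min));
    /* capped at 120 s no matter how long the timeout is */
    printf("3600 s -> sigma %u us\n", sketch_wakeup_sigma(&one_hour));
    return 0;
}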
./CrossVul/dataset_final_sorted/CWE-362/c/good_3971_0
crossvul-cpp_data_good_4933_1
#include <linux/init.h> #include <linux/mm.h> #include <linux/spinlock.h> #include <linux/smp.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/cpu.h> #include <asm/tlbflush.h> #include <asm/mmu_context.h> #include <asm/cache.h> #include <asm/apic.h> #include <asm/uv/uv.h> #include <linux/debugfs.h> /* * Smarter SMP flushing macros. * c/o Linus Torvalds. * * These mean you can really definitely utterly forget about * writing to user space from interrupts. (Its not allowed anyway). * * Optimizations Manfred Spraul <manfred@colorfullife.com> * * More scalable flush, from Andi Kleen * * Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi */ struct flush_tlb_info { struct mm_struct *flush_mm; unsigned long flush_start; unsigned long flush_end; }; /* * We cannot call mmdrop() because we are in interrupt context, * instead update mm->cpu_vm_mask. */ void leave_mm(int cpu) { struct mm_struct *active_mm = this_cpu_read(cpu_tlbstate.active_mm); if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) BUG(); if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) { cpumask_clear_cpu(cpu, mm_cpumask(active_mm)); load_cr3(swapper_pg_dir); /* * This gets called in the idle path where RCU * functions differently. Tracing normally * uses RCU, so we have to call the tracepoint * specially here. */ trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL); } } EXPORT_SYMBOL_GPL(leave_mm); /* * The flush IPI assumes that a thread switch happens in this order: * [cpu0: the cpu that switches] * 1) switch_mm() either 1a) or 1b) * 1a) thread switch to a different mm * 1a1) set cpu_tlbstate to TLBSTATE_OK * Now the tlb flush NMI handler flush_tlb_func won't call leave_mm * if cpu0 was in lazy tlb mode. * 1a2) update cpu active_mm * Now cpu0 accepts tlb flushes for the new mm. * 1a3) cpu_set(cpu, new_mm->cpu_vm_mask); * Now the other cpus will send tlb flush ipis. * 1a4) change cr3. * 1a5) cpu_clear(cpu, old_mm->cpu_vm_mask); * Stop ipi delivery for the old mm. This is not synchronized with * the other cpus, but flush_tlb_func ignore flush ipis for the wrong * mm, and in the worst case we perform a superfluous tlb flush. * 1b) thread switch without mm change * cpu active_mm is correct, cpu0 already handles flush ipis. * 1b1) set cpu_tlbstate to TLBSTATE_OK * 1b2) test_and_set the cpu bit in cpu_vm_mask. * Atomically set the bit [other cpus will start sending flush ipis], * and test the bit. * 1b3) if the bit was 0: leave_mm was called, flush the tlb. * 2) switch %%esp, ie current * * The interrupt must handle 2 special cases: * - cr3 is changed before %%esp, ie. it cannot use current->{active_,}mm. * - the cpu performs speculative tlb reads, i.e. even if the cpu only * runs in kernel space, the cpu could load tlb entries for user space * pages. * * The good news is that cpu_tlbstate is local to each cpu, no * write/read ordering problems. */ /* * TLB flush funcation: * 1) Flush the tlb entries if the cpu uses the mm that's being flushed. * 2) Leave the mm if we are in the lazy tlb mode. 
*/ static void flush_tlb_func(void *info) { struct flush_tlb_info *f = info; inc_irq_stat(irq_tlb_count); if (f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm)) return; if (!f->flush_end) f->flush_end = f->flush_start + PAGE_SIZE; count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED); if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) { if (f->flush_end == TLB_FLUSH_ALL) { local_flush_tlb(); trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, TLB_FLUSH_ALL); } else { unsigned long addr; unsigned long nr_pages = (f->flush_end - f->flush_start) / PAGE_SIZE; addr = f->flush_start; while (addr < f->flush_end) { __flush_tlb_single(addr); addr += PAGE_SIZE; } trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, nr_pages); } } else leave_mm(smp_processor_id()); } void native_flush_tlb_others(const struct cpumask *cpumask, struct mm_struct *mm, unsigned long start, unsigned long end) { struct flush_tlb_info info; info.flush_mm = mm; info.flush_start = start; info.flush_end = end; count_vm_tlb_event(NR_TLB_REMOTE_FLUSH); trace_tlb_flush(TLB_REMOTE_SEND_IPI, end - start); if (is_uv_system()) { unsigned int cpu; cpu = smp_processor_id(); cpumask = uv_flush_tlb_others(cpumask, mm, start, end, cpu); if (cpumask) smp_call_function_many(cpumask, flush_tlb_func, &info, 1); return; } smp_call_function_many(cpumask, flush_tlb_func, &info, 1); } void flush_tlb_current_task(void) { struct mm_struct *mm = current->mm; preempt_disable(); count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL); /* This is an implicit full barrier that synchronizes with switch_mm. */ local_flush_tlb(); trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL); if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids) flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL); preempt_enable(); } /* * See Documentation/x86/tlb.txt for details. We choose 33 * because it is large enough to cover the vast majority (at * least 95%) of allocations, and is small enough that we are * confident it will not cause too much overhead. Each single * flush is about 100 ns, so this caps the maximum overhead at * _about_ 3,000 ns. * * This is in units of pages. */ static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33; void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long vmflag) { unsigned long addr; /* do a global flush by default */ unsigned long base_pages_to_flush = TLB_FLUSH_ALL; preempt_disable(); if (current->active_mm != mm) { /* Synchronize with switch_mm. */ smp_mb(); goto out; } if (!current->mm) { leave_mm(smp_processor_id()); /* Synchronize with switch_mm. */ smp_mb(); goto out; } if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB)) base_pages_to_flush = (end - start) >> PAGE_SHIFT; /* * Both branches below are implicit full barriers (MOV to CR or * INVLPG) that synchronize with switch_mm. 
*/ if (base_pages_to_flush > tlb_single_page_flush_ceiling) { base_pages_to_flush = TLB_FLUSH_ALL; count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL); local_flush_tlb(); } else { /* flush range by one by one 'invlpg' */ for (addr = start; addr < end; addr += PAGE_SIZE) { count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE); __flush_tlb_single(addr); } } trace_tlb_flush(TLB_LOCAL_MM_SHOOTDOWN, base_pages_to_flush); out: if (base_pages_to_flush == TLB_FLUSH_ALL) { start = 0UL; end = TLB_FLUSH_ALL; } if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids) flush_tlb_others(mm_cpumask(mm), mm, start, end); preempt_enable(); } void flush_tlb_page(struct vm_area_struct *vma, unsigned long start) { struct mm_struct *mm = vma->vm_mm; preempt_disable(); if (current->active_mm == mm) { if (current->mm) { /* * Implicit full barrier (INVLPG) that synchronizes * with switch_mm. */ __flush_tlb_one(start); } else { leave_mm(smp_processor_id()); /* Synchronize with switch_mm. */ smp_mb(); } } if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids) flush_tlb_others(mm_cpumask(mm), mm, start, 0UL); preempt_enable(); } static void do_flush_tlb_all(void *info) { count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED); __flush_tlb_all(); if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY) leave_mm(smp_processor_id()); } void flush_tlb_all(void) { count_vm_tlb_event(NR_TLB_REMOTE_FLUSH); on_each_cpu(do_flush_tlb_all, NULL, 1); } static void do_kernel_range_flush(void *info) { struct flush_tlb_info *f = info; unsigned long addr; /* flush range by one by one 'invlpg' */ for (addr = f->flush_start; addr < f->flush_end; addr += PAGE_SIZE) __flush_tlb_single(addr); } void flush_tlb_kernel_range(unsigned long start, unsigned long end) { /* Balance as user space task's flush, a bit conservative */ if (end == TLB_FLUSH_ALL || (end - start) > tlb_single_page_flush_ceiling * PAGE_SIZE) { on_each_cpu(do_flush_tlb_all, NULL, 1); } else { struct flush_tlb_info info; info.flush_start = start; info.flush_end = end; on_each_cpu(do_kernel_range_flush, &info, 1); } } static ssize_t tlbflush_read_file(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { char buf[32]; unsigned int len; len = sprintf(buf, "%ld\n", tlb_single_page_flush_ceiling); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static ssize_t tlbflush_write_file(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { char buf[32]; ssize_t len; int ceiling; len = min(count, sizeof(buf) - 1); if (copy_from_user(buf, user_buf, len)) return -EFAULT; buf[len] = '\0'; if (kstrtoint(buf, 0, &ceiling)) return -EINVAL; if (ceiling < 0) return -EINVAL; tlb_single_page_flush_ceiling = ceiling; return count; } static const struct file_operations fops_tlbflush = { .read = tlbflush_read_file, .write = tlbflush_write_file, .llseek = default_llseek, }; static int __init create_tlb_single_page_flush_ceiling(void) { debugfs_create_file("tlb_single_page_flush_ceiling", S_IRUSR | S_IWUSR, arch_debugfs_dir, NULL, &fops_tlbflush); return 0; } late_initcall(create_tlb_single_page_flush_ceiling);
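/*
 * Editor's note: the standalone sketch below is not part of the tlb.c dump
 * above.  It restates, in plain user-space C, the flush-sizing policy that
 * flush_tlb_mm_range() applies around tlb_single_page_flush_ceiling: small
 * ranges are flushed one page at a time, anything larger falls back to a
 * full TLB flush.  Names prefixed with sketch_ are invented for this example
 * and do not exist in the kernel.
 */
#include <stdio.h>

#define SKETCH_PAGE_SHIFT 12
#define SKETCH_PAGE_SIZE  (1UL << SKETCH_PAGE_SHIFT)
#define SKETCH_FLUSH_ALL  (~0UL)

/* Mirrors the default ceiling of 33 pages chosen in the file above. */
static unsigned long sketch_flush_ceiling = 33;

/*
 * Returns the number of single-page flushes that would be issued, or
 * SKETCH_FLUSH_ALL when the range is large enough that a full flush is the
 * cheaper choice (the trade-off the ceiling comment above describes).
 */
static unsigned long sketch_pages_to_flush(unsigned long start, unsigned long end)
{
    unsigned long pages = (end - start) >> SKETCH_PAGE_SHIFT;

    if (pages > sketch_flush_ceiling)
        return SKETCH_FLUSH_ALL;
    return pages;
}

int main(void)
{
    /* 16 pages: below the ceiling, flushed one invlpg at a time. */
    unsigned long small = sketch_pages_to_flush(0, 16 * SKETCH_PAGE_SIZE);
    /* 64 pages: above the ceiling, a full flush is used instead. */
    unsigned long large = sketch_pages_to_flush(0, 64 * SKETCH_PAGE_SIZE);

    printf("16-page range -> %lu single-page flushes\n", small);
    printf("64-page range -> %s\n",
           large == SKETCH_FLUSH_ALL ? "full TLB flush" : "per-page flushes");
    return 0;
}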
./CrossVul/dataset_final_sorted/CWE-362/c/good_4933_1
crossvul-cpp_data_bad_5188_0
/* * cros_ec_dev - expose the Chrome OS Embedded Controller to user-space * * Copyright (C) 2014 Google, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/fs.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/uaccess.h> #include "cros_ec_dev.h" /* Device variables */ #define CROS_MAX_DEV 128 static int ec_major; static const struct attribute_group *cros_ec_groups[] = { &cros_ec_attr_group, &cros_ec_lightbar_attr_group, &cros_ec_vbc_attr_group, NULL, }; static struct class cros_class = { .owner = THIS_MODULE, .name = "chromeos", .dev_groups = cros_ec_groups, }; /* Basic communication */ static int ec_get_version(struct cros_ec_dev *ec, char *str, int maxlen) { struct ec_response_get_version *resp; static const char * const current_image_name[] = { "unknown", "read-only", "read-write", "invalid", }; struct cros_ec_command *msg; int ret; msg = kmalloc(sizeof(*msg) + sizeof(*resp), GFP_KERNEL); if (!msg) return -ENOMEM; msg->version = 0; msg->command = EC_CMD_GET_VERSION + ec->cmd_offset; msg->insize = sizeof(*resp); msg->outsize = 0; ret = cros_ec_cmd_xfer(ec->ec_dev, msg); if (ret < 0) goto exit; if (msg->result != EC_RES_SUCCESS) { snprintf(str, maxlen, "%s\nUnknown EC version: EC returned %d\n", CROS_EC_DEV_VERSION, msg->result); ret = -EINVAL; goto exit; } resp = (struct ec_response_get_version *)msg->data; if (resp->current_image >= ARRAY_SIZE(current_image_name)) resp->current_image = 3; /* invalid */ snprintf(str, maxlen, "%s\n%s\n%s\n%s\n", CROS_EC_DEV_VERSION, resp->version_string_ro, resp->version_string_rw, current_image_name[resp->current_image]); ret = 0; exit: kfree(msg); return ret; } /* Device file ops */ static int ec_device_open(struct inode *inode, struct file *filp) { struct cros_ec_dev *ec = container_of(inode->i_cdev, struct cros_ec_dev, cdev); filp->private_data = ec; nonseekable_open(inode, filp); return 0; } static int ec_device_release(struct inode *inode, struct file *filp) { return 0; } static ssize_t ec_device_read(struct file *filp, char __user *buffer, size_t length, loff_t *offset) { struct cros_ec_dev *ec = filp->private_data; char msg[sizeof(struct ec_response_get_version) + sizeof(CROS_EC_DEV_VERSION)]; size_t count; int ret; if (*offset != 0) return 0; ret = ec_get_version(ec, msg, sizeof(msg)); if (ret) return ret; count = min(length, strlen(msg)); if (copy_to_user(buffer, msg, count)) return -EFAULT; *offset = count; return count; } /* Ioctls */ static long ec_device_ioctl_xcmd(struct cros_ec_dev *ec, void __user *arg) { long ret; struct cros_ec_command u_cmd; struct cros_ec_command *s_cmd; if (copy_from_user(&u_cmd, arg, sizeof(u_cmd))) return -EFAULT; if ((u_cmd.outsize > EC_MAX_MSG_BYTES) || (u_cmd.insize > EC_MAX_MSG_BYTES)) return -EINVAL; s_cmd = kmalloc(sizeof(*s_cmd) + max(u_cmd.outsize, u_cmd.insize), GFP_KERNEL); if (!s_cmd) return -ENOMEM; if (copy_from_user(s_cmd, arg, sizeof(*s_cmd) + 
u_cmd.outsize)) { ret = -EFAULT; goto exit; } s_cmd->command += ec->cmd_offset; ret = cros_ec_cmd_xfer(ec->ec_dev, s_cmd); /* Only copy data to userland if data was received. */ if (ret < 0) goto exit; if (copy_to_user(arg, s_cmd, sizeof(*s_cmd) + u_cmd.insize)) ret = -EFAULT; exit: kfree(s_cmd); return ret; } static long ec_device_ioctl_readmem(struct cros_ec_dev *ec, void __user *arg) { struct cros_ec_device *ec_dev = ec->ec_dev; struct cros_ec_readmem s_mem = { }; long num; /* Not every platform supports direct reads */ if (!ec_dev->cmd_readmem) return -ENOTTY; if (copy_from_user(&s_mem, arg, sizeof(s_mem))) return -EFAULT; num = ec_dev->cmd_readmem(ec_dev, s_mem.offset, s_mem.bytes, s_mem.buffer); if (num <= 0) return num; if (copy_to_user((void __user *)arg, &s_mem, sizeof(s_mem))) return -EFAULT; return 0; } static long ec_device_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct cros_ec_dev *ec = filp->private_data; if (_IOC_TYPE(cmd) != CROS_EC_DEV_IOC) return -ENOTTY; switch (cmd) { case CROS_EC_DEV_IOCXCMD: return ec_device_ioctl_xcmd(ec, (void __user *)arg); case CROS_EC_DEV_IOCRDMEM: return ec_device_ioctl_readmem(ec, (void __user *)arg); } return -ENOTTY; } /* Module initialization */ static const struct file_operations fops = { .open = ec_device_open, .release = ec_device_release, .read = ec_device_read, .unlocked_ioctl = ec_device_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = ec_device_ioctl, #endif }; static void __remove(struct device *dev) { struct cros_ec_dev *ec = container_of(dev, struct cros_ec_dev, class_dev); kfree(ec); } static int ec_device_probe(struct platform_device *pdev) { int retval = -ENOMEM; struct device *dev = &pdev->dev; struct cros_ec_platform *ec_platform = dev_get_platdata(dev); dev_t devno = MKDEV(ec_major, pdev->id); struct cros_ec_dev *ec = kzalloc(sizeof(*ec), GFP_KERNEL); if (!ec) return retval; dev_set_drvdata(dev, ec); ec->ec_dev = dev_get_drvdata(dev->parent); ec->dev = dev; ec->cmd_offset = ec_platform->cmd_offset; device_initialize(&ec->class_dev); cdev_init(&ec->cdev, &fops); /* * Add the character device * Link cdev to the class device to be sure device is not used * before unbinding it. */ ec->cdev.kobj.parent = &ec->class_dev.kobj; retval = cdev_add(&ec->cdev, devno, 1); if (retval) { dev_err(dev, ": failed to add character device\n"); goto cdev_add_failed; } /* * Add the class device * Link to the character device for creating the /dev entry * in devtmpfs. 
*/ ec->class_dev.devt = ec->cdev.dev; ec->class_dev.class = &cros_class; ec->class_dev.parent = dev; ec->class_dev.release = __remove; retval = dev_set_name(&ec->class_dev, "%s", ec_platform->ec_name); if (retval) { dev_err(dev, "dev_set_name failed => %d\n", retval); goto set_named_failed; } retval = device_add(&ec->class_dev); if (retval) { dev_err(dev, "device_register failed => %d\n", retval); goto dev_reg_failed; } return 0; dev_reg_failed: set_named_failed: dev_set_drvdata(dev, NULL); cdev_del(&ec->cdev); cdev_add_failed: kfree(ec); return retval; } static int ec_device_remove(struct platform_device *pdev) { struct cros_ec_dev *ec = dev_get_drvdata(&pdev->dev); cdev_del(&ec->cdev); device_unregister(&ec->class_dev); return 0; } static const struct platform_device_id cros_ec_id[] = { { "cros-ec-ctl", 0 }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(platform, cros_ec_id); static struct platform_driver cros_ec_dev_driver = { .driver = { .name = "cros-ec-ctl", }, .probe = ec_device_probe, .remove = ec_device_remove, }; static int __init cros_ec_dev_init(void) { int ret; dev_t dev = 0; ret = class_register(&cros_class); if (ret) { pr_err(CROS_EC_DEV_NAME ": failed to register device class\n"); return ret; } /* Get a range of minor numbers (starting with 0) to work with */ ret = alloc_chrdev_region(&dev, 0, CROS_MAX_DEV, CROS_EC_DEV_NAME); if (ret < 0) { pr_err(CROS_EC_DEV_NAME ": alloc_chrdev_region() failed\n"); goto failed_chrdevreg; } ec_major = MAJOR(dev); /* Register the driver */ ret = platform_driver_register(&cros_ec_dev_driver); if (ret < 0) { pr_warn(CROS_EC_DEV_NAME ": can't register driver: %d\n", ret); goto failed_devreg; } return 0; failed_devreg: unregister_chrdev_region(MKDEV(ec_major, 0), CROS_MAX_DEV); failed_chrdevreg: class_unregister(&cros_class); return ret; } static void __exit cros_ec_dev_exit(void) { platform_driver_unregister(&cros_ec_dev_driver); unregister_chrdev(ec_major, CROS_EC_DEV_NAME); class_unregister(&cros_class); } module_init(cros_ec_dev_init); module_exit(cros_ec_dev_exit); MODULE_AUTHOR("Bill Richardson <wfrichar@chromium.org>"); MODULE_DESCRIPTION("Userspace interface to the Chrome OS Embedded Controller"); MODULE_VERSION("1.0"); MODULE_LICENSE("GPL");
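/*
 * Editor's note: this sketch is appended by the editor and is not part of
 * the cros_ec_dev.c dump above.  The ioctl path above copies the
 * user-supplied cros_ec_command header twice: once to size the kernel
 * allocation and once (together with the payload) into that allocation.
 * Because user space can rewrite the header between the two copies, the
 * sizes that sized the buffer and the sizes trusted afterwards can
 * disagree.  The user-space model below illustrates one common hardening
 * pattern for such double fetches: re-validate the second copy against the
 * first and reject any mismatch.  All names here are invented for the
 * sketch; it is not presented as the actual kernel patch.
 */
#include <stdio.h>

struct sketch_cmd {
    unsigned int outsize;
    unsigned int insize;
};

/*
 * first  - the header values the allocation was sized from
 * second - the header as re-read during the second copy
 */
static int sketch_validate(const struct sketch_cmd *first,
                           const struct sketch_cmd *second)
{
    /* Reject the request if a racing writer changed either size. */
    if (first->outsize != second->outsize || first->insize != second->insize)
        return -1;
    return 0;
}

int main(void)
{
    struct sketch_cmd first  = { .outsize = 16, .insize = 32 };
    struct sketch_cmd second = first;

    printf("unchanged header: %s\n",
           sketch_validate(&first, &second) == 0 ? "accepted" : "rejected");

    /* Simulate a concurrent writer growing outsize after the first fetch. */
    second.outsize = 4096;
    printf("raced header:     %s\n",
           sketch_validate(&first, &second) == 0 ? "accepted" : "rejected");
    return 0;
}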
./CrossVul/dataset_final_sorted/CWE-362/c/bad_5188_0
crossvul-cpp_data_good_829_1
// SPDX-License-Identifier: GPL-2.0 #include <linux/mm.h> #include <linux/vmacache.h> #include <linux/hugetlb.h> #include <linux/huge_mm.h> #include <linux/mount.h> #include <linux/seq_file.h> #include <linux/highmem.h> #include <linux/ptrace.h> #include <linux/slab.h> #include <linux/pagemap.h> #include <linux/mempolicy.h> #include <linux/rmap.h> #include <linux/swap.h> #include <linux/sched/mm.h> #include <linux/swapops.h> #include <linux/mmu_notifier.h> #include <linux/page_idle.h> #include <linux/shmem_fs.h> #include <linux/uaccess.h> #include <linux/pkeys.h> #include <asm/elf.h> #include <asm/tlb.h> #include <asm/tlbflush.h> #include "internal.h" #define SEQ_PUT_DEC(str, val) \ seq_put_decimal_ull_width(m, str, (val) << (PAGE_SHIFT-10), 8) void task_mem(struct seq_file *m, struct mm_struct *mm) { unsigned long text, lib, swap, anon, file, shmem; unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss; anon = get_mm_counter(mm, MM_ANONPAGES); file = get_mm_counter(mm, MM_FILEPAGES); shmem = get_mm_counter(mm, MM_SHMEMPAGES); /* * Note: to minimize their overhead, mm maintains hiwater_vm and * hiwater_rss only when about to *lower* total_vm or rss. Any * collector of these hiwater stats must therefore get total_vm * and rss too, which will usually be the higher. Barriers? not * worth the effort, such snapshots can always be inconsistent. */ hiwater_vm = total_vm = mm->total_vm; if (hiwater_vm < mm->hiwater_vm) hiwater_vm = mm->hiwater_vm; hiwater_rss = total_rss = anon + file + shmem; if (hiwater_rss < mm->hiwater_rss) hiwater_rss = mm->hiwater_rss; /* split executable areas between text and lib */ text = PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK); text = min(text, mm->exec_vm << PAGE_SHIFT); lib = (mm->exec_vm << PAGE_SHIFT) - text; swap = get_mm_counter(mm, MM_SWAPENTS); SEQ_PUT_DEC("VmPeak:\t", hiwater_vm); SEQ_PUT_DEC(" kB\nVmSize:\t", total_vm); SEQ_PUT_DEC(" kB\nVmLck:\t", mm->locked_vm); SEQ_PUT_DEC(" kB\nVmPin:\t", atomic64_read(&mm->pinned_vm)); SEQ_PUT_DEC(" kB\nVmHWM:\t", hiwater_rss); SEQ_PUT_DEC(" kB\nVmRSS:\t", total_rss); SEQ_PUT_DEC(" kB\nRssAnon:\t", anon); SEQ_PUT_DEC(" kB\nRssFile:\t", file); SEQ_PUT_DEC(" kB\nRssShmem:\t", shmem); SEQ_PUT_DEC(" kB\nVmData:\t", mm->data_vm); SEQ_PUT_DEC(" kB\nVmStk:\t", mm->stack_vm); seq_put_decimal_ull_width(m, " kB\nVmExe:\t", text >> 10, 8); seq_put_decimal_ull_width(m, " kB\nVmLib:\t", lib >> 10, 8); seq_put_decimal_ull_width(m, " kB\nVmPTE:\t", mm_pgtables_bytes(mm) >> 10, 8); SEQ_PUT_DEC(" kB\nVmSwap:\t", swap); seq_puts(m, " kB\n"); hugetlb_report_usage(m, mm); } #undef SEQ_PUT_DEC unsigned long task_vsize(struct mm_struct *mm) { return PAGE_SIZE * mm->total_vm; } unsigned long task_statm(struct mm_struct *mm, unsigned long *shared, unsigned long *text, unsigned long *data, unsigned long *resident) { *shared = get_mm_counter(mm, MM_FILEPAGES) + get_mm_counter(mm, MM_SHMEMPAGES); *text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> PAGE_SHIFT; *data = mm->data_vm + mm->stack_vm; *resident = *shared + get_mm_counter(mm, MM_ANONPAGES); return mm->total_vm; } #ifdef CONFIG_NUMA /* * Save get_task_policy() for show_numa_map(). 
*/ static void hold_task_mempolicy(struct proc_maps_private *priv) { struct task_struct *task = priv->task; task_lock(task); priv->task_mempolicy = get_task_policy(task); mpol_get(priv->task_mempolicy); task_unlock(task); } static void release_task_mempolicy(struct proc_maps_private *priv) { mpol_put(priv->task_mempolicy); } #else static void hold_task_mempolicy(struct proc_maps_private *priv) { } static void release_task_mempolicy(struct proc_maps_private *priv) { } #endif static void vma_stop(struct proc_maps_private *priv) { struct mm_struct *mm = priv->mm; release_task_mempolicy(priv); up_read(&mm->mmap_sem); mmput(mm); } static struct vm_area_struct * m_next_vma(struct proc_maps_private *priv, struct vm_area_struct *vma) { if (vma == priv->tail_vma) return NULL; return vma->vm_next ?: priv->tail_vma; } static void m_cache_vma(struct seq_file *m, struct vm_area_struct *vma) { if (m->count < m->size) /* vma is copied successfully */ m->version = m_next_vma(m->private, vma) ? vma->vm_end : -1UL; } static void *m_start(struct seq_file *m, loff_t *ppos) { struct proc_maps_private *priv = m->private; unsigned long last_addr = m->version; struct mm_struct *mm; struct vm_area_struct *vma; unsigned int pos = *ppos; /* See m_cache_vma(). Zero at the start or after lseek. */ if (last_addr == -1UL) return NULL; priv->task = get_proc_task(priv->inode); if (!priv->task) return ERR_PTR(-ESRCH); mm = priv->mm; if (!mm || !mmget_not_zero(mm)) return NULL; down_read(&mm->mmap_sem); hold_task_mempolicy(priv); priv->tail_vma = get_gate_vma(mm); if (last_addr) { vma = find_vma(mm, last_addr - 1); if (vma && vma->vm_start <= last_addr) vma = m_next_vma(priv, vma); if (vma) return vma; } m->version = 0; if (pos < mm->map_count) { for (vma = mm->mmap; pos; pos--) { m->version = vma->vm_start; vma = vma->vm_next; } return vma; } /* we do not bother to update m->version in this case */ if (pos == mm->map_count && priv->tail_vma) return priv->tail_vma; vma_stop(priv); return NULL; } static void *m_next(struct seq_file *m, void *v, loff_t *pos) { struct proc_maps_private *priv = m->private; struct vm_area_struct *next; (*pos)++; next = m_next_vma(priv, v); if (!next) vma_stop(priv); return next; } static void m_stop(struct seq_file *m, void *v) { struct proc_maps_private *priv = m->private; if (!IS_ERR_OR_NULL(v)) vma_stop(priv); if (priv->task) { put_task_struct(priv->task); priv->task = NULL; } } static int proc_maps_open(struct inode *inode, struct file *file, const struct seq_operations *ops, int psize) { struct proc_maps_private *priv = __seq_open_private(file, ops, psize); if (!priv) return -ENOMEM; priv->inode = inode; priv->mm = proc_mem_open(inode, PTRACE_MODE_READ); if (IS_ERR(priv->mm)) { int err = PTR_ERR(priv->mm); seq_release_private(inode, file); return err; } return 0; } static int proc_map_release(struct inode *inode, struct file *file) { struct seq_file *seq = file->private_data; struct proc_maps_private *priv = seq->private; if (priv->mm) mmdrop(priv->mm); return seq_release_private(inode, file); } static int do_maps_open(struct inode *inode, struct file *file, const struct seq_operations *ops) { return proc_maps_open(inode, file, ops, sizeof(struct proc_maps_private)); } /* * Indicate if the VMA is a stack for the given task; for * /proc/PID/maps that is the stack of the main task. */ static int is_stack(struct vm_area_struct *vma) { /* * We make no effort to guess what a given thread considers to be * its "stack". It's not even well-defined for programs written * languages like Go. 
*/ return vma->vm_start <= vma->vm_mm->start_stack && vma->vm_end >= vma->vm_mm->start_stack; } static void show_vma_header_prefix(struct seq_file *m, unsigned long start, unsigned long end, vm_flags_t flags, unsigned long long pgoff, dev_t dev, unsigned long ino) { seq_setwidth(m, 25 + sizeof(void *) * 6 - 1); seq_put_hex_ll(m, NULL, start, 8); seq_put_hex_ll(m, "-", end, 8); seq_putc(m, ' '); seq_putc(m, flags & VM_READ ? 'r' : '-'); seq_putc(m, flags & VM_WRITE ? 'w' : '-'); seq_putc(m, flags & VM_EXEC ? 'x' : '-'); seq_putc(m, flags & VM_MAYSHARE ? 's' : 'p'); seq_put_hex_ll(m, " ", pgoff, 8); seq_put_hex_ll(m, " ", MAJOR(dev), 2); seq_put_hex_ll(m, ":", MINOR(dev), 2); seq_put_decimal_ull(m, " ", ino); seq_putc(m, ' '); } static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma) { struct mm_struct *mm = vma->vm_mm; struct file *file = vma->vm_file; vm_flags_t flags = vma->vm_flags; unsigned long ino = 0; unsigned long long pgoff = 0; unsigned long start, end; dev_t dev = 0; const char *name = NULL; if (file) { struct inode *inode = file_inode(vma->vm_file); dev = inode->i_sb->s_dev; ino = inode->i_ino; pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT; } start = vma->vm_start; end = vma->vm_end; show_vma_header_prefix(m, start, end, flags, pgoff, dev, ino); /* * Print the dentry name for named mappings, and a * special [heap] marker for the heap: */ if (file) { seq_pad(m, ' '); seq_file_path(m, file, "\n"); goto done; } if (vma->vm_ops && vma->vm_ops->name) { name = vma->vm_ops->name(vma); if (name) goto done; } name = arch_vma_name(vma); if (!name) { if (!mm) { name = "[vdso]"; goto done; } if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) { name = "[heap]"; goto done; } if (is_stack(vma)) name = "[stack]"; } done: if (name) { seq_pad(m, ' '); seq_puts(m, name); } seq_putc(m, '\n'); } static int show_map(struct seq_file *m, void *v) { show_map_vma(m, v); m_cache_vma(m, v); return 0; } static const struct seq_operations proc_pid_maps_op = { .start = m_start, .next = m_next, .stop = m_stop, .show = show_map }; static int pid_maps_open(struct inode *inode, struct file *file) { return do_maps_open(inode, file, &proc_pid_maps_op); } const struct file_operations proc_pid_maps_operations = { .open = pid_maps_open, .read = seq_read, .llseek = seq_lseek, .release = proc_map_release, }; /* * Proportional Set Size(PSS): my share of RSS. * * PSS of a process is the count of pages it has in memory, where each * page is divided by the number of processes sharing it. So if a * process has 1000 pages all to itself, and 1000 shared with one other * process, its PSS will be 1500. * * To keep (accumulated) division errors low, we adopt a 64bit * fixed-point pss counter to minimize division errors. So (pss >> * PSS_SHIFT) would be the real byte count. * * A shift of 12 before division means (assuming 4K page size): * - 1M 3-user-pages add up to 8KB errors; * - supports mapcount up to 2^24, or 16M; * - supports PSS up to 2^52 bytes, or 4PB. 
*/ #define PSS_SHIFT 12 #ifdef CONFIG_PROC_PAGE_MONITOR struct mem_size_stats { unsigned long resident; unsigned long shared_clean; unsigned long shared_dirty; unsigned long private_clean; unsigned long private_dirty; unsigned long referenced; unsigned long anonymous; unsigned long lazyfree; unsigned long anonymous_thp; unsigned long shmem_thp; unsigned long swap; unsigned long shared_hugetlb; unsigned long private_hugetlb; u64 pss; u64 pss_locked; u64 swap_pss; bool check_shmem_swap; }; static void smaps_account(struct mem_size_stats *mss, struct page *page, bool compound, bool young, bool dirty, bool locked) { int i, nr = compound ? 1 << compound_order(page) : 1; unsigned long size = nr * PAGE_SIZE; if (PageAnon(page)) { mss->anonymous += size; if (!PageSwapBacked(page) && !dirty && !PageDirty(page)) mss->lazyfree += size; } mss->resident += size; /* Accumulate the size in pages that have been accessed. */ if (young || page_is_young(page) || PageReferenced(page)) mss->referenced += size; /* * page_count(page) == 1 guarantees the page is mapped exactly once. * If any subpage of the compound page mapped with PTE it would elevate * page_count(). */ if (page_count(page) == 1) { if (dirty || PageDirty(page)) mss->private_dirty += size; else mss->private_clean += size; mss->pss += (u64)size << PSS_SHIFT; if (locked) mss->pss_locked += (u64)size << PSS_SHIFT; return; } for (i = 0; i < nr; i++, page++) { int mapcount = page_mapcount(page); unsigned long pss = (PAGE_SIZE << PSS_SHIFT); if (mapcount >= 2) { if (dirty || PageDirty(page)) mss->shared_dirty += PAGE_SIZE; else mss->shared_clean += PAGE_SIZE; mss->pss += pss / mapcount; if (locked) mss->pss_locked += pss / mapcount; } else { if (dirty || PageDirty(page)) mss->private_dirty += PAGE_SIZE; else mss->private_clean += PAGE_SIZE; mss->pss += pss; if (locked) mss->pss_locked += pss; } } } #ifdef CONFIG_SHMEM static int smaps_pte_hole(unsigned long addr, unsigned long end, struct mm_walk *walk) { struct mem_size_stats *mss = walk->private; mss->swap += shmem_partial_swap_usage( walk->vma->vm_file->f_mapping, addr, end); return 0; } #endif static void smaps_pte_entry(pte_t *pte, unsigned long addr, struct mm_walk *walk) { struct mem_size_stats *mss = walk->private; struct vm_area_struct *vma = walk->vma; bool locked = !!(vma->vm_flags & VM_LOCKED); struct page *page = NULL; if (pte_present(*pte)) { page = vm_normal_page(vma, addr, *pte); } else if (is_swap_pte(*pte)) { swp_entry_t swpent = pte_to_swp_entry(*pte); if (!non_swap_entry(swpent)) { int mapcount; mss->swap += PAGE_SIZE; mapcount = swp_swapcount(swpent); if (mapcount >= 2) { u64 pss_delta = (u64)PAGE_SIZE << PSS_SHIFT; do_div(pss_delta, mapcount); mss->swap_pss += pss_delta; } else { mss->swap_pss += (u64)PAGE_SIZE << PSS_SHIFT; } } else if (is_migration_entry(swpent)) page = migration_entry_to_page(swpent); else if (is_device_private_entry(swpent)) page = device_private_entry_to_page(swpent); } else if (unlikely(IS_ENABLED(CONFIG_SHMEM) && mss->check_shmem_swap && pte_none(*pte))) { page = find_get_entry(vma->vm_file->f_mapping, linear_page_index(vma, addr)); if (!page) return; if (xa_is_value(page)) mss->swap += PAGE_SIZE; else put_page(page); return; } if (!page) return; smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte), locked); } #ifdef CONFIG_TRANSPARENT_HUGEPAGE static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr, struct mm_walk *walk) { struct mem_size_stats *mss = walk->private; struct vm_area_struct *vma = walk->vma; bool locked = !!(vma->vm_flags & 
VM_LOCKED); struct page *page; /* FOLL_DUMP will return -EFAULT on huge zero page */ page = follow_trans_huge_pmd(vma, addr, pmd, FOLL_DUMP); if (IS_ERR_OR_NULL(page)) return; if (PageAnon(page)) mss->anonymous_thp += HPAGE_PMD_SIZE; else if (PageSwapBacked(page)) mss->shmem_thp += HPAGE_PMD_SIZE; else if (is_zone_device_page(page)) /* pass */; else VM_BUG_ON_PAGE(1, page); smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd), locked); } #else static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr, struct mm_walk *walk) { } #endif static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, struct mm_walk *walk) { struct vm_area_struct *vma = walk->vma; pte_t *pte; spinlock_t *ptl; ptl = pmd_trans_huge_lock(pmd, vma); if (ptl) { if (pmd_present(*pmd)) smaps_pmd_entry(pmd, addr, walk); spin_unlock(ptl); goto out; } if (pmd_trans_unstable(pmd)) goto out; /* * The mmap_sem held all the way back in m_start() is what * keeps khugepaged out of here and from collapsing things * in here. */ pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); for (; addr != end; pte++, addr += PAGE_SIZE) smaps_pte_entry(pte, addr, walk); pte_unmap_unlock(pte - 1, ptl); out: cond_resched(); return 0; } static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma) { /* * Don't forget to update Documentation/ on changes. */ static const char mnemonics[BITS_PER_LONG][2] = { /* * In case if we meet a flag we don't know about. */ [0 ... (BITS_PER_LONG-1)] = "??", [ilog2(VM_READ)] = "rd", [ilog2(VM_WRITE)] = "wr", [ilog2(VM_EXEC)] = "ex", [ilog2(VM_SHARED)] = "sh", [ilog2(VM_MAYREAD)] = "mr", [ilog2(VM_MAYWRITE)] = "mw", [ilog2(VM_MAYEXEC)] = "me", [ilog2(VM_MAYSHARE)] = "ms", [ilog2(VM_GROWSDOWN)] = "gd", [ilog2(VM_PFNMAP)] = "pf", [ilog2(VM_DENYWRITE)] = "dw", #ifdef CONFIG_X86_INTEL_MPX [ilog2(VM_MPX)] = "mp", #endif [ilog2(VM_LOCKED)] = "lo", [ilog2(VM_IO)] = "io", [ilog2(VM_SEQ_READ)] = "sr", [ilog2(VM_RAND_READ)] = "rr", [ilog2(VM_DONTCOPY)] = "dc", [ilog2(VM_DONTEXPAND)] = "de", [ilog2(VM_ACCOUNT)] = "ac", [ilog2(VM_NORESERVE)] = "nr", [ilog2(VM_HUGETLB)] = "ht", [ilog2(VM_SYNC)] = "sf", [ilog2(VM_ARCH_1)] = "ar", [ilog2(VM_WIPEONFORK)] = "wf", [ilog2(VM_DONTDUMP)] = "dd", #ifdef CONFIG_MEM_SOFT_DIRTY [ilog2(VM_SOFTDIRTY)] = "sd", #endif [ilog2(VM_MIXEDMAP)] = "mm", [ilog2(VM_HUGEPAGE)] = "hg", [ilog2(VM_NOHUGEPAGE)] = "nh", [ilog2(VM_MERGEABLE)] = "mg", [ilog2(VM_UFFD_MISSING)]= "um", [ilog2(VM_UFFD_WP)] = "uw", #ifdef CONFIG_ARCH_HAS_PKEYS /* These come out via ProtectionKey: */ [ilog2(VM_PKEY_BIT0)] = "", [ilog2(VM_PKEY_BIT1)] = "", [ilog2(VM_PKEY_BIT2)] = "", [ilog2(VM_PKEY_BIT3)] = "", #if VM_PKEY_BIT4 [ilog2(VM_PKEY_BIT4)] = "", #endif #endif /* CONFIG_ARCH_HAS_PKEYS */ }; size_t i; seq_puts(m, "VmFlags: "); for (i = 0; i < BITS_PER_LONG; i++) { if (!mnemonics[i][0]) continue; if (vma->vm_flags & (1UL << i)) { seq_putc(m, mnemonics[i][0]); seq_putc(m, mnemonics[i][1]); seq_putc(m, ' '); } } seq_putc(m, '\n'); } #ifdef CONFIG_HUGETLB_PAGE static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask, unsigned long addr, unsigned long end, struct mm_walk *walk) { struct mem_size_stats *mss = walk->private; struct vm_area_struct *vma = walk->vma; struct page *page = NULL; if (pte_present(*pte)) { page = vm_normal_page(vma, addr, *pte); } else if (is_swap_pte(*pte)) { swp_entry_t swpent = pte_to_swp_entry(*pte); if (is_migration_entry(swpent)) page = migration_entry_to_page(swpent); else if (is_device_private_entry(swpent)) page = 
device_private_entry_to_page(swpent); } if (page) { int mapcount = page_mapcount(page); if (mapcount >= 2) mss->shared_hugetlb += huge_page_size(hstate_vma(vma)); else mss->private_hugetlb += huge_page_size(hstate_vma(vma)); } return 0; } #endif /* HUGETLB_PAGE */ static void smap_gather_stats(struct vm_area_struct *vma, struct mem_size_stats *mss) { struct mm_walk smaps_walk = { .pmd_entry = smaps_pte_range, #ifdef CONFIG_HUGETLB_PAGE .hugetlb_entry = smaps_hugetlb_range, #endif .mm = vma->vm_mm, }; smaps_walk.private = mss; #ifdef CONFIG_SHMEM /* In case of smaps_rollup, reset the value from previous vma */ mss->check_shmem_swap = false; if (vma->vm_file && shmem_mapping(vma->vm_file->f_mapping)) { /* * For shared or readonly shmem mappings we know that all * swapped out pages belong to the shmem object, and we can * obtain the swap value much more efficiently. For private * writable mappings, we might have COW pages that are * not affected by the parent swapped out pages of the shmem * object, so we have to distinguish them during the page walk. * Unless we know that the shmem object (or the part mapped by * our VMA) has no swapped out pages at all. */ unsigned long shmem_swapped = shmem_swap_usage(vma); if (!shmem_swapped || (vma->vm_flags & VM_SHARED) || !(vma->vm_flags & VM_WRITE)) { mss->swap += shmem_swapped; } else { mss->check_shmem_swap = true; smaps_walk.pte_hole = smaps_pte_hole; } } #endif /* mmap_sem is held in m_start */ walk_page_vma(vma, &smaps_walk); } #define SEQ_PUT_DEC(str, val) \ seq_put_decimal_ull_width(m, str, (val) >> 10, 8) /* Show the contents common for smaps and smaps_rollup */ static void __show_smap(struct seq_file *m, const struct mem_size_stats *mss) { SEQ_PUT_DEC("Rss: ", mss->resident); SEQ_PUT_DEC(" kB\nPss: ", mss->pss >> PSS_SHIFT); SEQ_PUT_DEC(" kB\nShared_Clean: ", mss->shared_clean); SEQ_PUT_DEC(" kB\nShared_Dirty: ", mss->shared_dirty); SEQ_PUT_DEC(" kB\nPrivate_Clean: ", mss->private_clean); SEQ_PUT_DEC(" kB\nPrivate_Dirty: ", mss->private_dirty); SEQ_PUT_DEC(" kB\nReferenced: ", mss->referenced); SEQ_PUT_DEC(" kB\nAnonymous: ", mss->anonymous); SEQ_PUT_DEC(" kB\nLazyFree: ", mss->lazyfree); SEQ_PUT_DEC(" kB\nAnonHugePages: ", mss->anonymous_thp); SEQ_PUT_DEC(" kB\nShmemPmdMapped: ", mss->shmem_thp); SEQ_PUT_DEC(" kB\nShared_Hugetlb: ", mss->shared_hugetlb); seq_put_decimal_ull_width(m, " kB\nPrivate_Hugetlb: ", mss->private_hugetlb >> 10, 7); SEQ_PUT_DEC(" kB\nSwap: ", mss->swap); SEQ_PUT_DEC(" kB\nSwapPss: ", mss->swap_pss >> PSS_SHIFT); SEQ_PUT_DEC(" kB\nLocked: ", mss->pss_locked >> PSS_SHIFT); seq_puts(m, " kB\n"); } static int show_smap(struct seq_file *m, void *v) { struct vm_area_struct *vma = v; struct mem_size_stats mss; memset(&mss, 0, sizeof(mss)); smap_gather_stats(vma, &mss); show_map_vma(m, vma); SEQ_PUT_DEC("Size: ", vma->vm_end - vma->vm_start); SEQ_PUT_DEC(" kB\nKernelPageSize: ", vma_kernel_pagesize(vma)); SEQ_PUT_DEC(" kB\nMMUPageSize: ", vma_mmu_pagesize(vma)); seq_puts(m, " kB\n"); __show_smap(m, &mss); seq_printf(m, "THPeligible: %d\n", transparent_hugepage_enabled(vma)); if (arch_pkeys_enabled()) seq_printf(m, "ProtectionKey: %8u\n", vma_pkey(vma)); show_smap_vma_flags(m, vma); m_cache_vma(m, vma); return 0; } static int show_smaps_rollup(struct seq_file *m, void *v) { struct proc_maps_private *priv = m->private; struct mem_size_stats mss; struct mm_struct *mm; struct vm_area_struct *vma; unsigned long last_vma_end = 0; int ret = 0; priv->task = get_proc_task(priv->inode); if (!priv->task) return -ESRCH; mm = priv->mm; if 
(!mm || !mmget_not_zero(mm)) { ret = -ESRCH; goto out_put_task; } memset(&mss, 0, sizeof(mss)); down_read(&mm->mmap_sem); hold_task_mempolicy(priv); for (vma = priv->mm->mmap; vma; vma = vma->vm_next) { smap_gather_stats(vma, &mss); last_vma_end = vma->vm_end; } show_vma_header_prefix(m, priv->mm->mmap->vm_start, last_vma_end, 0, 0, 0, 0); seq_pad(m, ' '); seq_puts(m, "[rollup]\n"); __show_smap(m, &mss); release_task_mempolicy(priv); up_read(&mm->mmap_sem); mmput(mm); out_put_task: put_task_struct(priv->task); priv->task = NULL; return ret; } #undef SEQ_PUT_DEC static const struct seq_operations proc_pid_smaps_op = { .start = m_start, .next = m_next, .stop = m_stop, .show = show_smap }; static int pid_smaps_open(struct inode *inode, struct file *file) { return do_maps_open(inode, file, &proc_pid_smaps_op); } static int smaps_rollup_open(struct inode *inode, struct file *file) { int ret; struct proc_maps_private *priv; priv = kzalloc(sizeof(*priv), GFP_KERNEL_ACCOUNT); if (!priv) return -ENOMEM; ret = single_open(file, show_smaps_rollup, priv); if (ret) goto out_free; priv->inode = inode; priv->mm = proc_mem_open(inode, PTRACE_MODE_READ); if (IS_ERR(priv->mm)) { ret = PTR_ERR(priv->mm); single_release(inode, file); goto out_free; } return 0; out_free: kfree(priv); return ret; } static int smaps_rollup_release(struct inode *inode, struct file *file) { struct seq_file *seq = file->private_data; struct proc_maps_private *priv = seq->private; if (priv->mm) mmdrop(priv->mm); kfree(priv); return single_release(inode, file); } const struct file_operations proc_pid_smaps_operations = { .open = pid_smaps_open, .read = seq_read, .llseek = seq_lseek, .release = proc_map_release, }; const struct file_operations proc_pid_smaps_rollup_operations = { .open = smaps_rollup_open, .read = seq_read, .llseek = seq_lseek, .release = smaps_rollup_release, }; enum clear_refs_types { CLEAR_REFS_ALL = 1, CLEAR_REFS_ANON, CLEAR_REFS_MAPPED, CLEAR_REFS_SOFT_DIRTY, CLEAR_REFS_MM_HIWATER_RSS, CLEAR_REFS_LAST, }; struct clear_refs_private { enum clear_refs_types type; }; #ifdef CONFIG_MEM_SOFT_DIRTY static inline void clear_soft_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *pte) { /* * The soft-dirty tracker uses #PF-s to catch writes * to pages, so write-protect the pte as well. See the * Documentation/admin-guide/mm/soft-dirty.rst for full description * of how soft-dirty works. 
*/ pte_t ptent = *pte; if (pte_present(ptent)) { pte_t old_pte; old_pte = ptep_modify_prot_start(vma, addr, pte); ptent = pte_wrprotect(old_pte); ptent = pte_clear_soft_dirty(ptent); ptep_modify_prot_commit(vma, addr, pte, old_pte, ptent); } else if (is_swap_pte(ptent)) { ptent = pte_swp_clear_soft_dirty(ptent); set_pte_at(vma->vm_mm, addr, pte, ptent); } } #else static inline void clear_soft_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *pte) { } #endif #if defined(CONFIG_MEM_SOFT_DIRTY) && defined(CONFIG_TRANSPARENT_HUGEPAGE) static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmdp) { pmd_t old, pmd = *pmdp; if (pmd_present(pmd)) { /* See comment in change_huge_pmd() */ old = pmdp_invalidate(vma, addr, pmdp); if (pmd_dirty(old)) pmd = pmd_mkdirty(pmd); if (pmd_young(old)) pmd = pmd_mkyoung(pmd); pmd = pmd_wrprotect(pmd); pmd = pmd_clear_soft_dirty(pmd); set_pmd_at(vma->vm_mm, addr, pmdp, pmd); } else if (is_migration_entry(pmd_to_swp_entry(pmd))) { pmd = pmd_swp_clear_soft_dirty(pmd); set_pmd_at(vma->vm_mm, addr, pmdp, pmd); } } #else static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmdp) { } #endif static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, struct mm_walk *walk) { struct clear_refs_private *cp = walk->private; struct vm_area_struct *vma = walk->vma; pte_t *pte, ptent; spinlock_t *ptl; struct page *page; ptl = pmd_trans_huge_lock(pmd, vma); if (ptl) { if (cp->type == CLEAR_REFS_SOFT_DIRTY) { clear_soft_dirty_pmd(vma, addr, pmd); goto out; } if (!pmd_present(*pmd)) goto out; page = pmd_page(*pmd); /* Clear accessed and referenced bits. */ pmdp_test_and_clear_young(vma, addr, pmd); test_and_clear_page_young(page); ClearPageReferenced(page); out: spin_unlock(ptl); return 0; } if (pmd_trans_unstable(pmd)) return 0; pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); for (; addr != end; pte++, addr += PAGE_SIZE) { ptent = *pte; if (cp->type == CLEAR_REFS_SOFT_DIRTY) { clear_soft_dirty(vma, addr, pte); continue; } if (!pte_present(ptent)) continue; page = vm_normal_page(vma, addr, ptent); if (!page) continue; /* Clear accessed and referenced bits. */ ptep_test_and_clear_young(vma, addr, pte); test_and_clear_page_young(page); ClearPageReferenced(page); } pte_unmap_unlock(pte - 1, ptl); cond_resched(); return 0; } static int clear_refs_test_walk(unsigned long start, unsigned long end, struct mm_walk *walk) { struct clear_refs_private *cp = walk->private; struct vm_area_struct *vma = walk->vma; if (vma->vm_flags & VM_PFNMAP) return 1; /* * Writing 1 to /proc/pid/clear_refs affects all pages. * Writing 2 to /proc/pid/clear_refs only affects anonymous pages. * Writing 3 to /proc/pid/clear_refs only affects file mapped pages. * Writing 4 to /proc/pid/clear_refs affects all pages. 
*/ if (cp->type == CLEAR_REFS_ANON && vma->vm_file) return 1; if (cp->type == CLEAR_REFS_MAPPED && !vma->vm_file) return 1; return 0; } static ssize_t clear_refs_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { struct task_struct *task; char buffer[PROC_NUMBUF]; struct mm_struct *mm; struct vm_area_struct *vma; enum clear_refs_types type; struct mmu_gather tlb; int itype; int rv; memset(buffer, 0, sizeof(buffer)); if (count > sizeof(buffer) - 1) count = sizeof(buffer) - 1; if (copy_from_user(buffer, buf, count)) return -EFAULT; rv = kstrtoint(strstrip(buffer), 10, &itype); if (rv < 0) return rv; type = (enum clear_refs_types)itype; if (type < CLEAR_REFS_ALL || type >= CLEAR_REFS_LAST) return -EINVAL; task = get_proc_task(file_inode(file)); if (!task) return -ESRCH; mm = get_task_mm(task); if (mm) { struct mmu_notifier_range range; struct clear_refs_private cp = { .type = type, }; struct mm_walk clear_refs_walk = { .pmd_entry = clear_refs_pte_range, .test_walk = clear_refs_test_walk, .mm = mm, .private = &cp, }; if (type == CLEAR_REFS_MM_HIWATER_RSS) { if (down_write_killable(&mm->mmap_sem)) { count = -EINTR; goto out_mm; } /* * Writing 5 to /proc/pid/clear_refs resets the peak * resident set size to this mm's current rss value. */ reset_mm_hiwater_rss(mm); up_write(&mm->mmap_sem); goto out_mm; } down_read(&mm->mmap_sem); tlb_gather_mmu(&tlb, mm, 0, -1); if (type == CLEAR_REFS_SOFT_DIRTY) { for (vma = mm->mmap; vma; vma = vma->vm_next) { if (!(vma->vm_flags & VM_SOFTDIRTY)) continue; up_read(&mm->mmap_sem); if (down_write_killable(&mm->mmap_sem)) { count = -EINTR; goto out_mm; } /* * Avoid to modify vma->vm_flags * without locked ops while the * coredump reads the vm_flags. */ if (!mmget_still_valid(mm)) { /* * Silently return "count" * like if get_task_mm() * failed. FIXME: should this * function have returned * -ESRCH if get_task_mm() * failed like if * get_proc_task() fails? 
*/ up_write(&mm->mmap_sem); goto out_mm; } for (vma = mm->mmap; vma; vma = vma->vm_next) { vma->vm_flags &= ~VM_SOFTDIRTY; vma_set_page_prot(vma); } downgrade_write(&mm->mmap_sem); break; } mmu_notifier_range_init(&range, mm, 0, -1UL); mmu_notifier_invalidate_range_start(&range); } walk_page_range(0, mm->highest_vm_end, &clear_refs_walk); if (type == CLEAR_REFS_SOFT_DIRTY) mmu_notifier_invalidate_range_end(&range); tlb_finish_mmu(&tlb, 0, -1); up_read(&mm->mmap_sem); out_mm: mmput(mm); } put_task_struct(task); return count; } const struct file_operations proc_clear_refs_operations = { .write = clear_refs_write, .llseek = noop_llseek, }; typedef struct { u64 pme; } pagemap_entry_t; struct pagemapread { int pos, len; /* units: PM_ENTRY_BYTES, not bytes */ pagemap_entry_t *buffer; bool show_pfn; }; #define PAGEMAP_WALK_SIZE (PMD_SIZE) #define PAGEMAP_WALK_MASK (PMD_MASK) #define PM_ENTRY_BYTES sizeof(pagemap_entry_t) #define PM_PFRAME_BITS 55 #define PM_PFRAME_MASK GENMASK_ULL(PM_PFRAME_BITS - 1, 0) #define PM_SOFT_DIRTY BIT_ULL(55) #define PM_MMAP_EXCLUSIVE BIT_ULL(56) #define PM_FILE BIT_ULL(61) #define PM_SWAP BIT_ULL(62) #define PM_PRESENT BIT_ULL(63) #define PM_END_OF_BUFFER 1 static inline pagemap_entry_t make_pme(u64 frame, u64 flags) { return (pagemap_entry_t) { .pme = (frame & PM_PFRAME_MASK) | flags }; } static int add_to_pagemap(unsigned long addr, pagemap_entry_t *pme, struct pagemapread *pm) { pm->buffer[pm->pos++] = *pme; if (pm->pos >= pm->len) return PM_END_OF_BUFFER; return 0; } static int pagemap_pte_hole(unsigned long start, unsigned long end, struct mm_walk *walk) { struct pagemapread *pm = walk->private; unsigned long addr = start; int err = 0; while (addr < end) { struct vm_area_struct *vma = find_vma(walk->mm, addr); pagemap_entry_t pme = make_pme(0, 0); /* End of address space hole, which we mark as non-present. */ unsigned long hole_end; if (vma) hole_end = min(end, vma->vm_start); else hole_end = end; for (; addr < hole_end; addr += PAGE_SIZE) { err = add_to_pagemap(addr, &pme, pm); if (err) goto out; } if (!vma) break; /* Addresses in the VMA. 
*/ if (vma->vm_flags & VM_SOFTDIRTY) pme = make_pme(0, PM_SOFT_DIRTY); for (; addr < min(end, vma->vm_end); addr += PAGE_SIZE) { err = add_to_pagemap(addr, &pme, pm); if (err) goto out; } } out: return err; } static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm, struct vm_area_struct *vma, unsigned long addr, pte_t pte) { u64 frame = 0, flags = 0; struct page *page = NULL; if (pte_present(pte)) { if (pm->show_pfn) frame = pte_pfn(pte); flags |= PM_PRESENT; page = _vm_normal_page(vma, addr, pte, true); if (pte_soft_dirty(pte)) flags |= PM_SOFT_DIRTY; } else if (is_swap_pte(pte)) { swp_entry_t entry; if (pte_swp_soft_dirty(pte)) flags |= PM_SOFT_DIRTY; entry = pte_to_swp_entry(pte); if (pm->show_pfn) frame = swp_type(entry) | (swp_offset(entry) << MAX_SWAPFILES_SHIFT); flags |= PM_SWAP; if (is_migration_entry(entry)) page = migration_entry_to_page(entry); if (is_device_private_entry(entry)) page = device_private_entry_to_page(entry); } if (page && !PageAnon(page)) flags |= PM_FILE; if (page && page_mapcount(page) == 1) flags |= PM_MMAP_EXCLUSIVE; if (vma->vm_flags & VM_SOFTDIRTY) flags |= PM_SOFT_DIRTY; return make_pme(frame, flags); } static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end, struct mm_walk *walk) { struct vm_area_struct *vma = walk->vma; struct pagemapread *pm = walk->private; spinlock_t *ptl; pte_t *pte, *orig_pte; int err = 0; #ifdef CONFIG_TRANSPARENT_HUGEPAGE ptl = pmd_trans_huge_lock(pmdp, vma); if (ptl) { u64 flags = 0, frame = 0; pmd_t pmd = *pmdp; struct page *page = NULL; if (vma->vm_flags & VM_SOFTDIRTY) flags |= PM_SOFT_DIRTY; if (pmd_present(pmd)) { page = pmd_page(pmd); flags |= PM_PRESENT; if (pmd_soft_dirty(pmd)) flags |= PM_SOFT_DIRTY; if (pm->show_pfn) frame = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT); } #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION else if (is_swap_pmd(pmd)) { swp_entry_t entry = pmd_to_swp_entry(pmd); unsigned long offset; if (pm->show_pfn) { offset = swp_offset(entry) + ((addr & ~PMD_MASK) >> PAGE_SHIFT); frame = swp_type(entry) | (offset << MAX_SWAPFILES_SHIFT); } flags |= PM_SWAP; if (pmd_swp_soft_dirty(pmd)) flags |= PM_SOFT_DIRTY; VM_BUG_ON(!is_pmd_migration_entry(pmd)); page = migration_entry_to_page(entry); } #endif if (page && page_mapcount(page) == 1) flags |= PM_MMAP_EXCLUSIVE; for (; addr != end; addr += PAGE_SIZE) { pagemap_entry_t pme = make_pme(frame, flags); err = add_to_pagemap(addr, &pme, pm); if (err) break; if (pm->show_pfn) { if (flags & PM_PRESENT) frame++; else if (flags & PM_SWAP) frame += (1 << MAX_SWAPFILES_SHIFT); } } spin_unlock(ptl); return err; } if (pmd_trans_unstable(pmdp)) return 0; #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ /* * We can assume that @vma always points to a valid one and @end never * goes beyond vma->vm_end. 
*/ orig_pte = pte = pte_offset_map_lock(walk->mm, pmdp, addr, &ptl); for (; addr < end; pte++, addr += PAGE_SIZE) { pagemap_entry_t pme; pme = pte_to_pagemap_entry(pm, vma, addr, *pte); err = add_to_pagemap(addr, &pme, pm); if (err) break; } pte_unmap_unlock(orig_pte, ptl); cond_resched(); return err; } #ifdef CONFIG_HUGETLB_PAGE /* This function walks within one hugetlb entry in the single call */ static int pagemap_hugetlb_range(pte_t *ptep, unsigned long hmask, unsigned long addr, unsigned long end, struct mm_walk *walk) { struct pagemapread *pm = walk->private; struct vm_area_struct *vma = walk->vma; u64 flags = 0, frame = 0; int err = 0; pte_t pte; if (vma->vm_flags & VM_SOFTDIRTY) flags |= PM_SOFT_DIRTY; pte = huge_ptep_get(ptep); if (pte_present(pte)) { struct page *page = pte_page(pte); if (!PageAnon(page)) flags |= PM_FILE; if (page_mapcount(page) == 1) flags |= PM_MMAP_EXCLUSIVE; flags |= PM_PRESENT; if (pm->show_pfn) frame = pte_pfn(pte) + ((addr & ~hmask) >> PAGE_SHIFT); } for (; addr != end; addr += PAGE_SIZE) { pagemap_entry_t pme = make_pme(frame, flags); err = add_to_pagemap(addr, &pme, pm); if (err) return err; if (pm->show_pfn && (flags & PM_PRESENT)) frame++; } cond_resched(); return err; } #endif /* HUGETLB_PAGE */ /* * /proc/pid/pagemap - an array mapping virtual pages to pfns * * For each page in the address space, this file contains one 64-bit entry * consisting of the following: * * Bits 0-54 page frame number (PFN) if present * Bits 0-4 swap type if swapped * Bits 5-54 swap offset if swapped * Bit 55 pte is soft-dirty (see Documentation/admin-guide/mm/soft-dirty.rst) * Bit 56 page exclusively mapped * Bits 57-60 zero * Bit 61 page is file-page or shared-anon * Bit 62 page swapped * Bit 63 page present * * If the page is not present but in swap, then the PFN contains an * encoding of the swap file number and the page's offset into the * swap. Unmapped pages return a null PFN. This allows determining * precisely which pages are mapped (or in swap) and comparing mapped * pages between processes. * * Efficient users of this interface will use /proc/pid/maps to * determine which areas of memory are actually mapped and llseek to * skip over unmapped regions. 
*/ static ssize_t pagemap_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { struct mm_struct *mm = file->private_data; struct pagemapread pm; struct mm_walk pagemap_walk = {}; unsigned long src; unsigned long svpfn; unsigned long start_vaddr; unsigned long end_vaddr; int ret = 0, copied = 0; if (!mm || !mmget_not_zero(mm)) goto out; ret = -EINVAL; /* file position must be aligned */ if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES)) goto out_mm; ret = 0; if (!count) goto out_mm; /* do not disclose physical addresses: attack vector */ pm.show_pfn = file_ns_capable(file, &init_user_ns, CAP_SYS_ADMIN); pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT); pm.buffer = kmalloc_array(pm.len, PM_ENTRY_BYTES, GFP_KERNEL); ret = -ENOMEM; if (!pm.buffer) goto out_mm; pagemap_walk.pmd_entry = pagemap_pmd_range; pagemap_walk.pte_hole = pagemap_pte_hole; #ifdef CONFIG_HUGETLB_PAGE pagemap_walk.hugetlb_entry = pagemap_hugetlb_range; #endif pagemap_walk.mm = mm; pagemap_walk.private = &pm; src = *ppos; svpfn = src / PM_ENTRY_BYTES; start_vaddr = svpfn << PAGE_SHIFT; end_vaddr = mm->task_size; /* watch out for wraparound */ if (svpfn > mm->task_size >> PAGE_SHIFT) start_vaddr = end_vaddr; /* * The odds are that this will stop walking way * before end_vaddr, because the length of the * user buffer is tracked in "pm", and the walk * will stop when we hit the end of the buffer. */ ret = 0; while (count && (start_vaddr < end_vaddr)) { int len; unsigned long end; pm.pos = 0; end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK; /* overflow ? */ if (end < start_vaddr || end > end_vaddr) end = end_vaddr; down_read(&mm->mmap_sem); ret = walk_page_range(start_vaddr, end, &pagemap_walk); up_read(&mm->mmap_sem); start_vaddr = end; len = min(count, PM_ENTRY_BYTES * pm.pos); if (copy_to_user(buf, pm.buffer, len)) { ret = -EFAULT; goto out_free; } copied += len; buf += len; count -= len; } *ppos += copied; if (!ret || ret == PM_END_OF_BUFFER) ret = copied; out_free: kfree(pm.buffer); out_mm: mmput(mm); out: return ret; } static int pagemap_open(struct inode *inode, struct file *file) { struct mm_struct *mm; mm = proc_mem_open(inode, PTRACE_MODE_READ); if (IS_ERR(mm)) return PTR_ERR(mm); file->private_data = mm; return 0; } static int pagemap_release(struct inode *inode, struct file *file) { struct mm_struct *mm = file->private_data; if (mm) mmdrop(mm); return 0; } const struct file_operations proc_pagemap_operations = { .llseek = mem_lseek, /* borrow this */ .read = pagemap_read, .open = pagemap_open, .release = pagemap_release, }; #endif /* CONFIG_PROC_PAGE_MONITOR */ #ifdef CONFIG_NUMA struct numa_maps { unsigned long pages; unsigned long anon; unsigned long active; unsigned long writeback; unsigned long mapcount_max; unsigned long dirty; unsigned long swapcache; unsigned long node[MAX_NUMNODES]; }; struct numa_maps_private { struct proc_maps_private proc_maps; struct numa_maps md; }; static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty, unsigned long nr_pages) { int count = page_mapcount(page); md->pages += nr_pages; if (pte_dirty || PageDirty(page)) md->dirty += nr_pages; if (PageSwapCache(page)) md->swapcache += nr_pages; if (PageActive(page) || PageUnevictable(page)) md->active += nr_pages; if (PageWriteback(page)) md->writeback += nr_pages; if (PageAnon(page)) md->anon += nr_pages; if (count > md->mapcount_max) md->mapcount_max = count; md->node[page_to_nid(page)] += nr_pages; } static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma, 
unsigned long addr) { struct page *page; int nid; if (!pte_present(pte)) return NULL; page = vm_normal_page(vma, addr, pte); if (!page) return NULL; if (PageReserved(page)) return NULL; nid = page_to_nid(page); if (!node_isset(nid, node_states[N_MEMORY])) return NULL; return page; } #ifdef CONFIG_TRANSPARENT_HUGEPAGE static struct page *can_gather_numa_stats_pmd(pmd_t pmd, struct vm_area_struct *vma, unsigned long addr) { struct page *page; int nid; if (!pmd_present(pmd)) return NULL; page = vm_normal_page_pmd(vma, addr, pmd); if (!page) return NULL; if (PageReserved(page)) return NULL; nid = page_to_nid(page); if (!node_isset(nid, node_states[N_MEMORY])) return NULL; return page; } #endif static int gather_pte_stats(pmd_t *pmd, unsigned long addr, unsigned long end, struct mm_walk *walk) { struct numa_maps *md = walk->private; struct vm_area_struct *vma = walk->vma; spinlock_t *ptl; pte_t *orig_pte; pte_t *pte; #ifdef CONFIG_TRANSPARENT_HUGEPAGE ptl = pmd_trans_huge_lock(pmd, vma); if (ptl) { struct page *page; page = can_gather_numa_stats_pmd(*pmd, vma, addr); if (page) gather_stats(page, md, pmd_dirty(*pmd), HPAGE_PMD_SIZE/PAGE_SIZE); spin_unlock(ptl); return 0; } if (pmd_trans_unstable(pmd)) return 0; #endif orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); do { struct page *page = can_gather_numa_stats(*pte, vma, addr); if (!page) continue; gather_stats(page, md, pte_dirty(*pte), 1); } while (pte++, addr += PAGE_SIZE, addr != end); pte_unmap_unlock(orig_pte, ptl); cond_resched(); return 0; } #ifdef CONFIG_HUGETLB_PAGE static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask, unsigned long addr, unsigned long end, struct mm_walk *walk) { pte_t huge_pte = huge_ptep_get(pte); struct numa_maps *md; struct page *page; if (!pte_present(huge_pte)) return 0; page = pte_page(huge_pte); if (!page) return 0; md = walk->private; gather_stats(page, md, pte_dirty(huge_pte), 1); return 0; } #else static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask, unsigned long addr, unsigned long end, struct mm_walk *walk) { return 0; } #endif /* * Display pages allocated per node and memory policy via /proc. */ static int show_numa_map(struct seq_file *m, void *v) { struct numa_maps_private *numa_priv = m->private; struct proc_maps_private *proc_priv = &numa_priv->proc_maps; struct vm_area_struct *vma = v; struct numa_maps *md = &numa_priv->md; struct file *file = vma->vm_file; struct mm_struct *mm = vma->vm_mm; struct mm_walk walk = { .hugetlb_entry = gather_hugetlb_stats, .pmd_entry = gather_pte_stats, .private = md, .mm = mm, }; struct mempolicy *pol; char buffer[64]; int nid; if (!mm) return 0; /* Ensure we start with an empty set of numa_maps statistics. 
*/ memset(md, 0, sizeof(*md)); pol = __get_vma_policy(vma, vma->vm_start); if (pol) { mpol_to_str(buffer, sizeof(buffer), pol); mpol_cond_put(pol); } else { mpol_to_str(buffer, sizeof(buffer), proc_priv->task_mempolicy); } seq_printf(m, "%08lx %s", vma->vm_start, buffer); if (file) { seq_puts(m, " file="); seq_file_path(m, file, "\n\t= "); } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) { seq_puts(m, " heap"); } else if (is_stack(vma)) { seq_puts(m, " stack"); } if (is_vm_hugetlb_page(vma)) seq_puts(m, " huge"); /* mmap_sem is held by m_start */ walk_page_vma(vma, &walk); if (!md->pages) goto out; if (md->anon) seq_printf(m, " anon=%lu", md->anon); if (md->dirty) seq_printf(m, " dirty=%lu", md->dirty); if (md->pages != md->anon && md->pages != md->dirty) seq_printf(m, " mapped=%lu", md->pages); if (md->mapcount_max > 1) seq_printf(m, " mapmax=%lu", md->mapcount_max); if (md->swapcache) seq_printf(m, " swapcache=%lu", md->swapcache); if (md->active < md->pages && !is_vm_hugetlb_page(vma)) seq_printf(m, " active=%lu", md->active); if (md->writeback) seq_printf(m, " writeback=%lu", md->writeback); for_each_node_state(nid, N_MEMORY) if (md->node[nid]) seq_printf(m, " N%d=%lu", nid, md->node[nid]); seq_printf(m, " kernelpagesize_kB=%lu", vma_kernel_pagesize(vma) >> 10); out: seq_putc(m, '\n'); m_cache_vma(m, vma); return 0; } static const struct seq_operations proc_pid_numa_maps_op = { .start = m_start, .next = m_next, .stop = m_stop, .show = show_numa_map, }; static int pid_numa_maps_open(struct inode *inode, struct file *file) { return proc_maps_open(inode, file, &proc_pid_numa_maps_op, sizeof(struct numa_maps_private)); } const struct file_operations proc_pid_numa_maps_operations = { .open = pid_numa_maps_open, .read = seq_read, .llseek = seq_lseek, .release = proc_map_release, }; #endif /* CONFIG_NUMA */
./CrossVul/dataset_final_sorted/CWE-362/c/good_829_1
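The pagemap entry layout documented in the comment above pagemap_read() can be exercised from user space. The sketch below is an editorial illustration, not part of the kernel file above: it faults in one heap page and decodes the matching 64-bit entry from /proc/self/pagemap. Per the pm.show_pfn check in pagemap_read(), the PFN field reads back as zero unless the caller has CAP_SYS_ADMIN.

/* Minimal sketch: decode one /proc/self/pagemap entry (error handling kept short). */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	long psize = sysconf(_SC_PAGESIZE);
	uint64_t entry;
	char *buf = malloc(psize);
	int fd;

	if (!buf)
		return 1;
	buf[0] = 1;				/* fault the page in */

	fd = open("/proc/self/pagemap", O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* one 64-bit entry per virtual page */
	off_t off = ((uintptr_t)buf / psize) * sizeof(entry);
	if (pread(fd, &entry, sizeof(entry), off) != (ssize_t)sizeof(entry)) {
		perror("pread");
		return 1;
	}

	printf("present:     %d\n", (int)((entry >> 63) & 1));	/* PM_PRESENT */
	printf("swapped:     %d\n", (int)((entry >> 62) & 1));	/* PM_SWAP */
	printf("file/shared: %d\n", (int)((entry >> 61) & 1));	/* PM_FILE */
	printf("exclusive:   %d\n", (int)((entry >> 56) & 1));	/* PM_MMAP_EXCLUSIVE */
	printf("soft-dirty:  %d\n", (int)((entry >> 55) & 1));	/* PM_SOFT_DIRTY */
	printf("pfn:         0x%llx\n",
	       (unsigned long long)(entry & ((1ULL << 55) - 1)));	/* PM_PFRAME_MASK */

	close(fd);
	free(buf);
	return 0;
}

Built with a plain cc invocation, this reports present=1 for the touched page; the pfn field is non-zero only when the program runs with CAP_SYS_ADMIN.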
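The write values accepted by clear_refs_write() above (1 = all pages, 2 = anonymous, 3 = file-mapped, 4 = soft-dirty, 5 = reset peak RSS) can likewise be demonstrated from user space. The sketch below is another editorial illustration rather than part of the kernel sources; it assumes CONFIG_MEM_SOFT_DIRTY and a 4 KiB page size, clears the soft-dirty state, dirties one page, and reads bit 55 of the corresponding pagemap entry back.

/* Minimal sketch of the soft-dirty workflow behind CLEAR_REFS_SOFT_DIRTY. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* read the PM_SOFT_DIRTY bit (bit 55) for one virtual address */
static int soft_dirty(const void *addr)
{
	uint64_t entry = 0;
	int fd = open("/proc/self/pagemap", O_RDONLY);

	if (fd < 0)
		return -1;
	if (pread(fd, &entry, sizeof(entry),
		  ((uintptr_t)addr / sysconf(_SC_PAGESIZE)) * sizeof(entry)) < 0)
		entry = 0;
	close(fd);
	return (int)((entry >> 55) & 1);
}

int main(void)
{
	static char page[4096] __attribute__((aligned(4096)));
	int fd = open("/proc/self/clear_refs", O_WRONLY);

	if (fd < 0) {
		perror("clear_refs");
		return 1;
	}
	if (write(fd, "4", 1) != 1)		/* CLEAR_REFS_SOFT_DIRTY */
		perror("write");
	close(fd);

	printf("before store: soft-dirty=%d\n", soft_dirty(page));
	page[0] = 1;				/* write fault marks the pte soft-dirty */
	printf("after store:  soft-dirty=%d\n", soft_dirty(page));
	return 0;
}

On a kernel built with CONFIG_MEM_SOFT_DIRTY the second line reports soft-dirty=1, which is the state that clear_refs_write() and pte_to_pagemap_entry() cooperate to expose.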
crossvul-cpp_data_bad_5572_0
/* * x86 single-step support code, common to 32-bit and 64-bit. */ #include <linux/sched.h> #include <linux/mm.h> #include <linux/ptrace.h> #include <asm/desc.h> unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs) { unsigned long addr, seg; addr = regs->ip; seg = regs->cs & 0xffff; if (v8086_mode(regs)) { addr = (addr & 0xffff) + (seg << 4); return addr; } /* * We'll assume that the code segments in the GDT * are all zero-based. That is largely true: the * TLS segments are used for data, and the PNPBIOS * and APM bios ones we just ignore here. */ if ((seg & SEGMENT_TI_MASK) == SEGMENT_LDT) { struct desc_struct *desc; unsigned long base; seg &= ~7UL; mutex_lock(&child->mm->context.lock); if (unlikely((seg >> 3) >= child->mm->context.size)) addr = -1L; /* bogus selector, access would fault */ else { desc = child->mm->context.ldt + seg; base = get_desc_base(desc); /* 16-bit code segment? */ if (!desc->d) addr &= 0xffff; addr += base; } mutex_unlock(&child->mm->context.lock); } return addr; } static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs) { int i, copied; unsigned char opcode[15]; unsigned long addr = convert_ip_to_linear(child, regs); copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0); for (i = 0; i < copied; i++) { switch (opcode[i]) { /* popf and iret */ case 0x9d: case 0xcf: return 1; /* CHECKME: 64 65 */ /* opcode and address size prefixes */ case 0x66: case 0x67: continue; /* irrelevant prefixes (segment overrides and repeats) */ case 0x26: case 0x2e: case 0x36: case 0x3e: case 0x64: case 0x65: case 0xf0: case 0xf2: case 0xf3: continue; #ifdef CONFIG_X86_64 case 0x40 ... 0x4f: if (!user_64bit_mode(regs)) /* 32-bit mode: register increment */ return 0; /* 64-bit mode: REX prefix */ continue; #endif /* CHECKME: f2, f3 */ /* * pushf: NOTE! We should probably not let * the user see the TF bit being set. But * it's more pain than it's worth to avoid * it, and a debugger could emulate this * all in user space if it _really_ cares. */ case 0x9c: default: return 0; } } return 0; } /* * Enable single-stepping. Return nonzero if user mode is not using TF itself. */ static int enable_single_step(struct task_struct *child) { struct pt_regs *regs = task_pt_regs(child); unsigned long oflags; /* * If we stepped into a sysenter/syscall insn, it trapped in * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP. * If user-mode had set TF itself, then it's still clear from * do_debug() and we need to set it again to restore the user * state so we don't wrongly set TIF_FORCED_TF below. * If enable_single_step() was used last and that is what * set TIF_SINGLESTEP, then both TF and TIF_FORCED_TF are * already set and our bookkeeping is fine. */ if (unlikely(test_tsk_thread_flag(child, TIF_SINGLESTEP))) regs->flags |= X86_EFLAGS_TF; /* * Always set TIF_SINGLESTEP - this guarantees that * we single-step system calls etc.. This will also * cause us to set TF when returning to user mode. */ set_tsk_thread_flag(child, TIF_SINGLESTEP); oflags = regs->flags; /* Set TF on the kernel stack.. */ regs->flags |= X86_EFLAGS_TF; /* * ..but if TF is changed by the instruction we will trace, * don't mark it as being "us" that set it, so that we * won't clear it by hand later. * * Note that if we don't actually execute the popf because * of a signal arriving right now or suchlike, we will lose * track of the fact that it really was "us" that set it. 
*/ if (is_setting_trap_flag(child, regs)) { clear_tsk_thread_flag(child, TIF_FORCED_TF); return 0; } /* * If TF was already set, check whether it was us who set it. * If not, we should never attempt a block step. */ if (oflags & X86_EFLAGS_TF) return test_tsk_thread_flag(child, TIF_FORCED_TF); set_tsk_thread_flag(child, TIF_FORCED_TF); return 1; } void set_task_blockstep(struct task_struct *task, bool on) { unsigned long debugctl; /* * Ensure irq/preemption can't change debugctl in between. * Note also that both TIF_BLOCKSTEP and debugctl should * be changed atomically wrt preemption. * FIXME: this means that set/clear TIF_BLOCKSTEP is simply * wrong if task != current, SIGKILL can wakeup the stopped * tracee and set/clear can play with the running task, this * can confuse the next __switch_to_xtra(). */ local_irq_disable(); debugctl = get_debugctlmsr(); if (on) { debugctl |= DEBUGCTLMSR_BTF; set_tsk_thread_flag(task, TIF_BLOCKSTEP); } else { debugctl &= ~DEBUGCTLMSR_BTF; clear_tsk_thread_flag(task, TIF_BLOCKSTEP); } if (task == current) update_debugctlmsr(debugctl); local_irq_enable(); } /* * Enable single or block step. */ static void enable_step(struct task_struct *child, bool block) { /* * Make sure block stepping (BTF) is not enabled unless it should be. * Note that we don't try to worry about any is_setting_trap_flag() * instructions after the first when using block stepping. * So no one should try to use debugger block stepping in a program * that uses user-mode single stepping itself. */ if (enable_single_step(child) && block) set_task_blockstep(child, true); else if (test_tsk_thread_flag(child, TIF_BLOCKSTEP)) set_task_blockstep(child, false); } void user_enable_single_step(struct task_struct *child) { enable_step(child, 0); } void user_enable_block_step(struct task_struct *child) { enable_step(child, 1); } void user_disable_single_step(struct task_struct *child) { /* * Make sure block stepping (BTF) is disabled. */ if (test_tsk_thread_flag(child, TIF_BLOCKSTEP)) set_task_blockstep(child, false); /* Always clear TIF_SINGLESTEP... */ clear_tsk_thread_flag(child, TIF_SINGLESTEP); /* But touch TF only if it was set by us.. */ if (test_and_clear_tsk_thread_flag(child, TIF_FORCED_TF)) task_pt_regs(child)->flags &= ~X86_EFLAGS_TF; }
./CrossVul/dataset_final_sorted/CWE-362/c/bad_5572_0
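The enable_step()/user_enable_single_step() machinery above is what services PTRACE_SINGLESTEP requests from a tracer. The sketch below is an editorial illustration rather than part of the file above; it is x86-64 specific (struct user_regs_struct and its rip field) and keeps error handling minimal: a parent single-steps its child a few instructions and prints the instruction pointer after each trap.

/* Minimal sketch: single-step a traced child with PTRACE_SINGLESTEP (x86-64). */
#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/user.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t pid = fork();

	if (pid == 0) {
		/* child: ask to be traced, then stop so the parent can take over */
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGSTOP);
		for (volatile int i = 0; i < 1000; i++)
			;
		_exit(0);
	}

	waitpid(pid, NULL, 0);			/* child is now stopped by SIGSTOP */

	for (int step = 0; step < 5; step++) {
		struct user_regs_struct regs;

		/* resume the child for exactly one instruction; the kernel side
		 * goes through user_enable_single_step() -> enable_step() above */
		if (ptrace(PTRACE_SINGLESTEP, pid, NULL, NULL) < 0) {
			perror("PTRACE_SINGLESTEP");
			break;
		}
		waitpid(pid, NULL, 0);		/* SIGTRAP after the instruction retires */
		ptrace(PTRACE_GETREGS, pid, NULL, &regs);
		printf("step %d: rip=%#llx\n", step, (unsigned long long)regs.rip);
	}

	kill(pid, SIGKILL);
	waitpid(pid, NULL, 0);
	return 0;
}

Block stepping, handled above by user_enable_block_step() via the BTF bit in DEBUGCTLMSR, is requested the same way with PTRACE_SINGLEBLOCK.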
crossvul-cpp_data_bad_2215_1
/* * Routines for driver control interface * Copyright (c) by Jaroslav Kysela <perex@perex.cz> * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/threads.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/time.h> #include <sound/core.h> #include <sound/minors.h> #include <sound/info.h> #include <sound/control.h> /* max number of user-defined controls */ #define MAX_USER_CONTROLS 32 #define MAX_CONTROL_COUNT 1028 struct snd_kctl_ioctl { struct list_head list; /* list of all ioctls */ snd_kctl_ioctl_func_t fioctl; }; static DECLARE_RWSEM(snd_ioctl_rwsem); static LIST_HEAD(snd_control_ioctls); #ifdef CONFIG_COMPAT static LIST_HEAD(snd_control_compat_ioctls); #endif static int snd_ctl_open(struct inode *inode, struct file *file) { unsigned long flags; struct snd_card *card; struct snd_ctl_file *ctl; int err; err = nonseekable_open(inode, file); if (err < 0) return err; card = snd_lookup_minor_data(iminor(inode), SNDRV_DEVICE_TYPE_CONTROL); if (!card) { err = -ENODEV; goto __error1; } err = snd_card_file_add(card, file); if (err < 0) { err = -ENODEV; goto __error1; } if (!try_module_get(card->module)) { err = -EFAULT; goto __error2; } ctl = kzalloc(sizeof(*ctl), GFP_KERNEL); if (ctl == NULL) { err = -ENOMEM; goto __error; } INIT_LIST_HEAD(&ctl->events); init_waitqueue_head(&ctl->change_sleep); spin_lock_init(&ctl->read_lock); ctl->card = card; ctl->prefer_pcm_subdevice = -1; ctl->prefer_rawmidi_subdevice = -1; ctl->pid = get_pid(task_pid(current)); file->private_data = ctl; write_lock_irqsave(&card->ctl_files_rwlock, flags); list_add_tail(&ctl->list, &card->ctl_files); write_unlock_irqrestore(&card->ctl_files_rwlock, flags); snd_card_unref(card); return 0; __error: module_put(card->module); __error2: snd_card_file_remove(card, file); __error1: if (card) snd_card_unref(card); return err; } static void snd_ctl_empty_read_queue(struct snd_ctl_file * ctl) { unsigned long flags; struct snd_kctl_event *cread; spin_lock_irqsave(&ctl->read_lock, flags); while (!list_empty(&ctl->events)) { cread = snd_kctl_event(ctl->events.next); list_del(&cread->list); kfree(cread); } spin_unlock_irqrestore(&ctl->read_lock, flags); } static int snd_ctl_release(struct inode *inode, struct file *file) { unsigned long flags; struct snd_card *card; struct snd_ctl_file *ctl; struct snd_kcontrol *control; unsigned int idx; ctl = file->private_data; file->private_data = NULL; card = ctl->card; write_lock_irqsave(&card->ctl_files_rwlock, flags); list_del(&ctl->list); write_unlock_irqrestore(&card->ctl_files_rwlock, flags); down_write(&card->controls_rwsem); list_for_each_entry(control, &card->controls, list) for (idx = 0; idx < control->count; idx++) if (control->vd[idx].owner == ctl) control->vd[idx].owner = NULL; up_write(&card->controls_rwsem); snd_ctl_empty_read_queue(ctl); 
put_pid(ctl->pid); kfree(ctl); module_put(card->module); snd_card_file_remove(card, file); return 0; } void snd_ctl_notify(struct snd_card *card, unsigned int mask, struct snd_ctl_elem_id *id) { unsigned long flags; struct snd_ctl_file *ctl; struct snd_kctl_event *ev; if (snd_BUG_ON(!card || !id)) return; read_lock(&card->ctl_files_rwlock); #if IS_ENABLED(CONFIG_SND_MIXER_OSS) card->mixer_oss_change_count++; #endif list_for_each_entry(ctl, &card->ctl_files, list) { if (!ctl->subscribed) continue; spin_lock_irqsave(&ctl->read_lock, flags); list_for_each_entry(ev, &ctl->events, list) { if (ev->id.numid == id->numid) { ev->mask |= mask; goto _found; } } ev = kzalloc(sizeof(*ev), GFP_ATOMIC); if (ev) { ev->id = *id; ev->mask = mask; list_add_tail(&ev->list, &ctl->events); } else { dev_err(card->dev, "No memory available to allocate event\n"); } _found: wake_up(&ctl->change_sleep); spin_unlock_irqrestore(&ctl->read_lock, flags); kill_fasync(&ctl->fasync, SIGIO, POLL_IN); } read_unlock(&card->ctl_files_rwlock); } EXPORT_SYMBOL(snd_ctl_notify); /** * snd_ctl_new - create a control instance from the template * @control: the control template * @access: the default control access * * Allocates a new struct snd_kcontrol instance and copies the given template * to the new instance. It does not copy volatile data (access). * * Return: The pointer of the new instance, or %NULL on failure. */ static struct snd_kcontrol *snd_ctl_new(struct snd_kcontrol *control, unsigned int access) { struct snd_kcontrol *kctl; unsigned int idx; if (snd_BUG_ON(!control || !control->count)) return NULL; if (control->count > MAX_CONTROL_COUNT) return NULL; kctl = kzalloc(sizeof(*kctl) + sizeof(struct snd_kcontrol_volatile) * control->count, GFP_KERNEL); if (kctl == NULL) { pr_err("ALSA: Cannot allocate control instance\n"); return NULL; } *kctl = *control; for (idx = 0; idx < kctl->count; idx++) kctl->vd[idx].access = access; return kctl; } /** * snd_ctl_new1 - create a control instance from the template * @ncontrol: the initialization record * @private_data: the private data to set * * Allocates a new struct snd_kcontrol instance and initialize from the given * template. When the access field of ncontrol is 0, it's assumed as * READWRITE access. When the count field is 0, it's assumes as one. * * Return: The pointer of the newly generated instance, or %NULL on failure. */ struct snd_kcontrol *snd_ctl_new1(const struct snd_kcontrol_new *ncontrol, void *private_data) { struct snd_kcontrol kctl; unsigned int access; if (snd_BUG_ON(!ncontrol || !ncontrol->info)) return NULL; memset(&kctl, 0, sizeof(kctl)); kctl.id.iface = ncontrol->iface; kctl.id.device = ncontrol->device; kctl.id.subdevice = ncontrol->subdevice; if (ncontrol->name) { strlcpy(kctl.id.name, ncontrol->name, sizeof(kctl.id.name)); if (strcmp(ncontrol->name, kctl.id.name) != 0) pr_warn("ALSA: Control name '%s' truncated to '%s'\n", ncontrol->name, kctl.id.name); } kctl.id.index = ncontrol->index; kctl.count = ncontrol->count ? ncontrol->count : 1; access = ncontrol->access == 0 ? 
SNDRV_CTL_ELEM_ACCESS_READWRITE : (ncontrol->access & (SNDRV_CTL_ELEM_ACCESS_READWRITE| SNDRV_CTL_ELEM_ACCESS_VOLATILE| SNDRV_CTL_ELEM_ACCESS_INACTIVE| SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE| SNDRV_CTL_ELEM_ACCESS_TLV_COMMAND| SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK)); kctl.info = ncontrol->info; kctl.get = ncontrol->get; kctl.put = ncontrol->put; kctl.tlv.p = ncontrol->tlv.p; kctl.private_value = ncontrol->private_value; kctl.private_data = private_data; return snd_ctl_new(&kctl, access); } EXPORT_SYMBOL(snd_ctl_new1); /** * snd_ctl_free_one - release the control instance * @kcontrol: the control instance * * Releases the control instance created via snd_ctl_new() * or snd_ctl_new1(). * Don't call this after the control was added to the card. */ void snd_ctl_free_one(struct snd_kcontrol *kcontrol) { if (kcontrol) { if (kcontrol->private_free) kcontrol->private_free(kcontrol); kfree(kcontrol); } } EXPORT_SYMBOL(snd_ctl_free_one); static bool snd_ctl_remove_numid_conflict(struct snd_card *card, unsigned int count) { struct snd_kcontrol *kctl; list_for_each_entry(kctl, &card->controls, list) { if (kctl->id.numid < card->last_numid + 1 + count && kctl->id.numid + kctl->count > card->last_numid + 1) { card->last_numid = kctl->id.numid + kctl->count - 1; return true; } } return false; } static int snd_ctl_find_hole(struct snd_card *card, unsigned int count) { unsigned int iter = 100000; while (snd_ctl_remove_numid_conflict(card, count)) { if (--iter == 0) { /* this situation is very unlikely */ dev_err(card->dev, "unable to allocate new control numid\n"); return -ENOMEM; } } return 0; } /** * snd_ctl_add - add the control instance to the card * @card: the card instance * @kcontrol: the control instance to add * * Adds the control instance created via snd_ctl_new() or * snd_ctl_new1() to the given card. Assigns also an unique * numid used for fast search. * * It frees automatically the control which cannot be added. * * Return: Zero if successful, or a negative error code on failure. * */ int snd_ctl_add(struct snd_card *card, struct snd_kcontrol *kcontrol) { struct snd_ctl_elem_id id; unsigned int idx; int err = -EINVAL; if (! kcontrol) return err; if (snd_BUG_ON(!card || !kcontrol->info)) goto error; id = kcontrol->id; down_write(&card->controls_rwsem); if (snd_ctl_find_id(card, &id)) { up_write(&card->controls_rwsem); dev_err(card->dev, "control %i:%i:%i:%s:%i is already present\n", id.iface, id.device, id.subdevice, id.name, id.index); err = -EBUSY; goto error; } if (snd_ctl_find_hole(card, kcontrol->count) < 0) { up_write(&card->controls_rwsem); err = -ENOMEM; goto error; } list_add_tail(&kcontrol->list, &card->controls); card->controls_count += kcontrol->count; kcontrol->id.numid = card->last_numid + 1; card->last_numid += kcontrol->count; up_write(&card->controls_rwsem); for (idx = 0; idx < kcontrol->count; idx++, id.index++, id.numid++) snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_ADD, &id); return 0; error: snd_ctl_free_one(kcontrol); return err; } EXPORT_SYMBOL(snd_ctl_add); /** * snd_ctl_replace - replace the control instance of the card * @card: the card instance * @kcontrol: the control instance to replace * @add_on_replace: add the control if not already added * * Replaces the given control. If the given control does not exist * and the add_on_replace flag is set, the control is added. If the * control exists, it is destroyed first. * * It frees automatically the control which cannot be added or replaced. * * Return: Zero if successful, or a negative error code on failure. 
*/ int snd_ctl_replace(struct snd_card *card, struct snd_kcontrol *kcontrol, bool add_on_replace) { struct snd_ctl_elem_id id; unsigned int idx; struct snd_kcontrol *old; int ret; if (!kcontrol) return -EINVAL; if (snd_BUG_ON(!card || !kcontrol->info)) { ret = -EINVAL; goto error; } id = kcontrol->id; down_write(&card->controls_rwsem); old = snd_ctl_find_id(card, &id); if (!old) { if (add_on_replace) goto add; up_write(&card->controls_rwsem); ret = -EINVAL; goto error; } ret = snd_ctl_remove(card, old); if (ret < 0) { up_write(&card->controls_rwsem); goto error; } add: if (snd_ctl_find_hole(card, kcontrol->count) < 0) { up_write(&card->controls_rwsem); ret = -ENOMEM; goto error; } list_add_tail(&kcontrol->list, &card->controls); card->controls_count += kcontrol->count; kcontrol->id.numid = card->last_numid + 1; card->last_numid += kcontrol->count; up_write(&card->controls_rwsem); for (idx = 0; idx < kcontrol->count; idx++, id.index++, id.numid++) snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_ADD, &id); return 0; error: snd_ctl_free_one(kcontrol); return ret; } EXPORT_SYMBOL(snd_ctl_replace); /** * snd_ctl_remove - remove the control from the card and release it * @card: the card instance * @kcontrol: the control instance to remove * * Removes the control from the card and then releases the instance. * You don't need to call snd_ctl_free_one(). You must be in * the write lock - down_write(&card->controls_rwsem). * * Return: 0 if successful, or a negative error code on failure. */ int snd_ctl_remove(struct snd_card *card, struct snd_kcontrol *kcontrol) { struct snd_ctl_elem_id id; unsigned int idx; if (snd_BUG_ON(!card || !kcontrol)) return -EINVAL; list_del(&kcontrol->list); card->controls_count -= kcontrol->count; id = kcontrol->id; for (idx = 0; idx < kcontrol->count; idx++, id.index++, id.numid++) snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_REMOVE, &id); snd_ctl_free_one(kcontrol); return 0; } EXPORT_SYMBOL(snd_ctl_remove); /** * snd_ctl_remove_id - remove the control of the given id and release it * @card: the card instance * @id: the control id to remove * * Finds the control instance with the given id, removes it from the * card list and releases it. * * Return: 0 if successful, or a negative error code on failure. */ int snd_ctl_remove_id(struct snd_card *card, struct snd_ctl_elem_id *id) { struct snd_kcontrol *kctl; int ret; down_write(&card->controls_rwsem); kctl = snd_ctl_find_id(card, id); if (kctl == NULL) { up_write(&card->controls_rwsem); return -ENOENT; } ret = snd_ctl_remove(card, kctl); up_write(&card->controls_rwsem); return ret; } EXPORT_SYMBOL(snd_ctl_remove_id); /** * snd_ctl_remove_user_ctl - remove and release the unlocked user control * @file: active control handle * @id: the control id to remove * * Finds the control instance with the given id, removes it from the * card list and releases it. * * Return: 0 if successful, or a negative error code on failure. 
*/ static int snd_ctl_remove_user_ctl(struct snd_ctl_file * file, struct snd_ctl_elem_id *id) { struct snd_card *card = file->card; struct snd_kcontrol *kctl; int idx, ret; down_write(&card->controls_rwsem); kctl = snd_ctl_find_id(card, id); if (kctl == NULL) { ret = -ENOENT; goto error; } if (!(kctl->vd[0].access & SNDRV_CTL_ELEM_ACCESS_USER)) { ret = -EINVAL; goto error; } for (idx = 0; idx < kctl->count; idx++) if (kctl->vd[idx].owner != NULL && kctl->vd[idx].owner != file) { ret = -EBUSY; goto error; } ret = snd_ctl_remove(card, kctl); if (ret < 0) goto error; card->user_ctl_count--; error: up_write(&card->controls_rwsem); return ret; } /** * snd_ctl_activate_id - activate/inactivate the control of the given id * @card: the card instance * @id: the control id to activate/inactivate * @active: non-zero to activate * * Finds the control instance with the given id, and activate or * inactivate the control together with notification, if changed. * * Return: 0 if unchanged, 1 if changed, or a negative error code on failure. */ int snd_ctl_activate_id(struct snd_card *card, struct snd_ctl_elem_id *id, int active) { struct snd_kcontrol *kctl; struct snd_kcontrol_volatile *vd; unsigned int index_offset; int ret; down_write(&card->controls_rwsem); kctl = snd_ctl_find_id(card, id); if (kctl == NULL) { ret = -ENOENT; goto unlock; } index_offset = snd_ctl_get_ioff(kctl, &kctl->id); vd = &kctl->vd[index_offset]; ret = 0; if (active) { if (!(vd->access & SNDRV_CTL_ELEM_ACCESS_INACTIVE)) goto unlock; vd->access &= ~SNDRV_CTL_ELEM_ACCESS_INACTIVE; } else { if (vd->access & SNDRV_CTL_ELEM_ACCESS_INACTIVE) goto unlock; vd->access |= SNDRV_CTL_ELEM_ACCESS_INACTIVE; } ret = 1; unlock: up_write(&card->controls_rwsem); if (ret > 0) snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_INFO, id); return ret; } EXPORT_SYMBOL_GPL(snd_ctl_activate_id); /** * snd_ctl_rename_id - replace the id of a control on the card * @card: the card instance * @src_id: the old id * @dst_id: the new id * * Finds the control with the old id from the card, and replaces the * id with the new one. * * Return: Zero if successful, or a negative error code on failure. */ int snd_ctl_rename_id(struct snd_card *card, struct snd_ctl_elem_id *src_id, struct snd_ctl_elem_id *dst_id) { struct snd_kcontrol *kctl; down_write(&card->controls_rwsem); kctl = snd_ctl_find_id(card, src_id); if (kctl == NULL) { up_write(&card->controls_rwsem); return -ENOENT; } kctl->id = *dst_id; kctl->id.numid = card->last_numid + 1; card->last_numid += kctl->count; up_write(&card->controls_rwsem); return 0; } EXPORT_SYMBOL(snd_ctl_rename_id); /** * snd_ctl_find_numid - find the control instance with the given number-id * @card: the card instance * @numid: the number-id to search * * Finds the control instance with the given number-id from the card. * * The caller must down card->controls_rwsem before calling this function * (if the race condition can happen). * * Return: The pointer of the instance if found, or %NULL if not. * */ struct snd_kcontrol *snd_ctl_find_numid(struct snd_card *card, unsigned int numid) { struct snd_kcontrol *kctl; if (snd_BUG_ON(!card || !numid)) return NULL; list_for_each_entry(kctl, &card->controls, list) { if (kctl->id.numid <= numid && kctl->id.numid + kctl->count > numid) return kctl; } return NULL; } EXPORT_SYMBOL(snd_ctl_find_numid); /** * snd_ctl_find_id - find the control instance with the given id * @card: the card instance * @id: the id to search * * Finds the control instance with the given id from the card. 
* * The caller must down card->controls_rwsem before calling this function * (if the race condition can happen). * * Return: The pointer of the instance if found, or %NULL if not. * */ struct snd_kcontrol *snd_ctl_find_id(struct snd_card *card, struct snd_ctl_elem_id *id) { struct snd_kcontrol *kctl; if (snd_BUG_ON(!card || !id)) return NULL; if (id->numid != 0) return snd_ctl_find_numid(card, id->numid); list_for_each_entry(kctl, &card->controls, list) { if (kctl->id.iface != id->iface) continue; if (kctl->id.device != id->device) continue; if (kctl->id.subdevice != id->subdevice) continue; if (strncmp(kctl->id.name, id->name, sizeof(kctl->id.name))) continue; if (kctl->id.index > id->index) continue; if (kctl->id.index + kctl->count <= id->index) continue; return kctl; } return NULL; } EXPORT_SYMBOL(snd_ctl_find_id); static int snd_ctl_card_info(struct snd_card *card, struct snd_ctl_file * ctl, unsigned int cmd, void __user *arg) { struct snd_ctl_card_info *info; info = kzalloc(sizeof(*info), GFP_KERNEL); if (! info) return -ENOMEM; down_read(&snd_ioctl_rwsem); info->card = card->number; strlcpy(info->id, card->id, sizeof(info->id)); strlcpy(info->driver, card->driver, sizeof(info->driver)); strlcpy(info->name, card->shortname, sizeof(info->name)); strlcpy(info->longname, card->longname, sizeof(info->longname)); strlcpy(info->mixername, card->mixername, sizeof(info->mixername)); strlcpy(info->components, card->components, sizeof(info->components)); up_read(&snd_ioctl_rwsem); if (copy_to_user(arg, info, sizeof(struct snd_ctl_card_info))) { kfree(info); return -EFAULT; } kfree(info); return 0; } static int snd_ctl_elem_list(struct snd_card *card, struct snd_ctl_elem_list __user *_list) { struct list_head *plist; struct snd_ctl_elem_list list; struct snd_kcontrol *kctl; struct snd_ctl_elem_id *dst, *id; unsigned int offset, space, jidx; if (copy_from_user(&list, _list, sizeof(list))) return -EFAULT; offset = list.offset; space = list.space; /* try limit maximum space */ if (space > 16384) return -ENOMEM; if (space > 0) { /* allocate temporary buffer for atomic operation */ dst = vmalloc(space * sizeof(struct snd_ctl_elem_id)); if (dst == NULL) return -ENOMEM; down_read(&card->controls_rwsem); list.count = card->controls_count; plist = card->controls.next; while (plist != &card->controls) { if (offset == 0) break; kctl = snd_kcontrol(plist); if (offset < kctl->count) break; offset -= kctl->count; plist = plist->next; } list.used = 0; id = dst; while (space > 0 && plist != &card->controls) { kctl = snd_kcontrol(plist); for (jidx = offset; space > 0 && jidx < kctl->count; jidx++) { snd_ctl_build_ioff(id, kctl, jidx); id++; space--; list.used++; } plist = plist->next; offset = 0; } up_read(&card->controls_rwsem); if (list.used > 0 && copy_to_user(list.pids, dst, list.used * sizeof(struct snd_ctl_elem_id))) { vfree(dst); return -EFAULT; } vfree(dst); } else { down_read(&card->controls_rwsem); list.count = card->controls_count; up_read(&card->controls_rwsem); } if (copy_to_user(_list, &list, sizeof(list))) return -EFAULT; return 0; } static int snd_ctl_elem_info(struct snd_ctl_file *ctl, struct snd_ctl_elem_info *info) { struct snd_card *card = ctl->card; struct snd_kcontrol *kctl; struct snd_kcontrol_volatile *vd; unsigned int index_offset; int result; down_read(&card->controls_rwsem); kctl = snd_ctl_find_id(card, &info->id); if (kctl == NULL) { up_read(&card->controls_rwsem); return -ENOENT; } #ifdef CONFIG_SND_DEBUG info->access = 0; #endif result = kctl->info(kctl, info); if (result >= 0) { 
snd_BUG_ON(info->access); index_offset = snd_ctl_get_ioff(kctl, &info->id); vd = &kctl->vd[index_offset]; snd_ctl_build_ioff(&info->id, kctl, index_offset); info->access = vd->access; if (vd->owner) { info->access |= SNDRV_CTL_ELEM_ACCESS_LOCK; if (vd->owner == ctl) info->access |= SNDRV_CTL_ELEM_ACCESS_OWNER; info->owner = pid_vnr(vd->owner->pid); } else { info->owner = -1; } } up_read(&card->controls_rwsem); return result; } static int snd_ctl_elem_info_user(struct snd_ctl_file *ctl, struct snd_ctl_elem_info __user *_info) { struct snd_ctl_elem_info info; int result; if (copy_from_user(&info, _info, sizeof(info))) return -EFAULT; snd_power_lock(ctl->card); result = snd_power_wait(ctl->card, SNDRV_CTL_POWER_D0); if (result >= 0) result = snd_ctl_elem_info(ctl, &info); snd_power_unlock(ctl->card); if (result >= 0) if (copy_to_user(_info, &info, sizeof(info))) return -EFAULT; return result; } static int snd_ctl_elem_read(struct snd_card *card, struct snd_ctl_elem_value *control) { struct snd_kcontrol *kctl; struct snd_kcontrol_volatile *vd; unsigned int index_offset; int result; down_read(&card->controls_rwsem); kctl = snd_ctl_find_id(card, &control->id); if (kctl == NULL) { result = -ENOENT; } else { index_offset = snd_ctl_get_ioff(kctl, &control->id); vd = &kctl->vd[index_offset]; if ((vd->access & SNDRV_CTL_ELEM_ACCESS_READ) && kctl->get != NULL) { snd_ctl_build_ioff(&control->id, kctl, index_offset); result = kctl->get(kctl, control); } else result = -EPERM; } up_read(&card->controls_rwsem); return result; } static int snd_ctl_elem_read_user(struct snd_card *card, struct snd_ctl_elem_value __user *_control) { struct snd_ctl_elem_value *control; int result; control = memdup_user(_control, sizeof(*control)); if (IS_ERR(control)) return PTR_ERR(control); snd_power_lock(card); result = snd_power_wait(card, SNDRV_CTL_POWER_D0); if (result >= 0) result = snd_ctl_elem_read(card, control); snd_power_unlock(card); if (result >= 0) if (copy_to_user(_control, control, sizeof(*control))) result = -EFAULT; kfree(control); return result; } static int snd_ctl_elem_write(struct snd_card *card, struct snd_ctl_file *file, struct snd_ctl_elem_value *control) { struct snd_kcontrol *kctl; struct snd_kcontrol_volatile *vd; unsigned int index_offset; int result; down_read(&card->controls_rwsem); kctl = snd_ctl_find_id(card, &control->id); if (kctl == NULL) { result = -ENOENT; } else { index_offset = snd_ctl_get_ioff(kctl, &control->id); vd = &kctl->vd[index_offset]; if (!(vd->access & SNDRV_CTL_ELEM_ACCESS_WRITE) || kctl->put == NULL || (file && vd->owner && vd->owner != file)) { result = -EPERM; } else { snd_ctl_build_ioff(&control->id, kctl, index_offset); result = kctl->put(kctl, control); } if (result > 0) { up_read(&card->controls_rwsem); snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE, &control->id); return 0; } } up_read(&card->controls_rwsem); return result; } static int snd_ctl_elem_write_user(struct snd_ctl_file *file, struct snd_ctl_elem_value __user *_control) { struct snd_ctl_elem_value *control; struct snd_card *card; int result; control = memdup_user(_control, sizeof(*control)); if (IS_ERR(control)) return PTR_ERR(control); card = file->card; snd_power_lock(card); result = snd_power_wait(card, SNDRV_CTL_POWER_D0); if (result >= 0) result = snd_ctl_elem_write(card, file, control); snd_power_unlock(card); if (result >= 0) if (copy_to_user(_control, control, sizeof(*control))) result = -EFAULT; kfree(control); return result; } static int snd_ctl_elem_lock(struct snd_ctl_file *file, struct 
snd_ctl_elem_id __user *_id) { struct snd_card *card = file->card; struct snd_ctl_elem_id id; struct snd_kcontrol *kctl; struct snd_kcontrol_volatile *vd; int result; if (copy_from_user(&id, _id, sizeof(id))) return -EFAULT; down_write(&card->controls_rwsem); kctl = snd_ctl_find_id(card, &id); if (kctl == NULL) { result = -ENOENT; } else { vd = &kctl->vd[snd_ctl_get_ioff(kctl, &id)]; if (vd->owner != NULL) result = -EBUSY; else { vd->owner = file; result = 0; } } up_write(&card->controls_rwsem); return result; } static int snd_ctl_elem_unlock(struct snd_ctl_file *file, struct snd_ctl_elem_id __user *_id) { struct snd_card *card = file->card; struct snd_ctl_elem_id id; struct snd_kcontrol *kctl; struct snd_kcontrol_volatile *vd; int result; if (copy_from_user(&id, _id, sizeof(id))) return -EFAULT; down_write(&card->controls_rwsem); kctl = snd_ctl_find_id(card, &id); if (kctl == NULL) { result = -ENOENT; } else { vd = &kctl->vd[snd_ctl_get_ioff(kctl, &id)]; if (vd->owner == NULL) result = -EINVAL; else if (vd->owner != file) result = -EPERM; else { vd->owner = NULL; result = 0; } } up_write(&card->controls_rwsem); return result; } struct user_element { struct snd_ctl_elem_info info; void *elem_data; /* element data */ unsigned long elem_data_size; /* size of element data in bytes */ void *tlv_data; /* TLV data */ unsigned long tlv_data_size; /* TLV data size */ void *priv_data; /* private data (like strings for enumerated type) */ }; static int snd_ctl_elem_user_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { struct user_element *ue = kcontrol->private_data; *uinfo = ue->info; return 0; } static int snd_ctl_elem_user_enum_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { struct user_element *ue = kcontrol->private_data; const char *names; unsigned int item; item = uinfo->value.enumerated.item; *uinfo = ue->info; item = min(item, uinfo->value.enumerated.items - 1); uinfo->value.enumerated.item = item; names = ue->priv_data; for (; item > 0; --item) names += strlen(names) + 1; strcpy(uinfo->value.enumerated.name, names); return 0; } static int snd_ctl_elem_user_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct user_element *ue = kcontrol->private_data; memcpy(&ucontrol->value, ue->elem_data, ue->elem_data_size); return 0; } static int snd_ctl_elem_user_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { int change; struct user_element *ue = kcontrol->private_data; change = memcmp(&ucontrol->value, ue->elem_data, ue->elem_data_size) != 0; if (change) memcpy(ue->elem_data, &ucontrol->value, ue->elem_data_size); return change; } static int snd_ctl_elem_user_tlv(struct snd_kcontrol *kcontrol, int op_flag, unsigned int size, unsigned int __user *tlv) { struct user_element *ue = kcontrol->private_data; int change = 0; void *new_data; if (op_flag > 0) { if (size > 1024 * 128) /* sane value */ return -EINVAL; new_data = memdup_user(tlv, size); if (IS_ERR(new_data)) return PTR_ERR(new_data); change = ue->tlv_data_size != size; if (!change) change = memcmp(ue->tlv_data, new_data, size); kfree(ue->tlv_data); ue->tlv_data = new_data; ue->tlv_data_size = size; } else { if (! ue->tlv_data_size || ! 
ue->tlv_data) return -ENXIO; if (size < ue->tlv_data_size) return -ENOSPC; if (copy_to_user(tlv, ue->tlv_data, ue->tlv_data_size)) return -EFAULT; } return change; } static int snd_ctl_elem_init_enum_names(struct user_element *ue) { char *names, *p; size_t buf_len, name_len; unsigned int i; const uintptr_t user_ptrval = ue->info.value.enumerated.names_ptr; if (ue->info.value.enumerated.names_length > 64 * 1024) return -EINVAL; names = memdup_user((const void __user *)user_ptrval, ue->info.value.enumerated.names_length); if (IS_ERR(names)) return PTR_ERR(names); /* check that there are enough valid names */ buf_len = ue->info.value.enumerated.names_length; p = names; for (i = 0; i < ue->info.value.enumerated.items; ++i) { name_len = strnlen(p, buf_len); if (name_len == 0 || name_len >= 64 || name_len == buf_len) { kfree(names); return -EINVAL; } p += name_len + 1; buf_len -= name_len + 1; } ue->priv_data = names; ue->info.value.enumerated.names_ptr = 0; return 0; } static void snd_ctl_elem_user_free(struct snd_kcontrol *kcontrol) { struct user_element *ue = kcontrol->private_data; kfree(ue->tlv_data); kfree(ue->priv_data); kfree(ue); } static int snd_ctl_elem_add(struct snd_ctl_file *file, struct snd_ctl_elem_info *info, int replace) { struct snd_card *card = file->card; struct snd_kcontrol kctl, *_kctl; unsigned int access; long private_size; struct user_element *ue; int idx, err; if (!replace && card->user_ctl_count >= MAX_USER_CONTROLS) return -ENOMEM; if (info->count < 1) return -EINVAL; access = info->access == 0 ? SNDRV_CTL_ELEM_ACCESS_READWRITE : (info->access & (SNDRV_CTL_ELEM_ACCESS_READWRITE| SNDRV_CTL_ELEM_ACCESS_INACTIVE| SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE)); info->id.numid = 0; memset(&kctl, 0, sizeof(kctl)); down_write(&card->controls_rwsem); _kctl = snd_ctl_find_id(card, &info->id); err = 0; if (_kctl) { if (replace) err = snd_ctl_remove(card, _kctl); else err = -EBUSY; } else { if (replace) err = -ENOENT; } up_write(&card->controls_rwsem); if (err < 0) return err; memcpy(&kctl.id, &info->id, sizeof(info->id)); kctl.count = info->owner ? 
info->owner : 1; access |= SNDRV_CTL_ELEM_ACCESS_USER; if (info->type == SNDRV_CTL_ELEM_TYPE_ENUMERATED) kctl.info = snd_ctl_elem_user_enum_info; else kctl.info = snd_ctl_elem_user_info; if (access & SNDRV_CTL_ELEM_ACCESS_READ) kctl.get = snd_ctl_elem_user_get; if (access & SNDRV_CTL_ELEM_ACCESS_WRITE) kctl.put = snd_ctl_elem_user_put; if (access & SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE) { kctl.tlv.c = snd_ctl_elem_user_tlv; access |= SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK; } switch (info->type) { case SNDRV_CTL_ELEM_TYPE_BOOLEAN: case SNDRV_CTL_ELEM_TYPE_INTEGER: private_size = sizeof(long); if (info->count > 128) return -EINVAL; break; case SNDRV_CTL_ELEM_TYPE_INTEGER64: private_size = sizeof(long long); if (info->count > 64) return -EINVAL; break; case SNDRV_CTL_ELEM_TYPE_ENUMERATED: private_size = sizeof(unsigned int); if (info->count > 128 || info->value.enumerated.items == 0) return -EINVAL; break; case SNDRV_CTL_ELEM_TYPE_BYTES: private_size = sizeof(unsigned char); if (info->count > 512) return -EINVAL; break; case SNDRV_CTL_ELEM_TYPE_IEC958: private_size = sizeof(struct snd_aes_iec958); if (info->count != 1) return -EINVAL; break; default: return -EINVAL; } private_size *= info->count; ue = kzalloc(sizeof(struct user_element) + private_size, GFP_KERNEL); if (ue == NULL) return -ENOMEM; ue->info = *info; ue->info.access = 0; ue->elem_data = (char *)ue + sizeof(*ue); ue->elem_data_size = private_size; if (ue->info.type == SNDRV_CTL_ELEM_TYPE_ENUMERATED) { err = snd_ctl_elem_init_enum_names(ue); if (err < 0) { kfree(ue); return err; } } kctl.private_free = snd_ctl_elem_user_free; _kctl = snd_ctl_new(&kctl, access); if (_kctl == NULL) { kfree(ue->priv_data); kfree(ue); return -ENOMEM; } _kctl->private_data = ue; for (idx = 0; idx < _kctl->count; idx++) _kctl->vd[idx].owner = file; err = snd_ctl_add(card, _kctl); if (err < 0) return err; down_write(&card->controls_rwsem); card->user_ctl_count++; up_write(&card->controls_rwsem); return 0; } static int snd_ctl_elem_add_user(struct snd_ctl_file *file, struct snd_ctl_elem_info __user *_info, int replace) { struct snd_ctl_elem_info info; if (copy_from_user(&info, _info, sizeof(info))) return -EFAULT; return snd_ctl_elem_add(file, &info, replace); } static int snd_ctl_elem_remove(struct snd_ctl_file *file, struct snd_ctl_elem_id __user *_id) { struct snd_ctl_elem_id id; if (copy_from_user(&id, _id, sizeof(id))) return -EFAULT; return snd_ctl_remove_user_ctl(file, &id); } static int snd_ctl_subscribe_events(struct snd_ctl_file *file, int __user *ptr) { int subscribe; if (get_user(subscribe, ptr)) return -EFAULT; if (subscribe < 0) { subscribe = file->subscribed; if (put_user(subscribe, ptr)) return -EFAULT; return 0; } if (subscribe) { file->subscribed = 1; return 0; } else if (file->subscribed) { snd_ctl_empty_read_queue(file); file->subscribed = 0; } return 0; } static int snd_ctl_tlv_ioctl(struct snd_ctl_file *file, struct snd_ctl_tlv __user *_tlv, int op_flag) { struct snd_card *card = file->card; struct snd_ctl_tlv tlv; struct snd_kcontrol *kctl; struct snd_kcontrol_volatile *vd; unsigned int len; int err = 0; if (copy_from_user(&tlv, _tlv, sizeof(tlv))) return -EFAULT; if (tlv.length < sizeof(unsigned int) * 2) return -EINVAL; down_read(&card->controls_rwsem); kctl = snd_ctl_find_numid(card, tlv.numid); if (kctl == NULL) { err = -ENOENT; goto __kctl_end; } if (kctl->tlv.p == NULL) { err = -ENXIO; goto __kctl_end; } vd = &kctl->vd[tlv.numid - kctl->id.numid]; if ((op_flag == 0 && (vd->access & SNDRV_CTL_ELEM_ACCESS_TLV_READ) == 0) || (op_flag > 
0 && (vd->access & SNDRV_CTL_ELEM_ACCESS_TLV_WRITE) == 0) || (op_flag < 0 && (vd->access & SNDRV_CTL_ELEM_ACCESS_TLV_COMMAND) == 0)) { err = -ENXIO; goto __kctl_end; } if (vd->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) { if (vd->owner != NULL && vd->owner != file) { err = -EPERM; goto __kctl_end; } err = kctl->tlv.c(kctl, op_flag, tlv.length, _tlv->tlv); if (err > 0) { up_read(&card->controls_rwsem); snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_TLV, &kctl->id); return 0; } } else { if (op_flag) { err = -ENXIO; goto __kctl_end; } len = kctl->tlv.p[1] + 2 * sizeof(unsigned int); if (tlv.length < len) { err = -ENOMEM; goto __kctl_end; } if (copy_to_user(_tlv->tlv, kctl->tlv.p, len)) err = -EFAULT; } __kctl_end: up_read(&card->controls_rwsem); return err; } static long snd_ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct snd_ctl_file *ctl; struct snd_card *card; struct snd_kctl_ioctl *p; void __user *argp = (void __user *)arg; int __user *ip = argp; int err; ctl = file->private_data; card = ctl->card; if (snd_BUG_ON(!card)) return -ENXIO; switch (cmd) { case SNDRV_CTL_IOCTL_PVERSION: return put_user(SNDRV_CTL_VERSION, ip) ? -EFAULT : 0; case SNDRV_CTL_IOCTL_CARD_INFO: return snd_ctl_card_info(card, ctl, cmd, argp); case SNDRV_CTL_IOCTL_ELEM_LIST: return snd_ctl_elem_list(card, argp); case SNDRV_CTL_IOCTL_ELEM_INFO: return snd_ctl_elem_info_user(ctl, argp); case SNDRV_CTL_IOCTL_ELEM_READ: return snd_ctl_elem_read_user(card, argp); case SNDRV_CTL_IOCTL_ELEM_WRITE: return snd_ctl_elem_write_user(ctl, argp); case SNDRV_CTL_IOCTL_ELEM_LOCK: return snd_ctl_elem_lock(ctl, argp); case SNDRV_CTL_IOCTL_ELEM_UNLOCK: return snd_ctl_elem_unlock(ctl, argp); case SNDRV_CTL_IOCTL_ELEM_ADD: return snd_ctl_elem_add_user(ctl, argp, 0); case SNDRV_CTL_IOCTL_ELEM_REPLACE: return snd_ctl_elem_add_user(ctl, argp, 1); case SNDRV_CTL_IOCTL_ELEM_REMOVE: return snd_ctl_elem_remove(ctl, argp); case SNDRV_CTL_IOCTL_SUBSCRIBE_EVENTS: return snd_ctl_subscribe_events(ctl, ip); case SNDRV_CTL_IOCTL_TLV_READ: return snd_ctl_tlv_ioctl(ctl, argp, 0); case SNDRV_CTL_IOCTL_TLV_WRITE: return snd_ctl_tlv_ioctl(ctl, argp, 1); case SNDRV_CTL_IOCTL_TLV_COMMAND: return snd_ctl_tlv_ioctl(ctl, argp, -1); case SNDRV_CTL_IOCTL_POWER: return -ENOPROTOOPT; case SNDRV_CTL_IOCTL_POWER_STATE: #ifdef CONFIG_PM return put_user(card->power_state, ip) ? -EFAULT : 0; #else return put_user(SNDRV_CTL_POWER_D0, ip) ? 
-EFAULT : 0; #endif } down_read(&snd_ioctl_rwsem); list_for_each_entry(p, &snd_control_ioctls, list) { err = p->fioctl(card, ctl, cmd, arg); if (err != -ENOIOCTLCMD) { up_read(&snd_ioctl_rwsem); return err; } } up_read(&snd_ioctl_rwsem); dev_dbg(card->dev, "unknown ioctl = 0x%x\n", cmd); return -ENOTTY; } static ssize_t snd_ctl_read(struct file *file, char __user *buffer, size_t count, loff_t * offset) { struct snd_ctl_file *ctl; int err = 0; ssize_t result = 0; ctl = file->private_data; if (snd_BUG_ON(!ctl || !ctl->card)) return -ENXIO; if (!ctl->subscribed) return -EBADFD; if (count < sizeof(struct snd_ctl_event)) return -EINVAL; spin_lock_irq(&ctl->read_lock); while (count >= sizeof(struct snd_ctl_event)) { struct snd_ctl_event ev; struct snd_kctl_event *kev; while (list_empty(&ctl->events)) { wait_queue_t wait; if ((file->f_flags & O_NONBLOCK) != 0 || result > 0) { err = -EAGAIN; goto __end_lock; } init_waitqueue_entry(&wait, current); add_wait_queue(&ctl->change_sleep, &wait); set_current_state(TASK_INTERRUPTIBLE); spin_unlock_irq(&ctl->read_lock); schedule(); remove_wait_queue(&ctl->change_sleep, &wait); if (ctl->card->shutdown) return -ENODEV; if (signal_pending(current)) return -ERESTARTSYS; spin_lock_irq(&ctl->read_lock); } kev = snd_kctl_event(ctl->events.next); ev.type = SNDRV_CTL_EVENT_ELEM; ev.data.elem.mask = kev->mask; ev.data.elem.id = kev->id; list_del(&kev->list); spin_unlock_irq(&ctl->read_lock); kfree(kev); if (copy_to_user(buffer, &ev, sizeof(struct snd_ctl_event))) { err = -EFAULT; goto __end; } spin_lock_irq(&ctl->read_lock); buffer += sizeof(struct snd_ctl_event); count -= sizeof(struct snd_ctl_event); result += sizeof(struct snd_ctl_event); } __end_lock: spin_unlock_irq(&ctl->read_lock); __end: return result > 0 ? result : err; } static unsigned int snd_ctl_poll(struct file *file, poll_table * wait) { unsigned int mask; struct snd_ctl_file *ctl; ctl = file->private_data; if (!ctl->subscribed) return 0; poll_wait(file, &ctl->change_sleep, wait); mask = 0; if (!list_empty(&ctl->events)) mask |= POLLIN | POLLRDNORM; return mask; } /* * register the device-specific control-ioctls. * called from each device manager like pcm.c, hwdep.c, etc. */ static int _snd_ctl_register_ioctl(snd_kctl_ioctl_func_t fcn, struct list_head *lists) { struct snd_kctl_ioctl *pn; pn = kzalloc(sizeof(struct snd_kctl_ioctl), GFP_KERNEL); if (pn == NULL) return -ENOMEM; pn->fioctl = fcn; down_write(&snd_ioctl_rwsem); list_add_tail(&pn->list, lists); up_write(&snd_ioctl_rwsem); return 0; } int snd_ctl_register_ioctl(snd_kctl_ioctl_func_t fcn) { return _snd_ctl_register_ioctl(fcn, &snd_control_ioctls); } EXPORT_SYMBOL(snd_ctl_register_ioctl); #ifdef CONFIG_COMPAT int snd_ctl_register_ioctl_compat(snd_kctl_ioctl_func_t fcn) { return _snd_ctl_register_ioctl(fcn, &snd_control_compat_ioctls); } EXPORT_SYMBOL(snd_ctl_register_ioctl_compat); #endif /* * de-register the device-specific control-ioctls. 
*/ static int _snd_ctl_unregister_ioctl(snd_kctl_ioctl_func_t fcn, struct list_head *lists) { struct snd_kctl_ioctl *p; if (snd_BUG_ON(!fcn)) return -EINVAL; down_write(&snd_ioctl_rwsem); list_for_each_entry(p, lists, list) { if (p->fioctl == fcn) { list_del(&p->list); up_write(&snd_ioctl_rwsem); kfree(p); return 0; } } up_write(&snd_ioctl_rwsem); snd_BUG(); return -EINVAL; } int snd_ctl_unregister_ioctl(snd_kctl_ioctl_func_t fcn) { return _snd_ctl_unregister_ioctl(fcn, &snd_control_ioctls); } EXPORT_SYMBOL(snd_ctl_unregister_ioctl); #ifdef CONFIG_COMPAT int snd_ctl_unregister_ioctl_compat(snd_kctl_ioctl_func_t fcn) { return _snd_ctl_unregister_ioctl(fcn, &snd_control_compat_ioctls); } EXPORT_SYMBOL(snd_ctl_unregister_ioctl_compat); #endif static int snd_ctl_fasync(int fd, struct file * file, int on) { struct snd_ctl_file *ctl; ctl = file->private_data; return fasync_helper(fd, file, on, &ctl->fasync); } /* * ioctl32 compat */ #ifdef CONFIG_COMPAT #include "control_compat.c" #else #define snd_ctl_ioctl_compat NULL #endif /* * INIT PART */ static const struct file_operations snd_ctl_f_ops = { .owner = THIS_MODULE, .read = snd_ctl_read, .open = snd_ctl_open, .release = snd_ctl_release, .llseek = no_llseek, .poll = snd_ctl_poll, .unlocked_ioctl = snd_ctl_ioctl, .compat_ioctl = snd_ctl_ioctl_compat, .fasync = snd_ctl_fasync, }; /* * registration of the control device */ static int snd_ctl_dev_register(struct snd_device *device) { struct snd_card *card = device->device_data; int err, cardnum; char name[16]; if (snd_BUG_ON(!card)) return -ENXIO; cardnum = card->number; if (snd_BUG_ON(cardnum < 0 || cardnum >= SNDRV_CARDS)) return -ENXIO; sprintf(name, "controlC%i", cardnum); if ((err = snd_register_device(SNDRV_DEVICE_TYPE_CONTROL, card, -1, &snd_ctl_f_ops, card, name)) < 0) return err; return 0; } /* * disconnection of the control device */ static int snd_ctl_dev_disconnect(struct snd_device *device) { struct snd_card *card = device->device_data; struct snd_ctl_file *ctl; int err, cardnum; if (snd_BUG_ON(!card)) return -ENXIO; cardnum = card->number; if (snd_BUG_ON(cardnum < 0 || cardnum >= SNDRV_CARDS)) return -ENXIO; read_lock(&card->ctl_files_rwlock); list_for_each_entry(ctl, &card->ctl_files, list) { wake_up(&ctl->change_sleep); kill_fasync(&ctl->fasync, SIGIO, POLL_ERR); } read_unlock(&card->ctl_files_rwlock); if ((err = snd_unregister_device(SNDRV_DEVICE_TYPE_CONTROL, card, -1)) < 0) return err; return 0; } /* * free all controls */ static int snd_ctl_dev_free(struct snd_device *device) { struct snd_card *card = device->device_data; struct snd_kcontrol *control; down_write(&card->controls_rwsem); while (!list_empty(&card->controls)) { control = snd_kcontrol(card->controls.next); snd_ctl_remove(card, control); } up_write(&card->controls_rwsem); return 0; } /* * create control core: * called from init.c */ int snd_ctl_create(struct snd_card *card) { static struct snd_device_ops ops = { .dev_free = snd_ctl_dev_free, .dev_register = snd_ctl_dev_register, .dev_disconnect = snd_ctl_dev_disconnect, }; if (snd_BUG_ON(!card)) return -ENXIO; return snd_device_new(card, SNDRV_DEV_CONTROL, card, &ops); } /* * Frequently used control callbacks/helpers */ int snd_ctl_boolean_mono_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN; uinfo->count = 1; uinfo->value.integer.min = 0; uinfo->value.integer.max = 1; return 0; } EXPORT_SYMBOL(snd_ctl_boolean_mono_info); int snd_ctl_boolean_stereo_info(struct snd_kcontrol *kcontrol, struct 
snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN; uinfo->count = 2; uinfo->value.integer.min = 0; uinfo->value.integer.max = 1; return 0; } EXPORT_SYMBOL(snd_ctl_boolean_stereo_info); /** * snd_ctl_enum_info - fills the info structure for an enumerated control * @info: the structure to be filled * @channels: the number of the control's channels; often one * @items: the number of control values; also the size of @names * @names: an array containing the names of all control values * * Sets all required fields in @info to their appropriate values. * If the control's accessibility is not the default (readable and writable), * the caller has to fill @info->access. * * Return: Zero. */ int snd_ctl_enum_info(struct snd_ctl_elem_info *info, unsigned int channels, unsigned int items, const char *const names[]) { info->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; info->count = channels; info->value.enumerated.items = items; if (info->value.enumerated.item >= items) info->value.enumerated.item = items - 1; strlcpy(info->value.enumerated.name, names[info->value.enumerated.item], sizeof(info->value.enumerated.name)); return 0; } EXPORT_SYMBOL(snd_ctl_enum_info);
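/*
 * Illustrative sketch (not part of the original file): one way a driver
 * might expose a simple boolean "mute" switch through the control API
 * implemented above.  It reuses snd_ctl_boolean_mono_info() from this
 * file as the info callback and registers the element with the standard
 * snd_ctl_new1()/snd_ctl_add() helpers.  The my_chip structure, its
 * field, and the callback names are hypothetical.
 */
struct my_chip {
	unsigned int mute;
};

static int my_mute_get(struct snd_kcontrol *kcontrol,
		       struct snd_ctl_elem_value *ucontrol)
{
	struct my_chip *chip = snd_kcontrol_chip(kcontrol);

	ucontrol->value.integer.value[0] = chip->mute;
	return 0;
}

static int my_mute_put(struct snd_kcontrol *kcontrol,
		       struct snd_ctl_elem_value *ucontrol)
{
	struct my_chip *chip = snd_kcontrol_chip(kcontrol);
	unsigned int val = !!ucontrol->value.integer.value[0];
	int changed = (chip->mute != val);

	chip->mute = val;
	/* a positive return from put() makes snd_ctl_elem_write() emit a
	 * SNDRV_CTL_EVENT_MASK_VALUE notification, as seen above */
	return changed;
}

static const struct snd_kcontrol_new my_mute_control = {
	.iface	= SNDRV_CTL_ELEM_IFACE_MIXER,
	.name	= "My Mute Switch",
	.info	= snd_ctl_boolean_mono_info,
	.get	= my_mute_get,
	.put	= my_mute_put,
};

static int my_create_controls(struct snd_card *card, struct my_chip *chip)
{
	struct snd_kcontrol *kctl = snd_ctl_new1(&my_mute_control, chip);

	if (!kctl)
		return -ENOMEM;
	return snd_ctl_add(card, kctl);
}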
./CrossVul/dataset_final_sorted/CWE-362/c/bad_2215_1
crossvul-cpp_data_bad_2215_2
/* * Initialization routines * Copyright (c) by Jaroslav Kysela <perex@perex.cz> * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/init.h> #include <linux/sched.h> #include <linux/module.h> #include <linux/device.h> #include <linux/file.h> #include <linux/slab.h> #include <linux/time.h> #include <linux/ctype.h> #include <linux/pm.h> #include <linux/completion.h> #include <sound/core.h> #include <sound/control.h> #include <sound/info.h> /* monitor files for graceful shutdown (hotplug) */ struct snd_monitor_file { struct file *file; const struct file_operations *disconnected_f_op; struct list_head shutdown_list; /* still need to shutdown */ struct list_head list; /* link of monitor files */ }; static DEFINE_SPINLOCK(shutdown_lock); static LIST_HEAD(shutdown_files); static const struct file_operations snd_shutdown_f_ops; /* locked for registering/using */ static DECLARE_BITMAP(snd_cards_lock, SNDRV_CARDS); struct snd_card *snd_cards[SNDRV_CARDS]; EXPORT_SYMBOL(snd_cards); static DEFINE_MUTEX(snd_card_mutex); static char *slots[SNDRV_CARDS]; module_param_array(slots, charp, NULL, 0444); MODULE_PARM_DESC(slots, "Module names assigned to the slots."); /* return non-zero if the given index is reserved for the given * module via slots option */ static int module_slot_match(struct module *module, int idx) { int match = 1; #ifdef MODULE const char *s1, *s2; if (!module || !*module->name || !slots[idx]) return 0; s1 = module->name; s2 = slots[idx]; if (*s2 == '!') { match = 0; /* negative match */ s2++; } /* compare module name strings * hyphens are handled as equivalent with underscore */ for (;;) { char c1 = *s1++; char c2 = *s2++; if (c1 == '-') c1 = '_'; if (c2 == '-') c2 = '_'; if (c1 != c2) return !match; if (!c1) break; } #endif /* MODULE */ return match; } #if IS_ENABLED(CONFIG_SND_MIXER_OSS) int (*snd_mixer_oss_notify_callback)(struct snd_card *card, int free_flag); EXPORT_SYMBOL(snd_mixer_oss_notify_callback); #endif #ifdef CONFIG_PROC_FS static void snd_card_id_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { snd_iprintf(buffer, "%s\n", entry->card->id); } static inline int init_info_for_card(struct snd_card *card) { int err; struct snd_info_entry *entry; if ((err = snd_info_card_register(card)) < 0) { dev_dbg(card->dev, "unable to create card info\n"); return err; } if ((entry = snd_info_create_card_entry(card, "id", card->proc_root)) == NULL) { dev_dbg(card->dev, "unable to create card entry\n"); return err; } entry->c.text.read = snd_card_id_read; if (snd_info_register(entry) < 0) { snd_info_free_entry(entry); entry = NULL; } card->proc_id = entry; return 0; } #else /* !CONFIG_PROC_FS */ #define init_info_for_card(card) #endif static int check_empty_slot(struct module *module, int slot) { return !slots[slot] || !*slots[slot]; } /* return an empty slot number (>= 0) found in the given bitmask @mask. 
* @mask == -1 == 0xffffffff means: take any free slot up to 32 * when no slot is available, return the original @mask as is. */ static int get_slot_from_bitmask(int mask, int (*check)(struct module *, int), struct module *module) { int slot; for (slot = 0; slot < SNDRV_CARDS; slot++) { if (slot < 32 && !(mask & (1U << slot))) continue; if (!test_bit(slot, snd_cards_lock)) { if (check(module, slot)) return slot; /* found */ } } return mask; /* unchanged */ } static int snd_card_do_free(struct snd_card *card); static const struct attribute_group *card_dev_attr_groups[]; static void release_card_device(struct device *dev) { snd_card_do_free(dev_to_snd_card(dev)); } /** * snd_card_new - create and initialize a soundcard structure * @parent: the parent device object * @idx: card index (address) [0 ... (SNDRV_CARDS-1)] * @xid: card identification (ASCII string) * @module: top level module for locking * @extra_size: allocate this extra size after the main soundcard structure * @card_ret: the pointer to store the created card instance * * Creates and initializes a soundcard structure. * * The function allocates snd_card instance via kzalloc with the given * space for the driver to use freely. The allocated struct is stored * in the given card_ret pointer. * * Return: Zero if successful or a negative error code. */ int snd_card_new(struct device *parent, int idx, const char *xid, struct module *module, int extra_size, struct snd_card **card_ret) { struct snd_card *card; int err; if (snd_BUG_ON(!card_ret)) return -EINVAL; *card_ret = NULL; if (extra_size < 0) extra_size = 0; card = kzalloc(sizeof(*card) + extra_size, GFP_KERNEL); if (!card) return -ENOMEM; if (extra_size > 0) card->private_data = (char *)card + sizeof(struct snd_card); if (xid) strlcpy(card->id, xid, sizeof(card->id)); err = 0; mutex_lock(&snd_card_mutex); if (idx < 0) /* first check the matching module-name slot */ idx = get_slot_from_bitmask(idx, module_slot_match, module); if (idx < 0) /* if not matched, assign an empty slot */ idx = get_slot_from_bitmask(idx, check_empty_slot, module); if (idx < 0) err = -ENODEV; else if (idx < snd_ecards_limit) { if (test_bit(idx, snd_cards_lock)) err = -EBUSY; /* invalid */ } else if (idx >= SNDRV_CARDS) err = -ENODEV; if (err < 0) { mutex_unlock(&snd_card_mutex); dev_err(parent, "cannot find the slot for index %d (range 0-%i), error: %d\n", idx, snd_ecards_limit - 1, err); kfree(card); return err; } set_bit(idx, snd_cards_lock); /* lock it */ if (idx >= snd_ecards_limit) snd_ecards_limit = idx + 1; /* increase the limit */ mutex_unlock(&snd_card_mutex); card->dev = parent; card->number = idx; card->module = module; INIT_LIST_HEAD(&card->devices); init_rwsem(&card->controls_rwsem); rwlock_init(&card->ctl_files_rwlock); INIT_LIST_HEAD(&card->controls); INIT_LIST_HEAD(&card->ctl_files); spin_lock_init(&card->files_lock); INIT_LIST_HEAD(&card->files_list); #ifdef CONFIG_PM mutex_init(&card->power_lock); init_waitqueue_head(&card->power_sleep); #endif device_initialize(&card->card_dev); card->card_dev.parent = parent; card->card_dev.class = sound_class; card->card_dev.release = release_card_device; card->card_dev.groups = card_dev_attr_groups; err = kobject_set_name(&card->card_dev.kobj, "card%d", idx); if (err < 0) goto __error; /* the control interface cannot be accessed from the user space until */ /* snd_cards_bitmask and snd_cards are set with snd_card_register */ err = snd_ctl_create(card); if (err < 0) { dev_err(parent, "unable to register control minors\n"); goto __error; } err = 
snd_info_card_create(card); if (err < 0) { dev_err(parent, "unable to create card info\n"); goto __error_ctl; } *card_ret = card; return 0; __error_ctl: snd_device_free_all(card); __error: put_device(&card->card_dev); return err; } EXPORT_SYMBOL(snd_card_new); /* return non-zero if a card is already locked */ int snd_card_locked(int card) { int locked; mutex_lock(&snd_card_mutex); locked = test_bit(card, snd_cards_lock); mutex_unlock(&snd_card_mutex); return locked; } static loff_t snd_disconnect_llseek(struct file *file, loff_t offset, int orig) { return -ENODEV; } static ssize_t snd_disconnect_read(struct file *file, char __user *buf, size_t count, loff_t *offset) { return -ENODEV; } static ssize_t snd_disconnect_write(struct file *file, const char __user *buf, size_t count, loff_t *offset) { return -ENODEV; } static int snd_disconnect_release(struct inode *inode, struct file *file) { struct snd_monitor_file *df = NULL, *_df; spin_lock(&shutdown_lock); list_for_each_entry(_df, &shutdown_files, shutdown_list) { if (_df->file == file) { df = _df; list_del_init(&df->shutdown_list); break; } } spin_unlock(&shutdown_lock); if (likely(df)) { if ((file->f_flags & FASYNC) && df->disconnected_f_op->fasync) df->disconnected_f_op->fasync(-1, file, 0); return df->disconnected_f_op->release(inode, file); } panic("%s(%p, %p) failed!", __func__, inode, file); } static unsigned int snd_disconnect_poll(struct file * file, poll_table * wait) { return POLLERR | POLLNVAL; } static long snd_disconnect_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { return -ENODEV; } static int snd_disconnect_mmap(struct file *file, struct vm_area_struct *vma) { return -ENODEV; } static int snd_disconnect_fasync(int fd, struct file *file, int on) { return -ENODEV; } static const struct file_operations snd_shutdown_f_ops = { .owner = THIS_MODULE, .llseek = snd_disconnect_llseek, .read = snd_disconnect_read, .write = snd_disconnect_write, .release = snd_disconnect_release, .poll = snd_disconnect_poll, .unlocked_ioctl = snd_disconnect_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = snd_disconnect_ioctl, #endif .mmap = snd_disconnect_mmap, .fasync = snd_disconnect_fasync }; /** * snd_card_disconnect - disconnect all APIs from the file-operations (user space) * @card: soundcard structure * * Disconnects all APIs from the file-operations (user space). * * Return: Zero, otherwise a negative error code. * * Note: The current implementation replaces all active file->f_op with special * dummy file operations (they do nothing except release). 
*/ int snd_card_disconnect(struct snd_card *card) { struct snd_monitor_file *mfile; int err; if (!card) return -EINVAL; spin_lock(&card->files_lock); if (card->shutdown) { spin_unlock(&card->files_lock); return 0; } card->shutdown = 1; spin_unlock(&card->files_lock); /* phase 1: disable fops (user space) operations for ALSA API */ mutex_lock(&snd_card_mutex); snd_cards[card->number] = NULL; clear_bit(card->number, snd_cards_lock); mutex_unlock(&snd_card_mutex); /* phase 2: replace file->f_op with special dummy operations */ spin_lock(&card->files_lock); list_for_each_entry(mfile, &card->files_list, list) { /* it's critical part, use endless loop */ /* we have no room to fail */ mfile->disconnected_f_op = mfile->file->f_op; spin_lock(&shutdown_lock); list_add(&mfile->shutdown_list, &shutdown_files); spin_unlock(&shutdown_lock); mfile->file->f_op = &snd_shutdown_f_ops; fops_get(mfile->file->f_op); } spin_unlock(&card->files_lock); /* phase 3: notify all connected devices about disconnection */ /* at this point, they cannot respond to any calls except release() */ #if IS_ENABLED(CONFIG_SND_MIXER_OSS) if (snd_mixer_oss_notify_callback) snd_mixer_oss_notify_callback(card, SND_MIXER_OSS_NOTIFY_DISCONNECT); #endif /* notify all devices that we are disconnected */ err = snd_device_disconnect_all(card); if (err < 0) dev_err(card->dev, "not all devices for card %i can be disconnected\n", card->number); snd_info_card_disconnect(card); if (card->registered) { device_del(&card->card_dev); card->registered = false; } #ifdef CONFIG_PM wake_up(&card->power_sleep); #endif return 0; } EXPORT_SYMBOL(snd_card_disconnect); /** * snd_card_free - frees given soundcard structure * @card: soundcard structure * * This function releases the soundcard structure and the all assigned * devices automatically. That is, you don't have to release the devices * by yourself. * * Return: Zero. Frees all associated devices and frees the control * interface associated to given soundcard. 
*/ static int snd_card_do_free(struct snd_card *card) { #if IS_ENABLED(CONFIG_SND_MIXER_OSS) if (snd_mixer_oss_notify_callback) snd_mixer_oss_notify_callback(card, SND_MIXER_OSS_NOTIFY_FREE); #endif snd_device_free_all(card); if (card->private_free) card->private_free(card); snd_info_free_entry(card->proc_id); if (snd_info_card_free(card) < 0) { dev_warn(card->dev, "unable to free card info\n"); /* Not fatal error */ } if (card->release_completion) complete(card->release_completion); kfree(card); return 0; } int snd_card_free_when_closed(struct snd_card *card) { int ret = snd_card_disconnect(card); if (ret) return ret; put_device(&card->card_dev); return 0; } EXPORT_SYMBOL(snd_card_free_when_closed); int snd_card_free(struct snd_card *card) { struct completion released; int ret; init_completion(&released); card->release_completion = &released; ret = snd_card_free_when_closed(card); if (ret) return ret; /* wait, until all devices are ready for the free operation */ wait_for_completion(&released); return 0; } EXPORT_SYMBOL(snd_card_free); /* retrieve the last word of shortname or longname */ static const char *retrieve_id_from_card_name(const char *name) { const char *spos = name; while (*name) { if (isspace(*name) && isalnum(name[1])) spos = name + 1; name++; } return spos; } /* return true if the given id string doesn't conflict any other card ids */ static bool card_id_ok(struct snd_card *card, const char *id) { int i; if (!snd_info_check_reserved_words(id)) return false; for (i = 0; i < snd_ecards_limit; i++) { if (snd_cards[i] && snd_cards[i] != card && !strcmp(snd_cards[i]->id, id)) return false; } return true; } /* copy to card->id only with valid letters from nid */ static void copy_valid_id_string(struct snd_card *card, const char *src, const char *nid) { char *id = card->id; while (*nid && !isalnum(*nid)) nid++; if (isdigit(*nid)) *id++ = isalpha(*src) ? *src : 'D'; while (*nid && (size_t)(id - card->id) < sizeof(card->id) - 1) { if (isalnum(*nid)) *id++ = *nid; nid++; } *id = 0; } /* Set card->id from the given string * If the string conflicts with other ids, add a suffix to make it unique. */ static void snd_card_set_id_no_lock(struct snd_card *card, const char *src, const char *nid) { int len, loops; bool is_default = false; char *id; copy_valid_id_string(card, src, nid); id = card->id; again: /* use "Default" for obviously invalid strings * ("card" conflicts with proc directories) */ if (!*id || !strncmp(id, "card", 4)) { strcpy(id, "Default"); is_default = true; } len = strlen(id); for (loops = 0; loops < SNDRV_CARDS; loops++) { char *spos; char sfxstr[5]; /* "_012" */ int sfxlen; if (card_id_ok(card, id)) return; /* OK */ /* Add _XYZ suffix */ sprintf(sfxstr, "_%X", loops + 1); sfxlen = strlen(sfxstr); if (len + sfxlen >= sizeof(card->id)) spos = id + sizeof(card->id) - sfxlen - 1; else spos = id + len; strcpy(spos, sfxstr); } /* fallback to the default id */ if (!is_default) { *id = 0; goto again; } /* last resort... */ dev_err(card->dev, "unable to set card id (%s)\n", id); if (card->proc_root->name) strlcpy(card->id, card->proc_root->name, sizeof(card->id)); } /** * snd_card_set_id - set card identification name * @card: soundcard structure * @nid: new identification string * * This function sets the card identification and checks for name * collisions. 
*/ void snd_card_set_id(struct snd_card *card, const char *nid) { /* check if user specified own card->id */ if (card->id[0] != '\0') return; mutex_lock(&snd_card_mutex); snd_card_set_id_no_lock(card, nid, nid); mutex_unlock(&snd_card_mutex); } EXPORT_SYMBOL(snd_card_set_id); static ssize_t card_id_show_attr(struct device *dev, struct device_attribute *attr, char *buf) { struct snd_card *card = container_of(dev, struct snd_card, card_dev); return snprintf(buf, PAGE_SIZE, "%s\n", card->id); } static ssize_t card_id_store_attr(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct snd_card *card = container_of(dev, struct snd_card, card_dev); char buf1[sizeof(card->id)]; size_t copy = count > sizeof(card->id) - 1 ? sizeof(card->id) - 1 : count; size_t idx; int c; for (idx = 0; idx < copy; idx++) { c = buf[idx]; if (!isalnum(c) && c != '_' && c != '-') return -EINVAL; } memcpy(buf1, buf, copy); buf1[copy] = '\0'; mutex_lock(&snd_card_mutex); if (!card_id_ok(NULL, buf1)) { mutex_unlock(&snd_card_mutex); return -EEXIST; } strcpy(card->id, buf1); snd_info_card_id_change(card); mutex_unlock(&snd_card_mutex); return count; } static DEVICE_ATTR(id, S_IRUGO | S_IWUSR, card_id_show_attr, card_id_store_attr); static ssize_t card_number_show_attr(struct device *dev, struct device_attribute *attr, char *buf) { struct snd_card *card = container_of(dev, struct snd_card, card_dev); return snprintf(buf, PAGE_SIZE, "%i\n", card->number); } static DEVICE_ATTR(number, S_IRUGO, card_number_show_attr, NULL); static struct attribute *card_dev_attrs[] = { &dev_attr_id.attr, &dev_attr_number.attr, NULL }; static struct attribute_group card_dev_attr_group = { .attrs = card_dev_attrs, }; static const struct attribute_group *card_dev_attr_groups[] = { &card_dev_attr_group, NULL }; /** * snd_card_register - register the soundcard * @card: soundcard structure * * This function registers all the devices assigned to the soundcard. * Until calling this, the ALSA control interface is blocked from the * external accesses. Thus, you should call this function at the end * of the initialization of the card. * * Return: Zero otherwise a negative error code if the registration failed. */ int snd_card_register(struct snd_card *card) { int err; if (snd_BUG_ON(!card)) return -EINVAL; if (!card->registered) { err = device_add(&card->card_dev); if (err < 0) return err; card->registered = true; } if ((err = snd_device_register_all(card)) < 0) return err; mutex_lock(&snd_card_mutex); if (snd_cards[card->number]) { /* already registered */ mutex_unlock(&snd_card_mutex); return 0; } if (*card->id) { /* make a unique id name from the given string */ char tmpid[sizeof(card->id)]; memcpy(tmpid, card->id, sizeof(card->id)); snd_card_set_id_no_lock(card, tmpid, tmpid); } else { /* create an id from either shortname or longname */ const char *src; src = *card->shortname ? 
card->shortname : card->longname; snd_card_set_id_no_lock(card, src, retrieve_id_from_card_name(src)); } snd_cards[card->number] = card; mutex_unlock(&snd_card_mutex); init_info_for_card(card); #if IS_ENABLED(CONFIG_SND_MIXER_OSS) if (snd_mixer_oss_notify_callback) snd_mixer_oss_notify_callback(card, SND_MIXER_OSS_NOTIFY_REGISTER); #endif return 0; } EXPORT_SYMBOL(snd_card_register); #ifdef CONFIG_PROC_FS static struct snd_info_entry *snd_card_info_entry; static void snd_card_info_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { int idx, count; struct snd_card *card; for (idx = count = 0; idx < SNDRV_CARDS; idx++) { mutex_lock(&snd_card_mutex); if ((card = snd_cards[idx]) != NULL) { count++; snd_iprintf(buffer, "%2i [%-15s]: %s - %s\n", idx, card->id, card->driver, card->shortname); snd_iprintf(buffer, " %s\n", card->longname); } mutex_unlock(&snd_card_mutex); } if (!count) snd_iprintf(buffer, "--- no soundcards ---\n"); } #ifdef CONFIG_SND_OSSEMUL void snd_card_info_read_oss(struct snd_info_buffer *buffer) { int idx, count; struct snd_card *card; for (idx = count = 0; idx < SNDRV_CARDS; idx++) { mutex_lock(&snd_card_mutex); if ((card = snd_cards[idx]) != NULL) { count++; snd_iprintf(buffer, "%s\n", card->longname); } mutex_unlock(&snd_card_mutex); } if (!count) { snd_iprintf(buffer, "--- no soundcards ---\n"); } } #endif #ifdef MODULE static struct snd_info_entry *snd_card_module_info_entry; static void snd_card_module_info_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { int idx; struct snd_card *card; for (idx = 0; idx < SNDRV_CARDS; idx++) { mutex_lock(&snd_card_mutex); if ((card = snd_cards[idx]) != NULL) snd_iprintf(buffer, "%2i %s\n", idx, card->module->name); mutex_unlock(&snd_card_mutex); } } #endif int __init snd_card_info_init(void) { struct snd_info_entry *entry; entry = snd_info_create_module_entry(THIS_MODULE, "cards", NULL); if (! entry) return -ENOMEM; entry->c.text.read = snd_card_info_read; if (snd_info_register(entry) < 0) { snd_info_free_entry(entry); return -ENOMEM; } snd_card_info_entry = entry; #ifdef MODULE entry = snd_info_create_module_entry(THIS_MODULE, "modules", NULL); if (entry) { entry->c.text.read = snd_card_module_info_read; if (snd_info_register(entry) < 0) snd_info_free_entry(entry); else snd_card_module_info_entry = entry; } #endif return 0; } int __exit snd_card_info_done(void) { snd_info_free_entry(snd_card_info_entry); #ifdef MODULE snd_info_free_entry(snd_card_module_info_entry); #endif return 0; } #endif /* CONFIG_PROC_FS */ /** * snd_component_add - add a component string * @card: soundcard structure * @component: the component id string * * This function adds the component id string to the supported list. * The component can be referred from the alsa-lib. * * Return: Zero otherwise a negative error code. */ int snd_component_add(struct snd_card *card, const char *component) { char *ptr; int len = strlen(component); ptr = strstr(card->components, component); if (ptr != NULL) { if (ptr[len] == '\0' || ptr[len] == ' ') /* already there */ return 1; } if (strlen(card->components) + 1 + len + 1 > sizeof(card->components)) { snd_BUG(); return -ENOMEM; } if (card->components[0] != '\0') strcat(card->components, " "); strcat(card->components, component); return 0; } EXPORT_SYMBOL(snd_component_add); /** * snd_card_file_add - add the file to the file list of the card * @card: soundcard structure * @file: file pointer * * This function adds the file to the file linked-list of the card. 
* This linked-list is used to keep tracking the connection state, * and to avoid the release of busy resources by hotplug. * * Return: zero or a negative error code. */ int snd_card_file_add(struct snd_card *card, struct file *file) { struct snd_monitor_file *mfile; mfile = kmalloc(sizeof(*mfile), GFP_KERNEL); if (mfile == NULL) return -ENOMEM; mfile->file = file; mfile->disconnected_f_op = NULL; INIT_LIST_HEAD(&mfile->shutdown_list); spin_lock(&card->files_lock); if (card->shutdown) { spin_unlock(&card->files_lock); kfree(mfile); return -ENODEV; } list_add(&mfile->list, &card->files_list); get_device(&card->card_dev); spin_unlock(&card->files_lock); return 0; } EXPORT_SYMBOL(snd_card_file_add); /** * snd_card_file_remove - remove the file from the file list * @card: soundcard structure * @file: file pointer * * This function removes the file formerly added to the card via * snd_card_file_add() function. * If all files are removed and snd_card_free_when_closed() was * called beforehand, it processes the pending release of * resources. * * Return: Zero or a negative error code. */ int snd_card_file_remove(struct snd_card *card, struct file *file) { struct snd_monitor_file *mfile, *found = NULL; spin_lock(&card->files_lock); list_for_each_entry(mfile, &card->files_list, list) { if (mfile->file == file) { list_del(&mfile->list); spin_lock(&shutdown_lock); list_del(&mfile->shutdown_list); spin_unlock(&shutdown_lock); if (mfile->disconnected_f_op) fops_put(mfile->disconnected_f_op); found = mfile; break; } } spin_unlock(&card->files_lock); if (!found) { dev_err(card->dev, "card file remove problem (%p)\n", file); return -ENOENT; } kfree(found); put_device(&card->card_dev); return 0; } EXPORT_SYMBOL(snd_card_file_remove); #ifdef CONFIG_PM /** * snd_power_wait - wait until the power-state is changed. * @card: soundcard structure * @power_state: expected power state * * Waits until the power-state is changed. * * Return: Zero if successful, or a negative error code. * * Note: the power lock must be active before call. */ int snd_power_wait(struct snd_card *card, unsigned int power_state) { wait_queue_t wait; int result = 0; /* fastpath */ if (snd_power_get_state(card) == power_state) return 0; init_waitqueue_entry(&wait, current); add_wait_queue(&card->power_sleep, &wait); while (1) { if (card->shutdown) { result = -ENODEV; break; } if (snd_power_get_state(card) == power_state) break; set_current_state(TASK_UNINTERRUPTIBLE); snd_power_unlock(card); schedule_timeout(30 * HZ); snd_power_lock(card); } remove_wait_queue(&card->power_sleep, &wait); return result; } EXPORT_SYMBOL(snd_power_wait); #endif /* CONFIG_PM */
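/*
 * Illustrative sketch (not part of the original file): the card life
 * cycle a driver probe routine typically follows with the API above,
 * i.e. snd_card_new() -> device creation -> snd_card_register(), with
 * snd_card_free() on failure.  The my_chip structure and the probe
 * function name are hypothetical; error handling is minimal.
 */
struct my_chip {
	struct snd_card *card;
};

static int my_driver_probe(struct device *dev)
{
	struct snd_card *card;
	struct my_chip *chip;
	int err;

	/* idx = -1: take the first free slot; xid = NULL: let
	 * snd_card_register() derive the id from shortname/longname */
	err = snd_card_new(dev, -1, NULL, THIS_MODULE,
			   sizeof(struct my_chip), &card);
	if (err < 0)
		return err;
	chip = card->private_data;	/* the extra_size area allocated above */
	chip->card = card;

	strcpy(card->driver, "MyDriver");
	strcpy(card->shortname, "My Sound Device");
	snprintf(card->longname, sizeof(card->longname), "%s at %s",
		 card->shortname, dev_name(dev));

	/* ... create the card's devices (PCM, controls, ...) here ... */

	err = snd_card_register(card);	/* unblocks user-space access */
	if (err < 0) {
		snd_card_free(card);	/* tears down all attached devices */
		return err;
	}
	dev_set_drvdata(dev, card);
	return 0;
}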
./CrossVul/dataset_final_sorted/CWE-362/c/bad_2215_2
crossvul-cpp_data_bad_829_4
/* * mm/mmap.c * * Written by obz. * * Address space accounting code <alan@lxorguk.ukuu.org.uk> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/slab.h> #include <linux/backing-dev.h> #include <linux/mm.h> #include <linux/vmacache.h> #include <linux/shm.h> #include <linux/mman.h> #include <linux/pagemap.h> #include <linux/swap.h> #include <linux/syscalls.h> #include <linux/capability.h> #include <linux/init.h> #include <linux/file.h> #include <linux/fs.h> #include <linux/personality.h> #include <linux/security.h> #include <linux/hugetlb.h> #include <linux/shmem_fs.h> #include <linux/profile.h> #include <linux/export.h> #include <linux/mount.h> #include <linux/mempolicy.h> #include <linux/rmap.h> #include <linux/mmu_notifier.h> #include <linux/mmdebug.h> #include <linux/perf_event.h> #include <linux/audit.h> #include <linux/khugepaged.h> #include <linux/uprobes.h> #include <linux/rbtree_augmented.h> #include <linux/notifier.h> #include <linux/memory.h> #include <linux/printk.h> #include <linux/userfaultfd_k.h> #include <linux/moduleparam.h> #include <linux/pkeys.h> #include <linux/oom.h> #include <linux/uaccess.h> #include <asm/cacheflush.h> #include <asm/tlb.h> #include <asm/mmu_context.h> #include "internal.h" #ifndef arch_mmap_check #define arch_mmap_check(addr, len, flags) (0) #endif #ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS const int mmap_rnd_bits_min = CONFIG_ARCH_MMAP_RND_BITS_MIN; const int mmap_rnd_bits_max = CONFIG_ARCH_MMAP_RND_BITS_MAX; int mmap_rnd_bits __read_mostly = CONFIG_ARCH_MMAP_RND_BITS; #endif #ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS const int mmap_rnd_compat_bits_min = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN; const int mmap_rnd_compat_bits_max = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX; int mmap_rnd_compat_bits __read_mostly = CONFIG_ARCH_MMAP_RND_COMPAT_BITS; #endif static bool ignore_rlimit_data; core_param(ignore_rlimit_data, ignore_rlimit_data, bool, 0644); static void unmap_region(struct mm_struct *mm, struct vm_area_struct *vma, struct vm_area_struct *prev, unsigned long start, unsigned long end); /* description of effects of mapping type and prot in current implementation. * this is due to the limited x86 page protection hardware. The expected * behavior is in parens: * * map_type prot * PROT_NONE PROT_READ PROT_WRITE PROT_EXEC * MAP_SHARED r: (no) no r: (yes) yes r: (no) yes r: (no) yes * w: (no) no w: (no) no w: (yes) yes w: (no) no * x: (no) no x: (no) yes x: (no) yes x: (yes) yes * * MAP_PRIVATE r: (no) no r: (yes) yes r: (no) yes r: (no) yes * w: (no) no w: (no) no w: (copy) copy w: (no) no * x: (no) no x: (no) yes x: (no) yes x: (yes) yes * * On arm64, PROT_EXEC has the following behaviour for both MAP_SHARED and * MAP_PRIVATE: * r: (no) no * w: (no) no * x: (yes) yes */ pgprot_t protection_map[16] __ro_after_init = { __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111, __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111 }; #ifndef CONFIG_ARCH_HAS_FILTER_PGPROT static inline pgprot_t arch_filter_pgprot(pgprot_t prot) { return prot; } #endif pgprot_t vm_get_page_prot(unsigned long vm_flags) { pgprot_t ret = __pgprot(pgprot_val(protection_map[vm_flags & (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) | pgprot_val(arch_vm_get_page_prot(vm_flags))); return arch_filter_pgprot(ret); } EXPORT_SYMBOL(vm_get_page_prot); static pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags) { return pgprot_modify(oldprot, vm_get_page_prot(vm_flags)); } /* Update vma->vm_page_prot to reflect vma->vm_flags. 
*/ void vma_set_page_prot(struct vm_area_struct *vma) { unsigned long vm_flags = vma->vm_flags; pgprot_t vm_page_prot; vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags); if (vma_wants_writenotify(vma, vm_page_prot)) { vm_flags &= ~VM_SHARED; vm_page_prot = vm_pgprot_modify(vm_page_prot, vm_flags); } /* remove_protection_ptes reads vma->vm_page_prot without mmap_sem */ WRITE_ONCE(vma->vm_page_prot, vm_page_prot); } /* * Requires inode->i_mapping->i_mmap_rwsem */ static void __remove_shared_vm_struct(struct vm_area_struct *vma, struct file *file, struct address_space *mapping) { if (vma->vm_flags & VM_DENYWRITE) atomic_inc(&file_inode(file)->i_writecount); if (vma->vm_flags & VM_SHARED) mapping_unmap_writable(mapping); flush_dcache_mmap_lock(mapping); vma_interval_tree_remove(vma, &mapping->i_mmap); flush_dcache_mmap_unlock(mapping); } /* * Unlink a file-based vm structure from its interval tree, to hide * vma from rmap and vmtruncate before freeing its page tables. */ void unlink_file_vma(struct vm_area_struct *vma) { struct file *file = vma->vm_file; if (file) { struct address_space *mapping = file->f_mapping; i_mmap_lock_write(mapping); __remove_shared_vm_struct(vma, file, mapping); i_mmap_unlock_write(mapping); } } /* * Close a vm structure and free it, returning the next. */ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma) { struct vm_area_struct *next = vma->vm_next; might_sleep(); if (vma->vm_ops && vma->vm_ops->close) vma->vm_ops->close(vma); if (vma->vm_file) fput(vma->vm_file); mpol_put(vma_policy(vma)); vm_area_free(vma); return next; } static int do_brk_flags(unsigned long addr, unsigned long request, unsigned long flags, struct list_head *uf); SYSCALL_DEFINE1(brk, unsigned long, brk) { unsigned long retval; unsigned long newbrk, oldbrk, origbrk; struct mm_struct *mm = current->mm; struct vm_area_struct *next; unsigned long min_brk; bool populate; bool downgraded = false; LIST_HEAD(uf); if (down_write_killable(&mm->mmap_sem)) return -EINTR; origbrk = mm->brk; #ifdef CONFIG_COMPAT_BRK /* * CONFIG_COMPAT_BRK can still be overridden by setting * randomize_va_space to 2, which will still cause mm->start_brk * to be arbitrarily shifted */ if (current->brk_randomized) min_brk = mm->start_brk; else min_brk = mm->end_data; #else min_brk = mm->start_brk; #endif if (brk < min_brk) goto out; /* * Check against rlimit here. If this check is done later after the test * of oldbrk with newbrk then it can escape the test and let the data * segment grow beyond its set limit the in case where the limit is * not page aligned -Ram Gupta */ if (check_data_rlimit(rlimit(RLIMIT_DATA), brk, mm->start_brk, mm->end_data, mm->start_data)) goto out; newbrk = PAGE_ALIGN(brk); oldbrk = PAGE_ALIGN(mm->brk); if (oldbrk == newbrk) { mm->brk = brk; goto success; } /* * Always allow shrinking brk. * __do_munmap() may downgrade mmap_sem to read. */ if (brk <= mm->brk) { int ret; /* * mm->brk must to be protected by write mmap_sem so update it * before downgrading mmap_sem. When __do_munmap() fails, * mm->brk will be restored from origbrk. */ mm->brk = brk; ret = __do_munmap(mm, newbrk, oldbrk-newbrk, &uf, true); if (ret < 0) { mm->brk = origbrk; goto out; } else if (ret == 1) { downgraded = true; } goto success; } /* Check against existing mmap mappings. */ next = find_vma(mm, oldbrk); if (next && newbrk + PAGE_SIZE > vm_start_gap(next)) goto out; /* Ok, looks good - let it rip. 
*/ if (do_brk_flags(oldbrk, newbrk-oldbrk, 0, &uf) < 0) goto out; mm->brk = brk; success: populate = newbrk > oldbrk && (mm->def_flags & VM_LOCKED) != 0; if (downgraded) up_read(&mm->mmap_sem); else up_write(&mm->mmap_sem); userfaultfd_unmap_complete(mm, &uf); if (populate) mm_populate(oldbrk, newbrk - oldbrk); return brk; out: retval = origbrk; up_write(&mm->mmap_sem); return retval; } static long vma_compute_subtree_gap(struct vm_area_struct *vma) { unsigned long max, prev_end, subtree_gap; /* * Note: in the rare case of a VM_GROWSDOWN above a VM_GROWSUP, we * allow two stack_guard_gaps between them here, and when choosing * an unmapped area; whereas when expanding we only require one. * That's a little inconsistent, but keeps the code here simpler. */ max = vm_start_gap(vma); if (vma->vm_prev) { prev_end = vm_end_gap(vma->vm_prev); if (max > prev_end) max -= prev_end; else max = 0; } if (vma->vm_rb.rb_left) { subtree_gap = rb_entry(vma->vm_rb.rb_left, struct vm_area_struct, vm_rb)->rb_subtree_gap; if (subtree_gap > max) max = subtree_gap; } if (vma->vm_rb.rb_right) { subtree_gap = rb_entry(vma->vm_rb.rb_right, struct vm_area_struct, vm_rb)->rb_subtree_gap; if (subtree_gap > max) max = subtree_gap; } return max; } #ifdef CONFIG_DEBUG_VM_RB static int browse_rb(struct mm_struct *mm) { struct rb_root *root = &mm->mm_rb; int i = 0, j, bug = 0; struct rb_node *nd, *pn = NULL; unsigned long prev = 0, pend = 0; for (nd = rb_first(root); nd; nd = rb_next(nd)) { struct vm_area_struct *vma; vma = rb_entry(nd, struct vm_area_struct, vm_rb); if (vma->vm_start < prev) { pr_emerg("vm_start %lx < prev %lx\n", vma->vm_start, prev); bug = 1; } if (vma->vm_start < pend) { pr_emerg("vm_start %lx < pend %lx\n", vma->vm_start, pend); bug = 1; } if (vma->vm_start > vma->vm_end) { pr_emerg("vm_start %lx > vm_end %lx\n", vma->vm_start, vma->vm_end); bug = 1; } spin_lock(&mm->page_table_lock); if (vma->rb_subtree_gap != vma_compute_subtree_gap(vma)) { pr_emerg("free gap %lx, correct %lx\n", vma->rb_subtree_gap, vma_compute_subtree_gap(vma)); bug = 1; } spin_unlock(&mm->page_table_lock); i++; pn = nd; prev = vma->vm_start; pend = vma->vm_end; } j = 0; for (nd = pn; nd; nd = rb_prev(nd)) j++; if (i != j) { pr_emerg("backwards %d, forwards %d\n", j, i); bug = 1; } return bug ? 
-1 : i; } static void validate_mm_rb(struct rb_root *root, struct vm_area_struct *ignore) { struct rb_node *nd; for (nd = rb_first(root); nd; nd = rb_next(nd)) { struct vm_area_struct *vma; vma = rb_entry(nd, struct vm_area_struct, vm_rb); VM_BUG_ON_VMA(vma != ignore && vma->rb_subtree_gap != vma_compute_subtree_gap(vma), vma); } } static void validate_mm(struct mm_struct *mm) { int bug = 0; int i = 0; unsigned long highest_address = 0; struct vm_area_struct *vma = mm->mmap; while (vma) { struct anon_vma *anon_vma = vma->anon_vma; struct anon_vma_chain *avc; if (anon_vma) { anon_vma_lock_read(anon_vma); list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) anon_vma_interval_tree_verify(avc); anon_vma_unlock_read(anon_vma); } highest_address = vm_end_gap(vma); vma = vma->vm_next; i++; } if (i != mm->map_count) { pr_emerg("map_count %d vm_next %d\n", mm->map_count, i); bug = 1; } if (highest_address != mm->highest_vm_end) { pr_emerg("mm->highest_vm_end %lx, found %lx\n", mm->highest_vm_end, highest_address); bug = 1; } i = browse_rb(mm); if (i != mm->map_count) { if (i != -1) pr_emerg("map_count %d rb %d\n", mm->map_count, i); bug = 1; } VM_BUG_ON_MM(bug, mm); } #else #define validate_mm_rb(root, ignore) do { } while (0) #define validate_mm(mm) do { } while (0) #endif RB_DECLARE_CALLBACKS(static, vma_gap_callbacks, struct vm_area_struct, vm_rb, unsigned long, rb_subtree_gap, vma_compute_subtree_gap) /* * Update augmented rbtree rb_subtree_gap values after vma->vm_start or * vma->vm_prev->vm_end values changed, without modifying the vma's position * in the rbtree. */ static void vma_gap_update(struct vm_area_struct *vma) { /* * As it turns out, RB_DECLARE_CALLBACKS() already created a callback * function that does exactly what we want. */ vma_gap_callbacks_propagate(&vma->vm_rb, NULL); } static inline void vma_rb_insert(struct vm_area_struct *vma, struct rb_root *root) { /* All rb_subtree_gap values must be consistent prior to insertion */ validate_mm_rb(root, NULL); rb_insert_augmented(&vma->vm_rb, root, &vma_gap_callbacks); } static void __vma_rb_erase(struct vm_area_struct *vma, struct rb_root *root) { /* * Note rb_erase_augmented is a fairly large inline function, * so make sure we instantiate it only once with our desired * augmented rbtree callbacks. */ rb_erase_augmented(&vma->vm_rb, root, &vma_gap_callbacks); } static __always_inline void vma_rb_erase_ignore(struct vm_area_struct *vma, struct rb_root *root, struct vm_area_struct *ignore) { /* * All rb_subtree_gap values must be consistent prior to erase, * with the possible exception of the "next" vma being erased if * next->vm_start was reduced. */ validate_mm_rb(root, ignore); __vma_rb_erase(vma, root); } static __always_inline void vma_rb_erase(struct vm_area_struct *vma, struct rb_root *root) { /* * All rb_subtree_gap values must be consistent prior to erase, * with the possible exception of the vma being erased. */ validate_mm_rb(root, vma); __vma_rb_erase(vma, root); } /* * vma has some anon_vma assigned, and is already inserted on that * anon_vma's interval trees. * * Before updating the vma's vm_start / vm_end / vm_pgoff fields, the * vma must be removed from the anon_vma's interval trees using * anon_vma_interval_tree_pre_update_vma(). * * After the update, the vma will be reinserted using * anon_vma_interval_tree_post_update_vma(). * * The entire update must be protected by exclusive mmap_sem and by * the root anon_vma's mutex. 
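 *
 * The expected calling pattern (a simplified sketch of how __vma_adjust()
 * and the stack expansion code below use these helpers, not additional API):
 *
 *	anon_vma_lock_write(vma->anon_vma);
 *	anon_vma_interval_tree_pre_update_vma(vma);
 *	vma->vm_start = new_start;	// and/or vm_end, vm_pgoff
 *	anon_vma_interval_tree_post_update_vma(vma);
 *	anon_vma_unlock_write(vma->anon_vma);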
*/ static inline void anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma) { struct anon_vma_chain *avc; list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) anon_vma_interval_tree_remove(avc, &avc->anon_vma->rb_root); } static inline void anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma) { struct anon_vma_chain *avc; list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) anon_vma_interval_tree_insert(avc, &avc->anon_vma->rb_root); } static int find_vma_links(struct mm_struct *mm, unsigned long addr, unsigned long end, struct vm_area_struct **pprev, struct rb_node ***rb_link, struct rb_node **rb_parent) { struct rb_node **__rb_link, *__rb_parent, *rb_prev; __rb_link = &mm->mm_rb.rb_node; rb_prev = __rb_parent = NULL; while (*__rb_link) { struct vm_area_struct *vma_tmp; __rb_parent = *__rb_link; vma_tmp = rb_entry(__rb_parent, struct vm_area_struct, vm_rb); if (vma_tmp->vm_end > addr) { /* Fail if an existing vma overlaps the area */ if (vma_tmp->vm_start < end) return -ENOMEM; __rb_link = &__rb_parent->rb_left; } else { rb_prev = __rb_parent; __rb_link = &__rb_parent->rb_right; } } *pprev = NULL; if (rb_prev) *pprev = rb_entry(rb_prev, struct vm_area_struct, vm_rb); *rb_link = __rb_link; *rb_parent = __rb_parent; return 0; } static unsigned long count_vma_pages_range(struct mm_struct *mm, unsigned long addr, unsigned long end) { unsigned long nr_pages = 0; struct vm_area_struct *vma; /* Find first overlaping mapping */ vma = find_vma_intersection(mm, addr, end); if (!vma) return 0; nr_pages = (min(end, vma->vm_end) - max(addr, vma->vm_start)) >> PAGE_SHIFT; /* Iterate over the rest of the overlaps */ for (vma = vma->vm_next; vma; vma = vma->vm_next) { unsigned long overlap_len; if (vma->vm_start > end) break; overlap_len = min(end, vma->vm_end) - vma->vm_start; nr_pages += overlap_len >> PAGE_SHIFT; } return nr_pages; } void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma, struct rb_node **rb_link, struct rb_node *rb_parent) { /* Update tracking information for the gap following the new vma. */ if (vma->vm_next) vma_gap_update(vma->vm_next); else mm->highest_vm_end = vm_end_gap(vma); /* * vma->vm_prev wasn't known when we followed the rbtree to find the * correct insertion point for that vma. As a result, we could not * update the vma vm_rb parents rb_subtree_gap values on the way down. * So, we first insert the vma with a zero rb_subtree_gap value * (to be consistent with what we did on the way down), and then * immediately update the gap to the correct value. Finally we * rebalance the rbtree after all augmented values have been set. 
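 *
 * For a concrete feel of the augmented value (illustrative addresses only):
 * if vma->vm_prev ends, guard gap included, at 0x100000 and this vma starts,
 * guard gap included, at 0x300000, the gap recorded for this node is
 * 0x200000, and rb_subtree_gap is the maximum of that and the values held
 * in its two subtrees (see vma_compute_subtree_gap() above).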
*/ rb_link_node(&vma->vm_rb, rb_parent, rb_link); vma->rb_subtree_gap = 0; vma_gap_update(vma); vma_rb_insert(vma, &mm->mm_rb); } static void __vma_link_file(struct vm_area_struct *vma) { struct file *file; file = vma->vm_file; if (file) { struct address_space *mapping = file->f_mapping; if (vma->vm_flags & VM_DENYWRITE) atomic_dec(&file_inode(file)->i_writecount); if (vma->vm_flags & VM_SHARED) atomic_inc(&mapping->i_mmap_writable); flush_dcache_mmap_lock(mapping); vma_interval_tree_insert(vma, &mapping->i_mmap); flush_dcache_mmap_unlock(mapping); } } static void __vma_link(struct mm_struct *mm, struct vm_area_struct *vma, struct vm_area_struct *prev, struct rb_node **rb_link, struct rb_node *rb_parent) { __vma_link_list(mm, vma, prev, rb_parent); __vma_link_rb(mm, vma, rb_link, rb_parent); } static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma, struct vm_area_struct *prev, struct rb_node **rb_link, struct rb_node *rb_parent) { struct address_space *mapping = NULL; if (vma->vm_file) { mapping = vma->vm_file->f_mapping; i_mmap_lock_write(mapping); } __vma_link(mm, vma, prev, rb_link, rb_parent); __vma_link_file(vma); if (mapping) i_mmap_unlock_write(mapping); mm->map_count++; validate_mm(mm); } /* * Helper for vma_adjust() in the split_vma insert case: insert a vma into the * mm's list and rbtree. It has already been inserted into the interval tree. */ static void __insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma) { struct vm_area_struct *prev; struct rb_node **rb_link, *rb_parent; if (find_vma_links(mm, vma->vm_start, vma->vm_end, &prev, &rb_link, &rb_parent)) BUG(); __vma_link(mm, vma, prev, rb_link, rb_parent); mm->map_count++; } static __always_inline void __vma_unlink_common(struct mm_struct *mm, struct vm_area_struct *vma, struct vm_area_struct *prev, bool has_prev, struct vm_area_struct *ignore) { struct vm_area_struct *next; vma_rb_erase_ignore(vma, &mm->mm_rb, ignore); next = vma->vm_next; if (has_prev) prev->vm_next = next; else { prev = vma->vm_prev; if (prev) prev->vm_next = next; else mm->mmap = next; } if (next) next->vm_prev = prev; /* Kill the cache */ vmacache_invalidate(mm); } static inline void __vma_unlink_prev(struct mm_struct *mm, struct vm_area_struct *vma, struct vm_area_struct *prev) { __vma_unlink_common(mm, vma, prev, true, vma); } /* * We cannot adjust vm_start, vm_end, vm_pgoff fields of a vma that * is already present in an i_mmap tree without adjusting the tree. * The following helper function should be used when such adjustments * are necessary. The "insert" vma (if any) is to be inserted * before we drop the necessary locks. */ int __vma_adjust(struct vm_area_struct *vma, unsigned long start, unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert, struct vm_area_struct *expand) { struct mm_struct *mm = vma->vm_mm; struct vm_area_struct *next = vma->vm_next, *orig_vma = vma; struct address_space *mapping = NULL; struct rb_root_cached *root = NULL; struct anon_vma *anon_vma = NULL; struct file *file = vma->vm_file; bool start_changed = false, end_changed = false; long adjust_next = 0; int remove_next = 0; if (next && !insert) { struct vm_area_struct *exporter = NULL, *importer = NULL; if (end >= next->vm_end) { /* * vma expands, overlapping all the next, and * perhaps the one after too (mprotect case 6). * The only other cases that gets here are * case 1, case 7 and case 8. */ if (next == expand) { /* * The only case where we don't expand "vma" * and we expand "next" instead is case 8. 
*/ VM_WARN_ON(end != next->vm_end); /* * remove_next == 3 means we're * removing "vma" and that to do so we * swapped "vma" and "next". */ remove_next = 3; VM_WARN_ON(file != next->vm_file); swap(vma, next); } else { VM_WARN_ON(expand != vma); /* * case 1, 6, 7, remove_next == 2 is case 6, * remove_next == 1 is case 1 or 7. */ remove_next = 1 + (end > next->vm_end); VM_WARN_ON(remove_next == 2 && end != next->vm_next->vm_end); VM_WARN_ON(remove_next == 1 && end != next->vm_end); /* trim end to next, for case 6 first pass */ end = next->vm_end; } exporter = next; importer = vma; /* * If next doesn't have anon_vma, import from vma after * next, if the vma overlaps with it. */ if (remove_next == 2 && !next->anon_vma) exporter = next->vm_next; } else if (end > next->vm_start) { /* * vma expands, overlapping part of the next: * mprotect case 5 shifting the boundary up. */ adjust_next = (end - next->vm_start) >> PAGE_SHIFT; exporter = next; importer = vma; VM_WARN_ON(expand != importer); } else if (end < vma->vm_end) { /* * vma shrinks, and !insert tells it's not * split_vma inserting another: so it must be * mprotect case 4 shifting the boundary down. */ adjust_next = -((vma->vm_end - end) >> PAGE_SHIFT); exporter = vma; importer = next; VM_WARN_ON(expand != importer); } /* * Easily overlooked: when mprotect shifts the boundary, * make sure the expanding vma has anon_vma set if the * shrinking vma had, to cover any anon pages imported. */ if (exporter && exporter->anon_vma && !importer->anon_vma) { int error; importer->anon_vma = exporter->anon_vma; error = anon_vma_clone(importer, exporter); if (error) return error; } } again: vma_adjust_trans_huge(orig_vma, start, end, adjust_next); if (file) { mapping = file->f_mapping; root = &mapping->i_mmap; uprobe_munmap(vma, vma->vm_start, vma->vm_end); if (adjust_next) uprobe_munmap(next, next->vm_start, next->vm_end); i_mmap_lock_write(mapping); if (insert) { /* * Put into interval tree now, so instantiated pages * are visible to arm/parisc __flush_dcache_page * throughout; but we cannot insert into address * space until vma start or end is updated. */ __vma_link_file(insert); } } anon_vma = vma->anon_vma; if (!anon_vma && adjust_next) anon_vma = next->anon_vma; if (anon_vma) { VM_WARN_ON(adjust_next && next->anon_vma && anon_vma != next->anon_vma); anon_vma_lock_write(anon_vma); anon_vma_interval_tree_pre_update_vma(vma); if (adjust_next) anon_vma_interval_tree_pre_update_vma(next); } if (root) { flush_dcache_mmap_lock(mapping); vma_interval_tree_remove(vma, root); if (adjust_next) vma_interval_tree_remove(next, root); } if (start != vma->vm_start) { vma->vm_start = start; start_changed = true; } if (end != vma->vm_end) { vma->vm_end = end; end_changed = true; } vma->vm_pgoff = pgoff; if (adjust_next) { next->vm_start += adjust_next << PAGE_SHIFT; next->vm_pgoff += adjust_next; } if (root) { if (adjust_next) vma_interval_tree_insert(next, root); vma_interval_tree_insert(vma, root); flush_dcache_mmap_unlock(mapping); } if (remove_next) { /* * vma_merge has merged next into vma, and needs * us to remove next before dropping the locks. */ if (remove_next != 3) __vma_unlink_prev(mm, next, vma); else /* * vma is not before next if they've been * swapped. * * pre-swap() next->vm_start was reduced so * tell validate_mm_rb to ignore pre-swap() * "next" (which is stored in post-swap() * "vma"). 
*/ __vma_unlink_common(mm, next, NULL, false, vma); if (file) __remove_shared_vm_struct(next, file, mapping); } else if (insert) { /* * split_vma has split insert from vma, and needs * us to insert it before dropping the locks * (it may either follow vma or precede it). */ __insert_vm_struct(mm, insert); } else { if (start_changed) vma_gap_update(vma); if (end_changed) { if (!next) mm->highest_vm_end = vm_end_gap(vma); else if (!adjust_next) vma_gap_update(next); } } if (anon_vma) { anon_vma_interval_tree_post_update_vma(vma); if (adjust_next) anon_vma_interval_tree_post_update_vma(next); anon_vma_unlock_write(anon_vma); } if (mapping) i_mmap_unlock_write(mapping); if (root) { uprobe_mmap(vma); if (adjust_next) uprobe_mmap(next); } if (remove_next) { if (file) { uprobe_munmap(next, next->vm_start, next->vm_end); fput(file); } if (next->anon_vma) anon_vma_merge(vma, next); mm->map_count--; mpol_put(vma_policy(next)); vm_area_free(next); /* * In mprotect's case 6 (see comments on vma_merge), * we must remove another next too. It would clutter * up the code too much to do both in one go. */ if (remove_next != 3) { /* * If "next" was removed and vma->vm_end was * expanded (up) over it, in turn * "next->vm_prev->vm_end" changed and the * "vma->vm_next" gap must be updated. */ next = vma->vm_next; } else { /* * For the scope of the comment "next" and * "vma" considered pre-swap(): if "vma" was * removed, next->vm_start was expanded (down) * over it and the "next" gap must be updated. * Because of the swap() the post-swap() "vma" * actually points to pre-swap() "next" * (post-swap() "next" as opposed is now a * dangling pointer). */ next = vma; } if (remove_next == 2) { remove_next = 1; end = next->vm_end; goto again; } else if (next) vma_gap_update(next); else { /* * If remove_next == 2 we obviously can't * reach this path. * * If remove_next == 3 we can't reach this * path because pre-swap() next is always not * NULL. pre-swap() "next" is not being * removed and its next->vm_end is not altered * (and furthermore "end" already matches * next->vm_end in remove_next == 3). * * We reach this only in the remove_next == 1 * case if the "next" vma that was removed was * the highest vma of the mm. However in such * case next->vm_end == "end" and the extended * "vma" has vma->vm_end == next->vm_end so * mm->highest_vm_end doesn't need any update * in remove_next == 1 case. */ VM_WARN_ON(mm->highest_vm_end != vm_end_gap(vma)); } } if (insert && file) uprobe_mmap(insert); validate_mm(mm); return 0; } /* * If the vma has a ->close operation then the driver probably needs to release * per-vma resources, so we don't attempt to merge those. */ static inline int is_mergeable_vma(struct vm_area_struct *vma, struct file *file, unsigned long vm_flags, struct vm_userfaultfd_ctx vm_userfaultfd_ctx) { /* * VM_SOFTDIRTY should not prevent from VMA merging, if we * match the flags but dirty bit -- the caller should mark * merged VMA as dirty. If dirty bit won't be excluded from * comparison, we increase pressure on the memory system forcing * the kernel to generate new VMAs when old one could be * extended instead. 
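 *
 * Concretely (a worked example): two otherwise identical anonymous vmas
 * where only one has VM_SOFTDIRTY set still pass the first check below,
 * since ((vma->vm_flags ^ vm_flags) & ~VM_SOFTDIRTY) evaluates to 0 once
 * the soft-dirty bit is masked out, so the caller only has to mark the
 * merged vma dirty.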
*/ if ((vma->vm_flags ^ vm_flags) & ~VM_SOFTDIRTY) return 0; if (vma->vm_file != file) return 0; if (vma->vm_ops && vma->vm_ops->close) return 0; if (!is_mergeable_vm_userfaultfd_ctx(vma, vm_userfaultfd_ctx)) return 0; return 1; } static inline int is_mergeable_anon_vma(struct anon_vma *anon_vma1, struct anon_vma *anon_vma2, struct vm_area_struct *vma) { /* * The list_is_singular() test is to avoid merging VMA cloned from * parents. This can improve scalability caused by anon_vma lock. */ if ((!anon_vma1 || !anon_vma2) && (!vma || list_is_singular(&vma->anon_vma_chain))) return 1; return anon_vma1 == anon_vma2; } /* * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff) * in front of (at a lower virtual address and file offset than) the vma. * * We cannot merge two vmas if they have differently assigned (non-NULL) * anon_vmas, nor if same anon_vma is assigned but offsets incompatible. * * We don't check here for the merged mmap wrapping around the end of pagecache * indices (16TB on ia32) because do_mmap_pgoff() does not permit mmap's which * wrap, nor mmaps which cover the final page at index -1UL. */ static int can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags, struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff, struct vm_userfaultfd_ctx vm_userfaultfd_ctx) { if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx) && is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) { if (vma->vm_pgoff == vm_pgoff) return 1; } return 0; } /* * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff) * beyond (at a higher virtual address and file offset than) the vma. * * We cannot merge two vmas if they have differently assigned (non-NULL) * anon_vmas, nor if same anon_vma is assigned but offsets incompatible. */ static int can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags, struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff, struct vm_userfaultfd_ctx vm_userfaultfd_ctx) { if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx) && is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) { pgoff_t vm_pglen; vm_pglen = vma_pages(vma); if (vma->vm_pgoff + vm_pglen == vm_pgoff) return 1; } return 0; } /* * Given a mapping request (addr,end,vm_flags,file,pgoff), figure out * whether that can be merged with its predecessor or its successor. * Or both (it neatly fills a hole). * * In most cases - when called for mmap, brk or mremap - [addr,end) is * certain not to be mapped by the time vma_merge is called; but when * called for mprotect, it is certain to be already mapped (either at * an offset within prev, or at the start of next), and the flags of * this area are about to be changed to vm_flags - and the no-change * case has already been eliminated. * * The following mprotect cases have to be considered, where AAAA is * the area passed down from mprotect_fixup, never extending beyond one * vma, PPPPPP is the prev vma specified, and NNNNNN the next vma after: * * AAAA AAAA AAAA AAAA * PPPPPPNNNNNN PPPPPPNNNNNN PPPPPPNNNNNN PPPPNNNNXXXX * cannot merge might become might become might become * PPNNNNNNNNNN PPPPPPPPPPNN PPPPPPPPPPPP 6 or * mmap, brk or case 4 below case 5 below PPPPPPPPXXXX 7 or * mremap move: PPPPXXXXXXXX 8 * AAAA * PPPP NNNN PPPPPPPPPPPP PPPPPPPPNNNN PPPPNNNNNNNN * might become case 1 below case 2 below case 3 below * * It is important for case 8 that the vma NNNN overlapping the * region AAAA is never going to extended over XXXX. 
Instead XXXX must * be extended in region AAAA and NNNN must be removed. This way in * all cases where vma_merge succeeds, the moment vma_adjust drops the * rmap_locks, the properties of the merged vma will be already * correct for the whole merged range. Some of those properties like * vm_page_prot/vm_flags may be accessed by rmap_walks and they must * be correct for the whole merged range immediately after the * rmap_locks are released. Otherwise if XXXX would be removed and * NNNN would be extended over the XXXX range, remove_migration_ptes * or other rmap walkers (if working on addresses beyond the "end" * parameter) may establish ptes with the wrong permissions of NNNN * instead of the right permissions of XXXX. */ struct vm_area_struct *vma_merge(struct mm_struct *mm, struct vm_area_struct *prev, unsigned long addr, unsigned long end, unsigned long vm_flags, struct anon_vma *anon_vma, struct file *file, pgoff_t pgoff, struct mempolicy *policy, struct vm_userfaultfd_ctx vm_userfaultfd_ctx) { pgoff_t pglen = (end - addr) >> PAGE_SHIFT; struct vm_area_struct *area, *next; int err; /* * We later require that vma->vm_flags == vm_flags, * so this tests vma->vm_flags & VM_SPECIAL, too. */ if (vm_flags & VM_SPECIAL) return NULL; if (prev) next = prev->vm_next; else next = mm->mmap; area = next; if (area && area->vm_end == end) /* cases 6, 7, 8 */ next = next->vm_next; /* verify some invariant that must be enforced by the caller */ VM_WARN_ON(prev && addr <= prev->vm_start); VM_WARN_ON(area && end > area->vm_end); VM_WARN_ON(addr >= end); /* * Can it merge with the predecessor? */ if (prev && prev->vm_end == addr && mpol_equal(vma_policy(prev), policy) && can_vma_merge_after(prev, vm_flags, anon_vma, file, pgoff, vm_userfaultfd_ctx)) { /* * OK, it can. Can we now merge in the successor as well? */ if (next && end == next->vm_start && mpol_equal(policy, vma_policy(next)) && can_vma_merge_before(next, vm_flags, anon_vma, file, pgoff+pglen, vm_userfaultfd_ctx) && is_mergeable_anon_vma(prev->anon_vma, next->anon_vma, NULL)) { /* cases 1, 6 */ err = __vma_adjust(prev, prev->vm_start, next->vm_end, prev->vm_pgoff, NULL, prev); } else /* cases 2, 5, 7 */ err = __vma_adjust(prev, prev->vm_start, end, prev->vm_pgoff, NULL, prev); if (err) return NULL; khugepaged_enter_vma_merge(prev, vm_flags); return prev; } /* * Can this new request be merged in front of next? */ if (next && end == next->vm_start && mpol_equal(policy, vma_policy(next)) && can_vma_merge_before(next, vm_flags, anon_vma, file, pgoff+pglen, vm_userfaultfd_ctx)) { if (prev && addr < prev->vm_end) /* case 4 */ err = __vma_adjust(prev, prev->vm_start, addr, prev->vm_pgoff, NULL, next); else { /* cases 3, 8 */ err = __vma_adjust(area, addr, next->vm_end, next->vm_pgoff - pglen, NULL, next); /* * In case 3 area is already equal to next and * this is a noop, but in case 8 "area" has * been removed and next was expanded over it. */ area = next; } if (err) return NULL; khugepaged_enter_vma_merge(area, vm_flags); return area; } return NULL; } /* * Rough compatbility check to quickly see if it's even worth looking * at sharing an anon_vma. * * They need to have the same vm_file, and the flags can only differ * in things that mprotect may change. * * NOTE! The fact that we share an anon_vma doesn't _have_ to mean that * we can merge the two vma's. For example, we refuse to merge a vma if * there is a vm_ops->close() function, because that indicates that the * driver is doing some kind of reference counting. 
But that doesn't * really matter for the anon_vma sharing case. */ static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *b) { return a->vm_end == b->vm_start && mpol_equal(vma_policy(a), vma_policy(b)) && a->vm_file == b->vm_file && !((a->vm_flags ^ b->vm_flags) & ~(VM_READ|VM_WRITE|VM_EXEC|VM_SOFTDIRTY)) && b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT); } /* * Do some basic sanity checking to see if we can re-use the anon_vma * from 'old'. The 'a'/'b' vma's are in VM order - one of them will be * the same as 'old', the other will be the new one that is trying * to share the anon_vma. * * NOTE! This runs with mm_sem held for reading, so it is possible that * the anon_vma of 'old' is concurrently in the process of being set up * by another page fault trying to merge _that_. But that's ok: if it * is being set up, that automatically means that it will be a singleton * acceptable for merging, so we can do all of this optimistically. But * we do that READ_ONCE() to make sure that we never re-load the pointer. * * IOW: that the "list_is_singular()" test on the anon_vma_chain only * matters for the 'stable anon_vma' case (ie the thing we want to avoid * is to return an anon_vma that is "complex" due to having gone through * a fork). * * We also make sure that the two vma's are compatible (adjacent, * and with the same memory policies). That's all stable, even with just * a read lock on the mm_sem. */ static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old, struct vm_area_struct *a, struct vm_area_struct *b) { if (anon_vma_compatible(a, b)) { struct anon_vma *anon_vma = READ_ONCE(old->anon_vma); if (anon_vma && list_is_singular(&old->anon_vma_chain)) return anon_vma; } return NULL; } /* * find_mergeable_anon_vma is used by anon_vma_prepare, to check * neighbouring vmas for a suitable anon_vma, before it goes off * to allocate a new anon_vma. It checks because a repetitive * sequence of mprotects and faults may otherwise lead to distinct * anon_vmas being allocated, preventing vma merge in subsequent * mprotect. */ struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma) { struct anon_vma *anon_vma; struct vm_area_struct *near; near = vma->vm_next; if (!near) goto try_prev; anon_vma = reusable_anon_vma(near, vma, near); if (anon_vma) return anon_vma; try_prev: near = vma->vm_prev; if (!near) goto none; anon_vma = reusable_anon_vma(near, near, vma); if (anon_vma) return anon_vma; none: /* * There's no absolute need to look only at touching neighbours: * we could search further afield for "compatible" anon_vmas. * But it would probably just be a waste of time searching, * or lead to too many vmas hanging off the same anon_vma. * We're trying to allow mprotect remerging later on, * not trying to minimize memory used for anon_vmas. */ return NULL; } /* * If a hint addr is less than mmap_min_addr change hint to be as * low as possible but still greater than mmap_min_addr */ static inline unsigned long round_hint_to_min(unsigned long hint) { hint &= PAGE_MASK; if (((void *)hint != NULL) && (hint < mmap_min_addr)) return PAGE_ALIGN(mmap_min_addr); return hint; } static inline int mlock_future_check(struct mm_struct *mm, unsigned long flags, unsigned long len) { unsigned long locked, lock_limit; /* mlock MCL_FUTURE? 
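	 *
	 * Worked example (illustrative numbers): with RLIMIT_MEMLOCK set to
	 * 64 KiB and 4 KiB pages, lock_limit below becomes 16 pages, so a
	 * request that would push mm->locked_vm past 16 pages is refused
	 * with -EAGAIN unless the caller has CAP_IPC_LOCK.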
*/ if (flags & VM_LOCKED) { locked = len >> PAGE_SHIFT; locked += mm->locked_vm; lock_limit = rlimit(RLIMIT_MEMLOCK); lock_limit >>= PAGE_SHIFT; if (locked > lock_limit && !capable(CAP_IPC_LOCK)) return -EAGAIN; } return 0; } static inline u64 file_mmap_size_max(struct file *file, struct inode *inode) { if (S_ISREG(inode->i_mode)) return MAX_LFS_FILESIZE; if (S_ISBLK(inode->i_mode)) return MAX_LFS_FILESIZE; /* Special "we do even unsigned file positions" case */ if (file->f_mode & FMODE_UNSIGNED_OFFSET) return 0; /* Yes, random drivers might want more. But I'm tired of buggy drivers */ return ULONG_MAX; } static inline bool file_mmap_ok(struct file *file, struct inode *inode, unsigned long pgoff, unsigned long len) { u64 maxsize = file_mmap_size_max(file, inode); if (maxsize && len > maxsize) return false; maxsize -= len; if (pgoff > maxsize >> PAGE_SHIFT) return false; return true; } /* * The caller must hold down_write(&current->mm->mmap_sem). */ unsigned long do_mmap(struct file *file, unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate, struct list_head *uf) { struct mm_struct *mm = current->mm; int pkey = 0; *populate = 0; if (!len) return -EINVAL; /* * Does the application expect PROT_READ to imply PROT_EXEC? * * (the exception is when the underlying filesystem is noexec * mounted, in which case we dont add PROT_EXEC.) */ if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC)) if (!(file && path_noexec(&file->f_path))) prot |= PROT_EXEC; /* force arch specific MAP_FIXED handling in get_unmapped_area */ if (flags & MAP_FIXED_NOREPLACE) flags |= MAP_FIXED; if (!(flags & MAP_FIXED)) addr = round_hint_to_min(addr); /* Careful about overflows.. */ len = PAGE_ALIGN(len); if (!len) return -ENOMEM; /* offset overflow? */ if ((pgoff + (len >> PAGE_SHIFT)) < pgoff) return -EOVERFLOW; /* Too many mappings? */ if (mm->map_count > sysctl_max_map_count) return -ENOMEM; /* Obtain the address to map to. we verify (or select) it and ensure * that it represents a valid section of the address space. */ addr = get_unmapped_area(file, addr, len, pgoff, flags); if (offset_in_page(addr)) return addr; if (flags & MAP_FIXED_NOREPLACE) { struct vm_area_struct *vma = find_vma(mm, addr); if (vma && vma->vm_start < addr + len) return -EEXIST; } if (prot == PROT_EXEC) { pkey = execute_only_pkey(mm); if (pkey < 0) pkey = 0; } /* Do simple checking here so the lower-level routines won't have * to. we assume access permissions have been handled by the open * of the memory object, so we don't do any here. */ vm_flags |= calc_vm_prot_bits(prot, pkey) | calc_vm_flag_bits(flags) | mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC; if (flags & MAP_LOCKED) if (!can_do_mlock()) return -EPERM; if (mlock_future_check(mm, vm_flags, len)) return -EAGAIN; if (file) { struct inode *inode = file_inode(file); unsigned long flags_mask; if (!file_mmap_ok(file, inode, pgoff, len)) return -EOVERFLOW; flags_mask = LEGACY_MAP_MASK | file->f_op->mmap_supported_flags; switch (flags & MAP_TYPE) { case MAP_SHARED: /* * Force use of MAP_SHARED_VALIDATE with non-legacy * flags. E.g. MAP_SYNC is dangerous to use with * MAP_SHARED as you don't know which consistency model * you will get. We silently ignore unsupported flags * with MAP_SHARED to preserve backward compatibility. 
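		 *
		 * Illustrative userspace contrast (sketch only; MAP_SYNC
		 * needs a filesystem that actually supports it, e.g. DAX):
		 *
		 *	mmap(NULL, len, PROT_READ | PROT_WRITE,
		 *	     MAP_SHARED | MAP_SYNC, fd, 0);
		 *		// MAP_SYNC silently dropped by the mask below
		 *	mmap(NULL, len, PROT_READ | PROT_WRITE,
		 *	     MAP_SHARED_VALIDATE | MAP_SYNC, fd, 0);
		 *		// fails with EOPNOTSUPP if MAP_SYNC is not in
		 *		// f_op->mmap_supported_flags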
*/ flags &= LEGACY_MAP_MASK; /* fall through */ case MAP_SHARED_VALIDATE: if (flags & ~flags_mask) return -EOPNOTSUPP; if ((prot&PROT_WRITE) && !(file->f_mode&FMODE_WRITE)) return -EACCES; /* * Make sure we don't allow writing to an append-only * file.. */ if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE)) return -EACCES; /* * Make sure there are no mandatory locks on the file. */ if (locks_verify_locked(file)) return -EAGAIN; vm_flags |= VM_SHARED | VM_MAYSHARE; if (!(file->f_mode & FMODE_WRITE)) vm_flags &= ~(VM_MAYWRITE | VM_SHARED); /* fall through */ case MAP_PRIVATE: if (!(file->f_mode & FMODE_READ)) return -EACCES; if (path_noexec(&file->f_path)) { if (vm_flags & VM_EXEC) return -EPERM; vm_flags &= ~VM_MAYEXEC; } if (!file->f_op->mmap) return -ENODEV; if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP)) return -EINVAL; break; default: return -EINVAL; } } else { switch (flags & MAP_TYPE) { case MAP_SHARED: if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP)) return -EINVAL; /* * Ignore pgoff. */ pgoff = 0; vm_flags |= VM_SHARED | VM_MAYSHARE; break; case MAP_PRIVATE: /* * Set pgoff according to addr for anon_vma. */ pgoff = addr >> PAGE_SHIFT; break; default: return -EINVAL; } } /* * Set 'VM_NORESERVE' if we should not account for the * memory use of this mapping. */ if (flags & MAP_NORESERVE) { /* We honor MAP_NORESERVE if allowed to overcommit */ if (sysctl_overcommit_memory != OVERCOMMIT_NEVER) vm_flags |= VM_NORESERVE; /* hugetlb applies strict overcommit unless MAP_NORESERVE */ if (file && is_file_hugepages(file)) vm_flags |= VM_NORESERVE; } addr = mmap_region(file, addr, len, vm_flags, pgoff, uf); if (!IS_ERR_VALUE(addr) && ((vm_flags & VM_LOCKED) || (flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE)) *populate = len; return addr; } unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long fd, unsigned long pgoff) { struct file *file = NULL; unsigned long retval; if (!(flags & MAP_ANONYMOUS)) { audit_mmap_fd(fd, flags); file = fget(fd); if (!file) return -EBADF; if (is_file_hugepages(file)) len = ALIGN(len, huge_page_size(hstate_file(file))); retval = -EINVAL; if (unlikely(flags & MAP_HUGETLB && !is_file_hugepages(file))) goto out_fput; } else if (flags & MAP_HUGETLB) { struct user_struct *user = NULL; struct hstate *hs; hs = hstate_sizelog((flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK); if (!hs) return -EINVAL; len = ALIGN(len, huge_page_size(hs)); /* * VM_NORESERVE is used because the reservations will be * taken when vm_ops->mmap() is called * A dummy user value is used because we are not locking * memory so no accounting is necessary */ file = hugetlb_file_setup(HUGETLB_ANON_FILE, len, VM_NORESERVE, &user, HUGETLB_ANONHUGE_INODE, (flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK); if (IS_ERR(file)) return PTR_ERR(file); } flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff); out_fput: if (file) fput(file); return retval; } SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len, unsigned long, prot, unsigned long, flags, unsigned long, fd, unsigned long, pgoff) { return ksys_mmap_pgoff(addr, len, prot, flags, fd, pgoff); } #ifdef __ARCH_WANT_SYS_OLD_MMAP struct mmap_arg_struct { unsigned long addr; unsigned long len; unsigned long prot; unsigned long flags; unsigned long fd; unsigned long offset; }; SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg) { struct mmap_arg_struct a; if (copy_from_user(&a, arg, sizeof(a))) return -EFAULT; if 
(offset_in_page(a.offset)) return -EINVAL; return ksys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT); } #endif /* __ARCH_WANT_SYS_OLD_MMAP */ /* * Some shared mappings will want the pages marked read-only * to track write events. If so, we'll downgrade vm_page_prot * to the private version (using protection_map[] without the * VM_SHARED bit). */ int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot) { vm_flags_t vm_flags = vma->vm_flags; const struct vm_operations_struct *vm_ops = vma->vm_ops; /* If it was private or non-writable, the write bit is already clear */ if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED))) return 0; /* The backer wishes to know when pages are first written to? */ if (vm_ops && (vm_ops->page_mkwrite || vm_ops->pfn_mkwrite)) return 1; /* The open routine did something to the protections that pgprot_modify * won't preserve? */ if (pgprot_val(vm_page_prot) != pgprot_val(vm_pgprot_modify(vm_page_prot, vm_flags))) return 0; /* Do we need to track softdirty? */ if (IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) && !(vm_flags & VM_SOFTDIRTY)) return 1; /* Specialty mapping? */ if (vm_flags & VM_PFNMAP) return 0; /* Can the mapping track the dirty pages? */ return vma->vm_file && vma->vm_file->f_mapping && mapping_cap_account_dirty(vma->vm_file->f_mapping); } /* * We account for memory if it's a private writeable mapping, * not hugepages and VM_NORESERVE wasn't set. */ static inline int accountable_mapping(struct file *file, vm_flags_t vm_flags) { /* * hugetlb has its own accounting separate from the core VM * VM_HUGETLB may not be set yet so we cannot check for that flag. */ if (file && is_file_hugepages(file)) return 0; return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE; } unsigned long mmap_region(struct file *file, unsigned long addr, unsigned long len, vm_flags_t vm_flags, unsigned long pgoff, struct list_head *uf) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma, *prev; int error; struct rb_node **rb_link, *rb_parent; unsigned long charged = 0; /* Check against address space limit. */ if (!may_expand_vm(mm, vm_flags, len >> PAGE_SHIFT)) { unsigned long nr_pages; /* * MAP_FIXED may remove pages of mappings that intersects with * requested mapping. Account for the pages it would unmap. */ nr_pages = count_vma_pages_range(mm, addr, addr + len); if (!may_expand_vm(mm, vm_flags, (len >> PAGE_SHIFT) - nr_pages)) return -ENOMEM; } /* Clear old maps */ while (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) { if (do_munmap(mm, addr, len, uf)) return -ENOMEM; } /* * Private writable mapping: check memory availability */ if (accountable_mapping(file, vm_flags)) { charged = len >> PAGE_SHIFT; if (security_vm_enough_memory_mm(mm, charged)) return -ENOMEM; vm_flags |= VM_ACCOUNT; } /* * Can we just expand an old mapping? */ vma = vma_merge(mm, prev, addr, addr + len, vm_flags, NULL, file, pgoff, NULL, NULL_VM_UFFD_CTX); if (vma) goto out; /* * Determine the object being mapped and call the appropriate * specific mapper. the address has already been validated, but * not unmapped, but the maps are removed from the list. 
*/ vma = vm_area_alloc(mm); if (!vma) { error = -ENOMEM; goto unacct_error; } vma->vm_start = addr; vma->vm_end = addr + len; vma->vm_flags = vm_flags; vma->vm_page_prot = vm_get_page_prot(vm_flags); vma->vm_pgoff = pgoff; if (file) { if (vm_flags & VM_DENYWRITE) { error = deny_write_access(file); if (error) goto free_vma; } if (vm_flags & VM_SHARED) { error = mapping_map_writable(file->f_mapping); if (error) goto allow_write_and_free_vma; } /* ->mmap() can change vma->vm_file, but must guarantee that * vma_link() below can deny write-access if VM_DENYWRITE is set * and map writably if VM_SHARED is set. This usually means the * new file must not have been exposed to user-space, yet. */ vma->vm_file = get_file(file); error = call_mmap(file, vma); if (error) goto unmap_and_free_vma; /* Can addr have changed?? * * Answer: Yes, several device drivers can do it in their * f_op->mmap method. -DaveM * Bug: If addr is changed, prev, rb_link, rb_parent should * be updated for vma_link() */ WARN_ON_ONCE(addr != vma->vm_start); addr = vma->vm_start; vm_flags = vma->vm_flags; } else if (vm_flags & VM_SHARED) { error = shmem_zero_setup(vma); if (error) goto free_vma; } else { vma_set_anonymous(vma); } vma_link(mm, vma, prev, rb_link, rb_parent); /* Once vma denies write, undo our temporary denial count */ if (file) { if (vm_flags & VM_SHARED) mapping_unmap_writable(file->f_mapping); if (vm_flags & VM_DENYWRITE) allow_write_access(file); } file = vma->vm_file; out: perf_event_mmap(vma); vm_stat_account(mm, vm_flags, len >> PAGE_SHIFT); if (vm_flags & VM_LOCKED) { if ((vm_flags & VM_SPECIAL) || vma_is_dax(vma) || is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm)) vma->vm_flags &= VM_LOCKED_CLEAR_MASK; else mm->locked_vm += (len >> PAGE_SHIFT); } if (file) uprobe_mmap(vma); /* * New (or expanded) vma always get soft dirty status. * Otherwise user-space soft-dirty page tracker won't * be able to distinguish situation when vma area unmapped, * then new mapped in-place (which must be aimed as * a completely new data area). */ vma->vm_flags |= VM_SOFTDIRTY; vma_set_page_prot(vma); return addr; unmap_and_free_vma: vma->vm_file = NULL; fput(file); /* Undo any partial mapping done by a device driver. */ unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end); charged = 0; if (vm_flags & VM_SHARED) mapping_unmap_writable(file->f_mapping); allow_write_and_free_vma: if (vm_flags & VM_DENYWRITE) allow_write_access(file); free_vma: vm_area_free(vma); unacct_error: if (charged) vm_unacct_memory(charged); return error; } unsigned long unmapped_area(struct vm_unmapped_area_info *info) { /* * We implement the search by looking for an rbtree node that * immediately follows a suitable gap. 
That is, * - gap_start = vma->vm_prev->vm_end <= info->high_limit - length; * - gap_end = vma->vm_start >= info->low_limit + length; * - gap_end - gap_start >= length */ struct mm_struct *mm = current->mm; struct vm_area_struct *vma; unsigned long length, low_limit, high_limit, gap_start, gap_end; /* Adjust search length to account for worst case alignment overhead */ length = info->length + info->align_mask; if (length < info->length) return -ENOMEM; /* Adjust search limits by the desired length */ if (info->high_limit < length) return -ENOMEM; high_limit = info->high_limit - length; if (info->low_limit > high_limit) return -ENOMEM; low_limit = info->low_limit + length; /* Check if rbtree root looks promising */ if (RB_EMPTY_ROOT(&mm->mm_rb)) goto check_highest; vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb); if (vma->rb_subtree_gap < length) goto check_highest; while (true) { /* Visit left subtree if it looks promising */ gap_end = vm_start_gap(vma); if (gap_end >= low_limit && vma->vm_rb.rb_left) { struct vm_area_struct *left = rb_entry(vma->vm_rb.rb_left, struct vm_area_struct, vm_rb); if (left->rb_subtree_gap >= length) { vma = left; continue; } } gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0; check_current: /* Check if current node has a suitable gap */ if (gap_start > high_limit) return -ENOMEM; if (gap_end >= low_limit && gap_end > gap_start && gap_end - gap_start >= length) goto found; /* Visit right subtree if it looks promising */ if (vma->vm_rb.rb_right) { struct vm_area_struct *right = rb_entry(vma->vm_rb.rb_right, struct vm_area_struct, vm_rb); if (right->rb_subtree_gap >= length) { vma = right; continue; } } /* Go back up the rbtree to find next candidate node */ while (true) { struct rb_node *prev = &vma->vm_rb; if (!rb_parent(prev)) goto check_highest; vma = rb_entry(rb_parent(prev), struct vm_area_struct, vm_rb); if (prev == vma->vm_rb.rb_left) { gap_start = vm_end_gap(vma->vm_prev); gap_end = vm_start_gap(vma); goto check_current; } } } check_highest: /* Check highest gap, which does not precede any rbtree node */ gap_start = mm->highest_vm_end; gap_end = ULONG_MAX; /* Only for VM_BUG_ON below */ if (gap_start > high_limit) return -ENOMEM; found: /* We found a suitable gap. Clip it with the original low_limit. */ if (gap_start < info->low_limit) gap_start = info->low_limit; /* Adjust gap address to the desired alignment */ gap_start += (info->align_offset - gap_start) & info->align_mask; VM_BUG_ON(gap_start + info->length > info->high_limit); VM_BUG_ON(gap_start + info->length > gap_end); return gap_start; } unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma; unsigned long length, low_limit, high_limit, gap_start, gap_end; /* Adjust search length to account for worst case alignment overhead */ length = info->length + info->align_mask; if (length < info->length) return -ENOMEM; /* * Adjust search limits by the desired length. * See implementation comment at top of unmapped_area(). 
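	 *
	 * Worked example (illustrative numbers, align_mask == 0): with
	 * info->length = 0x4000, info->low_limit = 0x10000 and
	 * info->high_limit = 0x100000, the assignments below give
	 * high_limit = 0xfc000 and low_limit = 0x14000, i.e. a gap only
	 * qualifies if a 0x4000-byte block fits entirely inside
	 * [0x10000, 0x100000).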
*/ gap_end = info->high_limit; if (gap_end < length) return -ENOMEM; high_limit = gap_end - length; if (info->low_limit > high_limit) return -ENOMEM; low_limit = info->low_limit + length; /* Check highest gap, which does not precede any rbtree node */ gap_start = mm->highest_vm_end; if (gap_start <= high_limit) goto found_highest; /* Check if rbtree root looks promising */ if (RB_EMPTY_ROOT(&mm->mm_rb)) return -ENOMEM; vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb); if (vma->rb_subtree_gap < length) return -ENOMEM; while (true) { /* Visit right subtree if it looks promising */ gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0; if (gap_start <= high_limit && vma->vm_rb.rb_right) { struct vm_area_struct *right = rb_entry(vma->vm_rb.rb_right, struct vm_area_struct, vm_rb); if (right->rb_subtree_gap >= length) { vma = right; continue; } } check_current: /* Check if current node has a suitable gap */ gap_end = vm_start_gap(vma); if (gap_end < low_limit) return -ENOMEM; if (gap_start <= high_limit && gap_end > gap_start && gap_end - gap_start >= length) goto found; /* Visit left subtree if it looks promising */ if (vma->vm_rb.rb_left) { struct vm_area_struct *left = rb_entry(vma->vm_rb.rb_left, struct vm_area_struct, vm_rb); if (left->rb_subtree_gap >= length) { vma = left; continue; } } /* Go back up the rbtree to find next candidate node */ while (true) { struct rb_node *prev = &vma->vm_rb; if (!rb_parent(prev)) return -ENOMEM; vma = rb_entry(rb_parent(prev), struct vm_area_struct, vm_rb); if (prev == vma->vm_rb.rb_right) { gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0; goto check_current; } } } found: /* We found a suitable gap. Clip it with the original high_limit. */ if (gap_end > info->high_limit) gap_end = info->high_limit; found_highest: /* Compute highest gap address at the desired alignment */ gap_end -= info->length; gap_end -= (gap_end - info->align_offset) & info->align_mask; VM_BUG_ON(gap_end < info->low_limit); VM_BUG_ON(gap_end < gap_start); return gap_end; } #ifndef arch_get_mmap_end #define arch_get_mmap_end(addr) (TASK_SIZE) #endif #ifndef arch_get_mmap_base #define arch_get_mmap_base(addr, base) (base) #endif /* Get an address range which is currently unmapped. * For shmat() with addr=0. * * Ugly calling convention alert: * Return value with the low bits set means error value, * ie * if (ret & ~PAGE_MASK) * error = ret; * * This function "knows" that -ENOMEM has the bits set. 
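 *
 * Callers in this file test for that with offset_in_page(), e.g. (a sketch
 * of the idiom used by do_mmap() above):
 *
 *	addr = get_unmapped_area(file, addr, len, pgoff, flags);
 *	if (offset_in_page(addr))
 *		return addr;	// actually an errno such as -ENOMEM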
*/ #ifndef HAVE_ARCH_UNMAPPED_AREA unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma, *prev; struct vm_unmapped_area_info info; const unsigned long mmap_end = arch_get_mmap_end(addr); if (len > mmap_end - mmap_min_addr) return -ENOMEM; if (flags & MAP_FIXED) return addr; if (addr) { addr = PAGE_ALIGN(addr); vma = find_vma_prev(mm, addr, &prev); if (mmap_end - len >= addr && addr >= mmap_min_addr && (!vma || addr + len <= vm_start_gap(vma)) && (!prev || addr >= vm_end_gap(prev))) return addr; } info.flags = 0; info.length = len; info.low_limit = mm->mmap_base; info.high_limit = mmap_end; info.align_mask = 0; return vm_unmapped_area(&info); } #endif /* * This mmap-allocator allocates new areas top-down from below the * stack's low limit (the base): */ #ifndef HAVE_ARCH_UNMAPPED_AREA_TOPDOWN unsigned long arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { struct vm_area_struct *vma, *prev; struct mm_struct *mm = current->mm; struct vm_unmapped_area_info info; const unsigned long mmap_end = arch_get_mmap_end(addr); /* requested length too big for entire address space */ if (len > mmap_end - mmap_min_addr) return -ENOMEM; if (flags & MAP_FIXED) return addr; /* requesting a specific address */ if (addr) { addr = PAGE_ALIGN(addr); vma = find_vma_prev(mm, addr, &prev); if (mmap_end - len >= addr && addr >= mmap_min_addr && (!vma || addr + len <= vm_start_gap(vma)) && (!prev || addr >= vm_end_gap(prev))) return addr; } info.flags = VM_UNMAPPED_AREA_TOPDOWN; info.length = len; info.low_limit = max(PAGE_SIZE, mmap_min_addr); info.high_limit = arch_get_mmap_base(addr, mm->mmap_base); info.align_mask = 0; addr = vm_unmapped_area(&info); /* * A failed mmap() very likely causes application failure, * so fall back to the bottom-up function here. This scenario * can happen with large stack limits and large mmap() * allocations. */ if (offset_in_page(addr)) { VM_BUG_ON(addr != -ENOMEM); info.flags = 0; info.low_limit = TASK_UNMAPPED_BASE; info.high_limit = mmap_end; addr = vm_unmapped_area(&info); } return addr; } #endif unsigned long get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { unsigned long (*get_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); unsigned long error = arch_mmap_check(addr, len, flags); if (error) return error; /* Careful about overflows.. */ if (len > TASK_SIZE) return -ENOMEM; get_area = current->mm->get_unmapped_area; if (file) { if (file->f_op->get_unmapped_area) get_area = file->f_op->get_unmapped_area; } else if (flags & MAP_SHARED) { /* * mmap_region() will call shmem_zero_setup() to create a file, * so use shmem's get_unmapped_area in case it can be huge. * do_mmap_pgoff() will clear pgoff, so match alignment. */ pgoff = 0; get_area = shmem_get_unmapped_area; } addr = get_area(file, addr, len, pgoff, flags); if (IS_ERR_VALUE(addr)) return addr; if (addr > TASK_SIZE - len) return -ENOMEM; if (offset_in_page(addr)) return -EINVAL; error = security_mmap_addr(addr); return error ? error : addr; } EXPORT_SYMBOL(get_unmapped_area); /* Look up the first VMA which satisfies addr < vm_end, NULL if none. 
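 *
 * Typical use (a sketch of the pattern used throughout this file; the caller
 * must hold mmap_sem to keep the result stable):
 *
 *	vma = find_vma(mm, addr);
 *	if (!vma || addr < vma->vm_start)
 *		...	// addr is unmapped; vma, if any, lies above it
 *	else
 *		...	// addr falls within [vma->vm_start, vma->vm_end)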
*/ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr) { struct rb_node *rb_node; struct vm_area_struct *vma; /* Check the cache first. */ vma = vmacache_find(mm, addr); if (likely(vma)) return vma; rb_node = mm->mm_rb.rb_node; while (rb_node) { struct vm_area_struct *tmp; tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb); if (tmp->vm_end > addr) { vma = tmp; if (tmp->vm_start <= addr) break; rb_node = rb_node->rb_left; } else rb_node = rb_node->rb_right; } if (vma) vmacache_update(addr, vma); return vma; } EXPORT_SYMBOL(find_vma); /* * Same as find_vma, but also return a pointer to the previous VMA in *pprev. */ struct vm_area_struct * find_vma_prev(struct mm_struct *mm, unsigned long addr, struct vm_area_struct **pprev) { struct vm_area_struct *vma; vma = find_vma(mm, addr); if (vma) { *pprev = vma->vm_prev; } else { struct rb_node *rb_node = mm->mm_rb.rb_node; *pprev = NULL; while (rb_node) { *pprev = rb_entry(rb_node, struct vm_area_struct, vm_rb); rb_node = rb_node->rb_right; } } return vma; } /* * Verify that the stack growth is acceptable and * update accounting. This is shared with both the * grow-up and grow-down cases. */ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, unsigned long grow) { struct mm_struct *mm = vma->vm_mm; unsigned long new_start; /* address space limit tests */ if (!may_expand_vm(mm, vma->vm_flags, grow)) return -ENOMEM; /* Stack limit test */ if (size > rlimit(RLIMIT_STACK)) return -ENOMEM; /* mlock limit tests */ if (vma->vm_flags & VM_LOCKED) { unsigned long locked; unsigned long limit; locked = mm->locked_vm + grow; limit = rlimit(RLIMIT_MEMLOCK); limit >>= PAGE_SHIFT; if (locked > limit && !capable(CAP_IPC_LOCK)) return -ENOMEM; } /* Check to ensure the stack will not grow into a hugetlb-only region */ new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start : vma->vm_end - size; if (is_hugepage_only_range(vma->vm_mm, new_start, size)) return -EFAULT; /* * Overcommit.. This must be the final test, as it will * update security statistics. */ if (security_vm_enough_memory_mm(mm, grow)) return -ENOMEM; return 0; } #if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64) /* * PA-RISC uses this for its stack; IA64 for its Register Backing Store. * vma is the last one with address > vma->vm_end. Have to extend vma. */ int expand_upwards(struct vm_area_struct *vma, unsigned long address) { struct mm_struct *mm = vma->vm_mm; struct vm_area_struct *next; unsigned long gap_addr; int error = 0; if (!(vma->vm_flags & VM_GROWSUP)) return -EFAULT; /* Guard against exceeding limits of the address space. */ address &= PAGE_MASK; if (address >= (TASK_SIZE & PAGE_MASK)) return -ENOMEM; address += PAGE_SIZE; /* Enforce stack_guard_gap */ gap_addr = address + stack_guard_gap; /* Guard against overflow */ if (gap_addr < address || gap_addr > TASK_SIZE) gap_addr = TASK_SIZE; next = vma->vm_next; if (next && next->vm_start < gap_addr && (next->vm_flags & (VM_WRITE|VM_READ|VM_EXEC))) { if (!(next->vm_flags & VM_GROWSUP)) return -ENOMEM; /* Check that both stack segments have the same anon_vma? */ } /* We must make sure the anon_vma is allocated. */ if (unlikely(anon_vma_prepare(vma))) return -ENOMEM; /* * vma->vm_start/vm_end cannot change under us because the caller * is required to hold the mmap_sem in read mode. We need the * anon_vma lock to serialize against concurrent expand_stacks. 
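	 *
	 * Worked example (illustrative addresses, 4 KiB pages): for a
	 * VM_GROWSUP vma ending at 0x700000, a fault at 0x701010 is masked
	 * to 0x701000 and bumped by one page above, so the block below
	 * accounts grow = 2 pages and, on success, moves vm_end to 0x702000.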
*/ anon_vma_lock_write(vma->anon_vma); /* Somebody else might have raced and expanded it already */ if (address > vma->vm_end) { unsigned long size, grow; size = address - vma->vm_start; grow = (address - vma->vm_end) >> PAGE_SHIFT; error = -ENOMEM; if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) { error = acct_stack_growth(vma, size, grow); if (!error) { /* * vma_gap_update() doesn't support concurrent * updates, but we only hold a shared mmap_sem * lock here, so we need to protect against * concurrent vma expansions. * anon_vma_lock_write() doesn't help here, as * we don't guarantee that all growable vmas * in a mm share the same root anon vma. * So, we reuse mm->page_table_lock to guard * against concurrent vma expansions. */ spin_lock(&mm->page_table_lock); if (vma->vm_flags & VM_LOCKED) mm->locked_vm += grow; vm_stat_account(mm, vma->vm_flags, grow); anon_vma_interval_tree_pre_update_vma(vma); vma->vm_end = address; anon_vma_interval_tree_post_update_vma(vma); if (vma->vm_next) vma_gap_update(vma->vm_next); else mm->highest_vm_end = vm_end_gap(vma); spin_unlock(&mm->page_table_lock); perf_event_mmap(vma); } } } anon_vma_unlock_write(vma->anon_vma); khugepaged_enter_vma_merge(vma, vma->vm_flags); validate_mm(mm); return error; } #endif /* CONFIG_STACK_GROWSUP || CONFIG_IA64 */ /* * vma is the first one with address < vma->vm_start. Have to extend vma. */ int expand_downwards(struct vm_area_struct *vma, unsigned long address) { struct mm_struct *mm = vma->vm_mm; struct vm_area_struct *prev; int error = 0; address &= PAGE_MASK; if (address < mmap_min_addr) return -EPERM; /* Enforce stack_guard_gap */ prev = vma->vm_prev; /* Check that both stack segments have the same anon_vma? */ if (prev && !(prev->vm_flags & VM_GROWSDOWN) && (prev->vm_flags & (VM_WRITE|VM_READ|VM_EXEC))) { if (address - prev->vm_end < stack_guard_gap) return -ENOMEM; } /* We must make sure the anon_vma is allocated. */ if (unlikely(anon_vma_prepare(vma))) return -ENOMEM; /* * vma->vm_start/vm_end cannot change under us because the caller * is required to hold the mmap_sem in read mode. We need the * anon_vma lock to serialize against concurrent expand_stacks. */ anon_vma_lock_write(vma->anon_vma); /* Somebody else might have raced and expanded it already */ if (address < vma->vm_start) { unsigned long size, grow; size = vma->vm_end - address; grow = (vma->vm_start - address) >> PAGE_SHIFT; error = -ENOMEM; if (grow <= vma->vm_pgoff) { error = acct_stack_growth(vma, size, grow); if (!error) { /* * vma_gap_update() doesn't support concurrent * updates, but we only hold a shared mmap_sem * lock here, so we need to protect against * concurrent vma expansions. * anon_vma_lock_write() doesn't help here, as * we don't guarantee that all growable vmas * in a mm share the same root anon vma. * So, we reuse mm->page_table_lock to guard * against concurrent vma expansions. */ spin_lock(&mm->page_table_lock); if (vma->vm_flags & VM_LOCKED) mm->locked_vm += grow; vm_stat_account(mm, vma->vm_flags, grow); anon_vma_interval_tree_pre_update_vma(vma); vma->vm_start = address; vma->vm_pgoff -= grow; anon_vma_interval_tree_post_update_vma(vma); vma_gap_update(vma); spin_unlock(&mm->page_table_lock); perf_event_mmap(vma); } } } anon_vma_unlock_write(vma->anon_vma); khugepaged_enter_vma_merge(vma, vma->vm_flags); validate_mm(mm); return error; } /* enforced gap between the expanding stack and other mappings. 
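 *
 * With 4 KiB pages the default below is 256 pages, i.e. a 1 MiB gap.  It can
 * be tuned at boot, in pages, via the command line, e.g. (illustrative)
 *
 *	stack_guard_gap=512
 *
 * for a 2 MiB gap on such a configuration.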
*/ unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT; static int __init cmdline_parse_stack_guard_gap(char *p) { unsigned long val; char *endptr; val = simple_strtoul(p, &endptr, 10); if (!*endptr) stack_guard_gap = val << PAGE_SHIFT; return 0; } __setup("stack_guard_gap=", cmdline_parse_stack_guard_gap); #ifdef CONFIG_STACK_GROWSUP int expand_stack(struct vm_area_struct *vma, unsigned long address) { return expand_upwards(vma, address); } struct vm_area_struct * find_extend_vma(struct mm_struct *mm, unsigned long addr) { struct vm_area_struct *vma, *prev; addr &= PAGE_MASK; vma = find_vma_prev(mm, addr, &prev); if (vma && (vma->vm_start <= addr)) return vma; if (!prev || expand_stack(prev, addr)) return NULL; if (prev->vm_flags & VM_LOCKED) populate_vma_page_range(prev, addr, prev->vm_end, NULL); return prev; } #else int expand_stack(struct vm_area_struct *vma, unsigned long address) { return expand_downwards(vma, address); } struct vm_area_struct * find_extend_vma(struct mm_struct *mm, unsigned long addr) { struct vm_area_struct *vma; unsigned long start; addr &= PAGE_MASK; vma = find_vma(mm, addr); if (!vma) return NULL; if (vma->vm_start <= addr) return vma; if (!(vma->vm_flags & VM_GROWSDOWN)) return NULL; start = vma->vm_start; if (expand_stack(vma, addr)) return NULL; if (vma->vm_flags & VM_LOCKED) populate_vma_page_range(vma, addr, start, NULL); return vma; } #endif EXPORT_SYMBOL_GPL(find_extend_vma); /* * Ok - we have the memory areas we should free on the vma list, * so release them, and do the vma updates. * * Called with the mm semaphore held. */ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma) { unsigned long nr_accounted = 0; /* Update high watermark before we lower total_vm */ update_hiwater_vm(mm); do { long nrpages = vma_pages(vma); if (vma->vm_flags & VM_ACCOUNT) nr_accounted += nrpages; vm_stat_account(mm, vma->vm_flags, -nrpages); vma = remove_vma(vma); } while (vma); vm_unacct_memory(nr_accounted); validate_mm(mm); } /* * Get rid of page table information in the indicated region. * * Called with the mm semaphore held. */ static void unmap_region(struct mm_struct *mm, struct vm_area_struct *vma, struct vm_area_struct *prev, unsigned long start, unsigned long end) { struct vm_area_struct *next = prev ? prev->vm_next : mm->mmap; struct mmu_gather tlb; lru_add_drain(); tlb_gather_mmu(&tlb, mm, start, end); update_hiwater_rss(mm); unmap_vmas(&tlb, vma, start, end); free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS, next ? next->vm_start : USER_PGTABLES_CEILING); tlb_finish_mmu(&tlb, start, end); } /* * Create a list of vma's touched by the unmap, removing them from the mm's * vma list as we go.. */ static void detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma, struct vm_area_struct *prev, unsigned long end) { struct vm_area_struct **insertion_point; struct vm_area_struct *tail_vma = NULL; insertion_point = (prev ? &prev->vm_next : &mm->mmap); vma->vm_prev = NULL; do { vma_rb_erase(vma, &mm->mm_rb); mm->map_count--; tail_vma = vma; vma = vma->vm_next; } while (vma && vma->vm_start < end); *insertion_point = vma; if (vma) { vma->vm_prev = prev; vma_gap_update(vma); } else mm->highest_vm_end = prev ? vm_end_gap(prev) : 0; tail_vma->vm_next = NULL; /* Kill the cache */ vmacache_invalidate(mm); } /* * __split_vma() bypasses sysctl_max_map_count checking. We use this where it * has already been checked or doesn't make sense to fail. 
*/ int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, int new_below) { struct vm_area_struct *new; int err; if (vma->vm_ops && vma->vm_ops->split) { err = vma->vm_ops->split(vma, addr); if (err) return err; } new = vm_area_dup(vma); if (!new) return -ENOMEM; if (new_below) new->vm_end = addr; else { new->vm_start = addr; new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT); } err = vma_dup_policy(vma, new); if (err) goto out_free_vma; err = anon_vma_clone(new, vma); if (err) goto out_free_mpol; if (new->vm_file) get_file(new->vm_file); if (new->vm_ops && new->vm_ops->open) new->vm_ops->open(new); if (new_below) err = vma_adjust(vma, addr, vma->vm_end, vma->vm_pgoff + ((addr - new->vm_start) >> PAGE_SHIFT), new); else err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new); /* Success. */ if (!err) return 0; /* Clean everything up if vma_adjust failed. */ if (new->vm_ops && new->vm_ops->close) new->vm_ops->close(new); if (new->vm_file) fput(new->vm_file); unlink_anon_vmas(new); out_free_mpol: mpol_put(vma_policy(new)); out_free_vma: vm_area_free(new); return err; } /* * Split a vma into two pieces at address 'addr', a new vma is allocated * either for the first part or the tail. */ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, int new_below) { if (mm->map_count >= sysctl_max_map_count) return -ENOMEM; return __split_vma(mm, vma, addr, new_below); } /* Munmap is split into 2 main parts -- this part which finds * what needs doing, and the areas themselves, which do the * work. This now handles partial unmappings. * Jeremy Fitzhardinge <jeremy@goop.org> */ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len, struct list_head *uf, bool downgrade) { unsigned long end; struct vm_area_struct *vma, *prev, *last; if ((offset_in_page(start)) || start > TASK_SIZE || len > TASK_SIZE-start) return -EINVAL; len = PAGE_ALIGN(len); if (len == 0) return -EINVAL; /* Find the first overlapping VMA */ vma = find_vma(mm, start); if (!vma) return 0; prev = vma->vm_prev; /* we have start < vma->vm_end */ /* if it doesn't overlap, we have nothing.. */ end = start + len; if (vma->vm_start >= end) return 0; /* * If we need to split any vma, do it now to save pain later. * * Note: mremap's move_vma VM_ACCOUNT handling assumes a partially * unmapped vm_area_struct will remain in use: so lower split_vma * places tmp vma above, and higher split_vma places tmp vma below. */ if (start > vma->vm_start) { int error; /* * Make sure that map_count on return from munmap() will * not exceed its limit; but let map_count go just above * its limit temporarily, to help free resources as expected. */ if (end < vma->vm_end && mm->map_count >= sysctl_max_map_count) return -ENOMEM; error = __split_vma(mm, vma, start, 0); if (error) return error; prev = vma; } /* Does it split the last one? */ last = find_vma(mm, end); if (last && end > last->vm_start) { int error = __split_vma(mm, last, end, 1); if (error) return error; } vma = prev ? prev->vm_next : mm->mmap; if (unlikely(uf)) { /* * If userfaultfd_unmap_prep returns an error the vmas * will remain splitted, but userland will get a * highly unexpected error anyway. This is no * different than the case where the first of the two * __split_vma fails, but we don't undo the first * split, despite we could. This is unlikely enough * failure that it's not worth optimizing it for. 
*/ int error = userfaultfd_unmap_prep(vma, start, end, uf); if (error) return error; } /* * unlock any mlock()ed ranges before detaching vmas */ if (mm->locked_vm) { struct vm_area_struct *tmp = vma; while (tmp && tmp->vm_start < end) { if (tmp->vm_flags & VM_LOCKED) { mm->locked_vm -= vma_pages(tmp); munlock_vma_pages_all(tmp); } tmp = tmp->vm_next; } } /* Detach vmas from rbtree */ detach_vmas_to_be_unmapped(mm, vma, prev, end); /* * mpx unmap needs to be called with mmap_sem held for write. * It is safe to call it before unmap_region(). */ arch_unmap(mm, vma, start, end); if (downgrade) downgrade_write(&mm->mmap_sem); unmap_region(mm, vma, prev, start, end); /* Fix up all other VM information */ remove_vma_list(mm, vma); return downgrade ? 1 : 0; } int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, struct list_head *uf) { return __do_munmap(mm, start, len, uf, false); } static int __vm_munmap(unsigned long start, size_t len, bool downgrade) { int ret; struct mm_struct *mm = current->mm; LIST_HEAD(uf); if (down_write_killable(&mm->mmap_sem)) return -EINTR; ret = __do_munmap(mm, start, len, &uf, downgrade); /* * Returning 1 indicates mmap_sem is downgraded. * But 1 is not legal return value of vm_munmap() and munmap(), reset * it to 0 before return. */ if (ret == 1) { up_read(&mm->mmap_sem); ret = 0; } else up_write(&mm->mmap_sem); userfaultfd_unmap_complete(mm, &uf); return ret; } int vm_munmap(unsigned long start, size_t len) { return __vm_munmap(start, len, false); } EXPORT_SYMBOL(vm_munmap); SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len) { profile_munmap(addr); return __vm_munmap(addr, len, true); } /* * Emulation of deprecated remap_file_pages() syscall. */ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size, unsigned long, prot, unsigned long, pgoff, unsigned long, flags) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma; unsigned long populate = 0; unsigned long ret = -EINVAL; struct file *file; pr_warn_once("%s (%d) uses deprecated remap_file_pages() syscall. See Documentation/vm/remap_file_pages.rst.\n", current->comm, current->pid); if (prot) return ret; start = start & PAGE_MASK; size = size & PAGE_MASK; if (start + size <= start) return ret; /* Does pgoff wrap? */ if (pgoff + (size >> PAGE_SHIFT) < pgoff) return ret; if (down_write_killable(&mm->mmap_sem)) return -EINTR; vma = find_vma(mm, start); if (!vma || !(vma->vm_flags & VM_SHARED)) goto out; if (start < vma->vm_start) goto out; if (start + size > vma->vm_end) { struct vm_area_struct *next; for (next = vma->vm_next; next; next = next->vm_next) { /* hole between vmas ? */ if (next->vm_start != next->vm_prev->vm_end) goto out; if (next->vm_file != vma->vm_file) goto out; if (next->vm_flags != vma->vm_flags) goto out; if (start + size <= next->vm_end) break; } if (!next) goto out; } prot |= vma->vm_flags & VM_READ ? PROT_READ : 0; prot |= vma->vm_flags & VM_WRITE ? PROT_WRITE : 0; prot |= vma->vm_flags & VM_EXEC ? PROT_EXEC : 0; flags &= MAP_NONBLOCK; flags |= MAP_SHARED | MAP_FIXED | MAP_POPULATE; if (vma->vm_flags & VM_LOCKED) { struct vm_area_struct *tmp; flags |= MAP_LOCKED; /* drop PG_Mlocked flag for over-mapped range */ for (tmp = vma; tmp->vm_start >= start + size; tmp = tmp->vm_next) { /* * Split pmd and munlock page on the border * of the range. 
*/ vma_adjust_trans_huge(tmp, start, start + size, 0); munlock_vma_pages_range(tmp, max(tmp->vm_start, start), min(tmp->vm_end, start + size)); } } file = get_file(vma->vm_file); ret = do_mmap_pgoff(vma->vm_file, start, size, prot, flags, pgoff, &populate, NULL); fput(file); out: up_write(&mm->mmap_sem); if (populate) mm_populate(ret, populate); if (!IS_ERR_VALUE(ret)) ret = 0; return ret; } /* * this is really a simplified "do_mmap". it only handles * anonymous maps. eventually we may be able to do some * brk-specific accounting here. */ static int do_brk_flags(unsigned long addr, unsigned long len, unsigned long flags, struct list_head *uf) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma, *prev; struct rb_node **rb_link, *rb_parent; pgoff_t pgoff = addr >> PAGE_SHIFT; int error; /* Until we need other flags, refuse anything except VM_EXEC. */ if ((flags & (~VM_EXEC)) != 0) return -EINVAL; flags |= VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags; error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED); if (offset_in_page(error)) return error; error = mlock_future_check(mm, mm->def_flags, len); if (error) return error; /* * Clear old maps. this also does some error checking for us */ while (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) { if (do_munmap(mm, addr, len, uf)) return -ENOMEM; } /* Check against address space limits *after* clearing old maps... */ if (!may_expand_vm(mm, flags, len >> PAGE_SHIFT)) return -ENOMEM; if (mm->map_count > sysctl_max_map_count) return -ENOMEM; if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT)) return -ENOMEM; /* Can we just expand an old private anonymous mapping? */ vma = vma_merge(mm, prev, addr, addr + len, flags, NULL, NULL, pgoff, NULL, NULL_VM_UFFD_CTX); if (vma) goto out; /* * create a vma struct for an anonymous mapping */ vma = vm_area_alloc(mm); if (!vma) { vm_unacct_memory(len >> PAGE_SHIFT); return -ENOMEM; } vma_set_anonymous(vma); vma->vm_start = addr; vma->vm_end = addr + len; vma->vm_pgoff = pgoff; vma->vm_flags = flags; vma->vm_page_prot = vm_get_page_prot(flags); vma_link(mm, vma, prev, rb_link, rb_parent); out: perf_event_mmap(vma); mm->total_vm += len >> PAGE_SHIFT; mm->data_vm += len >> PAGE_SHIFT; if (flags & VM_LOCKED) mm->locked_vm += (len >> PAGE_SHIFT); vma->vm_flags |= VM_SOFTDIRTY; return 0; } int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags) { struct mm_struct *mm = current->mm; unsigned long len; int ret; bool populate; LIST_HEAD(uf); len = PAGE_ALIGN(request); if (len < request) return -ENOMEM; if (!len) return 0; if (down_write_killable(&mm->mmap_sem)) return -EINTR; ret = do_brk_flags(addr, len, flags, &uf); populate = ((mm->def_flags & VM_LOCKED) != 0); up_write(&mm->mmap_sem); userfaultfd_unmap_complete(mm, &uf); if (populate && !ret) mm_populate(addr, len); return ret; } EXPORT_SYMBOL(vm_brk_flags); int vm_brk(unsigned long addr, unsigned long len) { return vm_brk_flags(addr, len, 0); } EXPORT_SYMBOL(vm_brk); /* Release all mmaps. */ void exit_mmap(struct mm_struct *mm) { struct mmu_gather tlb; struct vm_area_struct *vma; unsigned long nr_accounted = 0; /* mm's last user has gone, and its about to be pulled down */ mmu_notifier_release(mm); if (unlikely(mm_is_oom_victim(mm))) { /* * Manually reap the mm to free as much memory as possible. * Then, as the oom reaper does, set MMF_OOM_SKIP to disregard * this mm from further consideration. 
Taking mm->mmap_sem for * write after setting MMF_OOM_SKIP will guarantee that the oom * reaper will not run on this mm again after mmap_sem is * dropped. * * Nothing can be holding mm->mmap_sem here and the above call * to mmu_notifier_release(mm) ensures mmu notifier callbacks in * __oom_reap_task_mm() will not block. * * This needs to be done before calling munlock_vma_pages_all(), * which clears VM_LOCKED, otherwise the oom reaper cannot * reliably test it. */ (void)__oom_reap_task_mm(mm); set_bit(MMF_OOM_SKIP, &mm->flags); down_write(&mm->mmap_sem); up_write(&mm->mmap_sem); } if (mm->locked_vm) { vma = mm->mmap; while (vma) { if (vma->vm_flags & VM_LOCKED) munlock_vma_pages_all(vma); vma = vma->vm_next; } } arch_exit_mmap(mm); vma = mm->mmap; if (!vma) /* Can happen if dup_mmap() received an OOM */ return; lru_add_drain(); flush_cache_mm(mm); tlb_gather_mmu(&tlb, mm, 0, -1); /* update_hiwater_rss(mm) here? but nobody should be looking */ /* Use -1 here to ensure all VMAs in the mm are unmapped */ unmap_vmas(&tlb, vma, 0, -1); free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING); tlb_finish_mmu(&tlb, 0, -1); /* * Walk the list again, actually closing and freeing it, * with preemption enabled, without holding any MM locks. */ while (vma) { if (vma->vm_flags & VM_ACCOUNT) nr_accounted += vma_pages(vma); vma = remove_vma(vma); } vm_unacct_memory(nr_accounted); } /* Insert vm structure into process list sorted by address * and into the inode's i_mmap tree. If vm_file is non-NULL * then i_mmap_rwsem is taken here. */ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma) { struct vm_area_struct *prev; struct rb_node **rb_link, *rb_parent; if (find_vma_links(mm, vma->vm_start, vma->vm_end, &prev, &rb_link, &rb_parent)) return -ENOMEM; if ((vma->vm_flags & VM_ACCOUNT) && security_vm_enough_memory_mm(mm, vma_pages(vma))) return -ENOMEM; /* * The vm_pgoff of a purely anonymous vma should be irrelevant * until its first write fault, when page's anon_vma and index * are set. But now set the vm_pgoff it will almost certainly * end up with (unless mremap moves it elsewhere before that * first wfault), so /proc/pid/maps tells a consistent story. * * By setting it to reflect the virtual start address of the * vma, merges and splits can happen in a seamless way, just * using the existing file pgoff checks and manipulations. * Similarly in do_mmap_pgoff and in do_brk. */ if (vma_is_anonymous(vma)) { BUG_ON(vma->anon_vma); vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT; } vma_link(mm, vma, prev, rb_link, rb_parent); return 0; } /* * Copy the vma structure to a new location in the same mm, * prior to moving page table entries, to effect an mremap move. */ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, unsigned long addr, unsigned long len, pgoff_t pgoff, bool *need_rmap_locks) { struct vm_area_struct *vma = *vmap; unsigned long vma_start = vma->vm_start; struct mm_struct *mm = vma->vm_mm; struct vm_area_struct *new_vma, *prev; struct rb_node **rb_link, *rb_parent; bool faulted_in_anon_vma = true; /* * If anonymous vma has not yet been faulted, update new pgoff * to match new location, to increase its chance of merging. 
*/ if (unlikely(vma_is_anonymous(vma) && !vma->anon_vma)) { pgoff = addr >> PAGE_SHIFT; faulted_in_anon_vma = false; } if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) return NULL; /* should never get here */ new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags, vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma), vma->vm_userfaultfd_ctx); if (new_vma) { /* * Source vma may have been merged into new_vma */ if (unlikely(vma_start >= new_vma->vm_start && vma_start < new_vma->vm_end)) { /* * The only way we can get a vma_merge with * self during an mremap is if the vma hasn't * been faulted in yet and we were allowed to * reset the dst vma->vm_pgoff to the * destination address of the mremap to allow * the merge to happen. mremap must change the * vm_pgoff linearity between src and dst vmas * (in turn preventing a vma_merge) to be * safe. It is only safe to keep the vm_pgoff * linear if there are no pages mapped yet. */ VM_BUG_ON_VMA(faulted_in_anon_vma, new_vma); *vmap = vma = new_vma; } *need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff); } else { new_vma = vm_area_dup(vma); if (!new_vma) goto out; new_vma->vm_start = addr; new_vma->vm_end = addr + len; new_vma->vm_pgoff = pgoff; if (vma_dup_policy(vma, new_vma)) goto out_free_vma; if (anon_vma_clone(new_vma, vma)) goto out_free_mempol; if (new_vma->vm_file) get_file(new_vma->vm_file); if (new_vma->vm_ops && new_vma->vm_ops->open) new_vma->vm_ops->open(new_vma); vma_link(mm, new_vma, prev, rb_link, rb_parent); *need_rmap_locks = false; } return new_vma; out_free_mempol: mpol_put(vma_policy(new_vma)); out_free_vma: vm_area_free(new_vma); out: return NULL; } /* * Return true if the calling process may expand its vm space by the passed * number of pages */ bool may_expand_vm(struct mm_struct *mm, vm_flags_t flags, unsigned long npages) { if (mm->total_vm + npages > rlimit(RLIMIT_AS) >> PAGE_SHIFT) return false; if (is_data_mapping(flags) && mm->data_vm + npages > rlimit(RLIMIT_DATA) >> PAGE_SHIFT) { /* Workaround for Valgrind */ if (rlimit(RLIMIT_DATA) == 0 && mm->data_vm + npages <= rlimit_max(RLIMIT_DATA) >> PAGE_SHIFT) return true; pr_warn_once("%s (%d): VmData %lu exceed data ulimit %lu. Update limits%s.\n", current->comm, current->pid, (mm->data_vm + npages) << PAGE_SHIFT, rlimit(RLIMIT_DATA), ignore_rlimit_data ? "" : " or use boot option ignore_rlimit_data"); if (!ignore_rlimit_data) return false; } return true; } void vm_stat_account(struct mm_struct *mm, vm_flags_t flags, long npages) { mm->total_vm += npages; if (is_exec_mapping(flags)) mm->exec_vm += npages; else if (is_stack_mapping(flags)) mm->stack_vm += npages; else if (is_data_mapping(flags)) mm->data_vm += npages; } static vm_fault_t special_mapping_fault(struct vm_fault *vmf); /* * Having a close hook prevents vma merging regardless of flags. 
*/ static void special_mapping_close(struct vm_area_struct *vma) { } static const char *special_mapping_name(struct vm_area_struct *vma) { return ((struct vm_special_mapping *)vma->vm_private_data)->name; } static int special_mapping_mremap(struct vm_area_struct *new_vma) { struct vm_special_mapping *sm = new_vma->vm_private_data; if (WARN_ON_ONCE(current->mm != new_vma->vm_mm)) return -EFAULT; if (sm->mremap) return sm->mremap(sm, new_vma); return 0; } static const struct vm_operations_struct special_mapping_vmops = { .close = special_mapping_close, .fault = special_mapping_fault, .mremap = special_mapping_mremap, .name = special_mapping_name, }; static const struct vm_operations_struct legacy_special_mapping_vmops = { .close = special_mapping_close, .fault = special_mapping_fault, }; static vm_fault_t special_mapping_fault(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; pgoff_t pgoff; struct page **pages; if (vma->vm_ops == &legacy_special_mapping_vmops) { pages = vma->vm_private_data; } else { struct vm_special_mapping *sm = vma->vm_private_data; if (sm->fault) return sm->fault(sm, vmf->vma, vmf); pages = sm->pages; } for (pgoff = vmf->pgoff; pgoff && *pages; ++pages) pgoff--; if (*pages) { struct page *page = *pages; get_page(page); vmf->page = page; return 0; } return VM_FAULT_SIGBUS; } static struct vm_area_struct *__install_special_mapping( struct mm_struct *mm, unsigned long addr, unsigned long len, unsigned long vm_flags, void *priv, const struct vm_operations_struct *ops) { int ret; struct vm_area_struct *vma; vma = vm_area_alloc(mm); if (unlikely(vma == NULL)) return ERR_PTR(-ENOMEM); vma->vm_start = addr; vma->vm_end = addr + len; vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND | VM_SOFTDIRTY; vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); vma->vm_ops = ops; vma->vm_private_data = priv; ret = insert_vm_struct(mm, vma); if (ret) goto out; vm_stat_account(mm, vma->vm_flags, len >> PAGE_SHIFT); perf_event_mmap(vma); return vma; out: vm_area_free(vma); return ERR_PTR(ret); } bool vma_is_special_mapping(const struct vm_area_struct *vma, const struct vm_special_mapping *sm) { return vma->vm_private_data == sm && (vma->vm_ops == &special_mapping_vmops || vma->vm_ops == &legacy_special_mapping_vmops); } /* * Called with mm->mmap_sem held for writing. * Insert a new vma covering the given region, with the given flags. * Its pages are supplied by the given array of struct page *. * The array can be shorter than len >> PAGE_SHIFT if it's null-terminated. * The region past the last page supplied will always produce SIGBUS. * The array pointer and the pages it points to are assumed to stay alive * for as long as this mapping might exist. 
*/ struct vm_area_struct *_install_special_mapping( struct mm_struct *mm, unsigned long addr, unsigned long len, unsigned long vm_flags, const struct vm_special_mapping *spec) { return __install_special_mapping(mm, addr, len, vm_flags, (void *)spec, &special_mapping_vmops); } int install_special_mapping(struct mm_struct *mm, unsigned long addr, unsigned long len, unsigned long vm_flags, struct page **pages) { struct vm_area_struct *vma = __install_special_mapping( mm, addr, len, vm_flags, (void *)pages, &legacy_special_mapping_vmops); return PTR_ERR_OR_ZERO(vma); } static DEFINE_MUTEX(mm_all_locks_mutex); static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma) { if (!test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) { /* * The LSB of head.next can't change from under us * because we hold the mm_all_locks_mutex. */ down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_sem); /* * We can safely modify head.next after taking the * anon_vma->root->rwsem. If some other vma in this mm shares * the same anon_vma we won't take it again. * * No need of atomic instructions here, head.next * can't change from under us thanks to the * anon_vma->root->rwsem. */ if (__test_and_set_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) BUG(); } } static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping) { if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) { /* * AS_MM_ALL_LOCKS can't change from under us because * we hold the mm_all_locks_mutex. * * Operations on ->flags have to be atomic because * even if AS_MM_ALL_LOCKS is stable thanks to the * mm_all_locks_mutex, there may be other cpus * changing other bitflags in parallel to us. */ if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags)) BUG(); down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_sem); } } /* * This operation locks against the VM for all pte/vma/mm related * operations that could ever happen on a certain mm. This includes * vmtruncate, try_to_unmap, and all page faults. * * The caller must take the mmap_sem in write mode before calling * mm_take_all_locks(). The caller isn't allowed to release the * mmap_sem until mm_drop_all_locks() returns. * * mmap_sem in write mode is required in order to block all operations * that could modify pagetables and free pages without need of * altering the vma layout. It's also needed in write mode to avoid new * anon_vmas to be associated with existing vmas. * * A single task can't take more than one mm_take_all_locks() in a row * or it would deadlock. * * The LSB in anon_vma->rb_root.rb_node and the AS_MM_ALL_LOCKS bitflag in * mapping->flags avoid to take the same lock twice, if more than one * vma in this mm is backed by the same anon_vma or address_space. * * We take locks in following order, accordingly to comment at beginning * of mm/rmap.c: * - all hugetlbfs_i_mmap_rwsem_key locks (aka mapping->i_mmap_rwsem for * hugetlb mapping); * - all i_mmap_rwsem locks; * - all anon_vma->rwseml * * We can take all locks within these types randomly because the VM code * doesn't nest them and we protected from parallel mm_take_all_locks() by * mm_all_locks_mutex. * * mm_take_all_locks() and mm_drop_all_locks are expensive operations * that may have to take thousand of locks. * * mm_take_all_locks() can fail if it's interrupted by signals. 
*/ int mm_take_all_locks(struct mm_struct *mm) { struct vm_area_struct *vma; struct anon_vma_chain *avc; BUG_ON(down_read_trylock(&mm->mmap_sem)); mutex_lock(&mm_all_locks_mutex); for (vma = mm->mmap; vma; vma = vma->vm_next) { if (signal_pending(current)) goto out_unlock; if (vma->vm_file && vma->vm_file->f_mapping && is_vm_hugetlb_page(vma)) vm_lock_mapping(mm, vma->vm_file->f_mapping); } for (vma = mm->mmap; vma; vma = vma->vm_next) { if (signal_pending(current)) goto out_unlock; if (vma->vm_file && vma->vm_file->f_mapping && !is_vm_hugetlb_page(vma)) vm_lock_mapping(mm, vma->vm_file->f_mapping); } for (vma = mm->mmap; vma; vma = vma->vm_next) { if (signal_pending(current)) goto out_unlock; if (vma->anon_vma) list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) vm_lock_anon_vma(mm, avc->anon_vma); } return 0; out_unlock: mm_drop_all_locks(mm); return -EINTR; } static void vm_unlock_anon_vma(struct anon_vma *anon_vma) { if (test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) { /* * The LSB of head.next can't change to 0 from under * us because we hold the mm_all_locks_mutex. * * We must however clear the bitflag before unlocking * the vma so the users using the anon_vma->rb_root will * never see our bitflag. * * No need of atomic instructions here, head.next * can't change from under us until we release the * anon_vma->root->rwsem. */ if (!__test_and_clear_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) BUG(); anon_vma_unlock_write(anon_vma); } } static void vm_unlock_mapping(struct address_space *mapping) { if (test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) { /* * AS_MM_ALL_LOCKS can't change to 0 from under us * because we hold the mm_all_locks_mutex. */ i_mmap_unlock_write(mapping); if (!test_and_clear_bit(AS_MM_ALL_LOCKS, &mapping->flags)) BUG(); } } /* * The mmap_sem cannot be released by the caller until * mm_drop_all_locks() returns. */ void mm_drop_all_locks(struct mm_struct *mm) { struct vm_area_struct *vma; struct anon_vma_chain *avc; BUG_ON(down_read_trylock(&mm->mmap_sem)); BUG_ON(!mutex_is_locked(&mm_all_locks_mutex)); for (vma = mm->mmap; vma; vma = vma->vm_next) { if (vma->anon_vma) list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) vm_unlock_anon_vma(avc->anon_vma); if (vma->vm_file && vma->vm_file->f_mapping) vm_unlock_mapping(vma->vm_file->f_mapping); } mutex_unlock(&mm_all_locks_mutex); } /* * initialise the percpu counter for VM */ void __init mmap_init(void) { int ret; ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL); VM_BUG_ON(ret); } /* * Initialise sysctl_user_reserve_kbytes. * * This is intended to prevent a user from starting a single memory hogging * process, such that they cannot recover (kill the hog) in OVERCOMMIT_NEVER * mode. * * The default value is min(3% of free memory, 128MB) * 128MB is enough to recover with sshd/login, bash, and top/kill. */ static int init_user_reserve(void) { unsigned long free_kbytes; free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10); sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17); return 0; } subsys_initcall(init_user_reserve); /* * Initialise sysctl_admin_reserve_kbytes. * * The purpose of sysctl_admin_reserve_kbytes is to allow the sys admin * to log in and kill a memory hogging process. * * Systems with more than 256MB will reserve 8MB, enough to recover * with sshd, bash, and top in OVERCOMMIT_GUESS. Smaller systems will * only reserve 3% of free pages by default. 
*/ static int init_admin_reserve(void) { unsigned long free_kbytes; free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10); sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13); return 0; } subsys_initcall(init_admin_reserve); /* * Reinititalise user and admin reserves if memory is added or removed. * * The default user reserve max is 128MB, and the default max for the * admin reserve is 8MB. These are usually, but not always, enough to * enable recovery from a memory hogging process using login/sshd, a shell, * and tools like top. It may make sense to increase or even disable the * reserve depending on the existence of swap or variations in the recovery * tools. So, the admin may have changed them. * * If memory is added and the reserves have been eliminated or increased above * the default max, then we'll trust the admin. * * If memory is removed and there isn't enough free memory, then we * need to reset the reserves. * * Otherwise keep the reserve set by the admin. */ static int reserve_mem_notifier(struct notifier_block *nb, unsigned long action, void *data) { unsigned long tmp, free_kbytes; switch (action) { case MEM_ONLINE: /* Default max is 128MB. Leave alone if modified by operator. */ tmp = sysctl_user_reserve_kbytes; if (0 < tmp && tmp < (1UL << 17)) init_user_reserve(); /* Default max is 8MB. Leave alone if modified by operator. */ tmp = sysctl_admin_reserve_kbytes; if (0 < tmp && tmp < (1UL << 13)) init_admin_reserve(); break; case MEM_OFFLINE: free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10); if (sysctl_user_reserve_kbytes > free_kbytes) { init_user_reserve(); pr_info("vm.user_reserve_kbytes reset to %lu\n", sysctl_user_reserve_kbytes); } if (sysctl_admin_reserve_kbytes > free_kbytes) { init_admin_reserve(); pr_info("vm.admin_reserve_kbytes reset to %lu\n", sysctl_admin_reserve_kbytes); } break; default: break; } return NOTIFY_OK; } static struct notifier_block reserve_mem_nb = { .notifier_call = reserve_mem_notifier, }; static int __meminit init_reserve_notifier(void) { if (register_hotmemory_notifier(&reserve_mem_nb)) pr_err("Failed registering memory add/remove notifier for admin reserve\n"); return 0; } subsys_initcall(init_reserve_notifier);
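/*
 * A minimal userspace sketch, not part of the mm code above: it only
 * illustrates the RLIMIT_STACK bound that acct_stack_growth() enforces when
 * expand_downwards()/expand_upwards() grow the stack vma. The frame size and
 * recursion depth are illustrative values, not anything taken from the file.
 */
#include <stdio.h>
#include <string.h>
#include <sys/resource.h>

static size_t grow_stack(unsigned int depth)
{
	char pad[64 * 1024];	/* each recursion level pins ~64KiB of stack */
	size_t used;

	memset(pad, 0, sizeof(pad));	/* touch the pages so the vma must grow */
	used = sizeof(pad);
	if (depth)
		used += grow_stack(depth - 1);
	return used + (size_t)pad[0];	/* use pad so it is not optimized away */
}

int main(void)
{
	struct rlimit rl;

	if (getrlimit(RLIMIT_STACK, &rl) == 0)
		printf("RLIMIT_STACK soft limit: %llu bytes\n",
		       (unsigned long long)rl.rlim_cur);

	/* Grow the stack by a few MiB; going past rl.rlim_cur would be fatal. */
	printf("stack touched: %zu bytes\n", grow_stack(32));
	return 0;
}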
./CrossVul/dataset_final_sorted/CWE-362/c/bad_829_4
crossvul-cpp_data_bad_3275_1
/* * inode.c - part of debugfs, a tiny little debug file system * * Copyright (C) 2004 Greg Kroah-Hartman <greg@kroah.com> * Copyright (C) 2004 IBM Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * debugfs is for people to use instead of /proc or /sys. * See Documentation/DocBook/kernel-api for more details. * */ #include <linux/module.h> #include <linux/fs.h> #include <linux/mount.h> #include <linux/pagemap.h> #include <linux/init.h> #include <linux/kobject.h> #include <linux/namei.h> #include <linux/debugfs.h> #include <linux/fsnotify.h> #include <linux/string.h> #include <linux/seq_file.h> #include <linux/parser.h> #include <linux/magic.h> #include <linux/slab.h> #include <linux/srcu.h> #include "internal.h" #define DEBUGFS_DEFAULT_MODE 0700 DEFINE_SRCU(debugfs_srcu); static struct vfsmount *debugfs_mount; static int debugfs_mount_count; static bool debugfs_registered; static struct inode *debugfs_get_inode(struct super_block *sb) { struct inode *inode = new_inode(sb); if (inode) { inode->i_ino = get_next_ino(); inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode); } return inode; } struct debugfs_mount_opts { kuid_t uid; kgid_t gid; umode_t mode; }; enum { Opt_uid, Opt_gid, Opt_mode, Opt_err }; static const match_table_t tokens = { {Opt_uid, "uid=%u"}, {Opt_gid, "gid=%u"}, {Opt_mode, "mode=%o"}, {Opt_err, NULL} }; struct debugfs_fs_info { struct debugfs_mount_opts mount_opts; }; static int debugfs_parse_options(char *data, struct debugfs_mount_opts *opts) { substring_t args[MAX_OPT_ARGS]; int option; int token; kuid_t uid; kgid_t gid; char *p; opts->mode = DEBUGFS_DEFAULT_MODE; while ((p = strsep(&data, ",")) != NULL) { if (!*p) continue; token = match_token(p, tokens, args); switch (token) { case Opt_uid: if (match_int(&args[0], &option)) return -EINVAL; uid = make_kuid(current_user_ns(), option); if (!uid_valid(uid)) return -EINVAL; opts->uid = uid; break; case Opt_gid: if (match_int(&args[0], &option)) return -EINVAL; gid = make_kgid(current_user_ns(), option); if (!gid_valid(gid)) return -EINVAL; opts->gid = gid; break; case Opt_mode: if (match_octal(&args[0], &option)) return -EINVAL; opts->mode = option & S_IALLUGO; break; /* * We might like to report bad mount options here; * but traditionally debugfs has ignored all mount options */ } } return 0; } static int debugfs_apply_options(struct super_block *sb) { struct debugfs_fs_info *fsi = sb->s_fs_info; struct inode *inode = d_inode(sb->s_root); struct debugfs_mount_opts *opts = &fsi->mount_opts; inode->i_mode &= ~S_IALLUGO; inode->i_mode |= opts->mode; inode->i_uid = opts->uid; inode->i_gid = opts->gid; return 0; } static int debugfs_remount(struct super_block *sb, int *flags, char *data) { int err; struct debugfs_fs_info *fsi = sb->s_fs_info; sync_filesystem(sb); err = debugfs_parse_options(data, &fsi->mount_opts); if (err) goto fail; debugfs_apply_options(sb); fail: return err; } static int debugfs_show_options(struct seq_file *m, struct dentry *root) { struct debugfs_fs_info *fsi = root->d_sb->s_fs_info; struct debugfs_mount_opts *opts = &fsi->mount_opts; if (!uid_eq(opts->uid, GLOBAL_ROOT_UID)) seq_printf(m, ",uid=%u", from_kuid_munged(&init_user_ns, opts->uid)); if (!gid_eq(opts->gid, GLOBAL_ROOT_GID)) seq_printf(m, ",gid=%u", from_kgid_munged(&init_user_ns, opts->gid)); if (opts->mode != DEBUGFS_DEFAULT_MODE) seq_printf(m, ",mode=%o", opts->mode); return 0; } 
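/*
 * A minimal userspace sketch, separate from this kernel file: the data string
 * handed to mount(2) is exactly what debugfs_parse_options() above tokenizes
 * (uid=, gid=, mode=), and what debugfs_show_options() reports back. The
 * mount point and option values here are illustrative only.
 */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* uid=, gid= and mode= match the tokens table in debugfs_parse_options(). */
	if (mount("none", "/sys/kernel/debug", "debugfs", 0,
		  "uid=0,gid=0,mode=0750") != 0)
		perror("mount debugfs");
	return 0;
}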
static void debugfs_evict_inode(struct inode *inode) { truncate_inode_pages_final(&inode->i_data); clear_inode(inode); if (S_ISLNK(inode->i_mode)) kfree(inode->i_link); } static const struct super_operations debugfs_super_operations = { .statfs = simple_statfs, .remount_fs = debugfs_remount, .show_options = debugfs_show_options, .evict_inode = debugfs_evict_inode, }; static struct vfsmount *debugfs_automount(struct path *path) { debugfs_automount_t f; f = (debugfs_automount_t)path->dentry->d_fsdata; return f(path->dentry, d_inode(path->dentry)->i_private); } static const struct dentry_operations debugfs_dops = { .d_delete = always_delete_dentry, .d_automount = debugfs_automount, }; static int debug_fill_super(struct super_block *sb, void *data, int silent) { static const struct tree_descr debug_files[] = {{""}}; struct debugfs_fs_info *fsi; int err; save_mount_options(sb, data); fsi = kzalloc(sizeof(struct debugfs_fs_info), GFP_KERNEL); sb->s_fs_info = fsi; if (!fsi) { err = -ENOMEM; goto fail; } err = debugfs_parse_options(data, &fsi->mount_opts); if (err) goto fail; err = simple_fill_super(sb, DEBUGFS_MAGIC, debug_files); if (err) goto fail; sb->s_op = &debugfs_super_operations; sb->s_d_op = &debugfs_dops; debugfs_apply_options(sb); return 0; fail: kfree(fsi); sb->s_fs_info = NULL; return err; } static struct dentry *debug_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { return mount_single(fs_type, flags, data, debug_fill_super); } static struct file_system_type debug_fs_type = { .owner = THIS_MODULE, .name = "debugfs", .mount = debug_mount, .kill_sb = kill_litter_super, }; MODULE_ALIAS_FS("debugfs"); /** * debugfs_lookup() - look up an existing debugfs file * @name: a pointer to a string containing the name of the file to look up. * @parent: a pointer to the parent dentry of the file. * * This function will return a pointer to a dentry if it succeeds. If the file * doesn't exist or an error occurs, %NULL will be returned. The returned * dentry must be passed to dput() when it is no longer needed. * * If debugfs is not enabled in the kernel, the value -%ENODEV will be * returned. */ struct dentry *debugfs_lookup(const char *name, struct dentry *parent) { struct dentry *dentry; if (IS_ERR(parent)) return NULL; if (!parent) parent = debugfs_mount->mnt_root; inode_lock(d_inode(parent)); dentry = lookup_one_len(name, parent, strlen(name)); inode_unlock(d_inode(parent)); if (IS_ERR(dentry)) return NULL; if (!d_really_is_positive(dentry)) { dput(dentry); return NULL; } return dentry; } EXPORT_SYMBOL_GPL(debugfs_lookup); static struct dentry *start_creating(const char *name, struct dentry *parent) { struct dentry *dentry; int error; pr_debug("debugfs: creating file '%s'\n",name); if (IS_ERR(parent)) return parent; error = simple_pin_fs(&debug_fs_type, &debugfs_mount, &debugfs_mount_count); if (error) return ERR_PTR(error); /* If the parent is not specified, we create it in the root. * We need the root dentry to do this, which is in the super * block. A pointer to that is in the struct vfsmount that we * have around. 
*/ if (!parent) parent = debugfs_mount->mnt_root; inode_lock(d_inode(parent)); dentry = lookup_one_len(name, parent, strlen(name)); if (!IS_ERR(dentry) && d_really_is_positive(dentry)) { dput(dentry); dentry = ERR_PTR(-EEXIST); } if (IS_ERR(dentry)) { inode_unlock(d_inode(parent)); simple_release_fs(&debugfs_mount, &debugfs_mount_count); } return dentry; } static struct dentry *failed_creating(struct dentry *dentry) { inode_unlock(d_inode(dentry->d_parent)); dput(dentry); simple_release_fs(&debugfs_mount, &debugfs_mount_count); return NULL; } static struct dentry *end_creating(struct dentry *dentry) { inode_unlock(d_inode(dentry->d_parent)); return dentry; } static struct dentry *__debugfs_create_file(const char *name, umode_t mode, struct dentry *parent, void *data, const struct file_operations *proxy_fops, const struct file_operations *real_fops) { struct dentry *dentry; struct inode *inode; if (!(mode & S_IFMT)) mode |= S_IFREG; BUG_ON(!S_ISREG(mode)); dentry = start_creating(name, parent); if (IS_ERR(dentry)) return NULL; inode = debugfs_get_inode(dentry->d_sb); if (unlikely(!inode)) return failed_creating(dentry); inode->i_mode = mode; inode->i_private = data; inode->i_fop = proxy_fops; dentry->d_fsdata = (void *)real_fops; d_instantiate(dentry, inode); fsnotify_create(d_inode(dentry->d_parent), dentry); return end_creating(dentry); } /** * debugfs_create_file - create a file in the debugfs filesystem * @name: a pointer to a string containing the name of the file to create. * @mode: the permission that the file should have. * @parent: a pointer to the parent dentry for this file. This should be a * directory dentry if set. If this parameter is NULL, then the * file will be created in the root of the debugfs filesystem. * @data: a pointer to something that the caller will want to get to later * on. The inode.i_private pointer will point to this value on * the open() call. * @fops: a pointer to a struct file_operations that should be used for * this file. * * This is the basic "create a file" function for debugfs. It allows for a * wide range of flexibility in creating a file, or a directory (if you want * to create a directory, the debugfs_create_dir() function is * recommended to be used instead.) * * This function will return a pointer to a dentry if it succeeds. This * pointer must be passed to the debugfs_remove() function when the file is * to be removed (no automatic cleanup happens if your module is unloaded, * you are responsible here.) If an error occurs, %NULL will be returned. * * If debugfs is not enabled in the kernel, the value -%ENODEV will be * returned. */ struct dentry *debugfs_create_file(const char *name, umode_t mode, struct dentry *parent, void *data, const struct file_operations *fops) { return __debugfs_create_file(name, mode, parent, data, fops ? &debugfs_full_proxy_file_operations : &debugfs_noop_file_operations, fops); } EXPORT_SYMBOL_GPL(debugfs_create_file); /** * debugfs_create_file_unsafe - create a file in the debugfs filesystem * @name: a pointer to a string containing the name of the file to create. * @mode: the permission that the file should have. * @parent: a pointer to the parent dentry for this file. This should be a * directory dentry if set. If this parameter is NULL, then the * file will be created in the root of the debugfs filesystem. * @data: a pointer to something that the caller will want to get to later * on. The inode.i_private pointer will point to this value on * the open() call. 
* @fops: a pointer to a struct file_operations that should be used for * this file. * * debugfs_create_file_unsafe() is completely analogous to * debugfs_create_file(), the only difference being that the fops * handed it will not get protected against file removals by the * debugfs core. * * It is your responsibility to protect your struct file_operation * methods against file removals by means of debugfs_use_file_start() * and debugfs_use_file_finish(). ->open() is still protected by * debugfs though. * * Any struct file_operations defined by means of * DEFINE_DEBUGFS_ATTRIBUTE() is protected against file removals and * thus, may be used here. */ struct dentry *debugfs_create_file_unsafe(const char *name, umode_t mode, struct dentry *parent, void *data, const struct file_operations *fops) { return __debugfs_create_file(name, mode, parent, data, fops ? &debugfs_open_proxy_file_operations : &debugfs_noop_file_operations, fops); } EXPORT_SYMBOL_GPL(debugfs_create_file_unsafe); /** * debugfs_create_file_size - create a file in the debugfs filesystem * @name: a pointer to a string containing the name of the file to create. * @mode: the permission that the file should have. * @parent: a pointer to the parent dentry for this file. This should be a * directory dentry if set. If this parameter is NULL, then the * file will be created in the root of the debugfs filesystem. * @data: a pointer to something that the caller will want to get to later * on. The inode.i_private pointer will point to this value on * the open() call. * @fops: a pointer to a struct file_operations that should be used for * this file. * @file_size: initial file size * * This is the basic "create a file" function for debugfs. It allows for a * wide range of flexibility in creating a file, or a directory (if you want * to create a directory, the debugfs_create_dir() function is * recommended to be used instead.) * * This function will return a pointer to a dentry if it succeeds. This * pointer must be passed to the debugfs_remove() function when the file is * to be removed (no automatic cleanup happens if your module is unloaded, * you are responsible here.) If an error occurs, %NULL will be returned. * * If debugfs is not enabled in the kernel, the value -%ENODEV will be * returned. */ struct dentry *debugfs_create_file_size(const char *name, umode_t mode, struct dentry *parent, void *data, const struct file_operations *fops, loff_t file_size) { struct dentry *de = debugfs_create_file(name, mode, parent, data, fops); if (de) d_inode(de)->i_size = file_size; return de; } EXPORT_SYMBOL_GPL(debugfs_create_file_size); /** * debugfs_create_dir - create a directory in the debugfs filesystem * @name: a pointer to a string containing the name of the directory to * create. * @parent: a pointer to the parent dentry for this file. This should be a * directory dentry if set. If this parameter is NULL, then the * directory will be created in the root of the debugfs filesystem. * * This function creates a directory in debugfs with the given name. * * This function will return a pointer to a dentry if it succeeds. This * pointer must be passed to the debugfs_remove() function when the file is * to be removed (no automatic cleanup happens if your module is unloaded, * you are responsible here.) If an error occurs, %NULL will be returned. * * If debugfs is not enabled in the kernel, the value -%ENODEV will be * returned. 
*/ struct dentry *debugfs_create_dir(const char *name, struct dentry *parent) { struct dentry *dentry = start_creating(name, parent); struct inode *inode; if (IS_ERR(dentry)) return NULL; inode = debugfs_get_inode(dentry->d_sb); if (unlikely(!inode)) return failed_creating(dentry); inode->i_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO; inode->i_op = &simple_dir_inode_operations; inode->i_fop = &simple_dir_operations; /* directory inodes start off with i_nlink == 2 (for "." entry) */ inc_nlink(inode); d_instantiate(dentry, inode); inc_nlink(d_inode(dentry->d_parent)); fsnotify_mkdir(d_inode(dentry->d_parent), dentry); return end_creating(dentry); } EXPORT_SYMBOL_GPL(debugfs_create_dir); /** * debugfs_create_automount - create automount point in the debugfs filesystem * @name: a pointer to a string containing the name of the file to create. * @parent: a pointer to the parent dentry for this file. This should be a * directory dentry if set. If this parameter is NULL, then the * file will be created in the root of the debugfs filesystem. * @f: function to be called when pathname resolution steps on that one. * @data: opaque argument to pass to f(). * * @f should return what ->d_automount() would. */ struct dentry *debugfs_create_automount(const char *name, struct dentry *parent, debugfs_automount_t f, void *data) { struct dentry *dentry = start_creating(name, parent); struct inode *inode; if (IS_ERR(dentry)) return NULL; inode = debugfs_get_inode(dentry->d_sb); if (unlikely(!inode)) return failed_creating(dentry); make_empty_dir_inode(inode); inode->i_flags |= S_AUTOMOUNT; inode->i_private = data; dentry->d_fsdata = (void *)f; /* directory inodes start off with i_nlink == 2 (for "." entry) */ inc_nlink(inode); d_instantiate(dentry, inode); inc_nlink(d_inode(dentry->d_parent)); fsnotify_mkdir(d_inode(dentry->d_parent), dentry); return end_creating(dentry); } EXPORT_SYMBOL(debugfs_create_automount); /** * debugfs_create_symlink- create a symbolic link in the debugfs filesystem * @name: a pointer to a string containing the name of the symbolic link to * create. * @parent: a pointer to the parent dentry for this symbolic link. This * should be a directory dentry if set. If this parameter is NULL, * then the symbolic link will be created in the root of the debugfs * filesystem. * @target: a pointer to a string containing the path to the target of the * symbolic link. * * This function creates a symbolic link with the given name in debugfs that * links to the given target path. * * This function will return a pointer to a dentry if it succeeds. This * pointer must be passed to the debugfs_remove() function when the symbolic * link is to be removed (no automatic cleanup happens if your module is * unloaded, you are responsible here.) If an error occurs, %NULL will be * returned. * * If debugfs is not enabled in the kernel, the value -%ENODEV will be * returned. 
*/ struct dentry *debugfs_create_symlink(const char *name, struct dentry *parent, const char *target) { struct dentry *dentry; struct inode *inode; char *link = kstrdup(target, GFP_KERNEL); if (!link) return NULL; dentry = start_creating(name, parent); if (IS_ERR(dentry)) { kfree(link); return NULL; } inode = debugfs_get_inode(dentry->d_sb); if (unlikely(!inode)) { kfree(link); return failed_creating(dentry); } inode->i_mode = S_IFLNK | S_IRWXUGO; inode->i_op = &simple_symlink_inode_operations; inode->i_link = link; d_instantiate(dentry, inode); return end_creating(dentry); } EXPORT_SYMBOL_GPL(debugfs_create_symlink); static int __debugfs_remove(struct dentry *dentry, struct dentry *parent) { int ret = 0; if (simple_positive(dentry)) { dget(dentry); if (d_is_dir(dentry)) ret = simple_rmdir(d_inode(parent), dentry); else simple_unlink(d_inode(parent), dentry); if (!ret) d_delete(dentry); dput(dentry); } return ret; } /** * debugfs_remove - removes a file or directory from the debugfs filesystem * @dentry: a pointer to a the dentry of the file or directory to be * removed. If this parameter is NULL or an error value, nothing * will be done. * * This function removes a file or directory in debugfs that was previously * created with a call to another debugfs function (like * debugfs_create_file() or variants thereof.) * * This function is required to be called in order for the file to be * removed, no automatic cleanup of files will happen when a module is * removed, you are responsible here. */ void debugfs_remove(struct dentry *dentry) { struct dentry *parent; int ret; if (IS_ERR_OR_NULL(dentry)) return; parent = dentry->d_parent; inode_lock(d_inode(parent)); ret = __debugfs_remove(dentry, parent); inode_unlock(d_inode(parent)); if (!ret) simple_release_fs(&debugfs_mount, &debugfs_mount_count); synchronize_srcu(&debugfs_srcu); } EXPORT_SYMBOL_GPL(debugfs_remove); /** * debugfs_remove_recursive - recursively removes a directory * @dentry: a pointer to a the dentry of the directory to be removed. If this * parameter is NULL or an error value, nothing will be done. * * This function recursively removes a directory tree in debugfs that * was previously created with a call to another debugfs function * (like debugfs_create_file() or variants thereof.) * * This function is required to be called in order for the file to be * removed, no automatic cleanup of files will happen when a module is * removed, you are responsible here. */ void debugfs_remove_recursive(struct dentry *dentry) { struct dentry *child, *parent; if (IS_ERR_OR_NULL(dentry)) return; parent = dentry; down: inode_lock(d_inode(parent)); loop: /* * The parent->d_subdirs is protected by the d_lock. Outside that * lock, the child can be unlinked and set to be freed which can * use the d_u.d_child as the rcu head and corrupt this list. */ spin_lock(&parent->d_lock); list_for_each_entry(child, &parent->d_subdirs, d_child) { if (!simple_positive(child)) continue; /* perhaps simple_empty(child) makes more sense */ if (!list_empty(&child->d_subdirs)) { spin_unlock(&parent->d_lock); inode_unlock(d_inode(parent)); parent = child; goto down; } spin_unlock(&parent->d_lock); if (!__debugfs_remove(child, parent)) simple_release_fs(&debugfs_mount, &debugfs_mount_count); /* * The parent->d_lock protects agaist child from unlinking * from d_subdirs. When releasing the parent->d_lock we can * no longer trust that the next pointer is valid. * Restart the loop. We'll skip this one with the * simple_positive() check. 
*/ goto loop; } spin_unlock(&parent->d_lock); inode_unlock(d_inode(parent)); child = parent; parent = parent->d_parent; inode_lock(d_inode(parent)); if (child != dentry) /* go up */ goto loop; if (!__debugfs_remove(child, parent)) simple_release_fs(&debugfs_mount, &debugfs_mount_count); inode_unlock(d_inode(parent)); synchronize_srcu(&debugfs_srcu); } EXPORT_SYMBOL_GPL(debugfs_remove_recursive); /** * debugfs_rename - rename a file/directory in the debugfs filesystem * @old_dir: a pointer to the parent dentry for the renamed object. This * should be a directory dentry. * @old_dentry: dentry of an object to be renamed. * @new_dir: a pointer to the parent dentry where the object should be * moved. This should be a directory dentry. * @new_name: a pointer to a string containing the target name. * * This function renames a file/directory in debugfs. The target must not * exist for rename to succeed. * * This function will return a pointer to old_dentry (which is updated to * reflect renaming) if it succeeds. If an error occurs, %NULL will be * returned. * * If debugfs is not enabled in the kernel, the value -%ENODEV will be * returned. */ struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry, struct dentry *new_dir, const char *new_name) { int error; struct dentry *dentry = NULL, *trap; const char *old_name; trap = lock_rename(new_dir, old_dir); /* Source or destination directories don't exist? */ if (d_really_is_negative(old_dir) || d_really_is_negative(new_dir)) goto exit; /* Source does not exist, cyclic rename, or mountpoint? */ if (d_really_is_negative(old_dentry) || old_dentry == trap || d_mountpoint(old_dentry)) goto exit; dentry = lookup_one_len(new_name, new_dir, strlen(new_name)); /* Lookup failed, cyclic rename or target exists? */ if (IS_ERR(dentry) || dentry == trap || d_really_is_positive(dentry)) goto exit; old_name = fsnotify_oldname_init(old_dentry->d_name.name); error = simple_rename(d_inode(old_dir), old_dentry, d_inode(new_dir), dentry, 0); if (error) { fsnotify_oldname_free(old_name); goto exit; } d_move(old_dentry, dentry); fsnotify_move(d_inode(old_dir), d_inode(new_dir), old_name, d_is_dir(old_dentry), NULL, old_dentry); fsnotify_oldname_free(old_name); unlock_rename(new_dir, old_dir); dput(dentry); return old_dentry; exit: if (dentry && !IS_ERR(dentry)) dput(dentry); unlock_rename(new_dir, old_dir); return NULL; } EXPORT_SYMBOL_GPL(debugfs_rename); /** * debugfs_initialized - Tells whether debugfs has been registered */ bool debugfs_initialized(void) { return debugfs_registered; } EXPORT_SYMBOL_GPL(debugfs_initialized); static int __init debugfs_init(void) { int retval; retval = sysfs_create_mount_point(kernel_kobj, "debug"); if (retval) return retval; retval = register_filesystem(&debug_fs_type); if (retval) sysfs_remove_mount_point(kernel_kobj, "debug"); else debugfs_registered = true; return retval; } core_initcall(debugfs_init);
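/*
 * A hedged sketch, not part of inode.c: a typical module-side use of the API
 * documented above, assuming the stock helpers from <linux/debugfs.h>. The
 * directory name ("example"), file name ("value") and demo_value variable are
 * made up for illustration; debugfs_create_u32() is one of the helper
 * creators layered on the file-creation API in this source. As the comments
 * above stress, nothing is cleaned up automatically on module unload, so the
 * exit path must call debugfs_remove_recursive().
 */
#include <linux/debugfs.h>
#include <linux/module.h>

static struct dentry *demo_dir;
static u32 demo_value;

static int __init demo_init(void)
{
	demo_dir = debugfs_create_dir("example", NULL);
	if (!demo_dir)
		return -ENODEV;

	/* Exposes demo_value read/write under /sys/kernel/debug/example/value */
	debugfs_create_u32("value", 0644, demo_dir, &demo_value);
	return 0;
}

static void __exit demo_exit(void)
{
	/* Required: debugfs does no automatic cleanup for unloaded modules. */
	debugfs_remove_recursive(demo_dir);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");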
./CrossVul/dataset_final_sorted/CWE-362/c/bad_3275_1
crossvul-cpp_data_bad_1496_2
/* ssl/ssl_err.c */ /* ==================================================================== * Copyright (c) 1999-2015 The OpenSSL Project. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * 3. All advertising materials mentioning features or use of this * software must display the following acknowledgment: * "This product includes software developed by the OpenSSL Project * for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)" * * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to * endorse or promote products derived from this software without * prior written permission. For written permission, please contact * openssl-core@OpenSSL.org. * * 5. Products derived from this software may not be called "OpenSSL" * nor may "OpenSSL" appear in their names without prior written * permission of the OpenSSL Project. * * 6. Redistributions of any form whatsoever must retain the following * acknowledgment: * "This product includes software developed by the OpenSSL Project * for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)" * * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. * ==================================================================== * * This product includes cryptographic software written by Eric Young * (eay@cryptsoft.com). This product includes software written by Tim * Hudson (tjh@cryptsoft.com). * */ /* * NOTE: this file was auto generated by the mkerr.pl script: any changes * made to it will be overwritten when the script next updates this file, * only reason strings will be preserved. 
*/ #include <stdio.h> #include <openssl/err.h> #include <openssl/ssl.h> /* BEGIN ERROR CODES */ #ifndef OPENSSL_NO_ERR # define ERR_FUNC(func) ERR_PACK(ERR_LIB_SSL,func,0) # define ERR_REASON(reason) ERR_PACK(ERR_LIB_SSL,0,reason) static ERR_STRING_DATA SSL_str_functs[] = { {ERR_FUNC(SSL_F_CHECK_SUITEB_CIPHER_LIST), "CHECK_SUITEB_CIPHER_LIST"}, {ERR_FUNC(SSL_F_D2I_SSL_SESSION), "d2i_SSL_SESSION"}, {ERR_FUNC(SSL_F_DO_DTLS1_WRITE), "do_dtls1_write"}, {ERR_FUNC(SSL_F_DO_SSL3_WRITE), "DO_SSL3_WRITE"}, {ERR_FUNC(SSL_F_DTLS1_ACCEPT), "dtls1_accept"}, {ERR_FUNC(SSL_F_DTLS1_ADD_CERT_TO_BUF), "DTLS1_ADD_CERT_TO_BUF"}, {ERR_FUNC(SSL_F_DTLS1_BUFFER_RECORD), "DTLS1_BUFFER_RECORD"}, {ERR_FUNC(SSL_F_DTLS1_CHECK_TIMEOUT_NUM), "dtls1_check_timeout_num"}, {ERR_FUNC(SSL_F_DTLS1_CLIENT_HELLO), "dtls1_client_hello"}, {ERR_FUNC(SSL_F_DTLS1_CONNECT), "dtls1_connect"}, {ERR_FUNC(SSL_F_DTLS1_ENC), "DTLS1_ENC"}, {ERR_FUNC(SSL_F_DTLS1_GET_HELLO_VERIFY), "DTLS1_GET_HELLO_VERIFY"}, {ERR_FUNC(SSL_F_DTLS1_GET_MESSAGE), "dtls1_get_message"}, {ERR_FUNC(SSL_F_DTLS1_GET_MESSAGE_FRAGMENT), "DTLS1_GET_MESSAGE_FRAGMENT"}, {ERR_FUNC(SSL_F_DTLS1_GET_RECORD), "dtls1_get_record"}, {ERR_FUNC(SSL_F_DTLS1_HANDLE_TIMEOUT), "dtls1_handle_timeout"}, {ERR_FUNC(SSL_F_DTLS1_HEARTBEAT), "dtls1_heartbeat"}, {ERR_FUNC(SSL_F_DTLS1_OUTPUT_CERT_CHAIN), "dtls1_output_cert_chain"}, {ERR_FUNC(SSL_F_DTLS1_PREPROCESS_FRAGMENT), "DTLS1_PREPROCESS_FRAGMENT"}, {ERR_FUNC(SSL_F_DTLS1_PROCESS_OUT_OF_SEQ_MESSAGE), "DTLS1_PROCESS_OUT_OF_SEQ_MESSAGE"}, {ERR_FUNC(SSL_F_DTLS1_PROCESS_RECORD), "DTLS1_PROCESS_RECORD"}, {ERR_FUNC(SSL_F_DTLS1_READ_BYTES), "dtls1_read_bytes"}, {ERR_FUNC(SSL_F_DTLS1_READ_FAILED), "dtls1_read_failed"}, {ERR_FUNC(SSL_F_DTLS1_SEND_CERTIFICATE_REQUEST), "DTLS1_SEND_CERTIFICATE_REQUEST"}, {ERR_FUNC(SSL_F_DTLS1_SEND_CHANGE_CIPHER_SPEC), "dtls1_send_change_cipher_spec"}, {ERR_FUNC(SSL_F_DTLS1_SEND_CLIENT_CERTIFICATE), "dtls1_send_client_certificate"}, {ERR_FUNC(SSL_F_DTLS1_SEND_CLIENT_KEY_EXCHANGE), "dtls1_send_client_key_exchange"}, {ERR_FUNC(SSL_F_DTLS1_SEND_CLIENT_VERIFY), "dtls1_send_client_verify"}, {ERR_FUNC(SSL_F_DTLS1_SEND_HELLO_VERIFY_REQUEST), "DTLS1_SEND_HELLO_VERIFY_REQUEST"}, {ERR_FUNC(SSL_F_DTLS1_SEND_SERVER_CERTIFICATE), "dtls1_send_server_certificate"}, {ERR_FUNC(SSL_F_DTLS1_SEND_SERVER_HELLO), "dtls1_send_server_hello"}, {ERR_FUNC(SSL_F_DTLS1_SEND_SERVER_KEY_EXCHANGE), "dtls1_send_server_key_exchange"}, {ERR_FUNC(SSL_F_DTLS1_WRITE_APP_DATA_BYTES), "dtls1_write_app_data_bytes"}, {ERR_FUNC(SSL_F_SSL3_ACCEPT), "ssl3_accept"}, {ERR_FUNC(SSL_F_SSL3_ADD_CERT_TO_BUF), "SSL3_ADD_CERT_TO_BUF"}, {ERR_FUNC(SSL_F_SSL3_CALLBACK_CTRL), "ssl3_callback_ctrl"}, {ERR_FUNC(SSL_F_SSL3_CHANGE_CIPHER_STATE), "ssl3_change_cipher_state"}, {ERR_FUNC(SSL_F_SSL3_CHECK_CERT_AND_ALGORITHM), "ssl3_check_cert_and_algorithm"}, {ERR_FUNC(SSL_F_SSL3_CHECK_CLIENT_HELLO), "ssl3_check_client_hello"}, {ERR_FUNC(SSL_F_SSL3_CHECK_FINISHED), "SSL3_CHECK_FINISHED"}, {ERR_FUNC(SSL_F_SSL3_CLIENT_HELLO), "ssl3_client_hello"}, {ERR_FUNC(SSL_F_SSL3_CONNECT), "ssl3_connect"}, {ERR_FUNC(SSL_F_SSL3_CTRL), "ssl3_ctrl"}, {ERR_FUNC(SSL_F_SSL3_CTX_CTRL), "ssl3_ctx_ctrl"}, {ERR_FUNC(SSL_F_SSL3_DIGEST_CACHED_RECORDS), "ssl3_digest_cached_records"}, {ERR_FUNC(SSL_F_SSL3_DO_CHANGE_CIPHER_SPEC), "ssl3_do_change_cipher_spec"}, {ERR_FUNC(SSL_F_SSL3_ENC), "ssl3_enc"}, {ERR_FUNC(SSL_F_SSL3_GENERATE_KEY_BLOCK), "SSL3_GENERATE_KEY_BLOCK"}, {ERR_FUNC(SSL_F_SSL3_GET_CERTIFICATE_REQUEST), "ssl3_get_certificate_request"}, {ERR_FUNC(SSL_F_SSL3_GET_CERT_STATUS), "ssl3_get_cert_status"}, 
{ERR_FUNC(SSL_F_SSL3_GET_CERT_VERIFY), "ssl3_get_cert_verify"}, {ERR_FUNC(SSL_F_SSL3_GET_CLIENT_CERTIFICATE), "ssl3_get_client_certificate"}, {ERR_FUNC(SSL_F_SSL3_GET_CLIENT_HELLO), "ssl3_get_client_hello"}, {ERR_FUNC(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE), "ssl3_get_client_key_exchange"}, {ERR_FUNC(SSL_F_SSL3_GET_FINISHED), "ssl3_get_finished"}, {ERR_FUNC(SSL_F_SSL3_GET_KEY_EXCHANGE), "ssl3_get_key_exchange"}, {ERR_FUNC(SSL_F_SSL3_GET_MESSAGE), "ssl3_get_message"}, {ERR_FUNC(SSL_F_SSL3_GET_NEW_SESSION_TICKET), "ssl3_get_new_session_ticket"}, {ERR_FUNC(SSL_F_SSL3_GET_NEXT_PROTO), "ssl3_get_next_proto"}, {ERR_FUNC(SSL_F_SSL3_GET_RECORD), "SSL3_GET_RECORD"}, {ERR_FUNC(SSL_F_SSL3_GET_SERVER_CERTIFICATE), "ssl3_get_server_certificate"}, {ERR_FUNC(SSL_F_SSL3_GET_SERVER_DONE), "ssl3_get_server_done"}, {ERR_FUNC(SSL_F_SSL3_GET_SERVER_HELLO), "ssl3_get_server_hello"}, {ERR_FUNC(SSL_F_SSL3_HANDSHAKE_MAC), "ssl3_handshake_mac"}, {ERR_FUNC(SSL_F_SSL3_NEW_SESSION_TICKET), "SSL3_NEW_SESSION_TICKET"}, {ERR_FUNC(SSL_F_SSL3_OUTPUT_CERT_CHAIN), "ssl3_output_cert_chain"}, {ERR_FUNC(SSL_F_SSL3_PEEK), "ssl3_peek"}, {ERR_FUNC(SSL_F_SSL3_READ_BYTES), "ssl3_read_bytes"}, {ERR_FUNC(SSL_F_SSL3_READ_N), "ssl3_read_n"}, {ERR_FUNC(SSL_F_SSL3_SEND_CERTIFICATE_REQUEST), "ssl3_send_certificate_request"}, {ERR_FUNC(SSL_F_SSL3_SEND_CLIENT_CERTIFICATE), "ssl3_send_client_certificate"}, {ERR_FUNC(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE), "ssl3_send_client_key_exchange"}, {ERR_FUNC(SSL_F_SSL3_SEND_CLIENT_VERIFY), "ssl3_send_client_verify"}, {ERR_FUNC(SSL_F_SSL3_SEND_FINISHED), "ssl3_send_finished"}, {ERR_FUNC(SSL_F_SSL3_SEND_HELLO_REQUEST), "ssl3_send_hello_request"}, {ERR_FUNC(SSL_F_SSL3_SEND_SERVER_CERTIFICATE), "ssl3_send_server_certificate"}, {ERR_FUNC(SSL_F_SSL3_SEND_SERVER_DONE), "ssl3_send_server_done"}, {ERR_FUNC(SSL_F_SSL3_SEND_SERVER_HELLO), "ssl3_send_server_hello"}, {ERR_FUNC(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE), "ssl3_send_server_key_exchange"}, {ERR_FUNC(SSL_F_SSL3_SETUP_KEY_BLOCK), "ssl3_setup_key_block"}, {ERR_FUNC(SSL_F_SSL3_SETUP_READ_BUFFER), "ssl3_setup_read_buffer"}, {ERR_FUNC(SSL_F_SSL3_SETUP_WRITE_BUFFER), "ssl3_setup_write_buffer"}, {ERR_FUNC(SSL_F_SSL3_WRITE_BYTES), "ssl3_write_bytes"}, {ERR_FUNC(SSL_F_SSL3_WRITE_PENDING), "ssl3_write_pending"}, {ERR_FUNC(SSL_F_SSL_ADD_CERT_CHAIN), "ssl_add_cert_chain"}, {ERR_FUNC(SSL_F_SSL_ADD_CERT_TO_BUF), "SSL_ADD_CERT_TO_BUF"}, {ERR_FUNC(SSL_F_SSL_ADD_CLIENTHELLO_RENEGOTIATE_EXT), "ssl_add_clienthello_renegotiate_ext"}, {ERR_FUNC(SSL_F_SSL_ADD_CLIENTHELLO_TLSEXT), "ssl_add_clienthello_tlsext"}, {ERR_FUNC(SSL_F_SSL_ADD_CLIENTHELLO_USE_SRTP_EXT), "ssl_add_clienthello_use_srtp_ext"}, {ERR_FUNC(SSL_F_SSL_ADD_DIR_CERT_SUBJECTS_TO_STACK), "SSL_add_dir_cert_subjects_to_stack"}, {ERR_FUNC(SSL_F_SSL_ADD_FILE_CERT_SUBJECTS_TO_STACK), "SSL_add_file_cert_subjects_to_stack"}, {ERR_FUNC(SSL_F_SSL_ADD_SERVERHELLO_RENEGOTIATE_EXT), "ssl_add_serverhello_renegotiate_ext"}, {ERR_FUNC(SSL_F_SSL_ADD_SERVERHELLO_TLSEXT), "ssl_add_serverhello_tlsext"}, {ERR_FUNC(SSL_F_SSL_ADD_SERVERHELLO_USE_SRTP_EXT), "ssl_add_serverhello_use_srtp_ext"}, {ERR_FUNC(SSL_F_SSL_BAD_METHOD), "ssl_bad_method"}, {ERR_FUNC(SSL_F_SSL_BUILD_CERT_CHAIN), "ssl_build_cert_chain"}, {ERR_FUNC(SSL_F_SSL_BYTES_TO_CIPHER_LIST), "ssl_bytes_to_cipher_list"}, {ERR_FUNC(SSL_F_SSL_CERT_ADD0_CHAIN_CERT), "ssl_cert_add0_chain_cert"}, {ERR_FUNC(SSL_F_SSL_CERT_DUP), "ssl_cert_dup"}, {ERR_FUNC(SSL_F_SSL_CERT_INSTANTIATE), "SSL_CERT_INSTANTIATE"}, {ERR_FUNC(SSL_F_SSL_CERT_NEW), "ssl_cert_new"}, {ERR_FUNC(SSL_F_SSL_CERT_SET0_CHAIN), 
"ssl_cert_set0_chain"}, {ERR_FUNC(SSL_F_SSL_CHECK_PRIVATE_KEY), "SSL_check_private_key"}, {ERR_FUNC(SSL_F_SSL_CHECK_SERVERHELLO_TLSEXT), "SSL_CHECK_SERVERHELLO_TLSEXT"}, {ERR_FUNC(SSL_F_SSL_CHECK_SRVR_ECC_CERT_AND_ALG), "ssl_check_srvr_ecc_cert_and_alg"}, {ERR_FUNC(SSL_F_SSL_CIPHER_PROCESS_RULESTR), "SSL_CIPHER_PROCESS_RULESTR"}, {ERR_FUNC(SSL_F_SSL_CIPHER_STRENGTH_SORT), "SSL_CIPHER_STRENGTH_SORT"}, {ERR_FUNC(SSL_F_SSL_CLEAR), "SSL_clear"}, {ERR_FUNC(SSL_F_SSL_COMP_ADD_COMPRESSION_METHOD), "SSL_COMP_add_compression_method"}, {ERR_FUNC(SSL_F_SSL_CONF_CMD), "SSL_CONF_cmd"}, {ERR_FUNC(SSL_F_SSL_CREATE_CIPHER_LIST), "ssl_create_cipher_list"}, {ERR_FUNC(SSL_F_SSL_CTRL), "SSL_ctrl"}, {ERR_FUNC(SSL_F_SSL_CTX_CHECK_PRIVATE_KEY), "SSL_CTX_check_private_key"}, {ERR_FUNC(SSL_F_SSL_CTX_MAKE_PROFILES), "SSL_CTX_MAKE_PROFILES"}, {ERR_FUNC(SSL_F_SSL_CTX_NEW), "SSL_CTX_new"}, {ERR_FUNC(SSL_F_SSL_CTX_SET_CIPHER_LIST), "SSL_CTX_set_cipher_list"}, {ERR_FUNC(SSL_F_SSL_CTX_SET_CLIENT_CERT_ENGINE), "SSL_CTX_set_client_cert_engine"}, {ERR_FUNC(SSL_F_SSL_CTX_SET_PURPOSE), "SSL_CTX_set_purpose"}, {ERR_FUNC(SSL_F_SSL_CTX_SET_SESSION_ID_CONTEXT), "SSL_CTX_set_session_id_context"}, {ERR_FUNC(SSL_F_SSL_CTX_SET_SSL_VERSION), "SSL_CTX_set_ssl_version"}, {ERR_FUNC(SSL_F_SSL_CTX_SET_TRUST), "SSL_CTX_set_trust"}, {ERR_FUNC(SSL_F_SSL_CTX_USE_CERTIFICATE), "SSL_CTX_use_certificate"}, {ERR_FUNC(SSL_F_SSL_CTX_USE_CERTIFICATE_ASN1), "SSL_CTX_use_certificate_ASN1"}, {ERR_FUNC(SSL_F_USE_CERTIFICATE_CHAIN_FILE), "use_certificate_chain_file"}, {ERR_FUNC(SSL_F_SSL_CTX_USE_CERTIFICATE_FILE), "SSL_CTX_use_certificate_file"}, {ERR_FUNC(SSL_F_SSL_CTX_USE_PRIVATEKEY), "SSL_CTX_use_PrivateKey"}, {ERR_FUNC(SSL_F_SSL_CTX_USE_PRIVATEKEY_ASN1), "SSL_CTX_use_PrivateKey_ASN1"}, {ERR_FUNC(SSL_F_SSL_CTX_USE_PRIVATEKEY_FILE), "SSL_CTX_use_PrivateKey_file"}, {ERR_FUNC(SSL_F_SSL_CTX_USE_PSK_IDENTITY_HINT), "SSL_CTX_use_psk_identity_hint"}, {ERR_FUNC(SSL_F_SSL_CTX_USE_RSAPRIVATEKEY), "SSL_CTX_use_RSAPrivateKey"}, {ERR_FUNC(SSL_F_SSL_CTX_USE_RSAPRIVATEKEY_ASN1), "SSL_CTX_use_RSAPrivateKey_ASN1"}, {ERR_FUNC(SSL_F_SSL_CTX_USE_RSAPRIVATEKEY_FILE), "SSL_CTX_use_RSAPrivateKey_file"}, {ERR_FUNC(SSL_F_SSL_CTX_USE_SERVERINFO), "SSL_CTX_use_serverinfo"}, {ERR_FUNC(SSL_F_SSL_CTX_USE_SERVERINFO_FILE), "SSL_CTX_use_serverinfo_file"}, {ERR_FUNC(SSL_F_SSL_DO_HANDSHAKE), "SSL_do_handshake"}, {ERR_FUNC(SSL_F_SSL_GET_NEW_SESSION), "ssl_get_new_session"}, {ERR_FUNC(SSL_F_SSL_GET_PREV_SESSION), "ssl_get_prev_session"}, {ERR_FUNC(SSL_F_SSL_GET_SERVER_CERT_INDEX), "SSL_GET_SERVER_CERT_INDEX"}, {ERR_FUNC(SSL_F_SSL_GET_SERVER_SEND_CERT), "SSL_GET_SERVER_SEND_CERT"}, {ERR_FUNC(SSL_F_SSL_GET_SERVER_SEND_PKEY), "ssl_get_server_send_pkey"}, {ERR_FUNC(SSL_F_SSL_GET_SIGN_PKEY), "ssl_get_sign_pkey"}, {ERR_FUNC(SSL_F_SSL_INIT_WBIO_BUFFER), "ssl_init_wbio_buffer"}, {ERR_FUNC(SSL_F_SSL_LOAD_CLIENT_CA_FILE), "SSL_load_client_CA_file"}, {ERR_FUNC(SSL_F_SSL_NEW), "SSL_new"}, {ERR_FUNC(SSL_F_SSL_PARSE_CLIENTHELLO_RENEGOTIATE_EXT), "ssl_parse_clienthello_renegotiate_ext"}, {ERR_FUNC(SSL_F_SSL_PARSE_CLIENTHELLO_TLSEXT), "ssl_parse_clienthello_tlsext"}, {ERR_FUNC(SSL_F_SSL_PARSE_CLIENTHELLO_USE_SRTP_EXT), "ssl_parse_clienthello_use_srtp_ext"}, {ERR_FUNC(SSL_F_SSL_PARSE_SERVERHELLO_RENEGOTIATE_EXT), "ssl_parse_serverhello_renegotiate_ext"}, {ERR_FUNC(SSL_F_SSL_PARSE_SERVERHELLO_TLSEXT), "ssl_parse_serverhello_tlsext"}, {ERR_FUNC(SSL_F_SSL_PARSE_SERVERHELLO_USE_SRTP_EXT), "ssl_parse_serverhello_use_srtp_ext"}, {ERR_FUNC(SSL_F_SSL_PEEK), "SSL_peek"}, 
{ERR_FUNC(SSL_F_SSL_PREPARE_CLIENTHELLO_TLSEXT), "ssl_prepare_clienthello_tlsext"}, {ERR_FUNC(SSL_F_SSL_PREPARE_SERVERHELLO_TLSEXT), "ssl_prepare_serverhello_tlsext"}, {ERR_FUNC(SSL_F_SSL_READ), "SSL_read"}, {ERR_FUNC(SSL_F_SSL_SCAN_CLIENTHELLO_TLSEXT), "SSL_SCAN_CLIENTHELLO_TLSEXT"}, {ERR_FUNC(SSL_F_SSL_SCAN_SERVERHELLO_TLSEXT), "SSL_SCAN_SERVERHELLO_TLSEXT"}, {ERR_FUNC(SSL_F_SSL_SESSION_NEW), "SSL_SESSION_new"}, {ERR_FUNC(SSL_F_SSL_SESSION_PRINT_FP), "SSL_SESSION_print_fp"}, {ERR_FUNC(SSL_F_SSL_SESSION_SET1_ID_CONTEXT), "SSL_SESSION_set1_id_context"}, {ERR_FUNC(SSL_F_SSL_SESS_CERT_NEW), "ssl_sess_cert_new"}, {ERR_FUNC(SSL_F_SSL_SET_CERT), "SSL_SET_CERT"}, {ERR_FUNC(SSL_F_SSL_SET_CIPHER_LIST), "SSL_set_cipher_list"}, {ERR_FUNC(SSL_F_SSL_SET_FD), "SSL_set_fd"}, {ERR_FUNC(SSL_F_SSL_SET_PKEY), "SSL_SET_PKEY"}, {ERR_FUNC(SSL_F_SSL_SET_PURPOSE), "SSL_set_purpose"}, {ERR_FUNC(SSL_F_SSL_SET_RFD), "SSL_set_rfd"}, {ERR_FUNC(SSL_F_SSL_SET_SESSION), "SSL_set_session"}, {ERR_FUNC(SSL_F_SSL_SET_SESSION_ID_CONTEXT), "SSL_set_session_id_context"}, {ERR_FUNC(SSL_F_SSL_SET_SESSION_TICKET_EXT), "SSL_set_session_ticket_ext"}, {ERR_FUNC(SSL_F_SSL_SET_TRUST), "SSL_set_trust"}, {ERR_FUNC(SSL_F_SSL_SET_VERSION), "SSL_SET_VERSION"}, {ERR_FUNC(SSL_F_SSL_SET_WFD), "SSL_set_wfd"}, {ERR_FUNC(SSL_F_SSL_SHUTDOWN), "SSL_shutdown"}, {ERR_FUNC(SSL_F_SSL_SRP_CTX_INIT), "SSL_SRP_CTX_init"}, {ERR_FUNC(SSL_F_SSL_UNDEFINED_CONST_FUNCTION), "ssl_undefined_const_function"}, {ERR_FUNC(SSL_F_SSL_UNDEFINED_FUNCTION), "ssl_undefined_function"}, {ERR_FUNC(SSL_F_SSL_UNDEFINED_VOID_FUNCTION), "ssl_undefined_void_function"}, {ERR_FUNC(SSL_F_SSL_USE_CERTIFICATE), "SSL_use_certificate"}, {ERR_FUNC(SSL_F_SSL_USE_CERTIFICATE_ASN1), "SSL_use_certificate_ASN1"}, {ERR_FUNC(SSL_F_SSL_USE_CERTIFICATE_FILE), "SSL_use_certificate_file"}, {ERR_FUNC(SSL_F_SSL_USE_PRIVATEKEY), "SSL_use_PrivateKey"}, {ERR_FUNC(SSL_F_SSL_USE_PRIVATEKEY_ASN1), "SSL_use_PrivateKey_ASN1"}, {ERR_FUNC(SSL_F_SSL_USE_PRIVATEKEY_FILE), "SSL_use_PrivateKey_file"}, {ERR_FUNC(SSL_F_SSL_USE_PSK_IDENTITY_HINT), "SSL_use_psk_identity_hint"}, {ERR_FUNC(SSL_F_SSL_USE_RSAPRIVATEKEY), "SSL_use_RSAPrivateKey"}, {ERR_FUNC(SSL_F_SSL_USE_RSAPRIVATEKEY_ASN1), "SSL_use_RSAPrivateKey_ASN1"}, {ERR_FUNC(SSL_F_SSL_USE_RSAPRIVATEKEY_FILE), "SSL_use_RSAPrivateKey_file"}, {ERR_FUNC(SSL_F_SSL_VERIFY_CERT_CHAIN), "ssl_verify_cert_chain"}, {ERR_FUNC(SSL_F_SSL_WRITE), "SSL_write"}, {ERR_FUNC(SSL_F_TLS12_CHECK_PEER_SIGALG), "tls12_check_peer_sigalg"}, {ERR_FUNC(SSL_F_TLS1_CERT_VERIFY_MAC), "tls1_cert_verify_mac"}, {ERR_FUNC(SSL_F_TLS1_CHANGE_CIPHER_STATE), "tls1_change_cipher_state"}, {ERR_FUNC(SSL_F_TLS1_CHECK_SERVERHELLO_TLSEXT), "TLS1_CHECK_SERVERHELLO_TLSEXT"}, {ERR_FUNC(SSL_F_TLS1_ENC), "tls1_enc"}, {ERR_FUNC(SSL_F_TLS1_EXPORT_KEYING_MATERIAL), "tls1_export_keying_material"}, {ERR_FUNC(SSL_F_TLS1_GET_CURVELIST), "TLS1_GET_CURVELIST"}, {ERR_FUNC(SSL_F_TLS1_HEARTBEAT), "tls1_heartbeat"}, {ERR_FUNC(SSL_F_TLS1_PREPARE_CLIENTHELLO_TLSEXT), "TLS1_PREPARE_CLIENTHELLO_TLSEXT"}, {ERR_FUNC(SSL_F_TLS1_PREPARE_SERVERHELLO_TLSEXT), "TLS1_PREPARE_SERVERHELLO_TLSEXT"}, {ERR_FUNC(SSL_F_TLS1_PRF), "tls1_prf"}, {ERR_FUNC(SSL_F_TLS1_PROCESS_HEARTBEAT), "tls1_process_heartbeat"}, {ERR_FUNC(SSL_F_TLS1_SETUP_KEY_BLOCK), "tls1_setup_key_block"}, {ERR_FUNC(SSL_F_TLS1_SET_SERVER_SIGALGS), "tls1_set_server_sigalgs"}, {0, NULL} }; static ERR_STRING_DATA SSL_str_reasons[] = { {ERR_REASON(SSL_R_APP_DATA_IN_HANDSHAKE), "app data in handshake"}, {ERR_REASON(SSL_R_ATTEMPT_TO_REUSE_SESSION_IN_DIFFERENT_CONTEXT), "attempt to reuse 
session in different context"}, {ERR_REASON(SSL_R_BAD_ALERT_RECORD), "bad alert record"}, {ERR_REASON(SSL_R_BAD_CHANGE_CIPHER_SPEC), "bad change cipher spec"}, {ERR_REASON(SSL_R_BAD_DATA), "bad data"}, {ERR_REASON(SSL_R_BAD_DATA_RETURNED_BY_CALLBACK), "bad data returned by callback"}, {ERR_REASON(SSL_R_BAD_DECOMPRESSION), "bad decompression"}, {ERR_REASON(SSL_R_BAD_DH_G_LENGTH), "bad dh g length"}, {ERR_REASON(SSL_R_BAD_DH_PUB_KEY_LENGTH), "bad dh pub key length"}, {ERR_REASON(SSL_R_BAD_DH_P_LENGTH), "bad dh p length"}, {ERR_REASON(SSL_R_BAD_DIGEST_LENGTH), "bad digest length"}, {ERR_REASON(SSL_R_BAD_DSA_SIGNATURE), "bad dsa signature"}, {ERR_REASON(SSL_R_BAD_ECC_CERT), "bad ecc cert"}, {ERR_REASON(SSL_R_BAD_ECDSA_SIGNATURE), "bad ecdsa signature"}, {ERR_REASON(SSL_R_BAD_ECPOINT), "bad ecpoint"}, {ERR_REASON(SSL_R_BAD_HANDSHAKE_LENGTH), "bad handshake length"}, {ERR_REASON(SSL_R_BAD_HELLO_REQUEST), "bad hello request"}, {ERR_REASON(SSL_R_BAD_LENGTH), "bad length"}, {ERR_REASON(SSL_R_BAD_MAC_LENGTH), "bad mac length"}, {ERR_REASON(SSL_R_BAD_MESSAGE_TYPE), "bad message type"}, {ERR_REASON(SSL_R_BAD_PACKET_LENGTH), "bad packet length"}, {ERR_REASON(SSL_R_BAD_PROTOCOL_VERSION_NUMBER), "bad protocol version number"}, {ERR_REASON(SSL_R_BAD_PSK_IDENTITY_HINT_LENGTH), "bad psk identity hint length"}, {ERR_REASON(SSL_R_BAD_RSA_DECRYPT), "bad rsa decrypt"}, {ERR_REASON(SSL_R_BAD_RSA_ENCRYPT), "bad rsa encrypt"}, {ERR_REASON(SSL_R_BAD_RSA_E_LENGTH), "bad rsa e length"}, {ERR_REASON(SSL_R_BAD_RSA_MODULUS_LENGTH), "bad rsa modulus length"}, {ERR_REASON(SSL_R_BAD_RSA_SIGNATURE), "bad rsa signature"}, {ERR_REASON(SSL_R_BAD_SIGNATURE), "bad signature"}, {ERR_REASON(SSL_R_BAD_SRP_A_LENGTH), "bad srp a length"}, {ERR_REASON(SSL_R_BAD_SRP_B_LENGTH), "bad srp b length"}, {ERR_REASON(SSL_R_BAD_SRP_G_LENGTH), "bad srp g length"}, {ERR_REASON(SSL_R_BAD_SRP_N_LENGTH), "bad srp n length"}, {ERR_REASON(SSL_R_BAD_SRP_PARAMETERS), "bad srp parameters"}, {ERR_REASON(SSL_R_BAD_SRP_S_LENGTH), "bad srp s length"}, {ERR_REASON(SSL_R_BAD_SRTP_MKI_VALUE), "bad srtp mki value"}, {ERR_REASON(SSL_R_BAD_SRTP_PROTECTION_PROFILE_LIST), "bad srtp protection profile list"}, {ERR_REASON(SSL_R_BAD_SSL_FILETYPE), "bad ssl filetype"}, {ERR_REASON(SSL_R_BAD_VALUE), "bad value"}, {ERR_REASON(SSL_R_BAD_WRITE_RETRY), "bad write retry"}, {ERR_REASON(SSL_R_BIO_NOT_SET), "bio not set"}, {ERR_REASON(SSL_R_BLOCK_CIPHER_PAD_IS_WRONG), "block cipher pad is wrong"}, {ERR_REASON(SSL_R_BN_LIB), "bn lib"}, {ERR_REASON(SSL_R_CA_DN_LENGTH_MISMATCH), "ca dn length mismatch"}, {ERR_REASON(SSL_R_CA_DN_TOO_LONG), "ca dn too long"}, {ERR_REASON(SSL_R_CA_KEY_TOO_SMALL), "ca key too small"}, {ERR_REASON(SSL_R_CA_MD_TOO_WEAK), "ca md too weak"}, {ERR_REASON(SSL_R_CCS_RECEIVED_EARLY), "ccs received early"}, {ERR_REASON(SSL_R_CERTIFICATE_VERIFY_FAILED), "certificate verify failed"}, {ERR_REASON(SSL_R_CERT_CB_ERROR), "cert cb error"}, {ERR_REASON(SSL_R_CERT_LENGTH_MISMATCH), "cert length mismatch"}, {ERR_REASON(SSL_R_CIPHER_CODE_WRONG_LENGTH), "cipher code wrong length"}, {ERR_REASON(SSL_R_CIPHER_OR_HASH_UNAVAILABLE), "cipher or hash unavailable"}, {ERR_REASON(SSL_R_CLIENTHELLO_TLSEXT), "clienthello tlsext"}, {ERR_REASON(SSL_R_COMPRESSED_LENGTH_TOO_LONG), "compressed length too long"}, {ERR_REASON(SSL_R_COMPRESSION_DISABLED), "compression disabled"}, {ERR_REASON(SSL_R_COMPRESSION_FAILURE), "compression failure"}, {ERR_REASON(SSL_R_COMPRESSION_ID_NOT_WITHIN_PRIVATE_RANGE), "compression id not within private range"}, {ERR_REASON(SSL_R_COMPRESSION_LIBRARY_ERROR), 
"compression library error"}, {ERR_REASON(SSL_R_CONNECTION_TYPE_NOT_SET), "connection type not set"}, {ERR_REASON(SSL_R_COOKIE_MISMATCH), "cookie mismatch"}, {ERR_REASON(SSL_R_DATA_BETWEEN_CCS_AND_FINISHED), "data between ccs and finished"}, {ERR_REASON(SSL_R_DATA_LENGTH_TOO_LONG), "data length too long"}, {ERR_REASON(SSL_R_DECRYPTION_FAILED), "decryption failed"}, {ERR_REASON(SSL_R_DECRYPTION_FAILED_OR_BAD_RECORD_MAC), "decryption failed or bad record mac"}, {ERR_REASON(SSL_R_DH_KEY_TOO_SMALL), "dh key too small"}, {ERR_REASON(SSL_R_DH_PUBLIC_VALUE_LENGTH_IS_WRONG), "dh public value length is wrong"}, {ERR_REASON(SSL_R_DIGEST_CHECK_FAILED), "digest check failed"}, {ERR_REASON(SSL_R_DTLS_MESSAGE_TOO_BIG), "dtls message too big"}, {ERR_REASON(SSL_R_DUPLICATE_COMPRESSION_ID), "duplicate compression id"}, {ERR_REASON(SSL_R_ECC_CERT_NOT_FOR_KEY_AGREEMENT), "ecc cert not for key agreement"}, {ERR_REASON(SSL_R_ECC_CERT_NOT_FOR_SIGNING), "ecc cert not for signing"}, {ERR_REASON(SSL_R_ECC_CERT_SHOULD_HAVE_RSA_SIGNATURE), "ecc cert should have rsa signature"}, {ERR_REASON(SSL_R_ECC_CERT_SHOULD_HAVE_SHA1_SIGNATURE), "ecc cert should have sha1 signature"}, {ERR_REASON(SSL_R_ECDH_REQUIRED_FOR_SUITEB_MODE), "ecdh required for suiteb mode"}, {ERR_REASON(SSL_R_ECGROUP_TOO_LARGE_FOR_CIPHER), "ecgroup too large for cipher"}, {ERR_REASON(SSL_R_EE_KEY_TOO_SMALL), "ee key too small"}, {ERR_REASON(SSL_R_EMPTY_SRTP_PROTECTION_PROFILE_LIST), "empty srtp protection profile list"}, {ERR_REASON(SSL_R_ENCRYPTED_LENGTH_TOO_LONG), "encrypted length too long"}, {ERR_REASON(SSL_R_ERROR_GENERATING_TMP_RSA_KEY), "error generating tmp rsa key"}, {ERR_REASON(SSL_R_ERROR_IN_RECEIVED_CIPHER_LIST), "error in received cipher list"}, {ERR_REASON(SSL_R_EXCESSIVE_MESSAGE_SIZE), "excessive message size"}, {ERR_REASON(SSL_R_EXTRA_DATA_IN_MESSAGE), "extra data in message"}, {ERR_REASON(SSL_R_GOT_A_FIN_BEFORE_A_CCS), "got a fin before a ccs"}, {ERR_REASON(SSL_R_GOT_NEXT_PROTO_BEFORE_A_CCS), "got next proto before a ccs"}, {ERR_REASON(SSL_R_GOT_NEXT_PROTO_WITHOUT_EXTENSION), "got next proto without seeing extension"}, {ERR_REASON(SSL_R_HTTPS_PROXY_REQUEST), "https proxy request"}, {ERR_REASON(SSL_R_HTTP_REQUEST), "http request"}, {ERR_REASON(SSL_R_ILLEGAL_SUITEB_DIGEST), "illegal Suite B digest"}, {ERR_REASON(SSL_R_INAPPROPRIATE_FALLBACK), "inappropriate fallback"}, {ERR_REASON(SSL_R_INCONSISTENT_COMPRESSION), "inconsistent compression"}, {ERR_REASON(SSL_R_INVALID_COMMAND), "invalid command"}, {ERR_REASON(SSL_R_INVALID_COMPRESSION_ALGORITHM), "invalid compression algorithm"}, {ERR_REASON(SSL_R_INVALID_NULL_CMD_NAME), "invalid null cmd name"}, {ERR_REASON(SSL_R_INVALID_PURPOSE), "invalid purpose"}, {ERR_REASON(SSL_R_INVALID_SERVERINFO_DATA), "invalid serverinfo data"}, {ERR_REASON(SSL_R_INVALID_SRP_USERNAME), "invalid srp username"}, {ERR_REASON(SSL_R_INVALID_STATUS_RESPONSE), "invalid status response"}, {ERR_REASON(SSL_R_INVALID_TICKET_KEYS_LENGTH), "invalid ticket keys length"}, {ERR_REASON(SSL_R_INVALID_TRUST), "invalid trust"}, {ERR_REASON(SSL_R_LENGTH_MISMATCH), "length mismatch"}, {ERR_REASON(SSL_R_LENGTH_TOO_SHORT), "length too short"}, {ERR_REASON(SSL_R_LIBRARY_BUG), "library bug"}, {ERR_REASON(SSL_R_LIBRARY_HAS_NO_CIPHERS), "library has no ciphers"}, {ERR_REASON(SSL_R_MISSING_DH_DSA_CERT), "missing dh dsa cert"}, {ERR_REASON(SSL_R_MISSING_DH_KEY), "missing dh key"}, {ERR_REASON(SSL_R_MISSING_DH_RSA_CERT), "missing dh rsa cert"}, {ERR_REASON(SSL_R_MISSING_DSA_SIGNING_CERT), "missing dsa signing cert"}, 
{ERR_REASON(SSL_R_MISSING_ECDH_CERT), "missing ecdh cert"}, {ERR_REASON(SSL_R_MISSING_ECDSA_SIGNING_CERT), "missing ecdsa signing cert"}, {ERR_REASON(SSL_R_MISSING_EXPORT_TMP_DH_KEY), "missing export tmp dh key"}, {ERR_REASON(SSL_R_MISSING_EXPORT_TMP_RSA_KEY), "missing export tmp rsa key"}, {ERR_REASON(SSL_R_MISSING_RSA_CERTIFICATE), "missing rsa certificate"}, {ERR_REASON(SSL_R_MISSING_RSA_ENCRYPTING_CERT), "missing rsa encrypting cert"}, {ERR_REASON(SSL_R_MISSING_RSA_SIGNING_CERT), "missing rsa signing cert"}, {ERR_REASON(SSL_R_MISSING_SRP_PARAM), "can't find SRP server param"}, {ERR_REASON(SSL_R_MISSING_TMP_DH_KEY), "missing tmp dh key"}, {ERR_REASON(SSL_R_MISSING_TMP_ECDH_KEY), "missing tmp ecdh key"}, {ERR_REASON(SSL_R_MISSING_TMP_RSA_KEY), "missing tmp rsa key"}, {ERR_REASON(SSL_R_MISSING_TMP_RSA_PKEY), "missing tmp rsa pkey"}, {ERR_REASON(SSL_R_MISSING_VERIFY_MESSAGE), "missing verify message"}, {ERR_REASON(SSL_R_MULTIPLE_SGC_RESTARTS), "multiple sgc restarts"}, {ERR_REASON(SSL_R_NO_CERTIFICATES_RETURNED), "no certificates returned"}, {ERR_REASON(SSL_R_NO_CERTIFICATE_ASSIGNED), "no certificate assigned"}, {ERR_REASON(SSL_R_NO_CERTIFICATE_RETURNED), "no certificate returned"}, {ERR_REASON(SSL_R_NO_CERTIFICATE_SET), "no certificate set"}, {ERR_REASON(SSL_R_NO_CIPHERS_AVAILABLE), "no ciphers available"}, {ERR_REASON(SSL_R_NO_CIPHERS_PASSED), "no ciphers passed"}, {ERR_REASON(SSL_R_NO_CIPHERS_SPECIFIED), "no ciphers specified"}, {ERR_REASON(SSL_R_NO_CIPHER_MATCH), "no cipher match"}, {ERR_REASON(SSL_R_NO_CLIENT_CERT_METHOD), "no client cert method"}, {ERR_REASON(SSL_R_NO_CLIENT_CERT_RECEIVED), "no client cert received"}, {ERR_REASON(SSL_R_NO_COMPRESSION_SPECIFIED), "no compression specified"}, {ERR_REASON(SSL_R_NO_GOST_CERTIFICATE_SENT_BY_PEER), "Peer haven't sent GOST certificate, required for selected ciphersuite"}, {ERR_REASON(SSL_R_NO_METHOD_SPECIFIED), "no method specified"}, {ERR_REASON(SSL_R_NO_PEM_EXTENSIONS), "no pem extensions"}, {ERR_REASON(SSL_R_NO_PRIVATE_KEY_ASSIGNED), "no private key assigned"}, {ERR_REASON(SSL_R_NO_PROTOCOLS_AVAILABLE), "no protocols available"}, {ERR_REASON(SSL_R_NO_RENEGOTIATION), "no renegotiation"}, {ERR_REASON(SSL_R_NO_REQUIRED_DIGEST), "no required digest"}, {ERR_REASON(SSL_R_NO_SHARED_CIPHER), "no shared cipher"}, {ERR_REASON(SSL_R_NO_SHARED_SIGATURE_ALGORITHMS), "no shared sigature algorithms"}, {ERR_REASON(SSL_R_NO_SRTP_PROFILES), "no srtp profiles"}, {ERR_REASON(SSL_R_NO_VERIFY_CALLBACK), "no verify callback"}, {ERR_REASON(SSL_R_NULL_SSL_CTX), "null ssl ctx"}, {ERR_REASON(SSL_R_NULL_SSL_METHOD_PASSED), "null ssl method passed"}, {ERR_REASON(SSL_R_OLD_SESSION_CIPHER_NOT_RETURNED), "old session cipher not returned"}, {ERR_REASON(SSL_R_OLD_SESSION_COMPRESSION_ALGORITHM_NOT_RETURNED), "old session compression algorithm not returned"}, {ERR_REASON(SSL_R_ONLY_DTLS_1_2_ALLOWED_IN_SUITEB_MODE), "only DTLS 1.2 allowed in Suite B mode"}, {ERR_REASON(SSL_R_ONLY_TLS_1_2_ALLOWED_IN_SUITEB_MODE), "only TLS 1.2 allowed in Suite B mode"}, {ERR_REASON(SSL_R_ONLY_TLS_ALLOWED_IN_FIPS_MODE), "only tls allowed in fips mode"}, {ERR_REASON(SSL_R_OPAQUE_PRF_INPUT_TOO_LONG), "opaque PRF input too long"}, {ERR_REASON(SSL_R_PACKET_LENGTH_TOO_LONG), "packet length too long"}, {ERR_REASON(SSL_R_PARSE_TLSEXT), "parse tlsext"}, {ERR_REASON(SSL_R_PATH_TOO_LONG), "path too long"}, {ERR_REASON(SSL_R_PEER_DID_NOT_RETURN_A_CERTIFICATE), "peer did not return a certificate"}, {ERR_REASON(SSL_R_PEM_NAME_BAD_PREFIX), "pem name bad prefix"}, {ERR_REASON(SSL_R_PEM_NAME_TOO_SHORT), "pem 
name too short"}, {ERR_REASON(SSL_R_PRE_MAC_LENGTH_TOO_LONG), "pre mac length too long"}, {ERR_REASON(SSL_R_PROTOCOL_IS_SHUTDOWN), "protocol is shutdown"}, {ERR_REASON(SSL_R_PSK_IDENTITY_NOT_FOUND), "psk identity not found"}, {ERR_REASON(SSL_R_PSK_NO_CLIENT_CB), "psk no client cb"}, {ERR_REASON(SSL_R_PSK_NO_SERVER_CB), "psk no server cb"}, {ERR_REASON(SSL_R_READ_BIO_NOT_SET), "read bio not set"}, {ERR_REASON(SSL_R_READ_TIMEOUT_EXPIRED), "read timeout expired"}, {ERR_REASON(SSL_R_RECORD_LENGTH_MISMATCH), "record length mismatch"}, {ERR_REASON(SSL_R_RECORD_TOO_LARGE), "record too large"}, {ERR_REASON(SSL_R_RECORD_TOO_SMALL), "record too small"}, {ERR_REASON(SSL_R_RENEGOTIATE_EXT_TOO_LONG), "renegotiate ext too long"}, {ERR_REASON(SSL_R_RENEGOTIATION_ENCODING_ERR), "renegotiation encoding err"}, {ERR_REASON(SSL_R_RENEGOTIATION_MISMATCH), "renegotiation mismatch"}, {ERR_REASON(SSL_R_REQUIRED_CIPHER_MISSING), "required cipher missing"}, {ERR_REASON(SSL_R_REQUIRED_COMPRESSSION_ALGORITHM_MISSING), "required compresssion algorithm missing"}, {ERR_REASON(SSL_R_SCSV_RECEIVED_WHEN_RENEGOTIATING), "scsv received when renegotiating"}, {ERR_REASON(SSL_R_SERVERHELLO_TLSEXT), "serverhello tlsext"}, {ERR_REASON(SSL_R_SESSION_ID_CONTEXT_UNINITIALIZED), "session id context uninitialized"}, {ERR_REASON(SSL_R_SIGNATURE_ALGORITHMS_ERROR), "signature algorithms error"}, {ERR_REASON(SSL_R_SIGNATURE_FOR_NON_SIGNING_CERTIFICATE), "signature for non signing certificate"}, {ERR_REASON(SSL_R_SRP_A_CALC), "error with the srp params"}, {ERR_REASON(SSL_R_SRTP_COULD_NOT_ALLOCATE_PROFILES), "srtp could not allocate profiles"}, {ERR_REASON(SSL_R_SRTP_PROTECTION_PROFILE_LIST_TOO_LONG), "srtp protection profile list too long"}, {ERR_REASON(SSL_R_SRTP_UNKNOWN_PROTECTION_PROFILE), "srtp unknown protection profile"}, {ERR_REASON(SSL_R_SSL3_EXT_INVALID_ECPOINTFORMAT), "ssl3 ext invalid ecpointformat"}, {ERR_REASON(SSL_R_SSL3_EXT_INVALID_SERVERNAME), "ssl3 ext invalid servername"}, {ERR_REASON(SSL_R_SSL3_EXT_INVALID_SERVERNAME_TYPE), "ssl3 ext invalid servername type"}, {ERR_REASON(SSL_R_SSL3_SESSION_ID_TOO_LONG), "ssl3 session id too long"}, {ERR_REASON(SSL_R_SSL3_SESSION_ID_TOO_SHORT), "ssl3 session id too short"}, {ERR_REASON(SSL_R_SSLV3_ALERT_BAD_CERTIFICATE), "sslv3 alert bad certificate"}, {ERR_REASON(SSL_R_SSLV3_ALERT_BAD_RECORD_MAC), "sslv3 alert bad record mac"}, {ERR_REASON(SSL_R_SSLV3_ALERT_CERTIFICATE_EXPIRED), "sslv3 alert certificate expired"}, {ERR_REASON(SSL_R_SSLV3_ALERT_CERTIFICATE_REVOKED), "sslv3 alert certificate revoked"}, {ERR_REASON(SSL_R_SSLV3_ALERT_CERTIFICATE_UNKNOWN), "sslv3 alert certificate unknown"}, {ERR_REASON(SSL_R_SSLV3_ALERT_DECOMPRESSION_FAILURE), "sslv3 alert decompression failure"}, {ERR_REASON(SSL_R_SSLV3_ALERT_HANDSHAKE_FAILURE), "sslv3 alert handshake failure"}, {ERR_REASON(SSL_R_SSLV3_ALERT_ILLEGAL_PARAMETER), "sslv3 alert illegal parameter"}, {ERR_REASON(SSL_R_SSLV3_ALERT_NO_CERTIFICATE), "sslv3 alert no certificate"}, {ERR_REASON(SSL_R_SSLV3_ALERT_UNEXPECTED_MESSAGE), "sslv3 alert unexpected message"}, {ERR_REASON(SSL_R_SSLV3_ALERT_UNSUPPORTED_CERTIFICATE), "sslv3 alert unsupported certificate"}, {ERR_REASON(SSL_R_SSL_CTX_HAS_NO_DEFAULT_SSL_VERSION), "ssl ctx has no default ssl version"}, {ERR_REASON(SSL_R_SSL_HANDSHAKE_FAILURE), "ssl handshake failure"}, {ERR_REASON(SSL_R_SSL_LIBRARY_HAS_NO_CIPHERS), "ssl library has no ciphers"}, {ERR_REASON(SSL_R_SSL_NEGATIVE_LENGTH), "ssl negative length"}, {ERR_REASON(SSL_R_SSL_SESSION_ID_CALLBACK_FAILED), "ssl session id callback failed"}, 
{ERR_REASON(SSL_R_SSL_SESSION_ID_CONFLICT), "ssl session id conflict"}, {ERR_REASON(SSL_R_SSL_SESSION_ID_CONTEXT_TOO_LONG), "ssl session id context too long"}, {ERR_REASON(SSL_R_SSL_SESSION_ID_HAS_BAD_LENGTH), "ssl session id has bad length"}, {ERR_REASON(SSL_R_TLSV1_ALERT_ACCESS_DENIED), "tlsv1 alert access denied"}, {ERR_REASON(SSL_R_TLSV1_ALERT_DECODE_ERROR), "tlsv1 alert decode error"}, {ERR_REASON(SSL_R_TLSV1_ALERT_DECRYPTION_FAILED), "tlsv1 alert decryption failed"}, {ERR_REASON(SSL_R_TLSV1_ALERT_DECRYPT_ERROR), "tlsv1 alert decrypt error"}, {ERR_REASON(SSL_R_TLSV1_ALERT_EXPORT_RESTRICTION), "tlsv1 alert export restriction"}, {ERR_REASON(SSL_R_TLSV1_ALERT_INAPPROPRIATE_FALLBACK), "tlsv1 alert inappropriate fallback"}, {ERR_REASON(SSL_R_TLSV1_ALERT_INSUFFICIENT_SECURITY), "tlsv1 alert insufficient security"}, {ERR_REASON(SSL_R_TLSV1_ALERT_INTERNAL_ERROR), "tlsv1 alert internal error"}, {ERR_REASON(SSL_R_TLSV1_ALERT_NO_RENEGOTIATION), "tlsv1 alert no renegotiation"}, {ERR_REASON(SSL_R_TLSV1_ALERT_PROTOCOL_VERSION), "tlsv1 alert protocol version"}, {ERR_REASON(SSL_R_TLSV1_ALERT_RECORD_OVERFLOW), "tlsv1 alert record overflow"}, {ERR_REASON(SSL_R_TLSV1_ALERT_UNKNOWN_CA), "tlsv1 alert unknown ca"}, {ERR_REASON(SSL_R_TLSV1_ALERT_USER_CANCELLED), "tlsv1 alert user cancelled"}, {ERR_REASON(SSL_R_TLSV1_BAD_CERTIFICATE_HASH_VALUE), "tlsv1 bad certificate hash value"}, {ERR_REASON(SSL_R_TLSV1_BAD_CERTIFICATE_STATUS_RESPONSE), "tlsv1 bad certificate status response"}, {ERR_REASON(SSL_R_TLSV1_CERTIFICATE_UNOBTAINABLE), "tlsv1 certificate unobtainable"}, {ERR_REASON(SSL_R_TLSV1_UNRECOGNIZED_NAME), "tlsv1 unrecognized name"}, {ERR_REASON(SSL_R_TLSV1_UNSUPPORTED_EXTENSION), "tlsv1 unsupported extension"}, {ERR_REASON(SSL_R_TLS_CLIENT_CERT_REQ_WITH_ANON_CIPHER), "tls client cert req with anon cipher"}, {ERR_REASON(SSL_R_TLS_HEARTBEAT_PEER_DOESNT_ACCEPT), "peer does not accept heartbeats"}, {ERR_REASON(SSL_R_TLS_HEARTBEAT_PENDING), "heartbeat request already pending"}, {ERR_REASON(SSL_R_TLS_ILLEGAL_EXPORTER_LABEL), "tls illegal exporter label"}, {ERR_REASON(SSL_R_TLS_INVALID_ECPOINTFORMAT_LIST), "tls invalid ecpointformat list"}, {ERR_REASON(SSL_R_TLS_PEER_DID_NOT_RESPOND_WITH_CERTIFICATE_LIST), "tls peer did not respond with certificate list"}, {ERR_REASON(SSL_R_TLS_RSA_ENCRYPTED_VALUE_LENGTH_IS_WRONG), "tls rsa encrypted value length is wrong"}, {ERR_REASON(SSL_R_UNABLE_TO_DECODE_DH_CERTS), "unable to decode dh certs"}, {ERR_REASON(SSL_R_UNABLE_TO_DECODE_ECDH_CERTS), "unable to decode ecdh certs"}, {ERR_REASON(SSL_R_UNABLE_TO_FIND_DH_PARAMETERS), "unable to find dh parameters"}, {ERR_REASON(SSL_R_UNABLE_TO_FIND_ECDH_PARAMETERS), "unable to find ecdh parameters"}, {ERR_REASON(SSL_R_UNABLE_TO_FIND_PUBLIC_KEY_PARAMETERS), "unable to find public key parameters"}, {ERR_REASON(SSL_R_UNABLE_TO_FIND_SSL_METHOD), "unable to find ssl method"}, {ERR_REASON(SSL_R_UNABLE_TO_LOAD_SSL3_MD5_ROUTINES), "unable to load ssl3 md5 routines"}, {ERR_REASON(SSL_R_UNABLE_TO_LOAD_SSL3_SHA1_ROUTINES), "unable to load ssl3 sha1 routines"}, {ERR_REASON(SSL_R_UNEXPECTED_MESSAGE), "unexpected message"}, {ERR_REASON(SSL_R_UNEXPECTED_RECORD), "unexpected record"}, {ERR_REASON(SSL_R_UNINITIALIZED), "uninitialized"}, {ERR_REASON(SSL_R_UNKNOWN_ALERT_TYPE), "unknown alert type"}, {ERR_REASON(SSL_R_UNKNOWN_CERTIFICATE_TYPE), "unknown certificate type"}, {ERR_REASON(SSL_R_UNKNOWN_CIPHER_RETURNED), "unknown cipher returned"}, {ERR_REASON(SSL_R_UNKNOWN_CIPHER_TYPE), "unknown cipher type"}, {ERR_REASON(SSL_R_UNKNOWN_CMD_NAME), "unknown cmd 
name"}, {ERR_REASON(SSL_R_UNKNOWN_DIGEST), "unknown digest"}, {ERR_REASON(SSL_R_UNKNOWN_KEY_EXCHANGE_TYPE), "unknown key exchange type"}, {ERR_REASON(SSL_R_UNKNOWN_PKEY_TYPE), "unknown pkey type"}, {ERR_REASON(SSL_R_UNKNOWN_PROTOCOL), "unknown protocol"}, {ERR_REASON(SSL_R_UNKNOWN_REMOTE_ERROR_TYPE), "unknown remote error type"}, {ERR_REASON(SSL_R_UNKNOWN_SSL_VERSION), "unknown ssl version"}, {ERR_REASON(SSL_R_UNKNOWN_STATE), "unknown state"}, {ERR_REASON(SSL_R_UNSAFE_LEGACY_RENEGOTIATION_DISABLED), "unsafe legacy renegotiation disabled"}, {ERR_REASON(SSL_R_UNSUPPORTED_CIPHER), "unsupported cipher"}, {ERR_REASON(SSL_R_UNSUPPORTED_COMPRESSION_ALGORITHM), "unsupported compression algorithm"}, {ERR_REASON(SSL_R_UNSUPPORTED_DIGEST_TYPE), "unsupported digest type"}, {ERR_REASON(SSL_R_UNSUPPORTED_ELLIPTIC_CURVE), "unsupported elliptic curve"}, {ERR_REASON(SSL_R_UNSUPPORTED_PROTOCOL), "unsupported protocol"}, {ERR_REASON(SSL_R_UNSUPPORTED_SSL_VERSION), "unsupported ssl version"}, {ERR_REASON(SSL_R_UNSUPPORTED_STATUS_TYPE), "unsupported status type"}, {ERR_REASON(SSL_R_USE_SRTP_NOT_NEGOTIATED), "use srtp not negotiated"}, {ERR_REASON(SSL_R_VERSION_TOO_LOW), "version too low"}, {ERR_REASON(SSL_R_WRONG_CERTIFICATE_TYPE), "wrong certificate type"}, {ERR_REASON(SSL_R_WRONG_CIPHER_RETURNED), "wrong cipher returned"}, {ERR_REASON(SSL_R_WRONG_CURVE), "wrong curve"}, {ERR_REASON(SSL_R_WRONG_MESSAGE_TYPE), "wrong message type"}, {ERR_REASON(SSL_R_WRONG_SIGNATURE_LENGTH), "wrong signature length"}, {ERR_REASON(SSL_R_WRONG_SIGNATURE_SIZE), "wrong signature size"}, {ERR_REASON(SSL_R_WRONG_SIGNATURE_TYPE), "wrong signature type"}, {ERR_REASON(SSL_R_WRONG_SSL_VERSION), "wrong ssl version"}, {ERR_REASON(SSL_R_WRONG_VERSION_NUMBER), "wrong version number"}, {ERR_REASON(SSL_R_X509_LIB), "x509 lib"}, {ERR_REASON(SSL_R_X509_VERIFICATION_SETUP_PROBLEMS), "x509 verification setup problems"}, {0, NULL} }; #endif void ERR_load_SSL_strings(void) { #ifndef OPENSSL_NO_ERR if (ERR_func_error_string(SSL_str_functs[0].error) == NULL) { ERR_load_strings(0, SSL_str_functs); ERR_load_strings(0, SSL_str_reasons); } #endif }
./CrossVul/dataset_final_sorted/CWE-362/c/bad_1496_2
crossvul-cpp_data_bad_3459_1
/* * IPv6 tunneling device * Linux INET6 implementation * * Authors: * Ville Nuorvala <vnuorval@tcs.hut.fi> * Yasuyuki Kozakai <kozakai@linux-ipv6.org> * * Based on: * linux/net/ipv6/sit.c and linux/net/ipv4/ipip.c * * RFC 2473 * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * */ #include <linux/module.h> #include <linux/capability.h> #include <linux/errno.h> #include <linux/types.h> #include <linux/sockios.h> #include <linux/icmp.h> #include <linux/if.h> #include <linux/in.h> #include <linux/ip.h> #include <linux/if_tunnel.h> #include <linux/net.h> #include <linux/in6.h> #include <linux/netdevice.h> #include <linux/if_arp.h> #include <linux/icmpv6.h> #include <linux/init.h> #include <linux/route.h> #include <linux/rtnetlink.h> #include <linux/netfilter_ipv6.h> #include <asm/uaccess.h> #include <asm/atomic.h> #include <net/icmp.h> #include <net/ip.h> #include <net/ipv6.h> #include <net/ip6_route.h> #include <net/addrconf.h> #include <net/ip6_tunnel.h> #include <net/xfrm.h> #include <net/dsfield.h> #include <net/inet_ecn.h> #include <net/net_namespace.h> #include <net/netns/generic.h> MODULE_AUTHOR("Ville Nuorvala"); MODULE_DESCRIPTION("IPv6 tunneling device"); MODULE_LICENSE("GPL"); #define IPV6_TLV_TEL_DST_SIZE 8 #ifdef IP6_TNL_DEBUG #define IP6_TNL_TRACE(x...) printk(KERN_DEBUG "%s:" x "\n", __func__) #else #define IP6_TNL_TRACE(x...) do {;} while(0) #endif #define IPV6_TCLASS_MASK (IPV6_FLOWINFO_MASK & ~IPV6_FLOWLABEL_MASK) #define IPV6_TCLASS_SHIFT 20 #define HASH_SIZE 32 #define HASH(addr) ((__force u32)((addr)->s6_addr32[0] ^ (addr)->s6_addr32[1] ^ \ (addr)->s6_addr32[2] ^ (addr)->s6_addr32[3]) & \ (HASH_SIZE - 1)) static void ip6_tnl_dev_init(struct net_device *dev); static void ip6_tnl_dev_setup(struct net_device *dev); static int ip6_tnl_net_id __read_mostly; struct ip6_tnl_net { /* the IPv6 tunnel fallback device */ struct net_device *fb_tnl_dev; /* lists for storing tunnels in use */ struct ip6_tnl *tnls_r_l[HASH_SIZE]; struct ip6_tnl *tnls_wc[1]; struct ip6_tnl **tnls[2]; }; /* * Locking : hash tables are protected by RCU and a spinlock */ static DEFINE_SPINLOCK(ip6_tnl_lock); static inline struct dst_entry *ip6_tnl_dst_check(struct ip6_tnl *t) { struct dst_entry *dst = t->dst_cache; if (dst && dst->obsolete && dst->ops->check(dst, t->dst_cookie) == NULL) { t->dst_cache = NULL; dst_release(dst); return NULL; } return dst; } static inline void ip6_tnl_dst_reset(struct ip6_tnl *t) { dst_release(t->dst_cache); t->dst_cache = NULL; } static inline void ip6_tnl_dst_store(struct ip6_tnl *t, struct dst_entry *dst) { struct rt6_info *rt = (struct rt6_info *) dst; t->dst_cookie = rt->rt6i_node ? 
rt->rt6i_node->fn_sernum : 0; dst_release(t->dst_cache); t->dst_cache = dst; } /** * ip6_tnl_lookup - fetch tunnel matching the end-point addresses * @remote: the address of the tunnel exit-point * @local: the address of the tunnel entry-point * * Return: * tunnel matching given end-points if found, * else fallback tunnel if its device is up, * else %NULL **/ #define for_each_ip6_tunnel_rcu(start) \ for (t = rcu_dereference(start); t; t = rcu_dereference(t->next)) static struct ip6_tnl * ip6_tnl_lookup(struct net *net, struct in6_addr *remote, struct in6_addr *local) { unsigned h0 = HASH(remote); unsigned h1 = HASH(local); struct ip6_tnl *t; struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[h0 ^ h1]) { if (ipv6_addr_equal(local, &t->parms.laddr) && ipv6_addr_equal(remote, &t->parms.raddr) && (t->dev->flags & IFF_UP)) return t; } t = rcu_dereference(ip6n->tnls_wc[0]); if (t && (t->dev->flags & IFF_UP)) return t; return NULL; } /** * ip6_tnl_bucket - get head of list matching given tunnel parameters * @p: parameters containing tunnel end-points * * Description: * ip6_tnl_bucket() returns the head of the list matching the * &struct in6_addr entries laddr and raddr in @p. * * Return: head of IPv6 tunnel list **/ static struct ip6_tnl ** ip6_tnl_bucket(struct ip6_tnl_net *ip6n, struct ip6_tnl_parm *p) { struct in6_addr *remote = &p->raddr; struct in6_addr *local = &p->laddr; unsigned h = 0; int prio = 0; if (!ipv6_addr_any(remote) || !ipv6_addr_any(local)) { prio = 1; h = HASH(remote) ^ HASH(local); } return &ip6n->tnls[prio][h]; } /** * ip6_tnl_link - add tunnel to hash table * @t: tunnel to be added **/ static void ip6_tnl_link(struct ip6_tnl_net *ip6n, struct ip6_tnl *t) { struct ip6_tnl **tp = ip6_tnl_bucket(ip6n, &t->parms); spin_lock_bh(&ip6_tnl_lock); t->next = *tp; rcu_assign_pointer(*tp, t); spin_unlock_bh(&ip6_tnl_lock); } /** * ip6_tnl_unlink - remove tunnel from hash table * @t: tunnel to be removed **/ static void ip6_tnl_unlink(struct ip6_tnl_net *ip6n, struct ip6_tnl *t) { struct ip6_tnl **tp; for (tp = ip6_tnl_bucket(ip6n, &t->parms); *tp; tp = &(*tp)->next) { if (t == *tp) { spin_lock_bh(&ip6_tnl_lock); *tp = t->next; spin_unlock_bh(&ip6_tnl_lock); break; } } } /** * ip6_tnl_create() - create a new tunnel * @p: tunnel parameters * @pt: pointer to new tunnel * * Description: * Create tunnel matching given parameters. * * Return: * created tunnel or NULL **/ static struct ip6_tnl *ip6_tnl_create(struct net *net, struct ip6_tnl_parm *p) { struct net_device *dev; struct ip6_tnl *t; char name[IFNAMSIZ]; int err; struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); if (p->name[0]) strlcpy(name, p->name, IFNAMSIZ); else sprintf(name, "ip6tnl%%d"); dev = alloc_netdev(sizeof (*t), name, ip6_tnl_dev_setup); if (dev == NULL) goto failed; dev_net_set(dev, net); if (strchr(name, '%')) { if (dev_alloc_name(dev, name) < 0) goto failed_free; } t = netdev_priv(dev); t->parms = *p; ip6_tnl_dev_init(dev); if ((err = register_netdevice(dev)) < 0) goto failed_free; dev_hold(dev); ip6_tnl_link(ip6n, t); return t; failed_free: free_netdev(dev); failed: return NULL; } /** * ip6_tnl_locate - find or create tunnel matching given parameters * @p: tunnel parameters * @create: != 0 if allowed to create new tunnel if no match found * * Description: * ip6_tnl_locate() first tries to locate an existing tunnel * based on @parms. If this is unsuccessful, but @create is set a new * tunnel device is created and registered for use. 
* * Return: * matching tunnel or NULL **/ static struct ip6_tnl *ip6_tnl_locate(struct net *net, struct ip6_tnl_parm *p, int create) { struct in6_addr *remote = &p->raddr; struct in6_addr *local = &p->laddr; struct ip6_tnl *t; struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); for (t = *ip6_tnl_bucket(ip6n, p); t; t = t->next) { if (ipv6_addr_equal(local, &t->parms.laddr) && ipv6_addr_equal(remote, &t->parms.raddr)) return t; } if (!create) return NULL; return ip6_tnl_create(net, p); } /** * ip6_tnl_dev_uninit - tunnel device uninitializer * @dev: the device to be destroyed * * Description: * ip6_tnl_dev_uninit() removes tunnel from its list **/ static void ip6_tnl_dev_uninit(struct net_device *dev) { struct ip6_tnl *t = netdev_priv(dev); struct net *net = dev_net(dev); struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); if (dev == ip6n->fb_tnl_dev) { spin_lock_bh(&ip6_tnl_lock); ip6n->tnls_wc[0] = NULL; spin_unlock_bh(&ip6_tnl_lock); } else { ip6_tnl_unlink(ip6n, t); } ip6_tnl_dst_reset(t); dev_put(dev); } /** * parse_tvl_tnl_enc_lim - handle encapsulation limit option * @skb: received socket buffer * * Return: * 0 if none was found, * else index to encapsulation limit **/ static __u16 parse_tlv_tnl_enc_lim(struct sk_buff *skb, __u8 * raw) { struct ipv6hdr *ipv6h = (struct ipv6hdr *) raw; __u8 nexthdr = ipv6h->nexthdr; __u16 off = sizeof (*ipv6h); while (ipv6_ext_hdr(nexthdr) && nexthdr != NEXTHDR_NONE) { __u16 optlen = 0; struct ipv6_opt_hdr *hdr; if (raw + off + sizeof (*hdr) > skb->data && !pskb_may_pull(skb, raw - skb->data + off + sizeof (*hdr))) break; hdr = (struct ipv6_opt_hdr *) (raw + off); if (nexthdr == NEXTHDR_FRAGMENT) { struct frag_hdr *frag_hdr = (struct frag_hdr *) hdr; if (frag_hdr->frag_off) break; optlen = 8; } else if (nexthdr == NEXTHDR_AUTH) { optlen = (hdr->hdrlen + 2) << 2; } else { optlen = ipv6_optlen(hdr); } if (nexthdr == NEXTHDR_DEST) { __u16 i = off + 2; while (1) { struct ipv6_tlv_tnl_enc_lim *tel; /* No more room for encapsulation limit */ if (i + sizeof (*tel) > off + optlen) break; tel = (struct ipv6_tlv_tnl_enc_lim *) &raw[i]; /* return index of option if found and valid */ if (tel->type == IPV6_TLV_TNL_ENCAP_LIMIT && tel->length == 1) return i; /* else jump to next option */ if (tel->type) i += tel->length + 2; else i++; } } nexthdr = hdr->nexthdr; off += optlen; } return 0; } /** * ip6_tnl_err - tunnel error handler * * Description: * ip6_tnl_err() should handle errors in the tunnel according * to the specifications in RFC 2473. **/ static int ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt, u8 *type, u8 *code, int *msg, __u32 *info, int offset) { struct ipv6hdr *ipv6h = (struct ipv6hdr *) skb->data; struct ip6_tnl *t; int rel_msg = 0; u8 rel_type = ICMPV6_DEST_UNREACH; u8 rel_code = ICMPV6_ADDR_UNREACH; __u32 rel_info = 0; __u16 len; int err = -ENOENT; /* If the packet doesn't contain the original IPv6 header we are in trouble since we might need the source address for further processing of the error. 
*/ rcu_read_lock(); if ((t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->daddr, &ipv6h->saddr)) == NULL) goto out; if (t->parms.proto != ipproto && t->parms.proto != 0) goto out; err = 0; switch (*type) { __u32 teli; struct ipv6_tlv_tnl_enc_lim *tel; __u32 mtu; case ICMPV6_DEST_UNREACH: if (net_ratelimit()) printk(KERN_WARNING "%s: Path to destination invalid " "or inactive!\n", t->parms.name); rel_msg = 1; break; case ICMPV6_TIME_EXCEED: if ((*code) == ICMPV6_EXC_HOPLIMIT) { if (net_ratelimit()) printk(KERN_WARNING "%s: Too small hop limit or " "routing loop in tunnel!\n", t->parms.name); rel_msg = 1; } break; case ICMPV6_PARAMPROB: teli = 0; if ((*code) == ICMPV6_HDR_FIELD) teli = parse_tlv_tnl_enc_lim(skb, skb->data); if (teli && teli == *info - 2) { tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli]; if (tel->encap_limit == 0) { if (net_ratelimit()) printk(KERN_WARNING "%s: Too small encapsulation " "limit or routing loop in " "tunnel!\n", t->parms.name); rel_msg = 1; } } else if (net_ratelimit()) { printk(KERN_WARNING "%s: Recipient unable to parse tunneled " "packet!\n ", t->parms.name); } break; case ICMPV6_PKT_TOOBIG: mtu = *info - offset; if (mtu < IPV6_MIN_MTU) mtu = IPV6_MIN_MTU; t->dev->mtu = mtu; if ((len = sizeof (*ipv6h) + ntohs(ipv6h->payload_len)) > mtu) { rel_type = ICMPV6_PKT_TOOBIG; rel_code = 0; rel_info = mtu; rel_msg = 1; } break; } *type = rel_type; *code = rel_code; *info = rel_info; *msg = rel_msg; out: rcu_read_unlock(); return err; } static int ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, u8 type, u8 code, int offset, __be32 info) { int rel_msg = 0; u8 rel_type = type; u8 rel_code = code; __u32 rel_info = ntohl(info); int err; struct sk_buff *skb2; struct iphdr *eiph; struct flowi fl; struct rtable *rt; err = ip6_tnl_err(skb, IPPROTO_IPIP, opt, &rel_type, &rel_code, &rel_msg, &rel_info, offset); if (err < 0) return err; if (rel_msg == 0) return 0; switch (rel_type) { case ICMPV6_DEST_UNREACH: if (rel_code != ICMPV6_ADDR_UNREACH) return 0; rel_type = ICMP_DEST_UNREACH; rel_code = ICMP_HOST_UNREACH; break; case ICMPV6_PKT_TOOBIG: if (rel_code != 0) return 0; rel_type = ICMP_DEST_UNREACH; rel_code = ICMP_FRAG_NEEDED; break; default: return 0; } if (!pskb_may_pull(skb, offset + sizeof(struct iphdr))) return 0; skb2 = skb_clone(skb, GFP_ATOMIC); if (!skb2) return 0; skb_dst_drop(skb2); skb_pull(skb2, offset); skb_reset_network_header(skb2); eiph = ip_hdr(skb2); /* Try to guess incoming interface */ memset(&fl, 0, sizeof(fl)); fl.fl4_dst = eiph->saddr; fl.fl4_tos = RT_TOS(eiph->tos); fl.proto = IPPROTO_IPIP; if (ip_route_output_key(dev_net(skb->dev), &rt, &fl)) goto out; skb2->dev = rt->u.dst.dev; /* route "incoming" packet */ if (rt->rt_flags & RTCF_LOCAL) { ip_rt_put(rt); rt = NULL; fl.fl4_dst = eiph->daddr; fl.fl4_src = eiph->saddr; fl.fl4_tos = eiph->tos; if (ip_route_output_key(dev_net(skb->dev), &rt, &fl) || rt->u.dst.dev->type != ARPHRD_TUNNEL) { ip_rt_put(rt); goto out; } skb_dst_set(skb2, (struct dst_entry *)rt); } else { ip_rt_put(rt); if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos, skb2->dev) || skb_dst(skb2)->dev->type != ARPHRD_TUNNEL) goto out; } /* change mtu on this route */ if (rel_type == ICMP_DEST_UNREACH && rel_code == ICMP_FRAG_NEEDED) { if (rel_info > dst_mtu(skb_dst(skb2))) goto out; skb_dst(skb2)->ops->update_pmtu(skb_dst(skb2), rel_info); } icmp_send(skb2, rel_type, rel_code, htonl(rel_info)); out: kfree_skb(skb2); return 0; } static int ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, u8 type, u8 
code, int offset, __be32 info) { int rel_msg = 0; u8 rel_type = type; u8 rel_code = code; __u32 rel_info = ntohl(info); int err; err = ip6_tnl_err(skb, IPPROTO_IPV6, opt, &rel_type, &rel_code, &rel_msg, &rel_info, offset); if (err < 0) return err; if (rel_msg && pskb_may_pull(skb, offset + sizeof(struct ipv6hdr))) { struct rt6_info *rt; struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); if (!skb2) return 0; skb_dst_drop(skb2); skb_pull(skb2, offset); skb_reset_network_header(skb2); /* Try to guess incoming interface */ rt = rt6_lookup(dev_net(skb->dev), &ipv6_hdr(skb2)->saddr, NULL, 0, 0); if (rt && rt->rt6i_dev) skb2->dev = rt->rt6i_dev; icmpv6_send(skb2, rel_type, rel_code, rel_info, skb2->dev); if (rt) dst_release(&rt->u.dst); kfree_skb(skb2); } return 0; } static void ip4ip6_dscp_ecn_decapsulate(struct ip6_tnl *t, struct ipv6hdr *ipv6h, struct sk_buff *skb) { __u8 dsfield = ipv6_get_dsfield(ipv6h) & ~INET_ECN_MASK; if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY) ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, dsfield); if (INET_ECN_is_ce(dsfield)) IP_ECN_set_ce(ip_hdr(skb)); } static void ip6ip6_dscp_ecn_decapsulate(struct ip6_tnl *t, struct ipv6hdr *ipv6h, struct sk_buff *skb) { if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY) ipv6_copy_dscp(ipv6_get_dsfield(ipv6h), ipv6_hdr(skb)); if (INET_ECN_is_ce(ipv6_get_dsfield(ipv6h))) IP6_ECN_set_ce(ipv6_hdr(skb)); } /* called with rcu_read_lock() */ static inline int ip6_tnl_rcv_ctl(struct ip6_tnl *t) { struct ip6_tnl_parm *p = &t->parms; int ret = 0; struct net *net = dev_net(t->dev); if (p->flags & IP6_TNL_F_CAP_RCV) { struct net_device *ldev = NULL; if (p->link) ldev = dev_get_by_index_rcu(net, p->link); if ((ipv6_addr_is_multicast(&p->laddr) || likely(ipv6_chk_addr(net, &p->laddr, ldev, 0))) && likely(!ipv6_chk_addr(net, &p->raddr, NULL, 0))) ret = 1; } return ret; } /** * ip6_tnl_rcv - decapsulate IPv6 packet and retransmit it locally * @skb: received socket buffer * @protocol: ethernet protocol ID * @dscp_ecn_decapsulate: the function to decapsulate DSCP code and ECN * * Return: 0 **/ static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol, __u8 ipproto, void (*dscp_ecn_decapsulate)(struct ip6_tnl *t, struct ipv6hdr *ipv6h, struct sk_buff *skb)) { struct ip6_tnl *t; struct ipv6hdr *ipv6h = ipv6_hdr(skb); rcu_read_lock(); if ((t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr, &ipv6h->daddr)) != NULL) { if (t->parms.proto != ipproto && t->parms.proto != 0) { rcu_read_unlock(); goto discard; } if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) { rcu_read_unlock(); goto discard; } if (!ip6_tnl_rcv_ctl(t)) { t->dev->stats.rx_dropped++; rcu_read_unlock(); goto discard; } secpath_reset(skb); skb->mac_header = skb->network_header; skb_reset_network_header(skb); skb->protocol = htons(protocol); skb->pkt_type = PACKET_HOST; memset(skb->cb, 0, sizeof(struct inet6_skb_parm)); skb->dev = t->dev; skb_dst_drop(skb); nf_reset(skb); dscp_ecn_decapsulate(t, ipv6h, skb); t->dev->stats.rx_packets++; t->dev->stats.rx_bytes += skb->len; netif_rx(skb); rcu_read_unlock(); return 0; } rcu_read_unlock(); return 1; discard: kfree_skb(skb); return 0; } static int ip4ip6_rcv(struct sk_buff *skb) { return ip6_tnl_rcv(skb, ETH_P_IP, IPPROTO_IPIP, ip4ip6_dscp_ecn_decapsulate); } static int ip6ip6_rcv(struct sk_buff *skb) { return ip6_tnl_rcv(skb, ETH_P_IPV6, IPPROTO_IPV6, ip6ip6_dscp_ecn_decapsulate); } struct ipv6_tel_txoption { struct ipv6_txoptions ops; __u8 dst_opt[8]; }; static void init_tel_txopt(struct ipv6_tel_txoption *opt, __u8 encap_limit) { memset(opt, 
0, sizeof(struct ipv6_tel_txoption)); opt->dst_opt[2] = IPV6_TLV_TNL_ENCAP_LIMIT; opt->dst_opt[3] = 1; opt->dst_opt[4] = encap_limit; opt->dst_opt[5] = IPV6_TLV_PADN; opt->dst_opt[6] = 1; opt->ops.dst0opt = (struct ipv6_opt_hdr *) opt->dst_opt; opt->ops.opt_nflen = 8; } /** * ip6_tnl_addr_conflict - compare packet addresses to tunnel's own * @t: the outgoing tunnel device * @hdr: IPv6 header from the incoming packet * * Description: * Avoid trivial tunneling loop by checking that tunnel exit-point * doesn't match source of incoming packet. * * Return: * 1 if conflict, * 0 else **/ static inline int ip6_tnl_addr_conflict(struct ip6_tnl *t, struct ipv6hdr *hdr) { return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr); } static inline int ip6_tnl_xmit_ctl(struct ip6_tnl *t) { struct ip6_tnl_parm *p = &t->parms; int ret = 0; struct net *net = dev_net(t->dev); if (p->flags & IP6_TNL_F_CAP_XMIT) { struct net_device *ldev = NULL; rcu_read_lock(); if (p->link) ldev = dev_get_by_index_rcu(net, p->link); if (unlikely(!ipv6_chk_addr(net, &p->laddr, ldev, 0))) printk(KERN_WARNING "%s xmit: Local address not yet configured!\n", p->name); else if (!ipv6_addr_is_multicast(&p->raddr) && unlikely(ipv6_chk_addr(net, &p->raddr, NULL, 0))) printk(KERN_WARNING "%s xmit: Routing loop! " "Remote address found on this node!\n", p->name); else ret = 1; rcu_read_unlock(); } return ret; } /** * ip6_tnl_xmit2 - encapsulate packet and send * @skb: the outgoing socket buffer * @dev: the outgoing tunnel device * @dsfield: dscp code for outer header * @fl: flow of tunneled packet * @encap_limit: encapsulation limit * @pmtu: Path MTU is stored if packet is too big * * Description: * Build new header and do some sanity checks on the packet before sending * it. * * Return: * 0 on success * -1 fail * %-EMSGSIZE message too big. return mtu in this case. **/ static int ip6_tnl_xmit2(struct sk_buff *skb, struct net_device *dev, __u8 dsfield, struct flowi *fl, int encap_limit, __u32 *pmtu) { struct net *net = dev_net(dev); struct ip6_tnl *t = netdev_priv(dev); struct net_device_stats *stats = &t->dev->stats; struct ipv6hdr *ipv6h = ipv6_hdr(skb); struct ipv6_tel_txoption opt; struct dst_entry *dst; struct net_device *tdev; int mtu; unsigned int max_headroom = sizeof(struct ipv6hdr); u8 proto; int err = -1; int pkt_len; if ((dst = ip6_tnl_dst_check(t)) != NULL) dst_hold(dst); else { dst = ip6_route_output(net, NULL, fl); if (dst->error || xfrm_lookup(net, &dst, fl, NULL, 0) < 0) goto tx_err_link_failure; } tdev = dst->dev; if (tdev == dev) { stats->collisions++; if (net_ratelimit()) printk(KERN_WARNING "%s: Local routing loop detected!\n", t->parms.name); goto tx_err_dst_release; } mtu = dst_mtu(dst) - sizeof (*ipv6h); if (encap_limit >= 0) { max_headroom += 8; mtu -= 8; } if (mtu < IPV6_MIN_MTU) mtu = IPV6_MIN_MTU; if (skb_dst(skb)) skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu); if (skb->len > mtu) { *pmtu = mtu; err = -EMSGSIZE; goto tx_err_dst_release; } /* * Okay, now see if we can stuff it in the buffer as-is. 
*/ max_headroom += LL_RESERVED_SPACE(tdev); if (skb_headroom(skb) < max_headroom || skb_shared(skb) || (skb_cloned(skb) && !skb_clone_writable(skb, 0))) { struct sk_buff *new_skb; if (!(new_skb = skb_realloc_headroom(skb, max_headroom))) goto tx_err_dst_release; if (skb->sk) skb_set_owner_w(new_skb, skb->sk); kfree_skb(skb); skb = new_skb; } skb_dst_drop(skb); skb_dst_set(skb, dst_clone(dst)); skb->transport_header = skb->network_header; proto = fl->proto; if (encap_limit >= 0) { init_tel_txopt(&opt, encap_limit); ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL); } skb_push(skb, sizeof(struct ipv6hdr)); skb_reset_network_header(skb); ipv6h = ipv6_hdr(skb); *(__be32*)ipv6h = fl->fl6_flowlabel | htonl(0x60000000); dsfield = INET_ECN_encapsulate(0, dsfield); ipv6_change_dsfield(ipv6h, ~INET_ECN_MASK, dsfield); ipv6h->hop_limit = t->parms.hop_limit; ipv6h->nexthdr = proto; ipv6_addr_copy(&ipv6h->saddr, &fl->fl6_src); ipv6_addr_copy(&ipv6h->daddr, &fl->fl6_dst); nf_reset(skb); pkt_len = skb->len; err = ip6_local_out(skb); if (net_xmit_eval(err) == 0) { stats->tx_bytes += pkt_len; stats->tx_packets++; } else { stats->tx_errors++; stats->tx_aborted_errors++; } ip6_tnl_dst_store(t, dst); return 0; tx_err_link_failure: stats->tx_carrier_errors++; dst_link_failure(skb); tx_err_dst_release: dst_release(dst); return err; } static inline int ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) { struct ip6_tnl *t = netdev_priv(dev); struct iphdr *iph = ip_hdr(skb); int encap_limit = -1; struct flowi fl; __u8 dsfield; __u32 mtu; int err; if ((t->parms.proto != IPPROTO_IPIP && t->parms.proto != 0) || !ip6_tnl_xmit_ctl(t)) return -1; if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) encap_limit = t->parms.encap_limit; memcpy(&fl, &t->fl, sizeof (fl)); fl.proto = IPPROTO_IPIP; dsfield = ipv4_get_dsfield(iph); if ((t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)) fl.fl6_flowlabel |= htonl((__u32)iph->tos << IPV6_TCLASS_SHIFT) & IPV6_TCLASS_MASK; err = ip6_tnl_xmit2(skb, dev, dsfield, &fl, encap_limit, &mtu); if (err != 0) { /* XXX: send ICMP error even if DF is not set. 
*/ if (err == -EMSGSIZE) icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu)); return -1; } return 0; } static inline int ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) { struct ip6_tnl *t = netdev_priv(dev); struct ipv6hdr *ipv6h = ipv6_hdr(skb); int encap_limit = -1; __u16 offset; struct flowi fl; __u8 dsfield; __u32 mtu; int err; if ((t->parms.proto != IPPROTO_IPV6 && t->parms.proto != 0) || !ip6_tnl_xmit_ctl(t) || ip6_tnl_addr_conflict(t, ipv6h)) return -1; offset = parse_tlv_tnl_enc_lim(skb, skb_network_header(skb)); if (offset > 0) { struct ipv6_tlv_tnl_enc_lim *tel; tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset]; if (tel->encap_limit == 0) { icmpv6_send(skb, ICMPV6_PARAMPROB, ICMPV6_HDR_FIELD, offset + 2, skb->dev); return -1; } encap_limit = tel->encap_limit - 1; } else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) encap_limit = t->parms.encap_limit; memcpy(&fl, &t->fl, sizeof (fl)); fl.proto = IPPROTO_IPV6; dsfield = ipv6_get_dsfield(ipv6h); if ((t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)) fl.fl6_flowlabel |= (*(__be32 *) ipv6h & IPV6_TCLASS_MASK); if ((t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)) fl.fl6_flowlabel |= (*(__be32 *) ipv6h & IPV6_FLOWLABEL_MASK); err = ip6_tnl_xmit2(skb, dev, dsfield, &fl, encap_limit, &mtu); if (err != 0) { if (err == -EMSGSIZE) icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev); return -1; } return 0; } static netdev_tx_t ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) { struct ip6_tnl *t = netdev_priv(dev); struct net_device_stats *stats = &t->dev->stats; int ret; switch (skb->protocol) { case htons(ETH_P_IP): ret = ip4ip6_tnl_xmit(skb, dev); break; case htons(ETH_P_IPV6): ret = ip6ip6_tnl_xmit(skb, dev); break; default: goto tx_err; } if (ret < 0) goto tx_err; return NETDEV_TX_OK; tx_err: stats->tx_errors++; stats->tx_dropped++; kfree_skb(skb); return NETDEV_TX_OK; } static void ip6_tnl_set_cap(struct ip6_tnl *t) { struct ip6_tnl_parm *p = &t->parms; int ltype = ipv6_addr_type(&p->laddr); int rtype = ipv6_addr_type(&p->raddr); p->flags &= ~(IP6_TNL_F_CAP_XMIT|IP6_TNL_F_CAP_RCV); if (ltype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) && rtype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) && !((ltype|rtype) & IPV6_ADDR_LOOPBACK) && (!((ltype|rtype) & IPV6_ADDR_LINKLOCAL) || p->link)) { if (ltype&IPV6_ADDR_UNICAST) p->flags |= IP6_TNL_F_CAP_XMIT; if (rtype&IPV6_ADDR_UNICAST) p->flags |= IP6_TNL_F_CAP_RCV; } } static void ip6_tnl_link_config(struct ip6_tnl *t) { struct net_device *dev = t->dev; struct ip6_tnl_parm *p = &t->parms; struct flowi *fl = &t->fl; memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr)); memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr)); /* Set up flowi template */ ipv6_addr_copy(&fl->fl6_src, &p->laddr); ipv6_addr_copy(&fl->fl6_dst, &p->raddr); fl->oif = p->link; fl->fl6_flowlabel = 0; if (!(p->flags&IP6_TNL_F_USE_ORIG_TCLASS)) fl->fl6_flowlabel |= IPV6_TCLASS_MASK & p->flowinfo; if (!(p->flags&IP6_TNL_F_USE_ORIG_FLOWLABEL)) fl->fl6_flowlabel |= IPV6_FLOWLABEL_MASK & p->flowinfo; ip6_tnl_set_cap(t); if (p->flags&IP6_TNL_F_CAP_XMIT && p->flags&IP6_TNL_F_CAP_RCV) dev->flags |= IFF_POINTOPOINT; else dev->flags &= ~IFF_POINTOPOINT; dev->iflink = p->link; if (p->flags & IP6_TNL_F_CAP_XMIT) { int strict = (ipv6_addr_type(&p->raddr) & (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL)); struct rt6_info *rt = rt6_lookup(dev_net(dev), &p->raddr, &p->laddr, p->link, strict); if (rt == NULL) return; if (rt->rt6i_dev) { dev->hard_header_len = rt->rt6i_dev->hard_header_len + 
sizeof (struct ipv6hdr); dev->mtu = rt->rt6i_dev->mtu - sizeof (struct ipv6hdr); if (dev->mtu < IPV6_MIN_MTU) dev->mtu = IPV6_MIN_MTU; } dst_release(&rt->u.dst); } } /** * ip6_tnl_change - update the tunnel parameters * @t: tunnel to be changed * @p: tunnel configuration parameters * * Description: * ip6_tnl_change() updates the tunnel parameters **/ static int ip6_tnl_change(struct ip6_tnl *t, struct ip6_tnl_parm *p) { ipv6_addr_copy(&t->parms.laddr, &p->laddr); ipv6_addr_copy(&t->parms.raddr, &p->raddr); t->parms.flags = p->flags; t->parms.hop_limit = p->hop_limit; t->parms.encap_limit = p->encap_limit; t->parms.flowinfo = p->flowinfo; t->parms.link = p->link; t->parms.proto = p->proto; ip6_tnl_dst_reset(t); ip6_tnl_link_config(t); return 0; } /** * ip6_tnl_ioctl - configure ipv6 tunnels from userspace * @dev: virtual device associated with tunnel * @ifr: parameters passed from userspace * @cmd: command to be performed * * Description: * ip6_tnl_ioctl() is used for managing IPv6 tunnels * from userspace. * * The possible commands are the following: * %SIOCGETTUNNEL: get tunnel parameters for device * %SIOCADDTUNNEL: add tunnel matching given tunnel parameters * %SIOCCHGTUNNEL: change tunnel parameters to those given * %SIOCDELTUNNEL: delete tunnel * * The fallback device "ip6tnl0", created during module * initialization, can be used for creating other tunnel devices. * * Return: * 0 on success, * %-EFAULT if unable to copy data to or from userspace, * %-EPERM if current process hasn't %CAP_NET_ADMIN set * %-EINVAL if passed tunnel parameters are invalid, * %-EEXIST if changing a tunnel's parameters would cause a conflict * %-ENODEV if attempting to change or delete a nonexisting device **/ static int ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { int err = 0; struct ip6_tnl_parm p; struct ip6_tnl *t = NULL; struct net *net = dev_net(dev); struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); switch (cmd) { case SIOCGETTUNNEL: if (dev == ip6n->fb_tnl_dev) { if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof (p))) { err = -EFAULT; break; } t = ip6_tnl_locate(net, &p, 0); } if (t == NULL) t = netdev_priv(dev); memcpy(&p, &t->parms, sizeof (p)); if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof (p))) { err = -EFAULT; } break; case SIOCADDTUNNEL: case SIOCCHGTUNNEL: err = -EPERM; if (!capable(CAP_NET_ADMIN)) break; err = -EFAULT; if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof (p))) break; err = -EINVAL; if (p.proto != IPPROTO_IPV6 && p.proto != IPPROTO_IPIP && p.proto != 0) break; t = ip6_tnl_locate(net, &p, cmd == SIOCADDTUNNEL); if (dev != ip6n->fb_tnl_dev && cmd == SIOCCHGTUNNEL) { if (t != NULL) { if (t->dev != dev) { err = -EEXIST; break; } } else t = netdev_priv(dev); ip6_tnl_unlink(ip6n, t); err = ip6_tnl_change(t, &p); ip6_tnl_link(ip6n, t); netdev_state_change(dev); } if (t) { err = 0; if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms, sizeof (p))) err = -EFAULT; } else err = (cmd == SIOCADDTUNNEL ? 
-ENOBUFS : -ENOENT); break; case SIOCDELTUNNEL: err = -EPERM; if (!capable(CAP_NET_ADMIN)) break; if (dev == ip6n->fb_tnl_dev) { err = -EFAULT; if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof (p))) break; err = -ENOENT; if ((t = ip6_tnl_locate(net, &p, 0)) == NULL) break; err = -EPERM; if (t->dev == ip6n->fb_tnl_dev) break; dev = t->dev; } err = 0; unregister_netdevice(dev); break; default: err = -EINVAL; } return err; } /** * ip6_tnl_change_mtu - change mtu manually for tunnel device * @dev: virtual device associated with tunnel * @new_mtu: the new mtu * * Return: * 0 on success, * %-EINVAL if mtu too small **/ static int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu) { if (new_mtu < IPV6_MIN_MTU) { return -EINVAL; } dev->mtu = new_mtu; return 0; } static const struct net_device_ops ip6_tnl_netdev_ops = { .ndo_uninit = ip6_tnl_dev_uninit, .ndo_start_xmit = ip6_tnl_xmit, .ndo_do_ioctl = ip6_tnl_ioctl, .ndo_change_mtu = ip6_tnl_change_mtu, }; /** * ip6_tnl_dev_setup - setup virtual tunnel device * @dev: virtual device associated with tunnel * * Description: * Initialize function pointers and device parameters **/ static void ip6_tnl_dev_setup(struct net_device *dev) { dev->netdev_ops = &ip6_tnl_netdev_ops; dev->destructor = free_netdev; dev->type = ARPHRD_TUNNEL6; dev->hard_header_len = LL_MAX_HEADER + sizeof (struct ipv6hdr); dev->mtu = ETH_DATA_LEN - sizeof (struct ipv6hdr); dev->flags |= IFF_NOARP; dev->addr_len = sizeof(struct in6_addr); dev->features |= NETIF_F_NETNS_LOCAL; } /** * ip6_tnl_dev_init_gen - general initializer for all tunnel devices * @dev: virtual device associated with tunnel **/ static inline void ip6_tnl_dev_init_gen(struct net_device *dev) { struct ip6_tnl *t = netdev_priv(dev); t->dev = dev; strcpy(t->parms.name, dev->name); } /** * ip6_tnl_dev_init - initializer for all non fallback tunnel devices * @dev: virtual device associated with tunnel **/ static void ip6_tnl_dev_init(struct net_device *dev) { struct ip6_tnl *t = netdev_priv(dev); ip6_tnl_dev_init_gen(dev); ip6_tnl_link_config(t); } /** * ip6_fb_tnl_dev_init - initializer for fallback tunnel device * @dev: fallback device * * Return: 0 **/ static void __net_init ip6_fb_tnl_dev_init(struct net_device *dev) { struct ip6_tnl *t = netdev_priv(dev); struct net *net = dev_net(dev); struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); ip6_tnl_dev_init_gen(dev); t->parms.proto = IPPROTO_IPV6; dev_hold(dev); ip6n->tnls_wc[0] = t; } static struct xfrm6_tunnel ip4ip6_handler = { .handler = ip4ip6_rcv, .err_handler = ip4ip6_err, .priority = 1, }; static struct xfrm6_tunnel ip6ip6_handler = { .handler = ip6ip6_rcv, .err_handler = ip6ip6_err, .priority = 1, }; static void __net_exit ip6_tnl_destroy_tunnels(struct ip6_tnl_net *ip6n) { int h; struct ip6_tnl *t; LIST_HEAD(list); for (h = 0; h < HASH_SIZE; h++) { t = ip6n->tnls_r_l[h]; while (t != NULL) { unregister_netdevice_queue(t->dev, &list); t = t->next; } } t = ip6n->tnls_wc[0]; unregister_netdevice_queue(t->dev, &list); unregister_netdevice_many(&list); } static int __net_init ip6_tnl_init_net(struct net *net) { struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); int err; ip6n->tnls[0] = ip6n->tnls_wc; ip6n->tnls[1] = ip6n->tnls_r_l; err = -ENOMEM; ip6n->fb_tnl_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6tnl0", ip6_tnl_dev_setup); if (!ip6n->fb_tnl_dev) goto err_alloc_dev; dev_net_set(ip6n->fb_tnl_dev, net); ip6_fb_tnl_dev_init(ip6n->fb_tnl_dev); err = register_netdev(ip6n->fb_tnl_dev); if (err < 0) goto err_register; return 0; 
err_register: free_netdev(ip6n->fb_tnl_dev); err_alloc_dev: return err; } static void __net_exit ip6_tnl_exit_net(struct net *net) { struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); rtnl_lock(); ip6_tnl_destroy_tunnels(ip6n); rtnl_unlock(); } static struct pernet_operations ip6_tnl_net_ops = { .init = ip6_tnl_init_net, .exit = ip6_tnl_exit_net, .id = &ip6_tnl_net_id, .size = sizeof(struct ip6_tnl_net), }; /** * ip6_tunnel_init - register protocol and reserve needed resources * * Return: 0 on success **/ static int __init ip6_tunnel_init(void) { int err; if (xfrm6_tunnel_register(&ip4ip6_handler, AF_INET)) { printk(KERN_ERR "ip6_tunnel init: can't register ip4ip6\n"); err = -EAGAIN; goto out; } if (xfrm6_tunnel_register(&ip6ip6_handler, AF_INET6)) { printk(KERN_ERR "ip6_tunnel init: can't register ip6ip6\n"); err = -EAGAIN; goto unreg_ip4ip6; } err = register_pernet_device(&ip6_tnl_net_ops); if (err < 0) goto err_pernet; return 0; err_pernet: xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6); unreg_ip4ip6: xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET); out: return err; } /** * ip6_tunnel_cleanup - free resources and unregister protocol **/ static void __exit ip6_tunnel_cleanup(void) { if (xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET)) printk(KERN_INFO "ip6_tunnel close: can't deregister ip4ip6\n"); if (xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6)) printk(KERN_INFO "ip6_tunnel close: can't deregister ip6ip6\n"); unregister_pernet_device(&ip6_tnl_net_ops); } module_init(ip6_tunnel_init); module_exit(ip6_tunnel_cleanup);
./CrossVul/dataset_final_sorted/CWE-362/c/bad_3459_1
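The SIOCADDTUNNEL/SIOCCHGTUNNEL/SIOCDELTUNNEL handling in ip6_tnl_ioctl() above is easiest to see from the calling side. What follows is a minimal userspace sketch, not part of the file above: it assumes the ip6_tunnel module is loaded and the caller has CAP_NET_ADMIN, and the device name "mytnl0", the 2001:db8:: addresses, the hop limit and the encap limit are placeholder values. The request is issued against the fallback device "ip6tnl0", which the comment above says can be used to create further tunnel devices.

/* Userspace sketch (assumptions noted above): create an ip6ip6 tunnel
 * through the SIOCADDTUNNEL path served by ip6_tnl_ioctl(). */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <linux/if_tunnel.h>
#include <linux/ip6_tunnel.h>

int main(void)
{
	struct ip6_tnl_parm p;
	struct ifreq ifr;
	int fd = socket(AF_INET6, SOCK_DGRAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&p, 0, sizeof(p));
	strncpy(p.name, "mytnl0", IFNAMSIZ - 1);	/* placeholder name */
	p.proto = IPPROTO_IPV6;		/* ip6ip6, see ip6ip6_tnl_xmit() */
	p.hop_limit = 64;		/* placeholder */
	p.encap_limit = 4;		/* placeholder encap limit */
	inet_pton(AF_INET6, "2001:db8::1", &p.laddr);	/* placeholder */
	inet_pton(AF_INET6, "2001:db8::2", &p.raddr);	/* placeholder */

	/* The ioctl is addressed to the fallback device "ip6tnl0". */
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "ip6tnl0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&p;

	if (ioctl(fd, SIOCADDTUNNEL, &ifr) < 0)
		perror("SIOCADDTUNNEL");
	else
		printf("created tunnel %s\n", p.name);

	close(fd);
	return 0;
}

On success the kernel copies the tunnel's resulting parameters back through ifru_data (see the copy_to_user() near the end of the SIOCADDTUNNEL/SIOCCHGTUNNEL branch above), so p reflects what was actually configured.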
crossvul-cpp_data_good_677_2
/* * Copyright (C) 1991, 1992 Linus Torvalds * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs * * Pentium III FXSR, SSE support * Gareth Hughes <gareth@valinux.com>, May 2000 */ /* * Handle hardware traps and faults. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/context_tracking.h> #include <linux/interrupt.h> #include <linux/kallsyms.h> #include <linux/spinlock.h> #include <linux/kprobes.h> #include <linux/uaccess.h> #include <linux/kdebug.h> #include <linux/kgdb.h> #include <linux/kernel.h> #include <linux/export.h> #include <linux/ptrace.h> #include <linux/uprobes.h> #include <linux/string.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/kexec.h> #include <linux/sched.h> #include <linux/sched/task_stack.h> #include <linux/timer.h> #include <linux/init.h> #include <linux/bug.h> #include <linux/nmi.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/io.h> #if defined(CONFIG_EDAC) #include <linux/edac.h> #endif #include <asm/stacktrace.h> #include <asm/processor.h> #include <asm/debugreg.h> #include <linux/atomic.h> #include <asm/text-patching.h> #include <asm/ftrace.h> #include <asm/traps.h> #include <asm/desc.h> #include <asm/fpu/internal.h> #include <asm/cpu_entry_area.h> #include <asm/mce.h> #include <asm/fixmap.h> #include <asm/mach_traps.h> #include <asm/alternative.h> #include <asm/fpu/xstate.h> #include <asm/trace/mpx.h> #include <asm/mpx.h> #include <asm/vm86.h> #include <asm/umip.h> #ifdef CONFIG_X86_64 #include <asm/x86_init.h> #include <asm/pgalloc.h> #include <asm/proto.h> #else #include <asm/processor-flags.h> #include <asm/setup.h> #include <asm/proto.h> #endif DECLARE_BITMAP(system_vectors, NR_VECTORS); static inline void cond_local_irq_enable(struct pt_regs *regs) { if (regs->flags & X86_EFLAGS_IF) local_irq_enable(); } static inline void cond_local_irq_disable(struct pt_regs *regs) { if (regs->flags & X86_EFLAGS_IF) local_irq_disable(); } /* * In IST context, we explicitly disable preemption. This serves two * purposes: it makes it much less likely that we would accidentally * schedule in IST context and it will force a warning if we somehow * manage to schedule by accident. */ void ist_enter(struct pt_regs *regs) { if (user_mode(regs)) { RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU"); } else { /* * We might have interrupted pretty much anything. In * fact, if we're a machine check, we can even interrupt * NMI processing. We don't want in_nmi() to return true, * but we need to notify RCU. */ rcu_nmi_enter(); } preempt_disable(); /* This code is a bit fragile. Test it. */ RCU_LOCKDEP_WARN(!rcu_is_watching(), "ist_enter didn't work"); } void ist_exit(struct pt_regs *regs) { preempt_enable_no_resched(); if (!user_mode(regs)) rcu_nmi_exit(); } /** * ist_begin_non_atomic() - begin a non-atomic section in an IST exception * @regs: regs passed to the IST exception handler * * IST exception handlers normally cannot schedule. As a special * exception, if the exception interrupted userspace code (i.e. * user_mode(regs) would return true) and the exception was not * a double fault, it can be safe to schedule. ist_begin_non_atomic() * begins a non-atomic section within an ist_enter()/ist_exit() region. * Callers are responsible for enabling interrupts themselves inside * the non-atomic section, and callers must call ist_end_non_atomic() * before ist_exit(). */ void ist_begin_non_atomic(struct pt_regs *regs) { BUG_ON(!user_mode(regs)); /* * Sanity check: we need to be on the normal thread stack. 
This * will catch asm bugs and any attempt to use ist_preempt_enable * from double_fault. */ BUG_ON(!on_thread_stack()); preempt_enable_no_resched(); } /** * ist_end_non_atomic() - begin a non-atomic section in an IST exception * * Ends a non-atomic section started with ist_begin_non_atomic(). */ void ist_end_non_atomic(void) { preempt_disable(); } int is_valid_bugaddr(unsigned long addr) { unsigned short ud; if (addr < TASK_SIZE_MAX) return 0; if (probe_kernel_address((unsigned short *)addr, ud)) return 0; return ud == INSN_UD0 || ud == INSN_UD2; } int fixup_bug(struct pt_regs *regs, int trapnr) { if (trapnr != X86_TRAP_UD) return 0; switch (report_bug(regs->ip, regs)) { case BUG_TRAP_TYPE_NONE: case BUG_TRAP_TYPE_BUG: break; case BUG_TRAP_TYPE_WARN: regs->ip += LEN_UD2; return 1; } return 0; } static nokprobe_inline int do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str, struct pt_regs *regs, long error_code) { if (v8086_mode(regs)) { /* * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86. * On nmi (interrupt 2), do_trap should not be called. */ if (trapnr < X86_TRAP_UD) { if (!handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, trapnr)) return 0; } return -1; } if (!user_mode(regs)) { if (fixup_exception(regs, trapnr)) return 0; tsk->thread.error_code = error_code; tsk->thread.trap_nr = trapnr; die(str, regs, error_code); } return -1; } static siginfo_t *fill_trap_info(struct pt_regs *regs, int signr, int trapnr, siginfo_t *info) { unsigned long siaddr; int sicode; switch (trapnr) { default: return SEND_SIG_PRIV; case X86_TRAP_DE: sicode = FPE_INTDIV; siaddr = uprobe_get_trap_addr(regs); break; case X86_TRAP_UD: sicode = ILL_ILLOPN; siaddr = uprobe_get_trap_addr(regs); break; case X86_TRAP_AC: sicode = BUS_ADRALN; siaddr = 0; break; } info->si_signo = signr; info->si_errno = 0; info->si_code = sicode; info->si_addr = (void __user *)siaddr; return info; } static void do_trap(int trapnr, int signr, char *str, struct pt_regs *regs, long error_code, siginfo_t *info) { struct task_struct *tsk = current; if (!do_trap_no_signal(tsk, trapnr, str, regs, error_code)) return; /* * We want error_code and trap_nr set for userspace faults and * kernelspace faults which result in die(), but not * kernelspace faults which are fixed up. die() gives the * process no chance to handle the signal and notice the * kernel fault information, so that won't result in polluting * the information about previously queued, but not yet * delivered, faults. See also do_general_protection below. */ tsk->thread.error_code = error_code; tsk->thread.trap_nr = trapnr; if (show_unhandled_signals && unhandled_signal(tsk, signr) && printk_ratelimit()) { pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx", tsk->comm, tsk->pid, str, regs->ip, regs->sp, error_code); print_vma_addr(KERN_CONT " in ", regs->ip); pr_cont("\n"); } force_sig_info(signr, info ?: SEND_SIG_PRIV, tsk); } NOKPROBE_SYMBOL(do_trap); static void do_error_trap(struct pt_regs *regs, long error_code, char *str, unsigned long trapnr, int signr) { siginfo_t info; RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU"); /* * WARN*()s end up here; fix them up before we call the * notifier chain. 
*/ if (!user_mode(regs) && fixup_bug(regs, trapnr)) return; if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) != NOTIFY_STOP) { cond_local_irq_enable(regs); do_trap(trapnr, signr, str, regs, error_code, fill_trap_info(regs, signr, trapnr, &info)); } } #define DO_ERROR(trapnr, signr, str, name) \ dotraplinkage void do_##name(struct pt_regs *regs, long error_code) \ { \ do_error_trap(regs, error_code, str, trapnr, signr); \ } DO_ERROR(X86_TRAP_DE, SIGFPE, "divide error", divide_error) DO_ERROR(X86_TRAP_OF, SIGSEGV, "overflow", overflow) DO_ERROR(X86_TRAP_UD, SIGILL, "invalid opcode", invalid_op) DO_ERROR(X86_TRAP_OLD_MF, SIGFPE, "coprocessor segment overrun",coprocessor_segment_overrun) DO_ERROR(X86_TRAP_TS, SIGSEGV, "invalid TSS", invalid_TSS) DO_ERROR(X86_TRAP_NP, SIGBUS, "segment not present", segment_not_present) DO_ERROR(X86_TRAP_SS, SIGBUS, "stack segment", stack_segment) DO_ERROR(X86_TRAP_AC, SIGBUS, "alignment check", alignment_check) #ifdef CONFIG_VMAP_STACK __visible void __noreturn handle_stack_overflow(const char *message, struct pt_regs *regs, unsigned long fault_address) { printk(KERN_EMERG "BUG: stack guard page was hit at %p (stack is %p..%p)\n", (void *)fault_address, current->stack, (char *)current->stack + THREAD_SIZE - 1); die(message, regs, 0); /* Be absolutely certain we don't return. */ panic(message); } #endif #ifdef CONFIG_X86_64 /* Runs on IST stack */ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code) { static const char str[] = "double fault"; struct task_struct *tsk = current; #ifdef CONFIG_VMAP_STACK unsigned long cr2; #endif #ifdef CONFIG_X86_ESPFIX64 extern unsigned char native_irq_return_iret[]; /* * If IRET takes a non-IST fault on the espfix64 stack, then we * end up promoting it to a doublefault. In that case, take * advantage of the fact that we're not using the normal (TSS.sp0) * stack right now. We can write a fake #GP(0) frame at TSS.sp0 * and then modify our own IRET frame so that, when we return, * we land directly at the #GP(0) vector with the stack already * set up according to its expectations. * * The net result is that our #GP handler will think that we * entered from usermode with the bad user context. * * No need for ist_enter here because we don't use RCU. */ if (((long)regs->sp >> P4D_SHIFT) == ESPFIX_PGD_ENTRY && regs->cs == __KERNEL_CS && regs->ip == (unsigned long)native_irq_return_iret) { struct pt_regs *gpregs = (struct pt_regs *)this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1; /* * regs->sp points to the failing IRET frame on the * ESPFIX64 stack. Copy it to the entry stack. This fills * in gpregs->ss through gpregs->ip. * */ memmove(&gpregs->ip, (void *)regs->sp, 5*8); gpregs->orig_ax = 0; /* Missing (lost) #GP error code */ /* * Adjust our frame so that we return straight to the #GP * vector with the expected RSP value. This is safe because * we won't enable interupts or schedule before we invoke * general_protection, so nothing will clobber the stack * frame we just set up. */ regs->ip = (unsigned long)general_protection; regs->sp = (unsigned long)&gpregs->orig_ax; return; } #endif ist_enter(regs); notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV); tsk->thread.error_code = error_code; tsk->thread.trap_nr = X86_TRAP_DF; #ifdef CONFIG_VMAP_STACK /* * If we overflow the stack into a guard page, the CPU will fail * to deliver #PF and will send #DF instead. 
Similarly, if we * take any non-IST exception while too close to the bottom of * the stack, the processor will get a page fault while * delivering the exception and will generate a double fault. * * According to the SDM (footnote in 6.15 under "Interrupt 14 - * Page-Fault Exception (#PF): * * Processors update CR2 whenever a page fault is detected. If a * second page fault occurs while an earlier page fault is being * delivered, the faulting linear address of the second fault will * overwrite the contents of CR2 (replacing the previous * address). These updates to CR2 occur even if the page fault * results in a double fault or occurs during the delivery of a * double fault. * * The logic below has a small possibility of incorrectly diagnosing * some errors as stack overflows. For example, if the IDT or GDT * gets corrupted such that #GP delivery fails due to a bad descriptor * causing #GP and we hit this condition while CR2 coincidentally * points to the stack guard page, we'll think we overflowed the * stack. Given that we're going to panic one way or another * if this happens, this isn't necessarily worth fixing. * * If necessary, we could improve the test by only diagnosing * a stack overflow if the saved RSP points within 47 bytes of * the bottom of the stack: if RSP == tsk_stack + 48 and we * take an exception, the stack is already aligned and there * will be enough room SS, RSP, RFLAGS, CS, RIP, and a * possible error code, so a stack overflow would *not* double * fault. With any less space left, exception delivery could * fail, and, as a practical matter, we've overflowed the * stack even if the actual trigger for the double fault was * something else. */ cr2 = read_cr2(); if ((unsigned long)task_stack_page(tsk) - 1 - cr2 < PAGE_SIZE) handle_stack_overflow("kernel stack overflow (double-fault)", regs, cr2); #endif #ifdef CONFIG_DOUBLEFAULT df_debug(regs, error_code); #endif /* * This is always a kernel trap and never fixable (and thus must * never return). */ for (;;) die(str, regs, error_code); } #endif dotraplinkage void do_bounds(struct pt_regs *regs, long error_code) { const struct mpx_bndcsr *bndcsr; siginfo_t *info; RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU"); if (notify_die(DIE_TRAP, "bounds", regs, error_code, X86_TRAP_BR, SIGSEGV) == NOTIFY_STOP) return; cond_local_irq_enable(regs); if (!user_mode(regs)) die("bounds", regs, error_code); if (!cpu_feature_enabled(X86_FEATURE_MPX)) { /* The exception is not from Intel MPX */ goto exit_trap; } /* * We need to look at BNDSTATUS to resolve this exception. * A NULL here might mean that it is in its 'init state', * which is all zeros which indicates MPX was not * responsible for the exception. */ bndcsr = get_xsave_field_ptr(XFEATURE_MASK_BNDCSR); if (!bndcsr) goto exit_trap; trace_bounds_exception_mpx(bndcsr); /* * The error code field of the BNDSTATUS register communicates status * information of a bound range exception #BR or operation involving * bound directory. */ switch (bndcsr->bndstatus & MPX_BNDSTA_ERROR_CODE) { case 2: /* Bound directory has invalid entry. */ if (mpx_handle_bd_fault()) goto exit_trap; break; /* Success, it was handled */ case 1: /* Bound violation. */ info = mpx_generate_siginfo(regs); if (IS_ERR(info)) { /* * We failed to decode the MPX instruction. Act as if * the exception was not caused by MPX. */ goto exit_trap; } /* * Success, we decoded the instruction and retrieved * an 'info' containing the address being accessed * which caused the exception. 
This information * allows and application to possibly handle the * #BR exception itself. */ do_trap(X86_TRAP_BR, SIGSEGV, "bounds", regs, error_code, info); kfree(info); break; case 0: /* No exception caused by Intel MPX operations. */ goto exit_trap; default: die("bounds", regs, error_code); } return; exit_trap: /* * This path out is for all the cases where we could not * handle the exception in some way (like allocating a * table or telling userspace about it. We will also end * up here if the kernel has MPX turned off at compile * time.. */ do_trap(X86_TRAP_BR, SIGSEGV, "bounds", regs, error_code, NULL); } dotraplinkage void do_general_protection(struct pt_regs *regs, long error_code) { struct task_struct *tsk; RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU"); cond_local_irq_enable(regs); if (static_cpu_has(X86_FEATURE_UMIP)) { if (user_mode(regs) && fixup_umip_exception(regs)) return; } if (v8086_mode(regs)) { local_irq_enable(); handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code); return; } tsk = current; if (!user_mode(regs)) { if (fixup_exception(regs, X86_TRAP_GP)) return; tsk->thread.error_code = error_code; tsk->thread.trap_nr = X86_TRAP_GP; if (notify_die(DIE_GPF, "general protection fault", regs, error_code, X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP) die("general protection fault", regs, error_code); return; } tsk->thread.error_code = error_code; tsk->thread.trap_nr = X86_TRAP_GP; if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) && printk_ratelimit()) { pr_info("%s[%d] general protection ip:%lx sp:%lx error:%lx", tsk->comm, task_pid_nr(tsk), regs->ip, regs->sp, error_code); print_vma_addr(KERN_CONT " in ", regs->ip); pr_cont("\n"); } force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk); } NOKPROBE_SYMBOL(do_general_protection); dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code) { #ifdef CONFIG_DYNAMIC_FTRACE /* * ftrace must be first, everything else may cause a recursive crash. * See note by declaration of modifying_ftrace_code in ftrace.c */ if (unlikely(atomic_read(&modifying_ftrace_code)) && ftrace_int3_handler(regs)) return; #endif if (poke_int3_handler(regs)) return; /* * Use ist_enter despite the fact that we don't use an IST stack. * We can be called from a kprobe in non-CONTEXT_KERNEL kernel * mode or even during context tracking state changes. * * This means that we can't schedule. That's okay. */ ist_enter(regs); RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU"); #ifdef CONFIG_KGDB_LOW_LEVEL_TRAP if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP, SIGTRAP) == NOTIFY_STOP) goto exit; #endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */ #ifdef CONFIG_KPROBES if (kprobe_int3_handler(regs)) goto exit; #endif if (notify_die(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP, SIGTRAP) == NOTIFY_STOP) goto exit; cond_local_irq_enable(regs); do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL); cond_local_irq_disable(regs); exit: ist_exit(regs); } NOKPROBE_SYMBOL(do_int3); #ifdef CONFIG_X86_64 /* * Help handler running on a per-cpu (IST or entry trampoline) stack * to switch to the normal thread stack if the interrupted code was in * user mode. 
The actual stack switch is done in entry_64.S */ asmlinkage __visible notrace struct pt_regs *sync_regs(struct pt_regs *eregs) { struct pt_regs *regs = (struct pt_regs *)this_cpu_read(cpu_current_top_of_stack) - 1; if (regs != eregs) *regs = *eregs; return regs; } NOKPROBE_SYMBOL(sync_regs); struct bad_iret_stack { void *error_entry_ret; struct pt_regs regs; }; asmlinkage __visible notrace struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s) { /* * This is called from entry_64.S early in handling a fault * caused by a bad iret to user mode. To handle the fault * correctly, we want to move our stack frame to where it would * be had we entered directly on the entry stack (rather than * just below the IRET frame) and we want to pretend that the * exception came from the IRET target. */ struct bad_iret_stack *new_stack = (struct bad_iret_stack *)this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1; /* Copy the IRET target to the new stack. */ memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8); /* Copy the remainder of the stack from the current stack. */ memmove(new_stack, s, offsetof(struct bad_iret_stack, regs.ip)); BUG_ON(!user_mode(&new_stack->regs)); return new_stack; } NOKPROBE_SYMBOL(fixup_bad_iret); #endif static bool is_sysenter_singlestep(struct pt_regs *regs) { /* * We don't try for precision here. If we're anywhere in the region of * code that can be single-stepped in the SYSENTER entry path, then * assume that this is a useless single-step trap due to SYSENTER * being invoked with TF set. (We don't know in advance exactly * which instructions will be hit because BTF could plausibly * be set.) */ #ifdef CONFIG_X86_32 return (regs->ip - (unsigned long)__begin_SYSENTER_singlestep_region) < (unsigned long)__end_SYSENTER_singlestep_region - (unsigned long)__begin_SYSENTER_singlestep_region; #elif defined(CONFIG_IA32_EMULATION) return (regs->ip - (unsigned long)entry_SYSENTER_compat) < (unsigned long)__end_entry_SYSENTER_compat - (unsigned long)entry_SYSENTER_compat; #else return false; #endif } /* * Our handling of the processor debug registers is non-trivial. * We do not clear them on entry and exit from the kernel. Therefore * it is possible to get a watchpoint trap here from inside the kernel. * However, the code in ./ptrace.c has ensured that the user can * only set watchpoints on userspace addresses. Therefore the in-kernel * watchpoint trap can only occur in code which is reading/writing * from user space. Such code must not hold kernel locks (since it * can equally take a page fault), therefore it is safe to call * force_sig_info even though that claims and releases locks. * * Code in ./signal.c ensures that the debug control register * is restored before we deliver any signal, and therefore that * user code runs with the correct debug control register even though * we clear it here. * * Being careful here means that we don't have to be as careful in a * lot of more complicated places (task switching can be a bit lazy * about restoring all the debug state, and ptrace doesn't have to * find every occurrence of the TF bit that could be saved away even * by user code) * * May run on IST stack. */ dotraplinkage void do_debug(struct pt_regs *regs, long error_code) { struct task_struct *tsk = current; int user_icebp = 0; unsigned long dr6; int si_code; ist_enter(regs); get_debugreg(dr6, 6); /* * The Intel SDM says: * * Certain debug exceptions may clear bits 0-3. The remaining * contents of the DR6 register are never cleared by the * processor. 
To avoid confusion in identifying debug * exceptions, debug handlers should clear the register before * returning to the interrupted task. * * Keep it simple: clear DR6 immediately. */ set_debugreg(0, 6); /* Filter out all the reserved bits which are preset to 1 */ dr6 &= ~DR6_RESERVED; /* * The SDM says "The processor clears the BTF flag when it * generates a debug exception." Clear TIF_BLOCKSTEP to keep * TIF_BLOCKSTEP in sync with the hardware BTF flag. */ clear_tsk_thread_flag(tsk, TIF_BLOCKSTEP); if (unlikely(!user_mode(regs) && (dr6 & DR_STEP) && is_sysenter_singlestep(regs))) { dr6 &= ~DR_STEP; if (!dr6) goto exit; /* * else we might have gotten a single-step trap and hit a * watchpoint at the same time, in which case we should fall * through and handle the watchpoint. */ } /* * If dr6 has no reason to give us about the origin of this trap, * then it's very likely the result of an icebp/int01 trap. * User wants a sigtrap for that. */ if (!dr6 && user_mode(regs)) user_icebp = 1; /* Store the virtualized DR6 value */ tsk->thread.debugreg6 = dr6; #ifdef CONFIG_KPROBES if (kprobe_debug_handler(regs)) goto exit; #endif if (notify_die(DIE_DEBUG, "debug", regs, (long)&dr6, error_code, SIGTRAP) == NOTIFY_STOP) goto exit; /* * Let others (NMI) know that the debug stack is in use * as we may switch to the interrupt stack. */ debug_stack_usage_inc(); /* It's safe to allow irq's after DR6 has been saved */ cond_local_irq_enable(regs); if (v8086_mode(regs)) { handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, X86_TRAP_DB); cond_local_irq_disable(regs); debug_stack_usage_dec(); goto exit; } if (WARN_ON_ONCE((dr6 & DR_STEP) && !user_mode(regs))) { /* * Historical junk that used to handle SYSENTER single-stepping. * This should be unreachable now. If we survive for a while * without anyone hitting this warning, we'll turn this into * an oops. */ tsk->thread.debugreg6 &= ~DR_STEP; set_tsk_thread_flag(tsk, TIF_SINGLESTEP); regs->flags &= ~X86_EFLAGS_TF; } si_code = get_si_code(tsk->thread.debugreg6); if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp) send_sigtrap(tsk, regs, error_code, si_code); cond_local_irq_disable(regs); debug_stack_usage_dec(); exit: ist_exit(regs); } NOKPROBE_SYMBOL(do_debug); /* * Note that we play around with the 'TS' bit in an attempt to get * the correct behaviour even in the presence of the asynchronous * IRQ13 behaviour */ static void math_error(struct pt_regs *regs, int error_code, int trapnr) { struct task_struct *task = current; struct fpu *fpu = &task->thread.fpu; siginfo_t info; char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" : "simd exception"; if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, SIGFPE) == NOTIFY_STOP) return; cond_local_irq_enable(regs); if (!user_mode(regs)) { if (!fixup_exception(regs, trapnr)) { task->thread.error_code = error_code; task->thread.trap_nr = trapnr; die(str, regs, error_code); } return; } /* * Save the info for the exception handler and clear the error. 
*/ fpu__save(fpu); task->thread.trap_nr = trapnr; task->thread.error_code = error_code; info.si_signo = SIGFPE; info.si_errno = 0; info.si_addr = (void __user *)uprobe_get_trap_addr(regs); info.si_code = fpu__exception_code(fpu, trapnr); /* Retry when we get spurious exceptions: */ if (!info.si_code) return; force_sig_info(SIGFPE, &info, task); } dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code) { RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU"); math_error(regs, error_code, X86_TRAP_MF); } dotraplinkage void do_simd_coprocessor_error(struct pt_regs *regs, long error_code) { RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU"); math_error(regs, error_code, X86_TRAP_XF); } dotraplinkage void do_spurious_interrupt_bug(struct pt_regs *regs, long error_code) { cond_local_irq_enable(regs); } dotraplinkage void do_device_not_available(struct pt_regs *regs, long error_code) { unsigned long cr0; RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU"); #ifdef CONFIG_MATH_EMULATION if (!boot_cpu_has(X86_FEATURE_FPU) && (read_cr0() & X86_CR0_EM)) { struct math_emu_info info = { }; cond_local_irq_enable(regs); info.regs = regs; math_emulate(&info); return; } #endif /* This should not happen. */ cr0 = read_cr0(); if (WARN(cr0 & X86_CR0_TS, "CR0.TS was set")) { /* Try to fix it up and carry on. */ write_cr0(cr0 & ~X86_CR0_TS); } else { /* * Something terrible happened, and we're better off trying * to kill the task than getting stuck in a never-ending * loop of #NM faults. */ die("unexpected #NM exception", regs, error_code); } } NOKPROBE_SYMBOL(do_device_not_available); #ifdef CONFIG_X86_32 dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code) { siginfo_t info; RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU"); local_irq_enable(); info.si_signo = SIGILL; info.si_errno = 0; info.si_code = ILL_BADSTK; info.si_addr = NULL; if (notify_die(DIE_TRAP, "iret exception", regs, error_code, X86_TRAP_IRET, SIGILL) != NOTIFY_STOP) { do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, error_code, &info); } } #endif void __init trap_init(void) { /* Init cpu_entry_area before IST entries are set up */ setup_cpu_entry_areas(); idt_setup_traps(); /* * Set the IDT descriptor to a fixed read-only location, so that the * "sidt" instruction will not leak the location of the kernel, and * to defend the IDT against arbitrary memory write vulnerabilities. * It will be reloaded in cpu_init() */ cea_set_pte(CPU_ENTRY_AREA_RO_IDT_VADDR, __pa_symbol(idt_table), PAGE_KERNEL_RO); idt_descr.address = CPU_ENTRY_AREA_RO_IDT; /* * Should be a barrier for any external CPU state: */ cpu_init(); idt_setup_ist_traps(); x86_init.irqs.trap_init(); idt_setup_debugidt_traps(); }
./CrossVul/dataset_final_sorted/CWE-362/c/good_677_2
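fill_trap_info() above maps a divide error (#DE) to SIGFPE with si_code = FPE_INTDIV and si_addr pointing at the trapping instruction. The short userspace sketch below is not from the file above; it only demonstrates those siginfo fields arriving in a handler. printf() inside a signal handler is not async-signal-safe, which is tolerable here only because this is a throwaway demo, and the sigsetjmp/siglongjmp pair exists solely so the faulting instruction is not restarted forever after the handler returns.

/* Userspace sketch: observe the siginfo built for a #DE trap. */
#define _GNU_SOURCE
#include <setjmp.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>

static sigjmp_buf env;

static void fpe_handler(int sig, siginfo_t *info, void *ucontext)
{
	(void)ucontext;
	/* Not async-signal-safe; acceptable only for this demo. */
	printf("signal %d, si_code %d (FPE_INTDIV=%d), fault ip %p\n",
	       sig, info->si_code, FPE_INTDIV, info->si_addr);
	siglongjmp(env, 1);	/* skip past the faulting instruction */
}

int main(void)
{
	struct sigaction sa;
	volatile int zero = 0;	/* volatile so the division happens at run time */

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = fpe_handler;
	sa.sa_flags = SA_SIGINFO;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGFPE, &sa, NULL);

	if (sigsetjmp(env, 1) == 0) {
		volatile int r = 1 / zero;	/* raises #DE -> do_divide_error() */
		(void)r;
	}
	return 0;
}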
crossvul-cpp_data_good_1865_0
/* * Functions to sequence FLUSH and FUA writes. * * Copyright (C) 2011 Max Planck Institute for Gravitational Physics * Copyright (C) 2011 Tejun Heo <tj@kernel.org> * * This file is released under the GPLv2. * * REQ_{FLUSH|FUA} requests are decomposed to sequences consisted of three * optional steps - PREFLUSH, DATA and POSTFLUSH - according to the request * properties and hardware capability. * * If a request doesn't have data, only REQ_FLUSH makes sense, which * indicates a simple flush request. If there is data, REQ_FLUSH indicates * that the device cache should be flushed before the data is executed, and * REQ_FUA means that the data must be on non-volatile media on request * completion. * * If the device doesn't have writeback cache, FLUSH and FUA don't make any * difference. The requests are either completed immediately if there's no * data or executed as normal requests otherwise. * * If the device has writeback cache and supports FUA, REQ_FLUSH is * translated to PREFLUSH but REQ_FUA is passed down directly with DATA. * * If the device has writeback cache and doesn't support FUA, REQ_FLUSH is * translated to PREFLUSH and REQ_FUA to POSTFLUSH. * * The actual execution of flush is double buffered. Whenever a request * needs to execute PRE or POSTFLUSH, it queues at * fq->flush_queue[fq->flush_pending_idx]. Once certain criteria are met, a * flush is issued and the pending_idx is toggled. When the flush * completes, all the requests which were pending are proceeded to the next * step. This allows arbitrary merging of different types of FLUSH/FUA * requests. * * Currently, the following conditions are used to determine when to issue * flush. * * C1. At any given time, only one flush shall be in progress. This makes * double buffering sufficient. * * C2. Flush is deferred if any request is executing DATA of its sequence. * This avoids issuing separate POSTFLUSHes for requests which shared * PREFLUSH. * * C3. The second condition is ignored if there is a request which has * waited longer than FLUSH_PENDING_TIMEOUT. This is to avoid * starvation in the unlikely case where there are continuous stream of * FUA (without FLUSH) requests. * * For devices which support FUA, it isn't clear whether C2 (and thus C3) * is beneficial. * * Note that a sequenced FLUSH/FUA request with DATA is completed twice. * Once while executing DATA and again after the whole sequence is * complete. The first completion updates the contained bio but doesn't * finish it so that the bio submitter is notified only after the whole * sequence is complete. This is implemented by testing REQ_FLUSH_SEQ in * req_bio_endio(). * * The above peculiarity requires that each FLUSH/FUA request has only one * bio attached to it, which is guaranteed as they aren't allowed to be * merged in the usual way. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/bio.h> #include <linux/blkdev.h> #include <linux/gfp.h> #include <linux/blk-mq.h> #include "blk.h" #include "blk-mq.h" #include "blk-mq-tag.h" /* FLUSH/FUA sequences */ enum { REQ_FSEQ_PREFLUSH = (1 << 0), /* pre-flushing in progress */ REQ_FSEQ_DATA = (1 << 1), /* data write in progress */ REQ_FSEQ_POSTFLUSH = (1 << 2), /* post-flushing in progress */ REQ_FSEQ_DONE = (1 << 3), REQ_FSEQ_ACTIONS = REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA | REQ_FSEQ_POSTFLUSH, /* * If flush has been pending longer than the following timeout, * it's issued even if flush_data requests are still in flight. 
*/ FLUSH_PENDING_TIMEOUT = 5 * HZ, }; static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq); static unsigned int blk_flush_policy(unsigned int fflags, struct request *rq) { unsigned int policy = 0; if (blk_rq_sectors(rq)) policy |= REQ_FSEQ_DATA; if (fflags & REQ_FLUSH) { if (rq->cmd_flags & REQ_FLUSH) policy |= REQ_FSEQ_PREFLUSH; if (!(fflags & REQ_FUA) && (rq->cmd_flags & REQ_FUA)) policy |= REQ_FSEQ_POSTFLUSH; } return policy; } static unsigned int blk_flush_cur_seq(struct request *rq) { return 1 << ffz(rq->flush.seq); } static void blk_flush_restore_request(struct request *rq) { /* * After flush data completion, @rq->bio is %NULL but we need to * complete the bio again. @rq->biotail is guaranteed to equal the * original @rq->bio. Restore it. */ rq->bio = rq->biotail; /* make @rq a normal request */ rq->cmd_flags &= ~REQ_FLUSH_SEQ; rq->end_io = rq->flush.saved_end_io; } static bool blk_flush_queue_rq(struct request *rq, bool add_front) { if (rq->q->mq_ops) { struct request_queue *q = rq->q; blk_mq_add_to_requeue_list(rq, add_front); blk_mq_kick_requeue_list(q); return false; } else { if (add_front) list_add(&rq->queuelist, &rq->q->queue_head); else list_add_tail(&rq->queuelist, &rq->q->queue_head); return true; } } /** * blk_flush_complete_seq - complete flush sequence * @rq: FLUSH/FUA request being sequenced * @fq: flush queue * @seq: sequences to complete (mask of %REQ_FSEQ_*, can be zero) * @error: whether an error occurred * * @rq just completed @seq part of its flush sequence, record the * completion and trigger the next step. * * CONTEXT: * spin_lock_irq(q->queue_lock or fq->mq_flush_lock) * * RETURNS: * %true if requests were added to the dispatch queue, %false otherwise. */ static bool blk_flush_complete_seq(struct request *rq, struct blk_flush_queue *fq, unsigned int seq, int error) { struct request_queue *q = rq->q; struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx]; bool queued = false, kicked; BUG_ON(rq->flush.seq & seq); rq->flush.seq |= seq; if (likely(!error)) seq = blk_flush_cur_seq(rq); else seq = REQ_FSEQ_DONE; switch (seq) { case REQ_FSEQ_PREFLUSH: case REQ_FSEQ_POSTFLUSH: /* queue for flush */ if (list_empty(pending)) fq->flush_pending_since = jiffies; list_move_tail(&rq->flush.list, pending); break; case REQ_FSEQ_DATA: list_move_tail(&rq->flush.list, &fq->flush_data_in_flight); queued = blk_flush_queue_rq(rq, true); break; case REQ_FSEQ_DONE: /* * @rq was previously adjusted by blk_flush_issue() for * flush sequencing and may already have gone through the * flush data request completion path. Restore @rq for * normal completion and end it. 
*/ BUG_ON(!list_empty(&rq->queuelist)); list_del_init(&rq->flush.list); blk_flush_restore_request(rq); if (q->mq_ops) blk_mq_end_request(rq, error); else __blk_end_request_all(rq, error); break; default: BUG(); } kicked = blk_kick_flush(q, fq); return kicked | queued; } static void flush_end_io(struct request *flush_rq, int error) { struct request_queue *q = flush_rq->q; struct list_head *running; bool queued = false; struct request *rq, *n; unsigned long flags = 0; struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx); if (q->mq_ops) { struct blk_mq_hw_ctx *hctx; /* release the tag's ownership to the req cloned from */ spin_lock_irqsave(&fq->mq_flush_lock, flags); hctx = q->mq_ops->map_queue(q, flush_rq->mq_ctx->cpu); blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq); flush_rq->tag = -1; } running = &fq->flush_queue[fq->flush_running_idx]; BUG_ON(fq->flush_pending_idx == fq->flush_running_idx); /* account completion of the flush request */ fq->flush_running_idx ^= 1; if (!q->mq_ops) elv_completed_request(q, flush_rq); /* and push the waiting requests to the next stage */ list_for_each_entry_safe(rq, n, running, flush.list) { unsigned int seq = blk_flush_cur_seq(rq); BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH); queued |= blk_flush_complete_seq(rq, fq, seq, error); } /* * Kick the queue to avoid stall for two cases: * 1. Moving a request silently to empty queue_head may stall the * queue. * 2. When flush request is running in non-queueable queue, the * queue is hold. Restart the queue after flush request is finished * to avoid stall. * This function is called from request completion path and calling * directly into request_fn may confuse the driver. Always use * kblockd. */ if (queued || fq->flush_queue_delayed) { WARN_ON(q->mq_ops); blk_run_queue_async(q); } fq->flush_queue_delayed = 0; if (q->mq_ops) spin_unlock_irqrestore(&fq->mq_flush_lock, flags); } /** * blk_kick_flush - consider issuing flush request * @q: request_queue being kicked * @fq: flush queue * * Flush related states of @q have changed, consider issuing flush request. * Please read the comment at the top of this file for more info. * * CONTEXT: * spin_lock_irq(q->queue_lock or fq->mq_flush_lock) * * RETURNS: * %true if flush was issued, %false otherwise. */ static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq) { struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx]; struct request *first_rq = list_first_entry(pending, struct request, flush.list); struct request *flush_rq = fq->flush_rq; /* C1 described at the top of this file */ if (fq->flush_pending_idx != fq->flush_running_idx || list_empty(pending)) return false; /* C2 and C3 */ if (!list_empty(&fq->flush_data_in_flight) && time_before(jiffies, fq->flush_pending_since + FLUSH_PENDING_TIMEOUT)) return false; /* * Issue flush and toggle pending_idx. This makes pending_idx * different from running_idx, which means flush is in flight. */ fq->flush_pending_idx ^= 1; blk_rq_init(q, flush_rq); /* * Borrow tag from the first request since they can't * be in flight at the same time. And acquire the tag's * ownership for flush req. 
*/ if (q->mq_ops) { struct blk_mq_hw_ctx *hctx; flush_rq->mq_ctx = first_rq->mq_ctx; flush_rq->tag = first_rq->tag; fq->orig_rq = first_rq; hctx = q->mq_ops->map_queue(q, first_rq->mq_ctx->cpu); blk_mq_tag_set_rq(hctx, first_rq->tag, flush_rq); } flush_rq->cmd_type = REQ_TYPE_FS; flush_rq->cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ; flush_rq->rq_disk = first_rq->rq_disk; flush_rq->end_io = flush_end_io; return blk_flush_queue_rq(flush_rq, false); } static void flush_data_end_io(struct request *rq, int error) { struct request_queue *q = rq->q; struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL); /* * After populating an empty queue, kick it to avoid stall. Read * the comment in flush_end_io(). */ if (blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error)) blk_run_queue_async(q); } static void mq_flush_data_end_io(struct request *rq, int error) { struct request_queue *q = rq->q; struct blk_mq_hw_ctx *hctx; struct blk_mq_ctx *ctx = rq->mq_ctx; unsigned long flags; struct blk_flush_queue *fq = blk_get_flush_queue(q, ctx); hctx = q->mq_ops->map_queue(q, ctx->cpu); /* * After populating an empty queue, kick it to avoid stall. Read * the comment in flush_end_io(). */ spin_lock_irqsave(&fq->mq_flush_lock, flags); if (blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error)) blk_mq_run_hw_queue(hctx, true); spin_unlock_irqrestore(&fq->mq_flush_lock, flags); } /** * blk_insert_flush - insert a new FLUSH/FUA request * @rq: request to insert * * To be called from __elv_add_request() for %ELEVATOR_INSERT_FLUSH insertions. * or __blk_mq_run_hw_queue() to dispatch request. * @rq is being submitted. Analyze what needs to be done and put it on the * right queue. * * CONTEXT: * spin_lock_irq(q->queue_lock) in !mq case */ void blk_insert_flush(struct request *rq) { struct request_queue *q = rq->q; unsigned int fflags = q->flush_flags; /* may change, cache */ unsigned int policy = blk_flush_policy(fflags, rq); struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx); /* * @policy now records what operations need to be done. Adjust * REQ_FLUSH and FUA for the driver. */ rq->cmd_flags &= ~REQ_FLUSH; if (!(fflags & REQ_FUA)) rq->cmd_flags &= ~REQ_FUA; /* * An empty flush handed down from a stacking driver may * translate into nothing if the underlying device does not * advertise a write-back cache. In this case, simply * complete the request. */ if (!policy) { if (q->mq_ops) blk_mq_end_request(rq, 0); else __blk_end_bidi_request(rq, 0, 0, 0); return; } BUG_ON(rq->bio != rq->biotail); /*assumes zero or single bio rq */ /* * If there's data but flush is not necessary, the request can be * processed directly without going through flush machinery. Queue * for normal execution. */ if ((policy & REQ_FSEQ_DATA) && !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) { if (q->mq_ops) { blk_mq_insert_request(rq, false, false, true); } else list_add_tail(&rq->queuelist, &q->queue_head); return; } /* * @rq should go through flush machinery. Mark it part of flush * sequence and submit for further processing. 
*/ memset(&rq->flush, 0, sizeof(rq->flush)); INIT_LIST_HEAD(&rq->flush.list); rq->cmd_flags |= REQ_FLUSH_SEQ; rq->flush.saved_end_io = rq->end_io; /* Usually NULL */ if (q->mq_ops) { rq->end_io = mq_flush_data_end_io; spin_lock_irq(&fq->mq_flush_lock); blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0); spin_unlock_irq(&fq->mq_flush_lock); return; } rq->end_io = flush_data_end_io; blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0); } /** * blkdev_issue_flush - queue a flush * @bdev: blockdev to issue flush for * @gfp_mask: memory allocation flags (for bio_alloc) * @error_sector: error sector * * Description: * Issue a flush for the block device in question. Caller can supply * room for storing the error offset in case of a flush error, if they * wish to. If WAIT flag is not passed then caller may check only what * request was pushed in some internal queue for later handling. */ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask, sector_t *error_sector) { struct request_queue *q; struct bio *bio; int ret = 0; if (bdev->bd_disk == NULL) return -ENXIO; q = bdev_get_queue(bdev); if (!q) return -ENXIO; /* * some block devices may not have their queue correctly set up here * (e.g. loop device without a backing file) and so issuing a flush * here will panic. Ensure there is a request function before issuing * the flush. */ if (!q->make_request_fn) return -ENXIO; bio = bio_alloc(gfp_mask, 0); bio->bi_bdev = bdev; ret = submit_bio_wait(WRITE_FLUSH, bio); /* * The driver must store the error location in ->bi_sector, if * it supports it. For non-stacked drivers, this should be * copied from blk_rq_pos(rq). */ if (error_sector) *error_sector = bio->bi_iter.bi_sector; bio_put(bio); return ret; } EXPORT_SYMBOL(blkdev_issue_flush); struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q, int node, int cmd_size) { struct blk_flush_queue *fq; int rq_sz = sizeof(struct request); fq = kzalloc_node(sizeof(*fq), GFP_KERNEL, node); if (!fq) goto fail; if (q->mq_ops) { spin_lock_init(&fq->mq_flush_lock); rq_sz = round_up(rq_sz + cmd_size, cache_line_size()); } fq->flush_rq = kzalloc_node(rq_sz, GFP_KERNEL, node); if (!fq->flush_rq) goto fail_rq; INIT_LIST_HEAD(&fq->flush_queue[0]); INIT_LIST_HEAD(&fq->flush_queue[1]); INIT_LIST_HEAD(&fq->flush_data_in_flight); return fq; fail_rq: kfree(fq); fail: return NULL; } void blk_free_flush_queue(struct blk_flush_queue *fq) { /* bio based request queue hasn't flush queue */ if (!fq) return; kfree(fq->flush_rq); kfree(fq); }
./CrossVul/dataset_final_sorted/CWE-362/c/good_1865_0
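The PREFLUSH/DATA/POSTFLUSH decomposition that the header comment describes and blk_flush_policy() implements can be illustrated outside the kernel. The sketch below is not kernel code: the Q_* and R_* flags are local stand-ins for the queue's flush_flags and the request's REQ_FLUSH/REQ_FUA/data bits, chosen only to mirror the same decision logic.

/* Standalone sketch mirroring blk_flush_policy() above (flag values are
 * local stand-ins, not the kernel's definitions). */
#include <stdio.h>

#define Q_FLUSH (1u << 0)	/* device has a writeback cache */
#define Q_FUA   (1u << 1)	/* device supports FUA natively */

#define R_FLUSH (1u << 0)	/* request asked for a preflush */
#define R_FUA   (1u << 1)	/* request asked for FUA completion */
#define R_DATA  (1u << 2)	/* request carries data */

static unsigned int flush_policy(unsigned int qflags, unsigned int rqflags)
{
	unsigned int policy = 0;

	if (rqflags & R_DATA)
		policy |= R_DATA;
	if (qflags & Q_FLUSH) {
		if (rqflags & R_FLUSH)
			policy |= R_FLUSH;		/* PREFLUSH step */
		if (!(qflags & Q_FUA) && (rqflags & R_FUA))
			policy |= R_FUA;		/* emulate FUA with POSTFLUSH */
	}
	return policy;
}

int main(void)
{
	/* Writeback cache without FUA: a FLUSH|FUA data write needs all 3 steps. */
	unsigned int p = flush_policy(Q_FLUSH, R_FLUSH | R_FUA | R_DATA);

	printf("preflush=%d data=%d postflush=%d\n",
	       !!(p & R_FLUSH), !!(p & R_DATA), !!(p & R_FUA));
	return 0;
}

With a writeback cache and no native FUA, a FLUSH|FUA data write expands to all three steps, which is the sequence the double-buffered flush_queue[] machinery above then walks a request through.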
crossvul-cpp_data_good_3026_0
/* * fs/f2fs/node.c * * Copyright (c) 2012 Samsung Electronics Co., Ltd. * http://www.samsung.com/ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/fs.h> #include <linux/f2fs_fs.h> #include <linux/mpage.h> #include <linux/backing-dev.h> #include <linux/blkdev.h> #include <linux/pagevec.h> #include <linux/swap.h> #include "f2fs.h" #include "node.h" #include "segment.h" #include "trace.h" #include <trace/events/f2fs.h> #define on_build_free_nids(nmi) mutex_is_locked(&nm_i->build_lock) static struct kmem_cache *nat_entry_slab; static struct kmem_cache *free_nid_slab; static struct kmem_cache *nat_entry_set_slab; bool available_free_memory(struct f2fs_sb_info *sbi, int type) { struct f2fs_nm_info *nm_i = NM_I(sbi); struct sysinfo val; unsigned long avail_ram; unsigned long mem_size = 0; bool res = false; si_meminfo(&val); /* only uses low memory */ avail_ram = val.totalram - val.totalhigh; /* * give 25%, 25%, 50%, 50%, 50% memory for each components respectively */ if (type == FREE_NIDS) { mem_size = (nm_i->nid_cnt[FREE_NID_LIST] * sizeof(struct free_nid)) >> PAGE_SHIFT; res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2); } else if (type == NAT_ENTRIES) { mem_size = (nm_i->nat_cnt * sizeof(struct nat_entry)) >> PAGE_SHIFT; res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2); if (excess_cached_nats(sbi)) res = false; } else if (type == DIRTY_DENTS) { if (sbi->sb->s_bdi->wb.dirty_exceeded) return false; mem_size = get_pages(sbi, F2FS_DIRTY_DENTS); res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1); } else if (type == INO_ENTRIES) { int i; for (i = 0; i <= UPDATE_INO; i++) mem_size += sbi->im[i].ino_num * sizeof(struct ino_entry); mem_size >>= PAGE_SHIFT; res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1); } else if (type == EXTENT_CACHE) { mem_size = (atomic_read(&sbi->total_ext_tree) * sizeof(struct extent_tree) + atomic_read(&sbi->total_ext_node) * sizeof(struct extent_node)) >> PAGE_SHIFT; res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1); } else { if (!sbi->sb->s_bdi->wb.dirty_exceeded) return true; } return res; } static void clear_node_page_dirty(struct page *page) { struct address_space *mapping = page->mapping; unsigned int long flags; if (PageDirty(page)) { spin_lock_irqsave(&mapping->tree_lock, flags); radix_tree_tag_clear(&mapping->page_tree, page_index(page), PAGECACHE_TAG_DIRTY); spin_unlock_irqrestore(&mapping->tree_lock, flags); clear_page_dirty_for_io(page); dec_page_count(F2FS_M_SB(mapping), F2FS_DIRTY_NODES); } ClearPageUptodate(page); } static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid) { pgoff_t index = current_nat_addr(sbi, nid); return get_meta_page(sbi, index); } static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid) { struct page *src_page; struct page *dst_page; pgoff_t src_off; pgoff_t dst_off; void *src_addr; void *dst_addr; struct f2fs_nm_info *nm_i = NM_I(sbi); src_off = current_nat_addr(sbi, nid); dst_off = next_nat_addr(sbi, src_off); /* get current nat block page with lock */ src_page = get_meta_page(sbi, src_off); dst_page = grab_meta_page(sbi, dst_off); f2fs_bug_on(sbi, PageDirty(src_page)); src_addr = page_address(src_page); dst_addr = page_address(dst_page); memcpy(dst_addr, src_addr, PAGE_SIZE); set_page_dirty(dst_page); f2fs_put_page(src_page, 1); set_to_next_nat(nm_i, nid); return dst_page; } static struct 
nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n) { return radix_tree_lookup(&nm_i->nat_root, n); } static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t start, unsigned int nr, struct nat_entry **ep) { return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr); } static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e) { list_del(&e->list); radix_tree_delete(&nm_i->nat_root, nat_get_nid(e)); nm_i->nat_cnt--; kmem_cache_free(nat_entry_slab, e); } static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i, struct nat_entry *ne) { nid_t set = NAT_BLOCK_OFFSET(ne->ni.nid); struct nat_entry_set *head; if (get_nat_flag(ne, IS_DIRTY)) return; head = radix_tree_lookup(&nm_i->nat_set_root, set); if (!head) { head = f2fs_kmem_cache_alloc(nat_entry_set_slab, GFP_NOFS); INIT_LIST_HEAD(&head->entry_list); INIT_LIST_HEAD(&head->set_list); head->set = set; head->entry_cnt = 0; f2fs_radix_tree_insert(&nm_i->nat_set_root, set, head); } list_move_tail(&ne->list, &head->entry_list); nm_i->dirty_nat_cnt++; head->entry_cnt++; set_nat_flag(ne, IS_DIRTY, true); } static void __clear_nat_cache_dirty(struct f2fs_nm_info *nm_i, struct nat_entry_set *set, struct nat_entry *ne) { list_move_tail(&ne->list, &nm_i->nat_entries); set_nat_flag(ne, IS_DIRTY, false); set->entry_cnt--; nm_i->dirty_nat_cnt--; } static unsigned int __gang_lookup_nat_set(struct f2fs_nm_info *nm_i, nid_t start, unsigned int nr, struct nat_entry_set **ep) { return radix_tree_gang_lookup(&nm_i->nat_set_root, (void **)ep, start, nr); } int need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid) { struct f2fs_nm_info *nm_i = NM_I(sbi); struct nat_entry *e; bool need = false; down_read(&nm_i->nat_tree_lock); e = __lookup_nat_cache(nm_i, nid); if (e) { if (!get_nat_flag(e, IS_CHECKPOINTED) && !get_nat_flag(e, HAS_FSYNCED_INODE)) need = true; } up_read(&nm_i->nat_tree_lock); return need; } bool is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid) { struct f2fs_nm_info *nm_i = NM_I(sbi); struct nat_entry *e; bool is_cp = true; down_read(&nm_i->nat_tree_lock); e = __lookup_nat_cache(nm_i, nid); if (e && !get_nat_flag(e, IS_CHECKPOINTED)) is_cp = false; up_read(&nm_i->nat_tree_lock); return is_cp; } bool need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino) { struct f2fs_nm_info *nm_i = NM_I(sbi); struct nat_entry *e; bool need_update = true; down_read(&nm_i->nat_tree_lock); e = __lookup_nat_cache(nm_i, ino); if (e && get_nat_flag(e, HAS_LAST_FSYNC) && (get_nat_flag(e, IS_CHECKPOINTED) || get_nat_flag(e, HAS_FSYNCED_INODE))) need_update = false; up_read(&nm_i->nat_tree_lock); return need_update; } static struct nat_entry *grab_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid, bool no_fail) { struct nat_entry *new; if (no_fail) { new = f2fs_kmem_cache_alloc(nat_entry_slab, GFP_NOFS); f2fs_radix_tree_insert(&nm_i->nat_root, nid, new); } else { new = kmem_cache_alloc(nat_entry_slab, GFP_NOFS); if (!new) return NULL; if (radix_tree_insert(&nm_i->nat_root, nid, new)) { kmem_cache_free(nat_entry_slab, new); return NULL; } } memset(new, 0, sizeof(struct nat_entry)); nat_set_nid(new, nid); nat_reset_flag(new); list_add_tail(&new->list, &nm_i->nat_entries); nm_i->nat_cnt++; return new; } static void cache_nat_entry(struct f2fs_sb_info *sbi, nid_t nid, struct f2fs_nat_entry *ne) { struct f2fs_nm_info *nm_i = NM_I(sbi); struct nat_entry *e; e = __lookup_nat_cache(nm_i, nid); if (!e) { e = grab_nat_entry(nm_i, nid, false); if (e) node_info_from_raw_nat(&e->ni, ne); } else { 
f2fs_bug_on(sbi, nat_get_ino(e) != le32_to_cpu(ne->ino) || nat_get_blkaddr(e) != le32_to_cpu(ne->block_addr) || nat_get_version(e) != ne->version); } } static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni, block_t new_blkaddr, bool fsync_done) { struct f2fs_nm_info *nm_i = NM_I(sbi); struct nat_entry *e; down_write(&nm_i->nat_tree_lock); e = __lookup_nat_cache(nm_i, ni->nid); if (!e) { e = grab_nat_entry(nm_i, ni->nid, true); copy_node_info(&e->ni, ni); f2fs_bug_on(sbi, ni->blk_addr == NEW_ADDR); } else if (new_blkaddr == NEW_ADDR) { /* * when nid is reallocated, * previous nat entry can be remained in nat cache. * So, reinitialize it with new information. */ copy_node_info(&e->ni, ni); f2fs_bug_on(sbi, ni->blk_addr != NULL_ADDR); } /* sanity check */ f2fs_bug_on(sbi, nat_get_blkaddr(e) != ni->blk_addr); f2fs_bug_on(sbi, nat_get_blkaddr(e) == NULL_ADDR && new_blkaddr == NULL_ADDR); f2fs_bug_on(sbi, nat_get_blkaddr(e) == NEW_ADDR && new_blkaddr == NEW_ADDR); f2fs_bug_on(sbi, nat_get_blkaddr(e) != NEW_ADDR && nat_get_blkaddr(e) != NULL_ADDR && new_blkaddr == NEW_ADDR); /* increment version no as node is removed */ if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) { unsigned char version = nat_get_version(e); nat_set_version(e, inc_node_version(version)); /* in order to reuse the nid */ if (nm_i->next_scan_nid > ni->nid) nm_i->next_scan_nid = ni->nid; } /* change address */ nat_set_blkaddr(e, new_blkaddr); if (new_blkaddr == NEW_ADDR || new_blkaddr == NULL_ADDR) set_nat_flag(e, IS_CHECKPOINTED, false); __set_nat_cache_dirty(nm_i, e); /* update fsync_mark if its inode nat entry is still alive */ if (ni->nid != ni->ino) e = __lookup_nat_cache(nm_i, ni->ino); if (e) { if (fsync_done && ni->nid == ni->ino) set_nat_flag(e, HAS_FSYNCED_INODE, true); set_nat_flag(e, HAS_LAST_FSYNC, fsync_done); } up_write(&nm_i->nat_tree_lock); } int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink) { struct f2fs_nm_info *nm_i = NM_I(sbi); int nr = nr_shrink; if (!down_write_trylock(&nm_i->nat_tree_lock)) return 0; while (nr_shrink && !list_empty(&nm_i->nat_entries)) { struct nat_entry *ne; ne = list_first_entry(&nm_i->nat_entries, struct nat_entry, list); __del_from_nat_cache(nm_i, ne); nr_shrink--; } up_write(&nm_i->nat_tree_lock); return nr - nr_shrink; } /* * This function always returns success */ void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni) { struct f2fs_nm_info *nm_i = NM_I(sbi); struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA); struct f2fs_journal *journal = curseg->journal; nid_t start_nid = START_NID(nid); struct f2fs_nat_block *nat_blk; struct page *page = NULL; struct f2fs_nat_entry ne; struct nat_entry *e; int i; ni->nid = nid; /* Check nat cache */ down_read(&nm_i->nat_tree_lock); e = __lookup_nat_cache(nm_i, nid); if (e) { ni->ino = nat_get_ino(e); ni->blk_addr = nat_get_blkaddr(e); ni->version = nat_get_version(e); up_read(&nm_i->nat_tree_lock); return; } memset(&ne, 0, sizeof(struct f2fs_nat_entry)); /* Check current segment summary */ down_read(&curseg->journal_rwsem); i = lookup_journal_in_cursum(journal, NAT_JOURNAL, nid, 0); if (i >= 0) { ne = nat_in_journal(journal, i); node_info_from_raw_nat(ni, &ne); } up_read(&curseg->journal_rwsem); if (i >= 0) goto cache; /* Fill node_info from nat page */ page = get_current_nat_page(sbi, start_nid); nat_blk = (struct f2fs_nat_block *)page_address(page); ne = nat_blk->entries[nid - start_nid]; node_info_from_raw_nat(ni, &ne); f2fs_put_page(page, 1); cache: 
up_read(&nm_i->nat_tree_lock); /* cache nat entry */ down_write(&nm_i->nat_tree_lock); cache_nat_entry(sbi, nid, &ne); up_write(&nm_i->nat_tree_lock); } /* * readahead MAX_RA_NODE number of node pages. */ static void ra_node_pages(struct page *parent, int start, int n) { struct f2fs_sb_info *sbi = F2FS_P_SB(parent); struct blk_plug plug; int i, end; nid_t nid; blk_start_plug(&plug); /* Then, try readahead for siblings of the desired node */ end = start + n; end = min(end, NIDS_PER_BLOCK); for (i = start; i < end; i++) { nid = get_nid(parent, i, false); ra_node_page(sbi, nid); } blk_finish_plug(&plug); } pgoff_t get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs) { const long direct_index = ADDRS_PER_INODE(dn->inode); const long direct_blks = ADDRS_PER_BLOCK; const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK; unsigned int skipped_unit = ADDRS_PER_BLOCK; int cur_level = dn->cur_level; int max_level = dn->max_level; pgoff_t base = 0; if (!dn->max_level) return pgofs + 1; while (max_level-- > cur_level) skipped_unit *= NIDS_PER_BLOCK; switch (dn->max_level) { case 3: base += 2 * indirect_blks; case 2: base += 2 * direct_blks; case 1: base += direct_index; break; default: f2fs_bug_on(F2FS_I_SB(dn->inode), 1); } return ((pgofs - base) / skipped_unit + 1) * skipped_unit + base; } /* * The maximum depth is four. * Offset[0] will have raw inode offset. */ static int get_node_path(struct inode *inode, long block, int offset[4], unsigned int noffset[4]) { const long direct_index = ADDRS_PER_INODE(inode); const long direct_blks = ADDRS_PER_BLOCK; const long dptrs_per_blk = NIDS_PER_BLOCK; const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK; const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK; int n = 0; int level = 0; noffset[0] = 0; if (block < direct_index) { offset[n] = block; goto got; } block -= direct_index; if (block < direct_blks) { offset[n++] = NODE_DIR1_BLOCK; noffset[n] = 1; offset[n] = block; level = 1; goto got; } block -= direct_blks; if (block < direct_blks) { offset[n++] = NODE_DIR2_BLOCK; noffset[n] = 2; offset[n] = block; level = 1; goto got; } block -= direct_blks; if (block < indirect_blks) { offset[n++] = NODE_IND1_BLOCK; noffset[n] = 3; offset[n++] = block / direct_blks; noffset[n] = 4 + offset[n - 1]; offset[n] = block % direct_blks; level = 2; goto got; } block -= indirect_blks; if (block < indirect_blks) { offset[n++] = NODE_IND2_BLOCK; noffset[n] = 4 + dptrs_per_blk; offset[n++] = block / direct_blks; noffset[n] = 5 + dptrs_per_blk + offset[n - 1]; offset[n] = block % direct_blks; level = 2; goto got; } block -= indirect_blks; if (block < dindirect_blks) { offset[n++] = NODE_DIND_BLOCK; noffset[n] = 5 + (dptrs_per_blk * 2); offset[n++] = block / indirect_blks; noffset[n] = 6 + (dptrs_per_blk * 2) + offset[n - 1] * (dptrs_per_blk + 1); offset[n++] = (block / direct_blks) % dptrs_per_blk; noffset[n] = 7 + (dptrs_per_blk * 2) + offset[n - 2] * (dptrs_per_blk + 1) + offset[n - 1]; offset[n] = block % direct_blks; level = 3; goto got; } else { BUG(); } got: return level; } /* * Caller should call f2fs_put_dnode(dn). * Also, it should grab and release a rwsem by calling f2fs_lock_op() and * f2fs_unlock_op() only if ro is not set RDONLY_NODE. * In the case of RDONLY_NODE, we don't need to care about mutex. 
*/ int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode) { struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode); struct page *npage[4]; struct page *parent = NULL; int offset[4]; unsigned int noffset[4]; nid_t nids[4]; int level, i = 0; int err = 0; level = get_node_path(dn->inode, index, offset, noffset); nids[0] = dn->inode->i_ino; npage[0] = dn->inode_page; if (!npage[0]) { npage[0] = get_node_page(sbi, nids[0]); if (IS_ERR(npage[0])) return PTR_ERR(npage[0]); } /* if inline_data is set, should not report any block indices */ if (f2fs_has_inline_data(dn->inode) && index) { err = -ENOENT; f2fs_put_page(npage[0], 1); goto release_out; } parent = npage[0]; if (level != 0) nids[1] = get_nid(parent, offset[0], true); dn->inode_page = npage[0]; dn->inode_page_locked = true; /* get indirect or direct nodes */ for (i = 1; i <= level; i++) { bool done = false; if (!nids[i] && mode == ALLOC_NODE) { /* alloc new node */ if (!alloc_nid(sbi, &(nids[i]))) { err = -ENOSPC; goto release_pages; } dn->nid = nids[i]; npage[i] = new_node_page(dn, noffset[i], NULL); if (IS_ERR(npage[i])) { alloc_nid_failed(sbi, nids[i]); err = PTR_ERR(npage[i]); goto release_pages; } set_nid(parent, offset[i - 1], nids[i], i == 1); alloc_nid_done(sbi, nids[i]); done = true; } else if (mode == LOOKUP_NODE_RA && i == level && level > 1) { npage[i] = get_node_page_ra(parent, offset[i - 1]); if (IS_ERR(npage[i])) { err = PTR_ERR(npage[i]); goto release_pages; } done = true; } if (i == 1) { dn->inode_page_locked = false; unlock_page(parent); } else { f2fs_put_page(parent, 1); } if (!done) { npage[i] = get_node_page(sbi, nids[i]); if (IS_ERR(npage[i])) { err = PTR_ERR(npage[i]); f2fs_put_page(npage[0], 0); goto release_out; } } if (i < level) { parent = npage[i]; nids[i + 1] = get_nid(parent, offset[i], false); } } dn->nid = nids[level]; dn->ofs_in_node = offset[level]; dn->node_page = npage[level]; dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node); return 0; release_pages: f2fs_put_page(parent, 1); if (i > 1) f2fs_put_page(npage[0], 0); release_out: dn->inode_page = NULL; dn->node_page = NULL; if (err == -ENOENT) { dn->cur_level = i; dn->max_level = level; dn->ofs_in_node = offset[level]; } return err; } static void truncate_node(struct dnode_of_data *dn) { struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode); struct node_info ni; get_node_info(sbi, dn->nid, &ni); if (dn->inode->i_blocks == 0) { f2fs_bug_on(sbi, ni.blk_addr != NULL_ADDR); goto invalidate; } f2fs_bug_on(sbi, ni.blk_addr == NULL_ADDR); /* Deallocate node address */ invalidate_blocks(sbi, ni.blk_addr); dec_valid_node_count(sbi, dn->inode); set_node_addr(sbi, &ni, NULL_ADDR, false); if (dn->nid == dn->inode->i_ino) { remove_orphan_inode(sbi, dn->nid); dec_valid_inode_count(sbi); f2fs_inode_synced(dn->inode); } invalidate: clear_node_page_dirty(dn->node_page); set_sbi_flag(sbi, SBI_IS_DIRTY); f2fs_put_page(dn->node_page, 1); invalidate_mapping_pages(NODE_MAPPING(sbi), dn->node_page->index, dn->node_page->index); dn->node_page = NULL; trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr); } static int truncate_dnode(struct dnode_of_data *dn) { struct page *page; if (dn->nid == 0) return 1; /* get direct node */ page = get_node_page(F2FS_I_SB(dn->inode), dn->nid); if (IS_ERR(page) && PTR_ERR(page) == -ENOENT) return 1; else if (IS_ERR(page)) return PTR_ERR(page); /* Make dnode_of_data for parameter */ dn->node_page = page; dn->ofs_in_node = 0; truncate_data_blocks(dn); truncate_node(dn); return 1; } static int truncate_nodes(struct 
dnode_of_data *dn, unsigned int nofs, int ofs, int depth) { struct dnode_of_data rdn = *dn; struct page *page; struct f2fs_node *rn; nid_t child_nid; unsigned int child_nofs; int freed = 0; int i, ret; if (dn->nid == 0) return NIDS_PER_BLOCK + 1; trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr); page = get_node_page(F2FS_I_SB(dn->inode), dn->nid); if (IS_ERR(page)) { trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page)); return PTR_ERR(page); } ra_node_pages(page, ofs, NIDS_PER_BLOCK); rn = F2FS_NODE(page); if (depth < 3) { for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) { child_nid = le32_to_cpu(rn->in.nid[i]); if (child_nid == 0) continue; rdn.nid = child_nid; ret = truncate_dnode(&rdn); if (ret < 0) goto out_err; if (set_nid(page, i, 0, false)) dn->node_changed = true; } } else { child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1; for (i = ofs; i < NIDS_PER_BLOCK; i++) { child_nid = le32_to_cpu(rn->in.nid[i]); if (child_nid == 0) { child_nofs += NIDS_PER_BLOCK + 1; continue; } rdn.nid = child_nid; ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1); if (ret == (NIDS_PER_BLOCK + 1)) { if (set_nid(page, i, 0, false)) dn->node_changed = true; child_nofs += ret; } else if (ret < 0 && ret != -ENOENT) { goto out_err; } } freed = child_nofs; } if (!ofs) { /* remove current indirect node */ dn->node_page = page; truncate_node(dn); freed++; } else { f2fs_put_page(page, 1); } trace_f2fs_truncate_nodes_exit(dn->inode, freed); return freed; out_err: f2fs_put_page(page, 1); trace_f2fs_truncate_nodes_exit(dn->inode, ret); return ret; } static int truncate_partial_nodes(struct dnode_of_data *dn, struct f2fs_inode *ri, int *offset, int depth) { struct page *pages[2]; nid_t nid[3]; nid_t child_nid; int err = 0; int i; int idx = depth - 2; nid[0] = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]); if (!nid[0]) return 0; /* get indirect nodes in the path */ for (i = 0; i < idx + 1; i++) { /* reference count'll be increased */ pages[i] = get_node_page(F2FS_I_SB(dn->inode), nid[i]); if (IS_ERR(pages[i])) { err = PTR_ERR(pages[i]); idx = i - 1; goto fail; } nid[i + 1] = get_nid(pages[i], offset[i + 1], false); } ra_node_pages(pages[idx], offset[idx + 1], NIDS_PER_BLOCK); /* free direct nodes linked to a partial indirect node */ for (i = offset[idx + 1]; i < NIDS_PER_BLOCK; i++) { child_nid = get_nid(pages[idx], i, false); if (!child_nid) continue; dn->nid = child_nid; err = truncate_dnode(dn); if (err < 0) goto fail; if (set_nid(pages[idx], i, 0, false)) dn->node_changed = true; } if (offset[idx + 1] == 0) { dn->node_page = pages[idx]; dn->nid = nid[idx]; truncate_node(dn); } else { f2fs_put_page(pages[idx], 1); } offset[idx]++; offset[idx + 1] = 0; idx--; fail: for (i = idx; i >= 0; i--) f2fs_put_page(pages[i], 1); trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err); return err; } /* * All the block addresses of data and nodes should be nullified. 
*/ int truncate_inode_blocks(struct inode *inode, pgoff_t from) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); int err = 0, cont = 1; int level, offset[4], noffset[4]; unsigned int nofs = 0; struct f2fs_inode *ri; struct dnode_of_data dn; struct page *page; trace_f2fs_truncate_inode_blocks_enter(inode, from); level = get_node_path(inode, from, offset, noffset); page = get_node_page(sbi, inode->i_ino); if (IS_ERR(page)) { trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page)); return PTR_ERR(page); } set_new_dnode(&dn, inode, page, NULL, 0); unlock_page(page); ri = F2FS_INODE(page); switch (level) { case 0: case 1: nofs = noffset[1]; break; case 2: nofs = noffset[1]; if (!offset[level - 1]) goto skip_partial; err = truncate_partial_nodes(&dn, ri, offset, level); if (err < 0 && err != -ENOENT) goto fail; nofs += 1 + NIDS_PER_BLOCK; break; case 3: nofs = 5 + 2 * NIDS_PER_BLOCK; if (!offset[level - 1]) goto skip_partial; err = truncate_partial_nodes(&dn, ri, offset, level); if (err < 0 && err != -ENOENT) goto fail; break; default: BUG(); } skip_partial: while (cont) { dn.nid = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]); switch (offset[0]) { case NODE_DIR1_BLOCK: case NODE_DIR2_BLOCK: err = truncate_dnode(&dn); break; case NODE_IND1_BLOCK: case NODE_IND2_BLOCK: err = truncate_nodes(&dn, nofs, offset[1], 2); break; case NODE_DIND_BLOCK: err = truncate_nodes(&dn, nofs, offset[1], 3); cont = 0; break; default: BUG(); } if (err < 0 && err != -ENOENT) goto fail; if (offset[1] == 0 && ri->i_nid[offset[0] - NODE_DIR1_BLOCK]) { lock_page(page); BUG_ON(page->mapping != NODE_MAPPING(sbi)); f2fs_wait_on_page_writeback(page, NODE, true); ri->i_nid[offset[0] - NODE_DIR1_BLOCK] = 0; set_page_dirty(page); unlock_page(page); } offset[1] = 0; offset[0]++; nofs += err; } fail: f2fs_put_page(page, 0); trace_f2fs_truncate_inode_blocks_exit(inode, err); return err > 0 ? 0 : err; } int truncate_xattr_node(struct inode *inode, struct page *page) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); nid_t nid = F2FS_I(inode)->i_xattr_nid; struct dnode_of_data dn; struct page *npage; if (!nid) return 0; npage = get_node_page(sbi, nid); if (IS_ERR(npage)) return PTR_ERR(npage); f2fs_i_xnid_write(inode, 0); set_new_dnode(&dn, inode, page, npage, nid); if (page) dn.inode_page_locked = true; truncate_node(&dn); return 0; } /* * Caller should grab and release a rwsem by calling f2fs_lock_op() and * f2fs_unlock_op(). 
*/ int remove_inode_page(struct inode *inode) { struct dnode_of_data dn; int err; set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino); err = get_dnode_of_data(&dn, 0, LOOKUP_NODE); if (err) return err; err = truncate_xattr_node(inode, dn.inode_page); if (err) { f2fs_put_dnode(&dn); return err; } /* remove potential inline_data blocks */ if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) truncate_data_blocks_range(&dn, 1); /* 0 is possible, after f2fs_new_inode() has failed */ f2fs_bug_on(F2FS_I_SB(inode), inode->i_blocks != 0 && inode->i_blocks != 1); /* will put inode & node pages */ truncate_node(&dn); return 0; } struct page *new_inode_page(struct inode *inode) { struct dnode_of_data dn; /* allocate inode page for new inode */ set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino); /* caller should f2fs_put_page(page, 1); */ return new_node_page(&dn, 0, NULL); } struct page *new_node_page(struct dnode_of_data *dn, unsigned int ofs, struct page *ipage) { struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode); struct node_info new_ni; struct page *page; int err; if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC))) return ERR_PTR(-EPERM); page = f2fs_grab_cache_page(NODE_MAPPING(sbi), dn->nid, false); if (!page) return ERR_PTR(-ENOMEM); if (unlikely(!inc_valid_node_count(sbi, dn->inode))) { err = -ENOSPC; goto fail; } #ifdef CONFIG_F2FS_CHECK_FS get_node_info(sbi, dn->nid, &new_ni); f2fs_bug_on(sbi, new_ni.blk_addr != NULL_ADDR); #endif new_ni.nid = dn->nid; new_ni.ino = dn->inode->i_ino; new_ni.blk_addr = NULL_ADDR; new_ni.flag = 0; new_ni.version = 0; set_node_addr(sbi, &new_ni, NEW_ADDR, false); f2fs_wait_on_page_writeback(page, NODE, true); fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true); set_cold_node(dn->inode, page); if (!PageUptodate(page)) SetPageUptodate(page); if (set_page_dirty(page)) dn->node_changed = true; if (f2fs_has_xattr_block(ofs)) f2fs_i_xnid_write(dn->inode, dn->nid); if (ofs == 0) inc_valid_inode_count(sbi); return page; fail: clear_node_page_dirty(page); f2fs_put_page(page, 1); return ERR_PTR(err); } /* * Caller should do after getting the following values. * 0: f2fs_put_page(page, 0) * LOCKED_PAGE or error: f2fs_put_page(page, 1) */ static int read_node_page(struct page *page, int op_flags) { struct f2fs_sb_info *sbi = F2FS_P_SB(page); struct node_info ni; struct f2fs_io_info fio = { .sbi = sbi, .type = NODE, .op = REQ_OP_READ, .op_flags = op_flags, .page = page, .encrypted_page = NULL, }; if (PageUptodate(page)) return LOCKED_PAGE; get_node_info(sbi, page->index, &ni); if (unlikely(ni.blk_addr == NULL_ADDR)) { ClearPageUptodate(page); return -ENOENT; } fio.new_blkaddr = fio.old_blkaddr = ni.blk_addr; return f2fs_submit_page_bio(&fio); } /* * Readahead a node page */ void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid) { struct page *apage; int err; if (!nid) return; f2fs_bug_on(sbi, check_nid_range(sbi, nid)); rcu_read_lock(); apage = radix_tree_lookup(&NODE_MAPPING(sbi)->page_tree, nid); rcu_read_unlock(); if (apage) return; apage = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false); if (!apage) return; err = read_node_page(apage, REQ_RAHEAD); f2fs_put_page(apage, err ? 
1 : 0); } static struct page *__get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid, struct page *parent, int start) { struct page *page; int err; if (!nid) return ERR_PTR(-ENOENT); f2fs_bug_on(sbi, check_nid_range(sbi, nid)); repeat: page = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false); if (!page) return ERR_PTR(-ENOMEM); err = read_node_page(page, 0); if (err < 0) { f2fs_put_page(page, 1); return ERR_PTR(err); } else if (err == LOCKED_PAGE) { goto page_hit; } if (parent) ra_node_pages(parent, start + 1, MAX_RA_NODE); lock_page(page); if (unlikely(page->mapping != NODE_MAPPING(sbi))) { f2fs_put_page(page, 1); goto repeat; } if (unlikely(!PageUptodate(page))) goto out_err; page_hit: if(unlikely(nid != nid_of_node(page))) { f2fs_bug_on(sbi, 1); ClearPageUptodate(page); out_err: f2fs_put_page(page, 1); return ERR_PTR(-EIO); } return page; } struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid) { return __get_node_page(sbi, nid, NULL, 0); } struct page *get_node_page_ra(struct page *parent, int start) { struct f2fs_sb_info *sbi = F2FS_P_SB(parent); nid_t nid = get_nid(parent, start, false); return __get_node_page(sbi, nid, parent, start); } static void flush_inline_data(struct f2fs_sb_info *sbi, nid_t ino) { struct inode *inode; struct page *page; int ret; /* should flush inline_data before evict_inode */ inode = ilookup(sbi->sb, ino); if (!inode) return; page = pagecache_get_page(inode->i_mapping, 0, FGP_LOCK|FGP_NOWAIT, 0); if (!page) goto iput_out; if (!PageUptodate(page)) goto page_out; if (!PageDirty(page)) goto page_out; if (!clear_page_dirty_for_io(page)) goto page_out; ret = f2fs_write_inline_data(inode, page); inode_dec_dirty_pages(inode); remove_dirty_inode(inode); if (ret) set_page_dirty(page); page_out: f2fs_put_page(page, 1); iput_out: iput(inode); } void move_node_page(struct page *node_page, int gc_type) { if (gc_type == FG_GC) { struct f2fs_sb_info *sbi = F2FS_P_SB(node_page); struct writeback_control wbc = { .sync_mode = WB_SYNC_ALL, .nr_to_write = 1, .for_reclaim = 0, }; set_page_dirty(node_page); f2fs_wait_on_page_writeback(node_page, NODE, true); f2fs_bug_on(sbi, PageWriteback(node_page)); if (!clear_page_dirty_for_io(node_page)) goto out_page; if (NODE_MAPPING(sbi)->a_ops->writepage(node_page, &wbc)) unlock_page(node_page); goto release_page; } else { /* set page dirty and write it */ if (!PageWriteback(node_page)) set_page_dirty(node_page); } out_page: unlock_page(node_page); release_page: f2fs_put_page(node_page, 0); } static struct page *last_fsync_dnode(struct f2fs_sb_info *sbi, nid_t ino) { pgoff_t index, end; struct pagevec pvec; struct page *last_page = NULL; pagevec_init(&pvec, 0); index = 0; end = ULONG_MAX; while (index <= end) { int i, nr_pages; nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index, PAGECACHE_TAG_DIRTY, min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1); if (nr_pages == 0) break; for (i = 0; i < nr_pages; i++) { struct page *page = pvec.pages[i]; if (unlikely(f2fs_cp_error(sbi))) { f2fs_put_page(last_page, 0); pagevec_release(&pvec); return ERR_PTR(-EIO); } if (!IS_DNODE(page) || !is_cold_node(page)) continue; if (ino_of_node(page) != ino) continue; lock_page(page); if (unlikely(page->mapping != NODE_MAPPING(sbi))) { continue_unlock: unlock_page(page); continue; } if (ino_of_node(page) != ino) goto continue_unlock; if (!PageDirty(page)) { /* someone wrote it for us */ goto continue_unlock; } if (last_page) f2fs_put_page(last_page, 0); get_page(page); last_page = page; unlock_page(page); } pagevec_release(&pvec); 
cond_resched(); } return last_page; } static int __write_node_page(struct page *page, bool atomic, bool *submitted, struct writeback_control *wbc) { struct f2fs_sb_info *sbi = F2FS_P_SB(page); nid_t nid; struct node_info ni; struct f2fs_io_info fio = { .sbi = sbi, .type = NODE, .op = REQ_OP_WRITE, .op_flags = wbc_to_write_flags(wbc), .page = page, .encrypted_page = NULL, .submitted = false, }; trace_f2fs_writepage(page, NODE); if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) goto redirty_out; if (unlikely(f2fs_cp_error(sbi))) goto redirty_out; /* get old block addr of this node page */ nid = nid_of_node(page); f2fs_bug_on(sbi, page->index != nid); if (wbc->for_reclaim) { if (!down_read_trylock(&sbi->node_write)) goto redirty_out; } else { down_read(&sbi->node_write); } get_node_info(sbi, nid, &ni); /* This page is already truncated */ if (unlikely(ni.blk_addr == NULL_ADDR)) { ClearPageUptodate(page); dec_page_count(sbi, F2FS_DIRTY_NODES); up_read(&sbi->node_write); unlock_page(page); return 0; } if (atomic && !test_opt(sbi, NOBARRIER)) fio.op_flags |= REQ_PREFLUSH | REQ_FUA; set_page_writeback(page); fio.old_blkaddr = ni.blk_addr; write_node_page(nid, &fio); set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(page)); dec_page_count(sbi, F2FS_DIRTY_NODES); up_read(&sbi->node_write); if (wbc->for_reclaim) { f2fs_submit_merged_bio_cond(sbi, page->mapping->host, 0, page->index, NODE, WRITE); submitted = NULL; } unlock_page(page); if (unlikely(f2fs_cp_error(sbi))) { f2fs_submit_merged_bio(sbi, NODE, WRITE); submitted = NULL; } if (submitted) *submitted = fio.submitted; return 0; redirty_out: redirty_page_for_writepage(wbc, page); return AOP_WRITEPAGE_ACTIVATE; } static int f2fs_write_node_page(struct page *page, struct writeback_control *wbc) { return __write_node_page(page, false, NULL, wbc); } int fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode, struct writeback_control *wbc, bool atomic) { pgoff_t index, end; pgoff_t last_idx = ULONG_MAX; struct pagevec pvec; int ret = 0; struct page *last_page = NULL; bool marked = false; nid_t ino = inode->i_ino; if (atomic) { last_page = last_fsync_dnode(sbi, ino); if (IS_ERR_OR_NULL(last_page)) return PTR_ERR_OR_ZERO(last_page); } retry: pagevec_init(&pvec, 0); index = 0; end = ULONG_MAX; while (index <= end) { int i, nr_pages; nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index, PAGECACHE_TAG_DIRTY, min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1); if (nr_pages == 0) break; for (i = 0; i < nr_pages; i++) { struct page *page = pvec.pages[i]; bool submitted = false; if (unlikely(f2fs_cp_error(sbi))) { f2fs_put_page(last_page, 0); pagevec_release(&pvec); ret = -EIO; goto out; } if (!IS_DNODE(page) || !is_cold_node(page)) continue; if (ino_of_node(page) != ino) continue; lock_page(page); if (unlikely(page->mapping != NODE_MAPPING(sbi))) { continue_unlock: unlock_page(page); continue; } if (ino_of_node(page) != ino) goto continue_unlock; if (!PageDirty(page) && page != last_page) { /* someone wrote it for us */ goto continue_unlock; } f2fs_wait_on_page_writeback(page, NODE, true); BUG_ON(PageWriteback(page)); if (!atomic || page == last_page) { set_fsync_mark(page, 1); if (IS_INODE(page)) { if (is_inode_flag_set(inode, FI_DIRTY_INODE)) update_inode(inode, page); set_dentry_mark(page, need_dentry_mark(sbi, ino)); } /* may be written by other thread */ if (!PageDirty(page)) set_page_dirty(page); } if (!clear_page_dirty_for_io(page)) goto continue_unlock; ret = __write_node_page(page, atomic && page == last_page, &submitted, wbc); 
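			/* a failed node write ends this fsync attempt */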
if (ret) { unlock_page(page); f2fs_put_page(last_page, 0); break; } else if (submitted) { last_idx = page->index; } if (page == last_page) { f2fs_put_page(page, 0); marked = true; break; } } pagevec_release(&pvec); cond_resched(); if (ret || marked) break; } if (!ret && atomic && !marked) { f2fs_msg(sbi->sb, KERN_DEBUG, "Retry to write fsync mark: ino=%u, idx=%lx", ino, last_page->index); lock_page(last_page); f2fs_wait_on_page_writeback(last_page, NODE, true); set_page_dirty(last_page); unlock_page(last_page); goto retry; } out: if (last_idx != ULONG_MAX) f2fs_submit_merged_bio_cond(sbi, NULL, ino, last_idx, NODE, WRITE); return ret ? -EIO: 0; } int sync_node_pages(struct f2fs_sb_info *sbi, struct writeback_control *wbc) { pgoff_t index, end; struct pagevec pvec; int step = 0; int nwritten = 0; int ret = 0; pagevec_init(&pvec, 0); next_step: index = 0; end = ULONG_MAX; while (index <= end) { int i, nr_pages; nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index, PAGECACHE_TAG_DIRTY, min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1); if (nr_pages == 0) break; for (i = 0; i < nr_pages; i++) { struct page *page = pvec.pages[i]; bool submitted = false; if (unlikely(f2fs_cp_error(sbi))) { pagevec_release(&pvec); ret = -EIO; goto out; } /* * flushing sequence with step: * 0. indirect nodes * 1. dentry dnodes * 2. file dnodes */ if (step == 0 && IS_DNODE(page)) continue; if (step == 1 && (!IS_DNODE(page) || is_cold_node(page))) continue; if (step == 2 && (!IS_DNODE(page) || !is_cold_node(page))) continue; lock_node: if (!trylock_page(page)) continue; if (unlikely(page->mapping != NODE_MAPPING(sbi))) { continue_unlock: unlock_page(page); continue; } if (!PageDirty(page)) { /* someone wrote it for us */ goto continue_unlock; } /* flush inline_data */ if (is_inline_node(page)) { clear_inline_node(page); unlock_page(page); flush_inline_data(sbi, ino_of_node(page)); goto lock_node; } f2fs_wait_on_page_writeback(page, NODE, true); BUG_ON(PageWriteback(page)); if (!clear_page_dirty_for_io(page)) goto continue_unlock; set_fsync_mark(page, 0); set_dentry_mark(page, 0); ret = __write_node_page(page, false, &submitted, wbc); if (ret) unlock_page(page); else if (submitted) nwritten++; if (--wbc->nr_to_write == 0) break; } pagevec_release(&pvec); cond_resched(); if (wbc->nr_to_write == 0) { step = 2; break; } } if (step < 2) { step++; goto next_step; } out: if (nwritten) f2fs_submit_merged_bio(sbi, NODE, WRITE); return ret; } int wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, nid_t ino) { pgoff_t index = 0, end = ULONG_MAX; struct pagevec pvec; int ret2, ret = 0; pagevec_init(&pvec, 0); while (index <= end) { int i, nr_pages; nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index, PAGECACHE_TAG_WRITEBACK, min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1); if (nr_pages == 0) break; for (i = 0; i < nr_pages; i++) { struct page *page = pvec.pages[i]; /* until radix tree lookup accepts end_index */ if (unlikely(page->index > end)) continue; if (ino && ino_of_node(page) == ino) { f2fs_wait_on_page_writeback(page, NODE, true); if (TestClearPageError(page)) ret = -EIO; } } pagevec_release(&pvec); cond_resched(); } ret2 = filemap_check_errors(NODE_MAPPING(sbi)); if (!ret) ret = ret2; return ret; } static int f2fs_write_node_pages(struct address_space *mapping, struct writeback_control *wbc) { struct f2fs_sb_info *sbi = F2FS_M_SB(mapping); struct blk_plug plug; long diff; /* balancing f2fs's metadata in background */ f2fs_balance_fs_bg(sbi); /* collect a number of dirty node pages and write 
together */ if (get_pages(sbi, F2FS_DIRTY_NODES) < nr_pages_to_skip(sbi, NODE)) goto skip_write; trace_f2fs_writepages(mapping->host, wbc, NODE); diff = nr_pages_to_write(sbi, NODE, wbc); wbc->sync_mode = WB_SYNC_NONE; blk_start_plug(&plug); sync_node_pages(sbi, wbc); blk_finish_plug(&plug); wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff); return 0; skip_write: wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_NODES); trace_f2fs_writepages(mapping->host, wbc, NODE); return 0; } static int f2fs_set_node_page_dirty(struct page *page) { trace_f2fs_set_page_dirty(page, NODE); if (!PageUptodate(page)) SetPageUptodate(page); if (!PageDirty(page)) { f2fs_set_page_dirty_nobuffers(page); inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES); SetPagePrivate(page); f2fs_trace_pid(page); return 1; } return 0; } /* * Structure of the f2fs node operations */ const struct address_space_operations f2fs_node_aops = { .writepage = f2fs_write_node_page, .writepages = f2fs_write_node_pages, .set_page_dirty = f2fs_set_node_page_dirty, .invalidatepage = f2fs_invalidate_page, .releasepage = f2fs_release_page, #ifdef CONFIG_MIGRATION .migratepage = f2fs_migrate_page, #endif }; static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i, nid_t n) { return radix_tree_lookup(&nm_i->free_nid_root, n); } static int __insert_nid_to_list(struct f2fs_sb_info *sbi, struct free_nid *i, enum nid_list list, bool new) { struct f2fs_nm_info *nm_i = NM_I(sbi); if (new) { int err = radix_tree_insert(&nm_i->free_nid_root, i->nid, i); if (err) return err; } f2fs_bug_on(sbi, list == FREE_NID_LIST ? i->state != NID_NEW : i->state != NID_ALLOC); nm_i->nid_cnt[list]++; list_add_tail(&i->list, &nm_i->nid_list[list]); return 0; } static void __remove_nid_from_list(struct f2fs_sb_info *sbi, struct free_nid *i, enum nid_list list, bool reuse) { struct f2fs_nm_info *nm_i = NM_I(sbi); f2fs_bug_on(sbi, list == FREE_NID_LIST ? 
i->state != NID_NEW : i->state != NID_ALLOC); nm_i->nid_cnt[list]--; list_del(&i->list); if (!reuse) radix_tree_delete(&nm_i->free_nid_root, i->nid); } /* return if the nid is recognized as free */ static bool add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build) { struct f2fs_nm_info *nm_i = NM_I(sbi); struct free_nid *i, *e; struct nat_entry *ne; int err = -EINVAL; bool ret = false; /* 0 nid should not be used */ if (unlikely(nid == 0)) return false; i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS); i->nid = nid; i->state = NID_NEW; if (radix_tree_preload(GFP_NOFS)) goto err; spin_lock(&nm_i->nid_list_lock); if (build) { /* * Thread A Thread B * - f2fs_create * - f2fs_new_inode * - alloc_nid * - __insert_nid_to_list(ALLOC_NID_LIST) * - f2fs_balance_fs_bg * - build_free_nids * - __build_free_nids * - scan_nat_page * - add_free_nid * - __lookup_nat_cache * - f2fs_add_link * - init_inode_metadata * - new_inode_page * - new_node_page * - set_node_addr * - alloc_nid_done * - __remove_nid_from_list(ALLOC_NID_LIST) * - __insert_nid_to_list(FREE_NID_LIST) */ ne = __lookup_nat_cache(nm_i, nid); if (ne && (!get_nat_flag(ne, IS_CHECKPOINTED) || nat_get_blkaddr(ne) != NULL_ADDR)) goto err_out; e = __lookup_free_nid_list(nm_i, nid); if (e) { if (e->state == NID_NEW) ret = true; goto err_out; } } ret = true; err = __insert_nid_to_list(sbi, i, FREE_NID_LIST, true); err_out: spin_unlock(&nm_i->nid_list_lock); radix_tree_preload_end(); err: if (err) kmem_cache_free(free_nid_slab, i); return ret; } static void remove_free_nid(struct f2fs_sb_info *sbi, nid_t nid) { struct f2fs_nm_info *nm_i = NM_I(sbi); struct free_nid *i; bool need_free = false; spin_lock(&nm_i->nid_list_lock); i = __lookup_free_nid_list(nm_i, nid); if (i && i->state == NID_NEW) { __remove_nid_from_list(sbi, i, FREE_NID_LIST, false); need_free = true; } spin_unlock(&nm_i->nid_list_lock); if (need_free) kmem_cache_free(free_nid_slab, i); } static void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid, bool set, bool build) { struct f2fs_nm_info *nm_i = NM_I(sbi); unsigned int nat_ofs = NAT_BLOCK_OFFSET(nid); unsigned int nid_ofs = nid - START_NID(nid); if (!test_bit_le(nat_ofs, nm_i->nat_block_bitmap)) return; if (set) __set_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]); else __clear_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]); if (set) nm_i->free_nid_count[nat_ofs]++; else if (!build) nm_i->free_nid_count[nat_ofs]--; } static void scan_nat_page(struct f2fs_sb_info *sbi, struct page *nat_page, nid_t start_nid) { struct f2fs_nm_info *nm_i = NM_I(sbi); struct f2fs_nat_block *nat_blk = page_address(nat_page); block_t blk_addr; unsigned int nat_ofs = NAT_BLOCK_OFFSET(start_nid); int i; if (test_bit_le(nat_ofs, nm_i->nat_block_bitmap)) return; __set_bit_le(nat_ofs, nm_i->nat_block_bitmap); i = start_nid % NAT_ENTRY_PER_BLOCK; for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) { bool freed = false; if (unlikely(start_nid >= nm_i->max_nid)) break; blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr); f2fs_bug_on(sbi, blk_addr == NEW_ADDR); if (blk_addr == NULL_ADDR) freed = add_free_nid(sbi, start_nid, true); spin_lock(&NM_I(sbi)->nid_list_lock); update_free_nid_bitmap(sbi, start_nid, freed, true); spin_unlock(&NM_I(sbi)->nid_list_lock); } } static void scan_free_nid_bits(struct f2fs_sb_info *sbi) { struct f2fs_nm_info *nm_i = NM_I(sbi); struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA); struct f2fs_journal *journal = curseg->journal; unsigned int i, idx; down_read(&nm_i->nat_tree_lock); for (i = 0; i < 
nm_i->nat_blocks; i++) { if (!test_bit_le(i, nm_i->nat_block_bitmap)) continue; if (!nm_i->free_nid_count[i]) continue; for (idx = 0; idx < NAT_ENTRY_PER_BLOCK; idx++) { nid_t nid; if (!test_bit_le(idx, nm_i->free_nid_bitmap[i])) continue; nid = i * NAT_ENTRY_PER_BLOCK + idx; add_free_nid(sbi, nid, true); if (nm_i->nid_cnt[FREE_NID_LIST] >= MAX_FREE_NIDS) goto out; } } out: down_read(&curseg->journal_rwsem); for (i = 0; i < nats_in_cursum(journal); i++) { block_t addr; nid_t nid; addr = le32_to_cpu(nat_in_journal(journal, i).block_addr); nid = le32_to_cpu(nid_in_journal(journal, i)); if (addr == NULL_ADDR) add_free_nid(sbi, nid, true); else remove_free_nid(sbi, nid); } up_read(&curseg->journal_rwsem); up_read(&nm_i->nat_tree_lock); } static void __build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount) { struct f2fs_nm_info *nm_i = NM_I(sbi); struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA); struct f2fs_journal *journal = curseg->journal; int i = 0; nid_t nid = nm_i->next_scan_nid; /* Enough entries */ if (nm_i->nid_cnt[FREE_NID_LIST] >= NAT_ENTRY_PER_BLOCK) return; if (!sync && !available_free_memory(sbi, FREE_NIDS)) return; if (!mount) { /* try to find free nids in free_nid_bitmap */ scan_free_nid_bits(sbi); if (nm_i->nid_cnt[FREE_NID_LIST]) return; } /* readahead nat pages to be scanned */ ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES, META_NAT, true); down_read(&nm_i->nat_tree_lock); while (1) { struct page *page = get_current_nat_page(sbi, nid); scan_nat_page(sbi, page, nid); f2fs_put_page(page, 1); nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK)); if (unlikely(nid >= nm_i->max_nid)) nid = 0; if (++i >= FREE_NID_PAGES) break; } /* go to the next free nat pages to find free nids abundantly */ nm_i->next_scan_nid = nid; /* find free nids from current sum_pages */ down_read(&curseg->journal_rwsem); for (i = 0; i < nats_in_cursum(journal); i++) { block_t addr; addr = le32_to_cpu(nat_in_journal(journal, i).block_addr); nid = le32_to_cpu(nid_in_journal(journal, i)); if (addr == NULL_ADDR) add_free_nid(sbi, nid, true); else remove_free_nid(sbi, nid); } up_read(&curseg->journal_rwsem); up_read(&nm_i->nat_tree_lock); ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid), nm_i->ra_nid_pages, META_NAT, false); } void build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount) { mutex_lock(&NM_I(sbi)->build_lock); __build_free_nids(sbi, sync, mount); mutex_unlock(&NM_I(sbi)->build_lock); } /* * If this function returns success, caller can obtain a new nid * from second parameter of this function. * The returned nid could be used ino as well as nid when inode is created. 
*/ bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid) { struct f2fs_nm_info *nm_i = NM_I(sbi); struct free_nid *i = NULL; retry: #ifdef CONFIG_F2FS_FAULT_INJECTION if (time_to_inject(sbi, FAULT_ALLOC_NID)) { f2fs_show_injection_info(FAULT_ALLOC_NID); return false; } #endif spin_lock(&nm_i->nid_list_lock); if (unlikely(nm_i->available_nids == 0)) { spin_unlock(&nm_i->nid_list_lock); return false; } /* We should not use stale free nids created by build_free_nids */ if (nm_i->nid_cnt[FREE_NID_LIST] && !on_build_free_nids(nm_i)) { f2fs_bug_on(sbi, list_empty(&nm_i->nid_list[FREE_NID_LIST])); i = list_first_entry(&nm_i->nid_list[FREE_NID_LIST], struct free_nid, list); *nid = i->nid; __remove_nid_from_list(sbi, i, FREE_NID_LIST, true); i->state = NID_ALLOC; __insert_nid_to_list(sbi, i, ALLOC_NID_LIST, false); nm_i->available_nids--; update_free_nid_bitmap(sbi, *nid, false, false); spin_unlock(&nm_i->nid_list_lock); return true; } spin_unlock(&nm_i->nid_list_lock); /* Let's scan nat pages and its caches to get free nids */ build_free_nids(sbi, true, false); goto retry; } /* * alloc_nid() should be called prior to this function. */ void alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid) { struct f2fs_nm_info *nm_i = NM_I(sbi); struct free_nid *i; spin_lock(&nm_i->nid_list_lock); i = __lookup_free_nid_list(nm_i, nid); f2fs_bug_on(sbi, !i); __remove_nid_from_list(sbi, i, ALLOC_NID_LIST, false); spin_unlock(&nm_i->nid_list_lock); kmem_cache_free(free_nid_slab, i); } /* * alloc_nid() should be called prior to this function. */ void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid) { struct f2fs_nm_info *nm_i = NM_I(sbi); struct free_nid *i; bool need_free = false; if (!nid) return; spin_lock(&nm_i->nid_list_lock); i = __lookup_free_nid_list(nm_i, nid); f2fs_bug_on(sbi, !i); if (!available_free_memory(sbi, FREE_NIDS)) { __remove_nid_from_list(sbi, i, ALLOC_NID_LIST, false); need_free = true; } else { __remove_nid_from_list(sbi, i, ALLOC_NID_LIST, true); i->state = NID_NEW; __insert_nid_to_list(sbi, i, FREE_NID_LIST, false); } nm_i->available_nids++; update_free_nid_bitmap(sbi, nid, true, false); spin_unlock(&nm_i->nid_list_lock); if (need_free) kmem_cache_free(free_nid_slab, i); } int try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink) { struct f2fs_nm_info *nm_i = NM_I(sbi); struct free_nid *i, *next; int nr = nr_shrink; if (nm_i->nid_cnt[FREE_NID_LIST] <= MAX_FREE_NIDS) return 0; if (!mutex_trylock(&nm_i->build_lock)) return 0; spin_lock(&nm_i->nid_list_lock); list_for_each_entry_safe(i, next, &nm_i->nid_list[FREE_NID_LIST], list) { if (nr_shrink <= 0 || nm_i->nid_cnt[FREE_NID_LIST] <= MAX_FREE_NIDS) break; __remove_nid_from_list(sbi, i, FREE_NID_LIST, false); kmem_cache_free(free_nid_slab, i); nr_shrink--; } spin_unlock(&nm_i->nid_list_lock); mutex_unlock(&nm_i->build_lock); return nr - nr_shrink; } void recover_inline_xattr(struct inode *inode, struct page *page) { void *src_addr, *dst_addr; size_t inline_size; struct page *ipage; struct f2fs_inode *ri; ipage = get_node_page(F2FS_I_SB(inode), inode->i_ino); f2fs_bug_on(F2FS_I_SB(inode), IS_ERR(ipage)); ri = F2FS_INODE(page); if (!(ri->i_inline & F2FS_INLINE_XATTR)) { clear_inode_flag(inode, FI_INLINE_XATTR); goto update_inode; } dst_addr = inline_xattr_addr(ipage); src_addr = inline_xattr_addr(page); inline_size = inline_xattr_size(inode); f2fs_wait_on_page_writeback(ipage, NODE, true); memcpy(dst_addr, src_addr, inline_size); update_inode: update_inode(inode, ipage); f2fs_put_page(ipage, 1); } int recover_xattr_data(struct inode 
*inode, struct page *page, block_t blkaddr) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); nid_t prev_xnid = F2FS_I(inode)->i_xattr_nid; nid_t new_xnid = nid_of_node(page); struct node_info ni; struct page *xpage; if (!prev_xnid) goto recover_xnid; /* 1: invalidate the previous xattr nid */ get_node_info(sbi, prev_xnid, &ni); f2fs_bug_on(sbi, ni.blk_addr == NULL_ADDR); invalidate_blocks(sbi, ni.blk_addr); dec_valid_node_count(sbi, inode); set_node_addr(sbi, &ni, NULL_ADDR, false); recover_xnid: /* 2: update xattr nid in inode */ remove_free_nid(sbi, new_xnid); f2fs_i_xnid_write(inode, new_xnid); if (unlikely(!inc_valid_node_count(sbi, inode))) f2fs_bug_on(sbi, 1); update_inode_page(inode); /* 3: update and set xattr node page dirty */ xpage = grab_cache_page(NODE_MAPPING(sbi), new_xnid); if (!xpage) return -ENOMEM; memcpy(F2FS_NODE(xpage), F2FS_NODE(page), PAGE_SIZE); get_node_info(sbi, new_xnid, &ni); ni.ino = inode->i_ino; set_node_addr(sbi, &ni, NEW_ADDR, false); set_page_dirty(xpage); f2fs_put_page(xpage, 1); return 0; } int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page) { struct f2fs_inode *src, *dst; nid_t ino = ino_of_node(page); struct node_info old_ni, new_ni; struct page *ipage; get_node_info(sbi, ino, &old_ni); if (unlikely(old_ni.blk_addr != NULL_ADDR)) return -EINVAL; retry: ipage = f2fs_grab_cache_page(NODE_MAPPING(sbi), ino, false); if (!ipage) { congestion_wait(BLK_RW_ASYNC, HZ/50); goto retry; } /* Should not use this inode from free nid list */ remove_free_nid(sbi, ino); if (!PageUptodate(ipage)) SetPageUptodate(ipage); fill_node_footer(ipage, ino, ino, 0, true); src = F2FS_INODE(page); dst = F2FS_INODE(ipage); memcpy(dst, src, (unsigned long)&src->i_ext - (unsigned long)src); dst->i_size = 0; dst->i_blocks = cpu_to_le64(1); dst->i_links = cpu_to_le32(1); dst->i_xattr_nid = 0; dst->i_inline = src->i_inline & F2FS_INLINE_XATTR; new_ni = old_ni; new_ni.ino = ino; if (unlikely(!inc_valid_node_count(sbi, NULL))) WARN_ON(1); set_node_addr(sbi, &new_ni, NEW_ADDR, false); inc_valid_inode_count(sbi); set_page_dirty(ipage); f2fs_put_page(ipage, 1); return 0; } int restore_node_summary(struct f2fs_sb_info *sbi, unsigned int segno, struct f2fs_summary_block *sum) { struct f2fs_node *rn; struct f2fs_summary *sum_entry; block_t addr; int i, idx, last_offset, nrpages; /* scan the node segment */ last_offset = sbi->blocks_per_seg; addr = START_BLOCK(sbi, segno); sum_entry = &sum->entries[0]; for (i = 0; i < last_offset; i += nrpages, addr += nrpages) { nrpages = min(last_offset - i, BIO_MAX_PAGES); /* readahead node pages */ ra_meta_pages(sbi, addr, nrpages, META_POR, true); for (idx = addr; idx < addr + nrpages; idx++) { struct page *page = get_tmp_page(sbi, idx); rn = F2FS_NODE(page); sum_entry->nid = rn->footer.nid; sum_entry->version = 0; sum_entry->ofs_in_node = 0; sum_entry++; f2fs_put_page(page, 1); } invalidate_mapping_pages(META_MAPPING(sbi), addr, addr + nrpages); } return 0; } static void remove_nats_in_journal(struct f2fs_sb_info *sbi) { struct f2fs_nm_info *nm_i = NM_I(sbi); struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA); struct f2fs_journal *journal = curseg->journal; int i; down_write(&curseg->journal_rwsem); for (i = 0; i < nats_in_cursum(journal); i++) { struct nat_entry *ne; struct f2fs_nat_entry raw_ne; nid_t nid = le32_to_cpu(nid_in_journal(journal, i)); raw_ne = nat_in_journal(journal, i); ne = __lookup_nat_cache(nm_i, nid); if (!ne) { ne = grab_nat_entry(nm_i, nid, true); node_info_from_raw_nat(&ne->ni, &raw_ne); } /* * if a free nat 
in journal has not been used after last * checkpoint, we should remove it from available nids, * since later we will add it again. */ if (!get_nat_flag(ne, IS_DIRTY) && le32_to_cpu(raw_ne.block_addr) == NULL_ADDR) { spin_lock(&nm_i->nid_list_lock); nm_i->available_nids--; spin_unlock(&nm_i->nid_list_lock); } __set_nat_cache_dirty(nm_i, ne); } update_nats_in_cursum(journal, -i); up_write(&curseg->journal_rwsem); } static void __adjust_nat_entry_set(struct nat_entry_set *nes, struct list_head *head, int max) { struct nat_entry_set *cur; if (nes->entry_cnt >= max) goto add_out; list_for_each_entry(cur, head, set_list) { if (cur->entry_cnt >= nes->entry_cnt) { list_add(&nes->set_list, cur->set_list.prev); return; } } add_out: list_add_tail(&nes->set_list, head); } static void __update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid, struct page *page) { struct f2fs_nm_info *nm_i = NM_I(sbi); unsigned int nat_index = start_nid / NAT_ENTRY_PER_BLOCK; struct f2fs_nat_block *nat_blk = page_address(page); int valid = 0; int i; if (!enabled_nat_bits(sbi, NULL)) return; for (i = 0; i < NAT_ENTRY_PER_BLOCK; i++) { if (start_nid == 0 && i == 0) valid++; if (nat_blk->entries[i].block_addr) valid++; } if (valid == 0) { __set_bit_le(nat_index, nm_i->empty_nat_bits); __clear_bit_le(nat_index, nm_i->full_nat_bits); return; } __clear_bit_le(nat_index, nm_i->empty_nat_bits); if (valid == NAT_ENTRY_PER_BLOCK) __set_bit_le(nat_index, nm_i->full_nat_bits); else __clear_bit_le(nat_index, nm_i->full_nat_bits); } static void __flush_nat_entry_set(struct f2fs_sb_info *sbi, struct nat_entry_set *set, struct cp_control *cpc) { struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA); struct f2fs_journal *journal = curseg->journal; nid_t start_nid = set->set * NAT_ENTRY_PER_BLOCK; bool to_journal = true; struct f2fs_nat_block *nat_blk; struct nat_entry *ne, *cur; struct page *page = NULL; /* * there are two steps to flush nat entries: * #1, flush nat entries to journal in current hot data summary block. * #2, flush nat entries to nat page. 
*/ if (enabled_nat_bits(sbi, cpc) || !__has_cursum_space(journal, set->entry_cnt, NAT_JOURNAL)) to_journal = false; if (to_journal) { down_write(&curseg->journal_rwsem); } else { page = get_next_nat_page(sbi, start_nid); nat_blk = page_address(page); f2fs_bug_on(sbi, !nat_blk); } /* flush dirty nats in nat entry set */ list_for_each_entry_safe(ne, cur, &set->entry_list, list) { struct f2fs_nat_entry *raw_ne; nid_t nid = nat_get_nid(ne); int offset; if (nat_get_blkaddr(ne) == NEW_ADDR) continue; if (to_journal) { offset = lookup_journal_in_cursum(journal, NAT_JOURNAL, nid, 1); f2fs_bug_on(sbi, offset < 0); raw_ne = &nat_in_journal(journal, offset); nid_in_journal(journal, offset) = cpu_to_le32(nid); } else { raw_ne = &nat_blk->entries[nid - start_nid]; } raw_nat_from_node_info(raw_ne, &ne->ni); nat_reset_flag(ne); __clear_nat_cache_dirty(NM_I(sbi), set, ne); if (nat_get_blkaddr(ne) == NULL_ADDR) { add_free_nid(sbi, nid, false); spin_lock(&NM_I(sbi)->nid_list_lock); NM_I(sbi)->available_nids++; update_free_nid_bitmap(sbi, nid, true, false); spin_unlock(&NM_I(sbi)->nid_list_lock); } else { spin_lock(&NM_I(sbi)->nid_list_lock); update_free_nid_bitmap(sbi, nid, false, false); spin_unlock(&NM_I(sbi)->nid_list_lock); } } if (to_journal) { up_write(&curseg->journal_rwsem); } else { __update_nat_bits(sbi, start_nid, page); f2fs_put_page(page, 1); } f2fs_bug_on(sbi, set->entry_cnt); radix_tree_delete(&NM_I(sbi)->nat_set_root, set->set); kmem_cache_free(nat_entry_set_slab, set); } /* * This function is called during the checkpointing process. */ void flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc) { struct f2fs_nm_info *nm_i = NM_I(sbi); struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA); struct f2fs_journal *journal = curseg->journal; struct nat_entry_set *setvec[SETVEC_SIZE]; struct nat_entry_set *set, *tmp; unsigned int found; nid_t set_idx = 0; LIST_HEAD(sets); if (!nm_i->dirty_nat_cnt) return; down_write(&nm_i->nat_tree_lock); /* * if there are no enough space in journal to store dirty nat * entries, remove all entries from journal and merge them * into nat entry set. 
*/ if (enabled_nat_bits(sbi, cpc) || !__has_cursum_space(journal, nm_i->dirty_nat_cnt, NAT_JOURNAL)) remove_nats_in_journal(sbi); while ((found = __gang_lookup_nat_set(nm_i, set_idx, SETVEC_SIZE, setvec))) { unsigned idx; set_idx = setvec[found - 1]->set + 1; for (idx = 0; idx < found; idx++) __adjust_nat_entry_set(setvec[idx], &sets, MAX_NAT_JENTRIES(journal)); } /* flush dirty nats in nat entry set */ list_for_each_entry_safe(set, tmp, &sets, set_list) __flush_nat_entry_set(sbi, set, cpc); up_write(&nm_i->nat_tree_lock); f2fs_bug_on(sbi, nm_i->dirty_nat_cnt); } static int __get_nat_bitmaps(struct f2fs_sb_info *sbi) { struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); struct f2fs_nm_info *nm_i = NM_I(sbi); unsigned int nat_bits_bytes = nm_i->nat_blocks / BITS_PER_BYTE; unsigned int i; __u64 cp_ver = cur_cp_version(ckpt); block_t nat_bits_addr; if (!enabled_nat_bits(sbi, NULL)) return 0; nm_i->nat_bits_blocks = F2FS_BYTES_TO_BLK((nat_bits_bytes << 1) + 8 + F2FS_BLKSIZE - 1); nm_i->nat_bits = kzalloc(nm_i->nat_bits_blocks << F2FS_BLKSIZE_BITS, GFP_KERNEL); if (!nm_i->nat_bits) return -ENOMEM; nat_bits_addr = __start_cp_addr(sbi) + sbi->blocks_per_seg - nm_i->nat_bits_blocks; for (i = 0; i < nm_i->nat_bits_blocks; i++) { struct page *page = get_meta_page(sbi, nat_bits_addr++); memcpy(nm_i->nat_bits + (i << F2FS_BLKSIZE_BITS), page_address(page), F2FS_BLKSIZE); f2fs_put_page(page, 1); } cp_ver |= (cur_cp_crc(ckpt) << 32); if (cpu_to_le64(cp_ver) != *(__le64 *)nm_i->nat_bits) { disable_nat_bits(sbi, true); return 0; } nm_i->full_nat_bits = nm_i->nat_bits + 8; nm_i->empty_nat_bits = nm_i->full_nat_bits + nat_bits_bytes; f2fs_msg(sbi->sb, KERN_NOTICE, "Found nat_bits in checkpoint"); return 0; } inline void load_free_nid_bitmap(struct f2fs_sb_info *sbi) { struct f2fs_nm_info *nm_i = NM_I(sbi); unsigned int i = 0; nid_t nid, last_nid; if (!enabled_nat_bits(sbi, NULL)) return; for (i = 0; i < nm_i->nat_blocks; i++) { i = find_next_bit_le(nm_i->empty_nat_bits, nm_i->nat_blocks, i); if (i >= nm_i->nat_blocks) break; __set_bit_le(i, nm_i->nat_block_bitmap); nid = i * NAT_ENTRY_PER_BLOCK; last_nid = (i + 1) * NAT_ENTRY_PER_BLOCK; spin_lock(&NM_I(sbi)->nid_list_lock); for (; nid < last_nid; nid++) update_free_nid_bitmap(sbi, nid, true, true); spin_unlock(&NM_I(sbi)->nid_list_lock); } for (i = 0; i < nm_i->nat_blocks; i++) { i = find_next_bit_le(nm_i->full_nat_bits, nm_i->nat_blocks, i); if (i >= nm_i->nat_blocks) break; __set_bit_le(i, nm_i->nat_block_bitmap); } } static int init_node_manager(struct f2fs_sb_info *sbi) { struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi); struct f2fs_nm_info *nm_i = NM_I(sbi); unsigned char *version_bitmap; unsigned int nat_segs; int err; nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr); /* segment_count_nat includes pair segment so divide to 2. 
*/ nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1; nm_i->nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg); nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nm_i->nat_blocks; /* not used nids: 0, node, meta, (and root counted as valid node) */ nm_i->available_nids = nm_i->max_nid - sbi->total_valid_node_count - F2FS_RESERVED_NODE_NUM; nm_i->nid_cnt[FREE_NID_LIST] = 0; nm_i->nid_cnt[ALLOC_NID_LIST] = 0; nm_i->nat_cnt = 0; nm_i->ram_thresh = DEF_RAM_THRESHOLD; nm_i->ra_nid_pages = DEF_RA_NID_PAGES; nm_i->dirty_nats_ratio = DEF_DIRTY_NAT_RATIO_THRESHOLD; INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC); INIT_LIST_HEAD(&nm_i->nid_list[FREE_NID_LIST]); INIT_LIST_HEAD(&nm_i->nid_list[ALLOC_NID_LIST]); INIT_RADIX_TREE(&nm_i->nat_root, GFP_NOIO); INIT_RADIX_TREE(&nm_i->nat_set_root, GFP_NOIO); INIT_LIST_HEAD(&nm_i->nat_entries); mutex_init(&nm_i->build_lock); spin_lock_init(&nm_i->nid_list_lock); init_rwsem(&nm_i->nat_tree_lock); nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid); nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP); version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP); if (!version_bitmap) return -EFAULT; nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size, GFP_KERNEL); if (!nm_i->nat_bitmap) return -ENOMEM; err = __get_nat_bitmaps(sbi); if (err) return err; #ifdef CONFIG_F2FS_CHECK_FS nm_i->nat_bitmap_mir = kmemdup(version_bitmap, nm_i->bitmap_size, GFP_KERNEL); if (!nm_i->nat_bitmap_mir) return -ENOMEM; #endif return 0; } static int init_free_nid_cache(struct f2fs_sb_info *sbi) { struct f2fs_nm_info *nm_i = NM_I(sbi); nm_i->free_nid_bitmap = f2fs_kvzalloc(nm_i->nat_blocks * NAT_ENTRY_BITMAP_SIZE, GFP_KERNEL); if (!nm_i->free_nid_bitmap) return -ENOMEM; nm_i->nat_block_bitmap = f2fs_kvzalloc(nm_i->nat_blocks / 8, GFP_KERNEL); if (!nm_i->nat_block_bitmap) return -ENOMEM; nm_i->free_nid_count = f2fs_kvzalloc(nm_i->nat_blocks * sizeof(unsigned short), GFP_KERNEL); if (!nm_i->free_nid_count) return -ENOMEM; return 0; } int build_node_manager(struct f2fs_sb_info *sbi) { int err; sbi->nm_info = kzalloc(sizeof(struct f2fs_nm_info), GFP_KERNEL); if (!sbi->nm_info) return -ENOMEM; err = init_node_manager(sbi); if (err) return err; err = init_free_nid_cache(sbi); if (err) return err; /* load free nid status from nat_bits table */ load_free_nid_bitmap(sbi); build_free_nids(sbi, true, true); return 0; } void destroy_node_manager(struct f2fs_sb_info *sbi) { struct f2fs_nm_info *nm_i = NM_I(sbi); struct free_nid *i, *next_i; struct nat_entry *natvec[NATVEC_SIZE]; struct nat_entry_set *setvec[SETVEC_SIZE]; nid_t nid = 0; unsigned int found; if (!nm_i) return; /* destroy free nid list */ spin_lock(&nm_i->nid_list_lock); list_for_each_entry_safe(i, next_i, &nm_i->nid_list[FREE_NID_LIST], list) { __remove_nid_from_list(sbi, i, FREE_NID_LIST, false); spin_unlock(&nm_i->nid_list_lock); kmem_cache_free(free_nid_slab, i); spin_lock(&nm_i->nid_list_lock); } f2fs_bug_on(sbi, nm_i->nid_cnt[FREE_NID_LIST]); f2fs_bug_on(sbi, nm_i->nid_cnt[ALLOC_NID_LIST]); f2fs_bug_on(sbi, !list_empty(&nm_i->nid_list[ALLOC_NID_LIST])); spin_unlock(&nm_i->nid_list_lock); /* destroy nat cache */ down_write(&nm_i->nat_tree_lock); while ((found = __gang_lookup_nat_cache(nm_i, nid, NATVEC_SIZE, natvec))) { unsigned idx; nid = nat_get_nid(natvec[found - 1]) + 1; for (idx = 0; idx < found; idx++) __del_from_nat_cache(nm_i, natvec[idx]); } f2fs_bug_on(sbi, nm_i->nat_cnt); /* destroy nat set cache */ nid = 0; while ((found = __gang_lookup_nat_set(nm_i, nid, SETVEC_SIZE, setvec))) { unsigned idx; nid = 
setvec[found - 1]->set + 1; for (idx = 0; idx < found; idx++) { /* entry_cnt is not zero, when cp_error was occurred */ f2fs_bug_on(sbi, !list_empty(&setvec[idx]->entry_list)); radix_tree_delete(&nm_i->nat_set_root, setvec[idx]->set); kmem_cache_free(nat_entry_set_slab, setvec[idx]); } } up_write(&nm_i->nat_tree_lock); kvfree(nm_i->nat_block_bitmap); kvfree(nm_i->free_nid_bitmap); kvfree(nm_i->free_nid_count); kfree(nm_i->nat_bitmap); kfree(nm_i->nat_bits); #ifdef CONFIG_F2FS_CHECK_FS kfree(nm_i->nat_bitmap_mir); #endif sbi->nm_info = NULL; kfree(nm_i); } int __init create_node_manager_caches(void) { nat_entry_slab = f2fs_kmem_cache_create("nat_entry", sizeof(struct nat_entry)); if (!nat_entry_slab) goto fail; free_nid_slab = f2fs_kmem_cache_create("free_nid", sizeof(struct free_nid)); if (!free_nid_slab) goto destroy_nat_entry; nat_entry_set_slab = f2fs_kmem_cache_create("nat_entry_set", sizeof(struct nat_entry_set)); if (!nat_entry_set_slab) goto destroy_free_nid; return 0; destroy_free_nid: kmem_cache_destroy(free_nid_slab); destroy_nat_entry: kmem_cache_destroy(nat_entry_slab); fail: return -ENOMEM; } void destroy_node_manager_caches(void) { kmem_cache_destroy(nat_entry_set_slab); kmem_cache_destroy(free_nid_slab); kmem_cache_destroy(nat_entry_slab); }
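/*
 * Illustrative sketch, not part of the original node.c: the calling
 * convention for get_dnode_of_data() described in its comment above.
 * The caller initializes a dnode_of_data with set_new_dnode(), performs
 * the lookup, reads the fields it needs and drops the node pages with
 * f2fs_put_dnode().  Per that comment, the f2fs_lock_op()/f2fs_unlock_op()
 * pair is only needed when the call may allocate (ALLOC_NODE), not for a
 * read-only LOOKUP_NODE.  The wrapper name below is hypothetical; the
 * called functions are the ones defined or declared by this file.
 */
static int example_lookup_data_blkaddr(struct inode *inode, pgoff_t index,
					block_t *blkaddr)
{
	struct dnode_of_data dn;
	int err;

	/* start with no locked inode page and no node page cached */
	set_new_dnode(&dn, inode, NULL, NULL, 0);

	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		return err;

	/* get_dnode_of_data() filled in the block address for this offset */
	*blkaddr = dn.data_blkaddr;

	f2fs_put_dnode(&dn);
	return 0;
}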
./CrossVul/dataset_final_sorted/CWE-362/c/good_3026_0
crossvul-cpp_data_bad_3275_2
/* * linux/fs/namei.c * * Copyright (C) 1991, 1992 Linus Torvalds */ /* * Some corrections by tytso. */ /* [Feb 1997 T. Schoebel-Theuer] Complete rewrite of the pathname * lookup logic. */ /* [Feb-Apr 2000, AV] Rewrite to the new namespace architecture. */ #include <linux/init.h> #include <linux/export.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/fs.h> #include <linux/namei.h> #include <linux/pagemap.h> #include <linux/fsnotify.h> #include <linux/personality.h> #include <linux/security.h> #include <linux/ima.h> #include <linux/syscalls.h> #include <linux/mount.h> #include <linux/audit.h> #include <linux/capability.h> #include <linux/file.h> #include <linux/fcntl.h> #include <linux/device_cgroup.h> #include <linux/fs_struct.h> #include <linux/posix_acl.h> #include <linux/hash.h> #include <linux/bitops.h> #include <linux/init_task.h> #include <linux/uaccess.h> #include "internal.h" #include "mount.h" /* [Feb-1997 T. Schoebel-Theuer] * Fundamental changes in the pathname lookup mechanisms (namei) * were necessary because of omirr. The reason is that omirr needs * to know the _real_ pathname, not the user-supplied one, in case * of symlinks (and also when transname replacements occur). * * The new code replaces the old recursive symlink resolution with * an iterative one (in case of non-nested symlink chains). It does * this with calls to <fs>_follow_link(). * As a side effect, dir_namei(), _namei() and follow_link() are now * replaced with a single function lookup_dentry() that can handle all * the special cases of the former code. * * With the new dcache, the pathname is stored at each inode, at least as * long as the refcount of the inode is positive. As a side effect, the * size of the dcache depends on the inode cache and thus is dynamic. * * [29-Apr-1998 C. Scott Ananian] Updated above description of symlink * resolution to correspond with current state of the code. * * Note that the symlink resolution is not *completely* iterative. * There is still a significant amount of tail- and mid- recursion in * the algorithm. Also, note that <fs>_readlink() is not used in * lookup_dentry(): lookup_dentry() on the result of <fs>_readlink() * may return different results than <fs>_follow_link(). Many virtual * filesystems (including /proc) exhibit this behavior. */ /* [24-Feb-97 T. Schoebel-Theuer] Side effects caused by new implementation: * New symlink semantics: when open() is called with flags O_CREAT | O_EXCL * and the name already exists in form of a symlink, try to create the new * name indicated by the symlink. The old code always complained that the * name already exists, due to not following the symlink even if its target * is nonexistent. The new semantics affects also mknod() and link() when * the name is a symlink pointing to a non-existent name. * * I don't know which semantics is the right one, since I have no access * to standards. But I found by trial that HP-UX 9.0 has the full "new" * semantics implemented, while SunOS 4.1.1 and Solaris (SunOS 5.4) have the * "old" one. Personally, I think the new semantics is much more logical. * Note that "ln old new" where "new" is a symlink pointing to a non-existing * file does succeed in both HP-UX and SunOs, but not in Solaris * and in the old Linux semantics. */ /* [16-Dec-97 Kevin Buhr] For security reasons, we change some symlink * semantics. See the comments in "open_namei" and "do_link" below. * * [10-Sep-98 Alan Modra] Another symlink change. */ /* [Feb-Apr 2000 AV] Complete rewrite. 
Rules for symlinks: * inside the path - always follow. * in the last component in creation/removal/renaming - never follow. * if LOOKUP_FOLLOW passed - follow. * if the pathname has trailing slashes - follow. * otherwise - don't follow. * (applied in that order). * * [Jun 2000 AV] Inconsistent behaviour of open() in case if flags==O_CREAT * restored for 2.4. This is the last surviving part of old 4.2BSD bug. * During the 2.4 we need to fix the userland stuff depending on it - * hopefully we will be able to get rid of that wart in 2.5. So far only * XEmacs seems to be relying on it... */ /* * [Sep 2001 AV] Single-semaphore locking scheme (kudos to David Holland) * implemented. Let's see if raised priority of ->s_vfs_rename_mutex gives * any extra contention... */ /* In order to reduce some races, while at the same time doing additional * checking and hopefully speeding things up, we copy filenames to the * kernel data space before using them.. * * POSIX.1 2.4: an empty pathname is invalid (ENOENT). * PATH_MAX includes the nul terminator --RR. */ #define EMBEDDED_NAME_MAX (PATH_MAX - offsetof(struct filename, iname)) struct filename * getname_flags(const char __user *filename, int flags, int *empty) { struct filename *result; char *kname; int len; result = audit_reusename(filename); if (result) return result; result = __getname(); if (unlikely(!result)) return ERR_PTR(-ENOMEM); /* * First, try to embed the struct filename inside the names_cache * allocation */ kname = (char *)result->iname; result->name = kname; len = strncpy_from_user(kname, filename, EMBEDDED_NAME_MAX); if (unlikely(len < 0)) { __putname(result); return ERR_PTR(len); } /* * Uh-oh. We have a name that's approaching PATH_MAX. Allocate a * separate struct filename so we can dedicate the entire * names_cache allocation for the pathname, and re-do the copy from * userland. */ if (unlikely(len == EMBEDDED_NAME_MAX)) { const size_t size = offsetof(struct filename, iname[1]); kname = (char *)result; /* * size is chosen that way we to guarantee that * result->iname[0] is within the same object and that * kname can't be equal to result->iname, no matter what. */ result = kzalloc(size, GFP_KERNEL); if (unlikely(!result)) { __putname(kname); return ERR_PTR(-ENOMEM); } result->name = kname; len = strncpy_from_user(kname, filename, PATH_MAX); if (unlikely(len < 0)) { __putname(kname); kfree(result); return ERR_PTR(len); } if (unlikely(len == PATH_MAX)) { __putname(kname); kfree(result); return ERR_PTR(-ENAMETOOLONG); } } result->refcnt = 1; /* The empty path is special. 
*/ if (unlikely(!len)) { if (empty) *empty = 1; if (!(flags & LOOKUP_EMPTY)) { putname(result); return ERR_PTR(-ENOENT); } } result->uptr = filename; result->aname = NULL; audit_getname(result); return result; } struct filename * getname(const char __user * filename) { return getname_flags(filename, 0, NULL); } struct filename * getname_kernel(const char * filename) { struct filename *result; int len = strlen(filename) + 1; result = __getname(); if (unlikely(!result)) return ERR_PTR(-ENOMEM); if (len <= EMBEDDED_NAME_MAX) { result->name = (char *)result->iname; } else if (len <= PATH_MAX) { struct filename *tmp; tmp = kmalloc(sizeof(*tmp), GFP_KERNEL); if (unlikely(!tmp)) { __putname(result); return ERR_PTR(-ENOMEM); } tmp->name = (char *)result; result = tmp; } else { __putname(result); return ERR_PTR(-ENAMETOOLONG); } memcpy((char *)result->name, filename, len); result->uptr = NULL; result->aname = NULL; result->refcnt = 1; audit_getname(result); return result; } void putname(struct filename *name) { BUG_ON(name->refcnt <= 0); if (--name->refcnt > 0) return; if (name->name != name->iname) { __putname(name->name); kfree(name); } else __putname(name); } static int check_acl(struct inode *inode, int mask) { #ifdef CONFIG_FS_POSIX_ACL struct posix_acl *acl; if (mask & MAY_NOT_BLOCK) { acl = get_cached_acl_rcu(inode, ACL_TYPE_ACCESS); if (!acl) return -EAGAIN; /* no ->get_acl() calls in RCU mode... */ if (is_uncached_acl(acl)) return -ECHILD; return posix_acl_permission(inode, acl, mask & ~MAY_NOT_BLOCK); } acl = get_acl(inode, ACL_TYPE_ACCESS); if (IS_ERR(acl)) return PTR_ERR(acl); if (acl) { int error = posix_acl_permission(inode, acl, mask); posix_acl_release(acl); return error; } #endif return -EAGAIN; } /* * This does the basic permission checking */ static int acl_permission_check(struct inode *inode, int mask) { unsigned int mode = inode->i_mode; if (likely(uid_eq(current_fsuid(), inode->i_uid))) mode >>= 6; else { if (IS_POSIXACL(inode) && (mode & S_IRWXG)) { int error = check_acl(inode, mask); if (error != -EAGAIN) return error; } if (in_group_p(inode->i_gid)) mode >>= 3; } /* * If the DACs are ok we don't need any capability check. */ if ((mask & ~mode & (MAY_READ | MAY_WRITE | MAY_EXEC)) == 0) return 0; return -EACCES; } /** * generic_permission - check for access rights on a Posix-like filesystem * @inode: inode to check access rights for * @mask: right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC, ...) * * Used to check for read/write/execute permissions on a file. * We use "fsuid" for this, letting us set arbitrary permissions * for filesystem access without changing the "normal" uids which * are used for other things. * * generic_permission is rcu-walk aware. It returns -ECHILD in case an rcu-walk * request cannot be satisfied (eg. requires blocking or too much complexity). * It would then be called again in ref-walk mode. */ int generic_permission(struct inode *inode, int mask) { int ret; /* * Do the basic permission checks. */ ret = acl_permission_check(inode, mask); if (ret != -EACCES) return ret; if (S_ISDIR(inode->i_mode)) { /* DACs are overridable for directories */ if (!(mask & MAY_WRITE)) if (capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH)) return 0; if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE)) return 0; return -EACCES; } /* * Searching includes executable on directories, else just read. 
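* (A worked, non-original example: a task holding only CAP_DAC_READ_SEARCH that asks for MAY_READ on a mode-0600 file owned by someone else is allowed here, while MAY_WRITE still needs CAP_DAC_OVERRIDE, and MAY_EXEC additionally needs at least one of the S_IXUGO bits set on the inode.)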
*/ mask &= MAY_READ | MAY_WRITE | MAY_EXEC; if (mask == MAY_READ) if (capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH)) return 0; /* * Read/write DACs are always overridable. * Executable DACs are overridable when there is * at least one exec bit set. */ if (!(mask & MAY_EXEC) || (inode->i_mode & S_IXUGO)) if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE)) return 0; return -EACCES; } EXPORT_SYMBOL(generic_permission); /* * We _really_ want to just do "generic_permission()" without * even looking at the inode->i_op values. So we keep a cache * flag in inode->i_opflags, that says "this has not special * permission function, use the fast case". */ static inline int do_inode_permission(struct inode *inode, int mask) { if (unlikely(!(inode->i_opflags & IOP_FASTPERM))) { if (likely(inode->i_op->permission)) return inode->i_op->permission(inode, mask); /* This gets set once for the inode lifetime */ spin_lock(&inode->i_lock); inode->i_opflags |= IOP_FASTPERM; spin_unlock(&inode->i_lock); } return generic_permission(inode, mask); } /** * __inode_permission - Check for access rights to a given inode * @inode: Inode to check permission on * @mask: Right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC) * * Check for read/write/execute permissions on an inode. * * When checking for MAY_APPEND, MAY_WRITE must also be set in @mask. * * This does not check for a read-only file system. You probably want * inode_permission(). */ int __inode_permission(struct inode *inode, int mask) { int retval; if (unlikely(mask & MAY_WRITE)) { /* * Nobody gets write access to an immutable file. */ if (IS_IMMUTABLE(inode)) return -EPERM; /* * Updating mtime will likely cause i_uid and i_gid to be * written back improperly if their true value is unknown * to the vfs. */ if (HAS_UNMAPPED_ID(inode)) return -EACCES; } retval = do_inode_permission(inode, mask); if (retval) return retval; retval = devcgroup_inode_permission(inode, mask); if (retval) return retval; return security_inode_permission(inode, mask); } EXPORT_SYMBOL(__inode_permission); /** * sb_permission - Check superblock-level permissions * @sb: Superblock of inode to check permission on * @inode: Inode to check permission on * @mask: Right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC) * * Separate out file-system wide checks from inode-specific permission checks. */ static int sb_permission(struct super_block *sb, struct inode *inode, int mask) { if (unlikely(mask & MAY_WRITE)) { umode_t mode = inode->i_mode; /* Nobody gets write access to a read-only fs. */ if ((sb->s_flags & MS_RDONLY) && (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) return -EROFS; } return 0; } /** * inode_permission - Check for access rights to a given inode * @inode: Inode to check permission on * @mask: Right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC) * * Check for read/write/execute permissions on an inode. We use fs[ug]id for * this, letting us set arbitrary permissions for filesystem access without * changing the "normal" UIDs which are used for other things. * * When checking for MAY_APPEND, MAY_WRITE must also be set in @mask. */ int inode_permission(struct inode *inode, int mask) { int retval; retval = sb_permission(inode->i_sb, inode, mask); if (retval) return retval; return __inode_permission(inode, mask); } EXPORT_SYMBOL(inode_permission); /** * path_get - get a reference to a path * @path: path to get the reference to * * Given a path increment the reference count to the dentry and the vfsmount. 
*/ void path_get(const struct path *path) { mntget(path->mnt); dget(path->dentry); } EXPORT_SYMBOL(path_get); /** * path_put - put a reference to a path * @path: path to put the reference to * * Given a path decrement the reference count to the dentry and the vfsmount. */ void path_put(const struct path *path) { dput(path->dentry); mntput(path->mnt); } EXPORT_SYMBOL(path_put); #define EMBEDDED_LEVELS 2 struct nameidata { struct path path; struct qstr last; struct path root; struct inode *inode; /* path.dentry.d_inode */ unsigned int flags; unsigned seq, m_seq; int last_type; unsigned depth; int total_link_count; struct saved { struct path link; struct delayed_call done; const char *name; unsigned seq; } *stack, internal[EMBEDDED_LEVELS]; struct filename *name; struct nameidata *saved; struct inode *link_inode; unsigned root_seq; int dfd; }; static void set_nameidata(struct nameidata *p, int dfd, struct filename *name) { struct nameidata *old = current->nameidata; p->stack = p->internal; p->dfd = dfd; p->name = name; p->total_link_count = old ? old->total_link_count : 0; p->saved = old; current->nameidata = p; } static void restore_nameidata(void) { struct nameidata *now = current->nameidata, *old = now->saved; current->nameidata = old; if (old) old->total_link_count = now->total_link_count; if (now->stack != now->internal) kfree(now->stack); } static int __nd_alloc_stack(struct nameidata *nd) { struct saved *p; if (nd->flags & LOOKUP_RCU) { p= kmalloc(MAXSYMLINKS * sizeof(struct saved), GFP_ATOMIC); if (unlikely(!p)) return -ECHILD; } else { p= kmalloc(MAXSYMLINKS * sizeof(struct saved), GFP_KERNEL); if (unlikely(!p)) return -ENOMEM; } memcpy(p, nd->internal, sizeof(nd->internal)); nd->stack = p; return 0; } /** * path_connected - Verify that a path->dentry is below path->mnt.mnt_root * @path: nameidate to verify * * Rename can sometimes move a file or directory outside of a bind * mount, path_connected allows those cases to be detected. 
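* (A concrete, illustrative scenario: bind-mount /a/b on /mnt, then rename /a/b/c to /a/c; a walker still holding the old dentry could otherwise name an object that is no longer beneath the bind mount's root, and the is_subdir() check below is what detects that escape.)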
*/ static bool path_connected(const struct path *path) { struct vfsmount *mnt = path->mnt; /* Only bind mounts can have disconnected paths */ if (mnt->mnt_root == mnt->mnt_sb->s_root) return true; return is_subdir(path->dentry, mnt->mnt_root); } static inline int nd_alloc_stack(struct nameidata *nd) { if (likely(nd->depth != EMBEDDED_LEVELS)) return 0; if (likely(nd->stack != nd->internal)) return 0; return __nd_alloc_stack(nd); } static void drop_links(struct nameidata *nd) { int i = nd->depth; while (i--) { struct saved *last = nd->stack + i; do_delayed_call(&last->done); clear_delayed_call(&last->done); } } static void terminate_walk(struct nameidata *nd) { drop_links(nd); if (!(nd->flags & LOOKUP_RCU)) { int i; path_put(&nd->path); for (i = 0; i < nd->depth; i++) path_put(&nd->stack[i].link); if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT)) { path_put(&nd->root); nd->root.mnt = NULL; } } else { nd->flags &= ~LOOKUP_RCU; if (!(nd->flags & LOOKUP_ROOT)) nd->root.mnt = NULL; rcu_read_unlock(); } nd->depth = 0; } /* path_put is needed afterwards regardless of success or failure */ static bool legitimize_path(struct nameidata *nd, struct path *path, unsigned seq) { int res = __legitimize_mnt(path->mnt, nd->m_seq); if (unlikely(res)) { if (res > 0) path->mnt = NULL; path->dentry = NULL; return false; } if (unlikely(!lockref_get_not_dead(&path->dentry->d_lockref))) { path->dentry = NULL; return false; } return !read_seqcount_retry(&path->dentry->d_seq, seq); } static bool legitimize_links(struct nameidata *nd) { int i; for (i = 0; i < nd->depth; i++) { struct saved *last = nd->stack + i; if (unlikely(!legitimize_path(nd, &last->link, last->seq))) { drop_links(nd); nd->depth = i + 1; return false; } } return true; } /* * Path walking has 2 modes, rcu-walk and ref-walk (see * Documentation/filesystems/path-lookup.txt). In situations when we can't * continue in RCU mode, we attempt to drop out of rcu-walk mode and grab * normal reference counts on dentries and vfsmounts to transition to ref-walk * mode. Refcounts are grabbed at the last known good point before rcu-walk * got stuck, so ref-walk may continue from there. If this is not successful * (eg. a seqcount has changed), then failure is returned and it's up to caller * to restart the path walk from the beginning in ref-walk mode. */ /** * unlazy_walk - try to switch to ref-walk mode. * @nd: nameidata pathwalk data * Returns: 0 on success, -ECHILD on failure * * unlazy_walk attempts to legitimize the current nd->path and nd->root * for ref-walk mode. * Must be called from rcu-walk context. * Nothing should touch nameidata between unlazy_walk() failure and * terminate_walk(). */ static int unlazy_walk(struct nameidata *nd) { struct dentry *parent = nd->path.dentry; BUG_ON(!(nd->flags & LOOKUP_RCU)); nd->flags &= ~LOOKUP_RCU; if (unlikely(!legitimize_links(nd))) goto out2; if (unlikely(!legitimize_path(nd, &nd->path, nd->seq))) goto out1; if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT)) { if (unlikely(!legitimize_path(nd, &nd->root, nd->root_seq))) goto out; } rcu_read_unlock(); BUG_ON(nd->inode != parent->d_inode); return 0; out2: nd->path.mnt = NULL; nd->path.dentry = NULL; out1: if (!(nd->flags & LOOKUP_ROOT)) nd->root.mnt = NULL; out: rcu_read_unlock(); return -ECHILD; } /** * unlazy_child - try to switch to ref-walk mode. 
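* (Illustrative ordering note, not part of the original kerneldoc: the function legitimizes the saved symlinks, then nd->path.mnt and nd->path.dentry, then takes a reference on the child dentry and re-checks its d_seq, and finally legitimizes nd->root when needed; any failure unwinds and returns -ECHILD so the caller restarts in ref-walk mode.)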
* @nd: nameidata pathwalk data * @dentry: child of nd->path.dentry * @seq: seq number to check dentry against * Returns: 0 on success, -ECHILD on failure * * unlazy_child attempts to legitimize the current nd->path, nd->root and dentry * for ref-walk mode. @dentry must be a path found by a do_lookup call on * @nd. Must be called from rcu-walk context. * Nothing should touch nameidata between unlazy_child() failure and * terminate_walk(). */ static int unlazy_child(struct nameidata *nd, struct dentry *dentry, unsigned seq) { BUG_ON(!(nd->flags & LOOKUP_RCU)); nd->flags &= ~LOOKUP_RCU; if (unlikely(!legitimize_links(nd))) goto out2; if (unlikely(!legitimize_mnt(nd->path.mnt, nd->m_seq))) goto out2; if (unlikely(!lockref_get_not_dead(&nd->path.dentry->d_lockref))) goto out1; /* * We need to move both the parent and the dentry from the RCU domain * to be properly refcounted. And the sequence number in the dentry * validates *both* dentry counters, since we checked the sequence * number of the parent after we got the child sequence number. So we * know the parent must still be valid if the child sequence number is */ if (unlikely(!lockref_get_not_dead(&dentry->d_lockref))) goto out; if (unlikely(read_seqcount_retry(&dentry->d_seq, seq))) { rcu_read_unlock(); dput(dentry); goto drop_root_mnt; } /* * Sequence counts matched. Now make sure that the root is * still valid and get it if required. */ if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT)) { if (unlikely(!legitimize_path(nd, &nd->root, nd->root_seq))) { rcu_read_unlock(); dput(dentry); return -ECHILD; } } rcu_read_unlock(); return 0; out2: nd->path.mnt = NULL; out1: nd->path.dentry = NULL; out: rcu_read_unlock(); drop_root_mnt: if (!(nd->flags & LOOKUP_ROOT)) nd->root.mnt = NULL; return -ECHILD; } static inline int d_revalidate(struct dentry *dentry, unsigned int flags) { if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE)) return dentry->d_op->d_revalidate(dentry, flags); else return 1; } /** * complete_walk - successful completion of path walk * @nd: pointer nameidata * * If we had been in RCU mode, drop out of it and legitimize nd->path. * Revalidate the final result, unless we'd already done that during * the path walk or the filesystem doesn't ask for it. Return 0 on * success, -error on failure. In case of failure caller does not * need to drop nd->path. 
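* (Illustrative note, not original text: the d_weak_revalidate() call below only runs when the walk jumped somewhere, i.e. LOOKUP_JUMPED was set by "..", by an absolute symlink or nd_jump_link(), or by crossing a mountpoint, and only for filesystems that set DCACHE_OP_WEAK_REVALIDATE, such as NFS.)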
*/ static int complete_walk(struct nameidata *nd) { struct dentry *dentry = nd->path.dentry; int status; if (nd->flags & LOOKUP_RCU) { if (!(nd->flags & LOOKUP_ROOT)) nd->root.mnt = NULL; if (unlikely(unlazy_walk(nd))) return -ECHILD; } if (likely(!(nd->flags & LOOKUP_JUMPED))) return 0; if (likely(!(dentry->d_flags & DCACHE_OP_WEAK_REVALIDATE))) return 0; status = dentry->d_op->d_weak_revalidate(dentry, nd->flags); if (status > 0) return 0; if (!status) status = -ESTALE; return status; } static void set_root(struct nameidata *nd) { struct fs_struct *fs = current->fs; if (nd->flags & LOOKUP_RCU) { unsigned seq; do { seq = read_seqcount_begin(&fs->seq); nd->root = fs->root; nd->root_seq = __read_seqcount_begin(&nd->root.dentry->d_seq); } while (read_seqcount_retry(&fs->seq, seq)); } else { get_fs_root(fs, &nd->root); } } static void path_put_conditional(struct path *path, struct nameidata *nd) { dput(path->dentry); if (path->mnt != nd->path.mnt) mntput(path->mnt); } static inline void path_to_nameidata(const struct path *path, struct nameidata *nd) { if (!(nd->flags & LOOKUP_RCU)) { dput(nd->path.dentry); if (nd->path.mnt != path->mnt) mntput(nd->path.mnt); } nd->path.mnt = path->mnt; nd->path.dentry = path->dentry; } static int nd_jump_root(struct nameidata *nd) { if (nd->flags & LOOKUP_RCU) { struct dentry *d; nd->path = nd->root; d = nd->path.dentry; nd->inode = d->d_inode; nd->seq = nd->root_seq; if (unlikely(read_seqcount_retry(&d->d_seq, nd->seq))) return -ECHILD; } else { path_put(&nd->path); nd->path = nd->root; path_get(&nd->path); nd->inode = nd->path.dentry->d_inode; } nd->flags |= LOOKUP_JUMPED; return 0; } /* * Helper to directly jump to a known parsed path from ->get_link, * caller must have taken a reference to path beforehand. */ void nd_jump_link(struct path *path) { struct nameidata *nd = current->nameidata; path_put(&nd->path); nd->path = *path; nd->inode = nd->path.dentry->d_inode; nd->flags |= LOOKUP_JUMPED; } static inline void put_link(struct nameidata *nd) { struct saved *last = nd->stack + --nd->depth; do_delayed_call(&last->done); if (!(nd->flags & LOOKUP_RCU)) path_put(&last->link); } int sysctl_protected_symlinks __read_mostly = 0; int sysctl_protected_hardlinks __read_mostly = 0; /** * may_follow_link - Check symlink following for unsafe situations * @nd: nameidata pathwalk data * * In the case of the sysctl_protected_symlinks sysctl being enabled, * CAP_DAC_OVERRIDE needs to be specifically ignored if the symlink is * in a sticky world-writable directory. This is to protect privileged * processes from failing races against path names that may change out * from under them by way of other users creating malicious symlinks. * It will permit symlinks to be followed only when outside a sticky * world-writable directory, or when the uid of the symlink and follower * match, or when the directory owner matches the symlink's owner. * * Returns 0 if following the symlink is allowed, -ve on error. */ static inline int may_follow_link(struct nameidata *nd) { const struct inode *inode; const struct inode *parent; kuid_t puid; if (!sysctl_protected_symlinks) return 0; /* Allowed if owner and follower match. */ inode = nd->link_inode; if (uid_eq(current_cred()->fsuid, inode->i_uid)) return 0; /* Allowed if parent directory not sticky and world-writable. */ parent = nd->inode; if ((parent->i_mode & (S_ISVTX|S_IWOTH)) != (S_ISVTX|S_IWOTH)) return 0; /* Allowed if parent directory and link owner match. 
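* (For instance, purely as an illustration: with this sysctl enabled, a symlink that lives in a mode-1777 directory such as /tmp is followed only when it is owned by the follower or by the directory's owner, typically root for /tmp; any other combination is refused with -EACCES and logged via audit_log_link_denied().)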
*/ puid = parent->i_uid; if (uid_valid(puid) && uid_eq(puid, inode->i_uid)) return 0; if (nd->flags & LOOKUP_RCU) return -ECHILD; audit_log_link_denied("follow_link", &nd->stack[0].link); return -EACCES; } /** * safe_hardlink_source - Check for safe hardlink conditions * @inode: the source inode to hardlink from * * Return false if at least one of the following conditions: * - inode is not a regular file * - inode is setuid * - inode is setgid and group-exec * - access failure for read and write * * Otherwise returns true. */ static bool safe_hardlink_source(struct inode *inode) { umode_t mode = inode->i_mode; /* Special files should not get pinned to the filesystem. */ if (!S_ISREG(mode)) return false; /* Setuid files should not get pinned to the filesystem. */ if (mode & S_ISUID) return false; /* Executable setgid files should not get pinned to the filesystem. */ if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) return false; /* Hardlinking to unreadable or unwritable sources is dangerous. */ if (inode_permission(inode, MAY_READ | MAY_WRITE)) return false; return true; } /** * may_linkat - Check permissions for creating a hardlink * @link: the source to hardlink from * * Block hardlink when all of: * - sysctl_protected_hardlinks enabled * - fsuid does not match inode * - hardlink source is unsafe (see safe_hardlink_source() above) * - not CAP_FOWNER in a namespace with the inode owner uid mapped * * Returns 0 if successful, -ve on error. */ static int may_linkat(struct path *link) { struct inode *inode; if (!sysctl_protected_hardlinks) return 0; inode = link->dentry->d_inode; /* Source inode owner (or CAP_FOWNER) can hardlink all they like, * otherwise, it must be a safe source. */ if (safe_hardlink_source(inode) || inode_owner_or_capable(inode)) return 0; audit_log_link_denied("linkat", link); return -EPERM; } static __always_inline const char *get_link(struct nameidata *nd) { struct saved *last = nd->stack + nd->depth - 1; struct dentry *dentry = last->link.dentry; struct inode *inode = nd->link_inode; int error; const char *res; if (!(nd->flags & LOOKUP_RCU)) { touch_atime(&last->link); cond_resched(); } else if (atime_needs_update_rcu(&last->link, inode)) { if (unlikely(unlazy_walk(nd))) return ERR_PTR(-ECHILD); touch_atime(&last->link); } error = security_inode_follow_link(dentry, inode, nd->flags & LOOKUP_RCU); if (unlikely(error)) return ERR_PTR(error); nd->last_type = LAST_BIND; res = inode->i_link; if (!res) { const char * (*get)(struct dentry *, struct inode *, struct delayed_call *); get = inode->i_op->get_link; if (nd->flags & LOOKUP_RCU) { res = get(NULL, inode, &last->done); if (res == ERR_PTR(-ECHILD)) { if (unlikely(unlazy_walk(nd))) return ERR_PTR(-ECHILD); res = get(dentry, inode, &last->done); } } else { res = get(dentry, inode, &last->done); } if (IS_ERR_OR_NULL(res)) return res; } if (*res == '/') { if (!nd->root.mnt) set_root(nd); if (unlikely(nd_jump_root(nd))) return ERR_PTR(-ECHILD); while (unlikely(*++res == '/')) ; } if (!*res) res = NULL; return res; } /* * follow_up - Find the mountpoint of path's vfsmount * * Given a path, find the mountpoint of its source file system. * Replace @path with the path of the mountpoint in the parent mount. * Up is towards /. * * Return 1 if we went up a level and 0 if we were already at the * root. 
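* (A hypothetical usage sketch: a caller that wants to climb from an arbitrary path up to the root of the mount tree can simply loop "while (follow_up(&path)) ;", since each successful call replaces @path with the mountpoint in the parent mount and the call returns 0 once the current mount has no parent.)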
*/ int follow_up(struct path *path) { struct mount *mnt = real_mount(path->mnt); struct mount *parent; struct dentry *mountpoint; read_seqlock_excl(&mount_lock); parent = mnt->mnt_parent; if (parent == mnt) { read_sequnlock_excl(&mount_lock); return 0; } mntget(&parent->mnt); mountpoint = dget(mnt->mnt_mountpoint); read_sequnlock_excl(&mount_lock); dput(path->dentry); path->dentry = mountpoint; mntput(path->mnt); path->mnt = &parent->mnt; return 1; } EXPORT_SYMBOL(follow_up); /* * Perform an automount * - return -EISDIR to tell follow_managed() to stop and return the path we * were called with. */ static int follow_automount(struct path *path, struct nameidata *nd, bool *need_mntput) { struct vfsmount *mnt; int err; if (!path->dentry->d_op || !path->dentry->d_op->d_automount) return -EREMOTE; /* We don't want to mount if someone's just doing a stat - * unless they're stat'ing a directory and appended a '/' to * the name. * * We do, however, want to mount if someone wants to open or * create a file of any type under the mountpoint, wants to * traverse through the mountpoint or wants to open the * mounted directory. Also, autofs may mark negative dentries * as being automount points. These will need the attentions * of the daemon to instantiate them before they can be used. */ if (!(nd->flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY | LOOKUP_OPEN | LOOKUP_CREATE | LOOKUP_AUTOMOUNT)) && path->dentry->d_inode) return -EISDIR; if (path->dentry->d_sb->s_user_ns != &init_user_ns) return -EACCES; nd->total_link_count++; if (nd->total_link_count >= 40) return -ELOOP; mnt = path->dentry->d_op->d_automount(path); if (IS_ERR(mnt)) { /* * The filesystem is allowed to return -EISDIR here to indicate * it doesn't want to automount. For instance, autofs would do * this so that its userspace daemon can mount on this dentry. * * However, we can only permit this if it's a terminal point in * the path being looked up; if it wasn't then the remainder of * the path is inaccessible and we should say so. */ if (PTR_ERR(mnt) == -EISDIR && (nd->flags & LOOKUP_PARENT)) return -EREMOTE; return PTR_ERR(mnt); } if (!mnt) /* mount collision */ return 0; if (!*need_mntput) { /* lock_mount() may release path->mnt on error */ mntget(path->mnt); *need_mntput = true; } err = finish_automount(mnt, path); switch (err) { case -EBUSY: /* Someone else made a mount here whilst we were busy */ return 0; case 0: path_put(path); path->mnt = mnt; path->dentry = dget(mnt->mnt_root); return 0; default: return err; } } /* * Handle a dentry that is managed in some way. * - Flagged for transit management (autofs) * - Flagged as mountpoint * - Flagged as automount point * * This may only be called in refwalk mode. * * Serialization is taken care of in namespace.c */ static int follow_managed(struct path *path, struct nameidata *nd) { struct vfsmount *mnt = path->mnt; /* held by caller, must be left alone */ unsigned managed; bool need_mntput = false; int ret = 0; /* Given that we're not holding a lock here, we retain the value in a * local variable for each dentry as we look at it so that we don't see * the components of that value change under us */ while (managed = ACCESS_ONCE(path->dentry->d_flags), managed &= DCACHE_MANAGED_DENTRY, unlikely(managed != 0)) { /* Allow the filesystem to manage the transit without i_mutex * being held. 
*/ if (managed & DCACHE_MANAGE_TRANSIT) { BUG_ON(!path->dentry->d_op); BUG_ON(!path->dentry->d_op->d_manage); ret = path->dentry->d_op->d_manage(path, false); if (ret < 0) break; } /* Transit to a mounted filesystem. */ if (managed & DCACHE_MOUNTED) { struct vfsmount *mounted = lookup_mnt(path); if (mounted) { dput(path->dentry); if (need_mntput) mntput(path->mnt); path->mnt = mounted; path->dentry = dget(mounted->mnt_root); need_mntput = true; continue; } /* Something is mounted on this dentry in another * namespace and/or whatever was mounted there in this * namespace got unmounted before lookup_mnt() could * get it */ } /* Handle an automount point */ if (managed & DCACHE_NEED_AUTOMOUNT) { ret = follow_automount(path, nd, &need_mntput); if (ret < 0) break; continue; } /* We didn't change the current path point */ break; } if (need_mntput && path->mnt == mnt) mntput(path->mnt); if (ret == -EISDIR || !ret) ret = 1; if (need_mntput) nd->flags |= LOOKUP_JUMPED; if (unlikely(ret < 0)) path_put_conditional(path, nd); return ret; } int follow_down_one(struct path *path) { struct vfsmount *mounted; mounted = lookup_mnt(path); if (mounted) { dput(path->dentry); mntput(path->mnt); path->mnt = mounted; path->dentry = dget(mounted->mnt_root); return 1; } return 0; } EXPORT_SYMBOL(follow_down_one); static inline int managed_dentry_rcu(const struct path *path) { return (path->dentry->d_flags & DCACHE_MANAGE_TRANSIT) ? path->dentry->d_op->d_manage(path, true) : 0; } /* * Try to skip to top of mountpoint pile in rcuwalk mode. Fail if * we meet a managed dentry that would need blocking. */ static bool __follow_mount_rcu(struct nameidata *nd, struct path *path, struct inode **inode, unsigned *seqp) { for (;;) { struct mount *mounted; /* * Don't forget we might have a non-mountpoint managed dentry * that wants to block transit. */ switch (managed_dentry_rcu(path)) { case -ECHILD: default: return false; case -EISDIR: return true; case 0: break; } if (!d_mountpoint(path->dentry)) return !(path->dentry->d_flags & DCACHE_NEED_AUTOMOUNT); mounted = __lookup_mnt(path->mnt, path->dentry); if (!mounted) break; path->mnt = &mounted->mnt; path->dentry = mounted->mnt.mnt_root; nd->flags |= LOOKUP_JUMPED; *seqp = read_seqcount_begin(&path->dentry->d_seq); /* * Update the inode too. We don't need to re-check the * dentry sequence number here after this d_inode read, * because a mount-point is always pinned. 
*/ *inode = path->dentry->d_inode; } return !read_seqretry(&mount_lock, nd->m_seq) && !(path->dentry->d_flags & DCACHE_NEED_AUTOMOUNT); } static int follow_dotdot_rcu(struct nameidata *nd) { struct inode *inode = nd->inode; while (1) { if (path_equal(&nd->path, &nd->root)) break; if (nd->path.dentry != nd->path.mnt->mnt_root) { struct dentry *old = nd->path.dentry; struct dentry *parent = old->d_parent; unsigned seq; inode = parent->d_inode; seq = read_seqcount_begin(&parent->d_seq); if (unlikely(read_seqcount_retry(&old->d_seq, nd->seq))) return -ECHILD; nd->path.dentry = parent; nd->seq = seq; if (unlikely(!path_connected(&nd->path))) return -ENOENT; break; } else { struct mount *mnt = real_mount(nd->path.mnt); struct mount *mparent = mnt->mnt_parent; struct dentry *mountpoint = mnt->mnt_mountpoint; struct inode *inode2 = mountpoint->d_inode; unsigned seq = read_seqcount_begin(&mountpoint->d_seq); if (unlikely(read_seqretry(&mount_lock, nd->m_seq))) return -ECHILD; if (&mparent->mnt == nd->path.mnt) break; /* we know that mountpoint was pinned */ nd->path.dentry = mountpoint; nd->path.mnt = &mparent->mnt; inode = inode2; nd->seq = seq; } } while (unlikely(d_mountpoint(nd->path.dentry))) { struct mount *mounted; mounted = __lookup_mnt(nd->path.mnt, nd->path.dentry); if (unlikely(read_seqretry(&mount_lock, nd->m_seq))) return -ECHILD; if (!mounted) break; nd->path.mnt = &mounted->mnt; nd->path.dentry = mounted->mnt.mnt_root; inode = nd->path.dentry->d_inode; nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq); } nd->inode = inode; return 0; } /* * Follow down to the covering mount currently visible to userspace. At each * point, the filesystem owning that dentry may be queried as to whether the * caller is permitted to proceed or not. */ int follow_down(struct path *path) { unsigned managed; int ret; while (managed = ACCESS_ONCE(path->dentry->d_flags), unlikely(managed & DCACHE_MANAGED_DENTRY)) { /* Allow the filesystem to manage the transit without i_mutex * being held. * * We indicate to the filesystem if someone is trying to mount * something here. This gives autofs the chance to deny anyone * other than its daemon the right to mount on its * superstructure. * * The filesystem may sleep at this point. */ if (managed & DCACHE_MANAGE_TRANSIT) { BUG_ON(!path->dentry->d_op); BUG_ON(!path->dentry->d_op->d_manage); ret = path->dentry->d_op->d_manage(path, false); if (ret < 0) return ret == -EISDIR ? 0 : ret; } /* Transit to a mounted filesystem. */ if (managed & DCACHE_MOUNTED) { struct vfsmount *mounted = lookup_mnt(path); if (!mounted) break; dput(path->dentry); mntput(path->mnt); path->mnt = mounted; path->dentry = dget(mounted->mnt_root); continue; } /* Don't handle automount points here */ break; } return 0; } EXPORT_SYMBOL(follow_down); /* * Skip to top of mountpoint pile in refwalk mode for follow_dotdot() */ static void follow_mount(struct path *path) { while (d_mountpoint(path->dentry)) { struct vfsmount *mounted = lookup_mnt(path); if (!mounted) break; dput(path->dentry); mntput(path->mnt); path->mnt = mounted; path->dentry = dget(mounted->mnt_root); } } static int path_parent_directory(struct path *path) { struct dentry *old = path->dentry; /* rare case of legitimate dget_parent()... 
*/ path->dentry = dget_parent(path->dentry); dput(old); if (unlikely(!path_connected(path))) return -ENOENT; return 0; } static int follow_dotdot(struct nameidata *nd) { while(1) { if (nd->path.dentry == nd->root.dentry && nd->path.mnt == nd->root.mnt) { break; } if (nd->path.dentry != nd->path.mnt->mnt_root) { int ret = path_parent_directory(&nd->path); if (ret) return ret; break; } if (!follow_up(&nd->path)) break; } follow_mount(&nd->path); nd->inode = nd->path.dentry->d_inode; return 0; } /* * This looks up the name in dcache and possibly revalidates the found dentry. * NULL is returned if the dentry does not exist in the cache. */ static struct dentry *lookup_dcache(const struct qstr *name, struct dentry *dir, unsigned int flags) { struct dentry *dentry = d_lookup(dir, name); if (dentry) { int error = d_revalidate(dentry, flags); if (unlikely(error <= 0)) { if (!error) d_invalidate(dentry); dput(dentry); return ERR_PTR(error); } } return dentry; } /* * Call i_op->lookup on the dentry. The dentry must be negative and * unhashed. * * dir->d_inode->i_mutex must be held */ static struct dentry *lookup_real(struct inode *dir, struct dentry *dentry, unsigned int flags) { struct dentry *old; /* Don't create child dentry for a dead directory. */ if (unlikely(IS_DEADDIR(dir))) { dput(dentry); return ERR_PTR(-ENOENT); } old = dir->i_op->lookup(dir, dentry, flags); if (unlikely(old)) { dput(dentry); dentry = old; } return dentry; } static struct dentry *__lookup_hash(const struct qstr *name, struct dentry *base, unsigned int flags) { struct dentry *dentry = lookup_dcache(name, base, flags); if (dentry) return dentry; dentry = d_alloc(base, name); if (unlikely(!dentry)) return ERR_PTR(-ENOMEM); return lookup_real(base->d_inode, dentry, flags); } static int lookup_fast(struct nameidata *nd, struct path *path, struct inode **inode, unsigned *seqp) { struct vfsmount *mnt = nd->path.mnt; struct dentry *dentry, *parent = nd->path.dentry; int status = 1; int err; /* * Rename seqlock is not required here because in the off chance * of a false negative due to a concurrent rename, the caller is * going to fall back to non-racy lookup. */ if (nd->flags & LOOKUP_RCU) { unsigned seq; bool negative; dentry = __d_lookup_rcu(parent, &nd->last, &seq); if (unlikely(!dentry)) { if (unlazy_walk(nd)) return -ECHILD; return 0; } /* * This sequence count validates that the inode matches * the dentry name information from lookup. */ *inode = d_backing_inode(dentry); negative = d_is_negative(dentry); if (unlikely(read_seqcount_retry(&dentry->d_seq, seq))) return -ECHILD; /* * This sequence count validates that the parent had no * changes while we did the lookup of the dentry above. * * The memory barrier in read_seqcount_begin of child is * enough, we can use __read_seqcount_retry here. */ if (unlikely(__read_seqcount_retry(&parent->d_seq, nd->seq))) return -ECHILD; *seqp = seq; status = d_revalidate(dentry, nd->flags); if (likely(status > 0)) { /* * Note: do negative dentry check after revalidation in * case that drops it. 
*/ if (unlikely(negative)) return -ENOENT; path->mnt = mnt; path->dentry = dentry; if (likely(__follow_mount_rcu(nd, path, inode, seqp))) return 1; } if (unlazy_child(nd, dentry, seq)) return -ECHILD; if (unlikely(status == -ECHILD)) /* we'd been told to redo it in non-rcu mode */ status = d_revalidate(dentry, nd->flags); } else { dentry = __d_lookup(parent, &nd->last); if (unlikely(!dentry)) return 0; status = d_revalidate(dentry, nd->flags); } if (unlikely(status <= 0)) { if (!status) d_invalidate(dentry); dput(dentry); return status; } if (unlikely(d_is_negative(dentry))) { dput(dentry); return -ENOENT; } path->mnt = mnt; path->dentry = dentry; err = follow_managed(path, nd); if (likely(err > 0)) *inode = d_backing_inode(path->dentry); return err; } /* Fast lookup failed, do it the slow way */ static struct dentry *lookup_slow(const struct qstr *name, struct dentry *dir, unsigned int flags) { struct dentry *dentry = ERR_PTR(-ENOENT), *old; struct inode *inode = dir->d_inode; DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); inode_lock_shared(inode); /* Don't go there if it's already dead */ if (unlikely(IS_DEADDIR(inode))) goto out; again: dentry = d_alloc_parallel(dir, name, &wq); if (IS_ERR(dentry)) goto out; if (unlikely(!d_in_lookup(dentry))) { if (!(flags & LOOKUP_NO_REVAL)) { int error = d_revalidate(dentry, flags); if (unlikely(error <= 0)) { if (!error) { d_invalidate(dentry); dput(dentry); goto again; } dput(dentry); dentry = ERR_PTR(error); } } } else { old = inode->i_op->lookup(inode, dentry, flags); d_lookup_done(dentry); if (unlikely(old)) { dput(dentry); dentry = old; } } out: inode_unlock_shared(inode); return dentry; } static inline int may_lookup(struct nameidata *nd) { if (nd->flags & LOOKUP_RCU) { int err = inode_permission(nd->inode, MAY_EXEC|MAY_NOT_BLOCK); if (err != -ECHILD) return err; if (unlazy_walk(nd)) return -ECHILD; } return inode_permission(nd->inode, MAY_EXEC); } static inline int handle_dots(struct nameidata *nd, int type) { if (type == LAST_DOTDOT) { if (!nd->root.mnt) set_root(nd); if (nd->flags & LOOKUP_RCU) { return follow_dotdot_rcu(nd); } else return follow_dotdot(nd); } return 0; } static int pick_link(struct nameidata *nd, struct path *link, struct inode *inode, unsigned seq) { int error; struct saved *last; if (unlikely(nd->total_link_count++ >= MAXSYMLINKS)) { path_to_nameidata(link, nd); return -ELOOP; } if (!(nd->flags & LOOKUP_RCU)) { if (link->mnt == nd->path.mnt) mntget(link->mnt); } error = nd_alloc_stack(nd); if (unlikely(error)) { if (error == -ECHILD) { if (unlikely(!legitimize_path(nd, link, seq))) { drop_links(nd); nd->depth = 0; nd->flags &= ~LOOKUP_RCU; nd->path.mnt = NULL; nd->path.dentry = NULL; if (!(nd->flags & LOOKUP_ROOT)) nd->root.mnt = NULL; rcu_read_unlock(); } else if (likely(unlazy_walk(nd)) == 0) error = nd_alloc_stack(nd); } if (error) { path_put(link); return error; } } last = nd->stack + nd->depth++; last->link = *link; clear_delayed_call(&last->done); nd->link_inode = inode; last->seq = seq; return 1; } enum {WALK_FOLLOW = 1, WALK_MORE = 2}; /* * Do we need to follow links? We _really_ want to be able * to do this check without having to look at inode->i_op, * so we keep a cache of "no, this doesn't need follow_link" * for the common case. 
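* (Reader-facing summary, not original text: step_into() below returns 0 when the component is entered in place, a positive value when pick_link() has pushed a symlink onto nd->stack for the caller to resolve, and a negative errno on failure; WALK_FOLLOW makes it follow a symlink even without LOOKUP_FOLLOW, and WALK_MORE means further components follow, so the link currently on top of the stack is not dropped yet.)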
*/ static inline int step_into(struct nameidata *nd, struct path *path, int flags, struct inode *inode, unsigned seq) { if (!(flags & WALK_MORE) && nd->depth) put_link(nd); if (likely(!d_is_symlink(path->dentry)) || !(flags & WALK_FOLLOW || nd->flags & LOOKUP_FOLLOW)) { /* not a symlink or should not follow */ path_to_nameidata(path, nd); nd->inode = inode; nd->seq = seq; return 0; } /* make sure that d_is_symlink above matches inode */ if (nd->flags & LOOKUP_RCU) { if (read_seqcount_retry(&path->dentry->d_seq, seq)) return -ECHILD; } return pick_link(nd, path, inode, seq); } static int walk_component(struct nameidata *nd, int flags) { struct path path; struct inode *inode; unsigned seq; int err; /* * "." and ".." are special - ".." especially so because it has * to be able to know about the current root directory and * parent relationships. */ if (unlikely(nd->last_type != LAST_NORM)) { err = handle_dots(nd, nd->last_type); if (!(flags & WALK_MORE) && nd->depth) put_link(nd); return err; } err = lookup_fast(nd, &path, &inode, &seq); if (unlikely(err <= 0)) { if (err < 0) return err; path.dentry = lookup_slow(&nd->last, nd->path.dentry, nd->flags); if (IS_ERR(path.dentry)) return PTR_ERR(path.dentry); path.mnt = nd->path.mnt; err = follow_managed(&path, nd); if (unlikely(err < 0)) return err; if (unlikely(d_is_negative(path.dentry))) { path_to_nameidata(&path, nd); return -ENOENT; } seq = 0; /* we are already out of RCU mode */ inode = d_backing_inode(path.dentry); } return step_into(nd, &path, flags, inode, seq); } /* * We can do the critical dentry name comparison and hashing * operations one word at a time, but we are limited to: * * - Architectures with fast unaligned word accesses. We could * do a "get_unaligned()" if this helps and is sufficiently * fast. * * - non-CONFIG_DEBUG_PAGEALLOC configurations (so that we * do not trap on the (extremely unlikely) case of a page * crossing operation. * * - Furthermore, we need an efficient 64-bit compile for the * 64-bit case in order to generate the "number of bytes in * the final mask". Again, that could be replaced with a * efficient population count instruction or similar. */ #ifdef CONFIG_DCACHE_WORD_ACCESS #include <asm/word-at-a-time.h> #ifdef HASH_MIX /* Architecture provides HASH_MIX and fold_hash() in <asm/hash.h> */ #elif defined(CONFIG_64BIT) /* * Register pressure in the mixing function is an issue, particularly * on 32-bit x86, but almost any function requires one state value and * one temporary. Instead, use a function designed for two state values * and no temporaries. * * This function cannot create a collision in only two iterations, so * we have two iterations to achieve avalanche. In those two iterations, * we have six layers of mixing, which is enough to spread one bit's * influence out to 2^6 = 64 state bits. * * Rotate constants are scored by considering either 64 one-bit input * deltas or 64*63/2 = 2016 two-bit input deltas, and finding the * probability of that delta causing a change to each of the 128 output * bits, using a sample of random initial states. * * The Shannon entropy of the computed probabilities is then summed * to produce a score. Ideally, any input change has a 50% chance of * toggling any given output bit. 
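* (Expanded for reference, one 64-bit round of HASH_MIX(x, y, a) below is simply: x ^= a; y ^= x; x = rol64(x, 12); x += y; y = rol64(y, 45); y *= 9; the score table that follows measures exactly these rounds.)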
* * Mixing scores (in bits) for (12,45): * Input delta: 1-bit 2-bit * 1 round: 713.3 42542.6 * 2 rounds: 2753.7 140389.8 * 3 rounds: 5954.1 233458.2 * 4 rounds: 7862.6 256672.2 * Perfect: 8192 258048 * (64*128) (64*63/2 * 128) */ #define HASH_MIX(x, y, a) \ ( x ^= (a), \ y ^= x, x = rol64(x,12),\ x += y, y = rol64(y,45),\ y *= 9 ) /* * Fold two longs into one 32-bit hash value. This must be fast, but * latency isn't quite as critical, as there is a fair bit of additional * work done before the hash value is used. */ static inline unsigned int fold_hash(unsigned long x, unsigned long y) { y ^= x * GOLDEN_RATIO_64; y *= GOLDEN_RATIO_64; return y >> 32; } #else /* 32-bit case */ /* * Mixing scores (in bits) for (7,20): * Input delta: 1-bit 2-bit * 1 round: 330.3 9201.6 * 2 rounds: 1246.4 25475.4 * 3 rounds: 1907.1 31295.1 * 4 rounds: 2042.3 31718.6 * Perfect: 2048 31744 * (32*64) (32*31/2 * 64) */ #define HASH_MIX(x, y, a) \ ( x ^= (a), \ y ^= x, x = rol32(x, 7),\ x += y, y = rol32(y,20),\ y *= 9 ) static inline unsigned int fold_hash(unsigned long x, unsigned long y) { /* Use arch-optimized multiply if one exists */ return __hash_32(y ^ __hash_32(x)); } #endif /* * Return the hash of a string of known length. This is carfully * designed to match hash_name(), which is the more critical function. * In particular, we must end by hashing a final word containing 0..7 * payload bytes, to match the way that hash_name() iterates until it * finds the delimiter after the name. */ unsigned int full_name_hash(const void *salt, const char *name, unsigned int len) { unsigned long a, x = 0, y = (unsigned long)salt; for (;;) { if (!len) goto done; a = load_unaligned_zeropad(name); if (len < sizeof(unsigned long)) break; HASH_MIX(x, y, a); name += sizeof(unsigned long); len -= sizeof(unsigned long); } x ^= a & bytemask_from_count(len); done: return fold_hash(x, y); } EXPORT_SYMBOL(full_name_hash); /* Return the "hash_len" (hash and length) of a null-terminated string */ u64 hashlen_string(const void *salt, const char *name) { unsigned long a = 0, x = 0, y = (unsigned long)salt; unsigned long adata, mask, len; const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS; len = 0; goto inside; do { HASH_MIX(x, y, a); len += sizeof(unsigned long); inside: a = load_unaligned_zeropad(name+len); } while (!has_zero(a, &adata, &constants)); adata = prep_zero_mask(a, adata, &constants); mask = create_zero_mask(adata); x ^= a & zero_bytemask(mask); return hashlen_create(fold_hash(x, y), len + find_zero(mask)); } EXPORT_SYMBOL(hashlen_string); /* * Calculate the length and hash of the path component, and * return the "hash_len" as the result. 
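* (Illustrative example, not from the original comment: for the input "usr/bin" the word-at-a-time loop below stops at the '/', and the returned u64 packs the component length 3 into the upper 32 bits and the folded hash of "usr" into the lower 32 bits, the same layout hashlen_create() produces and hashlen_len() unpacks in link_path_walk().)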
*/ static inline u64 hash_name(const void *salt, const char *name) { unsigned long a = 0, b, x = 0, y = (unsigned long)salt; unsigned long adata, bdata, mask, len; const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS; len = 0; goto inside; do { HASH_MIX(x, y, a); len += sizeof(unsigned long); inside: a = load_unaligned_zeropad(name+len); b = a ^ REPEAT_BYTE('/'); } while (!(has_zero(a, &adata, &constants) | has_zero(b, &bdata, &constants))); adata = prep_zero_mask(a, adata, &constants); bdata = prep_zero_mask(b, bdata, &constants); mask = create_zero_mask(adata | bdata); x ^= a & zero_bytemask(mask); return hashlen_create(fold_hash(x, y), len + find_zero(mask)); } #else /* !CONFIG_DCACHE_WORD_ACCESS: Slow, byte-at-a-time version */ /* Return the hash of a string of known length */ unsigned int full_name_hash(const void *salt, const char *name, unsigned int len) { unsigned long hash = init_name_hash(salt); while (len--) hash = partial_name_hash((unsigned char)*name++, hash); return end_name_hash(hash); } EXPORT_SYMBOL(full_name_hash); /* Return the "hash_len" (hash and length) of a null-terminated string */ u64 hashlen_string(const void *salt, const char *name) { unsigned long hash = init_name_hash(salt); unsigned long len = 0, c; c = (unsigned char)*name; while (c) { len++; hash = partial_name_hash(c, hash); c = (unsigned char)name[len]; } return hashlen_create(end_name_hash(hash), len); } EXPORT_SYMBOL(hashlen_string); /* * We know there's a real path component here of at least * one character. */ static inline u64 hash_name(const void *salt, const char *name) { unsigned long hash = init_name_hash(salt); unsigned long len = 0, c; c = (unsigned char)*name; do { len++; hash = partial_name_hash(c, hash); c = (unsigned char)name[len]; } while (c && c != '/'); return hashlen_create(end_name_hash(hash), len); } #endif /* * Name resolution. * This is the basic name resolution function, turning a pathname into * the final dentry. We expect 'base' to be positive and a directory. * * Returns 0 and nd will have valid dentry and mnt on success. * Returns error and drops reference to input namei data on failure. */ static int link_path_walk(const char *name, struct nameidata *nd) { int err; while (*name=='/') name++; if (!*name) return 0; /* At this point we know we have a real path component. */ for(;;) { u64 hash_len; int type; err = may_lookup(nd); if (err) return err; hash_len = hash_name(nd->path.dentry, name); type = LAST_NORM; if (name[0] == '.') switch (hashlen_len(hash_len)) { case 2: if (name[1] == '.') { type = LAST_DOTDOT; nd->flags |= LOOKUP_JUMPED; } break; case 1: type = LAST_DOT; } if (likely(type == LAST_NORM)) { struct dentry *parent = nd->path.dentry; nd->flags &= ~LOOKUP_JUMPED; if (unlikely(parent->d_flags & DCACHE_OP_HASH)) { struct qstr this = { { .hash_len = hash_len }, .name = name }; err = parent->d_op->d_hash(parent, &this); if (err < 0) return err; hash_len = this.hash_len; name = this.name; } } nd->last.hash_len = hash_len; nd->last.name = name; nd->last_type = type; name += hashlen_len(hash_len); if (!*name) goto OK; /* * If it wasn't NUL, we know it was '/'. Skip that * slash, and continue until no more slashes. 
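* (A non-original example: for "foo///bar" this loop advances past all three slashes so that "bar" is the next component, while for "foo///" it leaves *name == '\0', the body of the walk finishes with "foo" as the last component, and lookup_last() later notices the trailing slash and adds LOOKUP_FOLLOW | LOOKUP_DIRECTORY.)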
*/ do { name++; } while (unlikely(*name == '/')); if (unlikely(!*name)) { OK: /* pathname body, done */ if (!nd->depth) return 0; name = nd->stack[nd->depth - 1].name; /* trailing symlink, done */ if (!name) return 0; /* last component of nested symlink */ err = walk_component(nd, WALK_FOLLOW); } else { /* not the last component */ err = walk_component(nd, WALK_FOLLOW | WALK_MORE); } if (err < 0) return err; if (err) { const char *s = get_link(nd); if (IS_ERR(s)) return PTR_ERR(s); err = 0; if (unlikely(!s)) { /* jumped */ put_link(nd); } else { nd->stack[nd->depth - 1].name = name; name = s; continue; } } if (unlikely(!d_can_lookup(nd->path.dentry))) { if (nd->flags & LOOKUP_RCU) { if (unlazy_walk(nd)) return -ECHILD; } return -ENOTDIR; } } } static const char *path_init(struct nameidata *nd, unsigned flags) { const char *s = nd->name->name; if (!*s) flags &= ~LOOKUP_RCU; nd->last_type = LAST_ROOT; /* if there are only slashes... */ nd->flags = flags | LOOKUP_JUMPED | LOOKUP_PARENT; nd->depth = 0; if (flags & LOOKUP_ROOT) { struct dentry *root = nd->root.dentry; struct inode *inode = root->d_inode; if (*s && unlikely(!d_can_lookup(root))) return ERR_PTR(-ENOTDIR); nd->path = nd->root; nd->inode = inode; if (flags & LOOKUP_RCU) { rcu_read_lock(); nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq); nd->root_seq = nd->seq; nd->m_seq = read_seqbegin(&mount_lock); } else { path_get(&nd->path); } return s; } nd->root.mnt = NULL; nd->path.mnt = NULL; nd->path.dentry = NULL; nd->m_seq = read_seqbegin(&mount_lock); if (*s == '/') { if (flags & LOOKUP_RCU) rcu_read_lock(); set_root(nd); if (likely(!nd_jump_root(nd))) return s; nd->root.mnt = NULL; rcu_read_unlock(); return ERR_PTR(-ECHILD); } else if (nd->dfd == AT_FDCWD) { if (flags & LOOKUP_RCU) { struct fs_struct *fs = current->fs; unsigned seq; rcu_read_lock(); do { seq = read_seqcount_begin(&fs->seq); nd->path = fs->pwd; nd->inode = nd->path.dentry->d_inode; nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq); } while (read_seqcount_retry(&fs->seq, seq)); } else { get_fs_pwd(current->fs, &nd->path); nd->inode = nd->path.dentry->d_inode; } return s; } else { /* Caller must check execute permissions on the starting path component */ struct fd f = fdget_raw(nd->dfd); struct dentry *dentry; if (!f.file) return ERR_PTR(-EBADF); dentry = f.file->f_path.dentry; if (*s) { if (!d_can_lookup(dentry)) { fdput(f); return ERR_PTR(-ENOTDIR); } } nd->path = f.file->f_path; if (flags & LOOKUP_RCU) { rcu_read_lock(); nd->inode = nd->path.dentry->d_inode; nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq); } else { path_get(&nd->path); nd->inode = nd->path.dentry->d_inode; } fdput(f); return s; } } static const char *trailing_symlink(struct nameidata *nd) { const char *s; int error = may_follow_link(nd); if (unlikely(error)) return ERR_PTR(error); nd->flags |= LOOKUP_PARENT; nd->stack[0].name = NULL; s = get_link(nd); return s ? 
s : ""; } static inline int lookup_last(struct nameidata *nd) { if (nd->last_type == LAST_NORM && nd->last.name[nd->last.len]) nd->flags |= LOOKUP_FOLLOW | LOOKUP_DIRECTORY; nd->flags &= ~LOOKUP_PARENT; return walk_component(nd, 0); } static int handle_lookup_down(struct nameidata *nd) { struct path path = nd->path; struct inode *inode = nd->inode; unsigned seq = nd->seq; int err; if (nd->flags & LOOKUP_RCU) { /* * don't bother with unlazy_walk on failure - we are * at the very beginning of walk, so we lose nothing * if we simply redo everything in non-RCU mode */ if (unlikely(!__follow_mount_rcu(nd, &path, &inode, &seq))) return -ECHILD; } else { dget(path.dentry); err = follow_managed(&path, nd); if (unlikely(err < 0)) return err; inode = d_backing_inode(path.dentry); seq = 0; } path_to_nameidata(&path, nd); nd->inode = inode; nd->seq = seq; return 0; } /* Returns 0 and nd will be valid on success; Retuns error, otherwise. */ static int path_lookupat(struct nameidata *nd, unsigned flags, struct path *path) { const char *s = path_init(nd, flags); int err; if (IS_ERR(s)) return PTR_ERR(s); if (unlikely(flags & LOOKUP_DOWN)) { err = handle_lookup_down(nd); if (unlikely(err < 0)) { terminate_walk(nd); return err; } } while (!(err = link_path_walk(s, nd)) && ((err = lookup_last(nd)) > 0)) { s = trailing_symlink(nd); if (IS_ERR(s)) { err = PTR_ERR(s); break; } } if (!err) err = complete_walk(nd); if (!err && nd->flags & LOOKUP_DIRECTORY) if (!d_can_lookup(nd->path.dentry)) err = -ENOTDIR; if (!err) { *path = nd->path; nd->path.mnt = NULL; nd->path.dentry = NULL; } terminate_walk(nd); return err; } static int filename_lookup(int dfd, struct filename *name, unsigned flags, struct path *path, struct path *root) { int retval; struct nameidata nd; if (IS_ERR(name)) return PTR_ERR(name); if (unlikely(root)) { nd.root = *root; flags |= LOOKUP_ROOT; } set_nameidata(&nd, dfd, name); retval = path_lookupat(&nd, flags | LOOKUP_RCU, path); if (unlikely(retval == -ECHILD)) retval = path_lookupat(&nd, flags, path); if (unlikely(retval == -ESTALE)) retval = path_lookupat(&nd, flags | LOOKUP_REVAL, path); if (likely(!retval)) audit_inode(name, path->dentry, flags & LOOKUP_PARENT); restore_nameidata(); putname(name); return retval; } /* Returns 0 and nd will be valid on success; Retuns error, otherwise. 
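* (Clarifying note, not original text: as with filename_lookup() above, the filename_parentat() wrapper below retries this walk in three stages, first with LOOKUP_RCU, then in plain ref-walk when that returns -ECHILD, and finally with LOOKUP_REVAL when the ref-walk pass returns -ESTALE.)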
*/ static int path_parentat(struct nameidata *nd, unsigned flags, struct path *parent) { const char *s = path_init(nd, flags); int err; if (IS_ERR(s)) return PTR_ERR(s); err = link_path_walk(s, nd); if (!err) err = complete_walk(nd); if (!err) { *parent = nd->path; nd->path.mnt = NULL; nd->path.dentry = NULL; } terminate_walk(nd); return err; } static struct filename *filename_parentat(int dfd, struct filename *name, unsigned int flags, struct path *parent, struct qstr *last, int *type) { int retval; struct nameidata nd; if (IS_ERR(name)) return name; set_nameidata(&nd, dfd, name); retval = path_parentat(&nd, flags | LOOKUP_RCU, parent); if (unlikely(retval == -ECHILD)) retval = path_parentat(&nd, flags, parent); if (unlikely(retval == -ESTALE)) retval = path_parentat(&nd, flags | LOOKUP_REVAL, parent); if (likely(!retval)) { *last = nd.last; *type = nd.last_type; audit_inode(name, parent->dentry, LOOKUP_PARENT); } else { putname(name); name = ERR_PTR(retval); } restore_nameidata(); return name; } /* does lookup, returns the object with parent locked */ struct dentry *kern_path_locked(const char *name, struct path *path) { struct filename *filename; struct dentry *d; struct qstr last; int type; filename = filename_parentat(AT_FDCWD, getname_kernel(name), 0, path, &last, &type); if (IS_ERR(filename)) return ERR_CAST(filename); if (unlikely(type != LAST_NORM)) { path_put(path); putname(filename); return ERR_PTR(-EINVAL); } inode_lock_nested(path->dentry->d_inode, I_MUTEX_PARENT); d = __lookup_hash(&last, path->dentry, 0); if (IS_ERR(d)) { inode_unlock(path->dentry->d_inode); path_put(path); } putname(filename); return d; } int kern_path(const char *name, unsigned int flags, struct path *path) { return filename_lookup(AT_FDCWD, getname_kernel(name), flags, path, NULL); } EXPORT_SYMBOL(kern_path); /** * vfs_path_lookup - lookup a file path relative to a dentry-vfsmount pair * @dentry: pointer to dentry of the base directory * @mnt: pointer to vfs mount of the base directory * @name: pointer to file name * @flags: lookup flags * @path: pointer to struct path to fill */ int vfs_path_lookup(struct dentry *dentry, struct vfsmount *mnt, const char *name, unsigned int flags, struct path *path) { struct path root = {.mnt = mnt, .dentry = dentry}; /* the first argument of filename_lookup() is ignored with root */ return filename_lookup(AT_FDCWD, getname_kernel(name), flags , path, &root); } EXPORT_SYMBOL(vfs_path_lookup); /** * lookup_one_len - filesystem helper to lookup single pathname component * @name: pathname component to lookup * @base: base directory to lookup from * @len: maximum length @len should be interpreted to * * Note that this routine is purely a helper for filesystem usage and should * not be called by generic code. * * The caller must hold base->i_mutex. */ struct dentry *lookup_one_len(const char *name, struct dentry *base, int len) { struct qstr this; unsigned int c; int err; WARN_ON_ONCE(!inode_is_locked(base->d_inode)); this.name = name; this.len = len; this.hash = full_name_hash(base, name, len); if (!len) return ERR_PTR(-EACCES); if (unlikely(name[0] == '.')) { if (len < 2 || (len == 2 && name[1] == '.')) return ERR_PTR(-EACCES); } while (len--) { c = *(const unsigned char *)name++; if (c == '/' || c == '\0') return ERR_PTR(-EACCES); } /* * See if the low-level filesystem might want * to use its own hash.. 
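* (For example, and only as an illustration: a case-insensitive filesystem can provide ->d_hash() here so that "README" and "readme" fold to the same hash before the dcache lookup.)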
*/ if (base->d_flags & DCACHE_OP_HASH) { int err = base->d_op->d_hash(base, &this); if (err < 0) return ERR_PTR(err); } err = inode_permission(base->d_inode, MAY_EXEC); if (err) return ERR_PTR(err); return __lookup_hash(&this, base, 0); } EXPORT_SYMBOL(lookup_one_len); /** * lookup_one_len_unlocked - filesystem helper to lookup single pathname component * @name: pathname component to lookup * @base: base directory to lookup from * @len: maximum length @len should be interpreted to * * Note that this routine is purely a helper for filesystem usage and should * not be called by generic code. * * Unlike lookup_one_len, it should be called without the parent * i_mutex held, and will take the i_mutex itself if necessary. */ struct dentry *lookup_one_len_unlocked(const char *name, struct dentry *base, int len) { struct qstr this; unsigned int c; int err; struct dentry *ret; this.name = name; this.len = len; this.hash = full_name_hash(base, name, len); if (!len) return ERR_PTR(-EACCES); if (unlikely(name[0] == '.')) { if (len < 2 || (len == 2 && name[1] == '.')) return ERR_PTR(-EACCES); } while (len--) { c = *(const unsigned char *)name++; if (c == '/' || c == '\0') return ERR_PTR(-EACCES); } /* * See if the low-level filesystem might want * to use its own hash.. */ if (base->d_flags & DCACHE_OP_HASH) { int err = base->d_op->d_hash(base, &this); if (err < 0) return ERR_PTR(err); } err = inode_permission(base->d_inode, MAY_EXEC); if (err) return ERR_PTR(err); ret = lookup_dcache(&this, base, 0); if (!ret) ret = lookup_slow(&this, base, 0); return ret; } EXPORT_SYMBOL(lookup_one_len_unlocked); #ifdef CONFIG_UNIX98_PTYS int path_pts(struct path *path) { /* Find something mounted on "pts" in the same directory as * the input path. */ struct dentry *child, *parent; struct qstr this; int ret; ret = path_parent_directory(path); if (ret) return ret; parent = path->dentry; this.name = "pts"; this.len = 3; child = d_hash_and_lookup(parent, &this); if (!child) return -ENOENT; path->dentry = child; dput(parent); follow_mount(path); return 0; } #endif int user_path_at_empty(int dfd, const char __user *name, unsigned flags, struct path *path, int *empty) { return filename_lookup(dfd, getname_flags(name, flags, empty), flags, path, NULL); } EXPORT_SYMBOL(user_path_at_empty); /** * mountpoint_last - look up last component for umount * @nd: pathwalk nameidata - currently pointing at parent directory of "last" * * This is a special lookup_last function just for umount. In this case, we * need to resolve the path without doing any revalidation. * * The nameidata should be the result of doing a LOOKUP_PARENT pathwalk. Since * mountpoints are always pinned in the dcache, their ancestors are too. Thus, * in almost all cases, this lookup will be served out of the dcache. The only * cases where it won't are if nd->last refers to a symlink or the path is * bogus and it doesn't exist. * * Returns: * -error: if there was an error during lookup. This includes -ENOENT if the * lookup found a negative dentry. * * 0: if we successfully resolved nd->last and found it to not to be a * symlink that needs to be followed. * * 1: if we successfully resolved nd->last and found it to be a symlink * that needs to be followed. 
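* (In that positive case, restated in non-original words: path_mountpoint() below calls trailing_symlink() and walks the link body before retrying the final component, exactly like the regular lookup loop does.)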
*/ static int mountpoint_last(struct nameidata *nd) { int error = 0; struct dentry *dir = nd->path.dentry; struct path path; /* If we're in rcuwalk, drop out of it to handle last component */ if (nd->flags & LOOKUP_RCU) { if (unlazy_walk(nd)) return -ECHILD; } nd->flags &= ~LOOKUP_PARENT; if (unlikely(nd->last_type != LAST_NORM)) { error = handle_dots(nd, nd->last_type); if (error) return error; path.dentry = dget(nd->path.dentry); } else { path.dentry = d_lookup(dir, &nd->last); if (!path.dentry) { /* * No cached dentry. Mounted dentries are pinned in the * cache, so that means that this dentry is probably * a symlink or the path doesn't actually point * to a mounted dentry. */ path.dentry = lookup_slow(&nd->last, dir, nd->flags | LOOKUP_NO_REVAL); if (IS_ERR(path.dentry)) return PTR_ERR(path.dentry); } } if (d_is_negative(path.dentry)) { dput(path.dentry); return -ENOENT; } path.mnt = nd->path.mnt; return step_into(nd, &path, 0, d_backing_inode(path.dentry), 0); } /** * path_mountpoint - look up a path to be umounted * @nd: lookup context * @flags: lookup flags * @path: pointer to container for result * * Look up the given name, but don't attempt to revalidate the last component. * Returns 0 and "path" will be valid on success; Returns error otherwise. */ static int path_mountpoint(struct nameidata *nd, unsigned flags, struct path *path) { const char *s = path_init(nd, flags); int err; if (IS_ERR(s)) return PTR_ERR(s); while (!(err = link_path_walk(s, nd)) && (err = mountpoint_last(nd)) > 0) { s = trailing_symlink(nd); if (IS_ERR(s)) { err = PTR_ERR(s); break; } } if (!err) { *path = nd->path; nd->path.mnt = NULL; nd->path.dentry = NULL; follow_mount(path); } terminate_walk(nd); return err; } static int filename_mountpoint(int dfd, struct filename *name, struct path *path, unsigned int flags) { struct nameidata nd; int error; if (IS_ERR(name)) return PTR_ERR(name); set_nameidata(&nd, dfd, name); error = path_mountpoint(&nd, flags | LOOKUP_RCU, path); if (unlikely(error == -ECHILD)) error = path_mountpoint(&nd, flags, path); if (unlikely(error == -ESTALE)) error = path_mountpoint(&nd, flags | LOOKUP_REVAL, path); if (likely(!error)) audit_inode(name, path->dentry, 0); restore_nameidata(); putname(name); return error; } /** * user_path_mountpoint_at - lookup a path from userland in order to umount it * @dfd: directory file descriptor * @name: pathname from userland * @flags: lookup flags * @path: pointer to container to hold result * * A umount is a special case for path walking. We're not actually interested * in the inode in this situation, and ESTALE errors can be a problem. We * simply want track down the dentry and vfsmount attached at the mountpoint * and avoid revalidating the last component. * * Returns 0 and populates "path" on success. */ int user_path_mountpoint_at(int dfd, const char __user *name, unsigned int flags, struct path *path) { return filename_mountpoint(dfd, getname(name), path, flags); } int kern_path_mountpoint(int dfd, const char *name, struct path *path, unsigned int flags) { return filename_mountpoint(dfd, getname_kernel(name), path, flags); } EXPORT_SYMBOL(kern_path_mountpoint); int __check_sticky(struct inode *dir, struct inode *inode) { kuid_t fsuid = current_fsuid(); if (uid_eq(inode->i_uid, fsuid)) return 0; if (uid_eq(dir->i_uid, fsuid)) return 0; return !capable_wrt_inode_uidgid(inode, CAP_FOWNER); } EXPORT_SYMBOL(__check_sticky); /* * Check whether we can remove a link victim from directory dir, check * whether the type of victim is right. * 1. 
We can't do it if dir is read-only (done in permission()) * 2. We should have write and exec permissions on dir * 3. We can't remove anything from append-only dir * 4. We can't do anything with immutable dir (done in permission()) * 5. If the sticky bit on dir is set we should either * a. be owner of dir, or * b. be owner of victim, or * c. have CAP_FOWNER capability * 6. If the victim is append-only or immutable we can't do anything with * links pointing to it. * 7. If the victim has an unknown uid or gid we can't change the inode. * 8. If we were asked to remove a directory and victim isn't one - ENOTDIR. * 9. If we were asked to remove a non-directory and victim isn't one - EISDIR. * 10. We can't remove a root or mountpoint. * 11. We don't allow removal of NFS sillyrenamed files; it's handled by * nfs_async_unlink(). */ static int may_delete(struct inode *dir, struct dentry *victim, bool isdir) { struct inode *inode = d_backing_inode(victim); int error; if (d_is_negative(victim)) return -ENOENT; BUG_ON(!inode); BUG_ON(victim->d_parent->d_inode != dir); audit_inode_child(dir, victim, AUDIT_TYPE_CHILD_DELETE); error = inode_permission(dir, MAY_WRITE | MAY_EXEC); if (error) return error; if (IS_APPEND(dir)) return -EPERM; if (check_sticky(dir, inode) || IS_APPEND(inode) || IS_IMMUTABLE(inode) || IS_SWAPFILE(inode) || HAS_UNMAPPED_ID(inode)) return -EPERM; if (isdir) { if (!d_is_dir(victim)) return -ENOTDIR; if (IS_ROOT(victim)) return -EBUSY; } else if (d_is_dir(victim)) return -EISDIR; if (IS_DEADDIR(dir)) return -ENOENT; if (victim->d_flags & DCACHE_NFSFS_RENAMED) return -EBUSY; return 0; } /* Check whether we can create an object with dentry child in directory * dir. * 1. We can't do it if child already exists (open has special treatment for * this case, but since we are inlined it's OK) * 2. We can't do it if dir is read-only (done in permission()) * 3. We can't do it if the fs can't represent the fsuid or fsgid. * 4. We should have write and exec permissions on dir * 5. We can't do it if dir is immutable (done in permission()) */ static inline int may_create(struct inode *dir, struct dentry *child) { struct user_namespace *s_user_ns; audit_inode_child(dir, child, AUDIT_TYPE_CHILD_CREATE); if (child->d_inode) return -EEXIST; if (IS_DEADDIR(dir)) return -ENOENT; s_user_ns = dir->i_sb->s_user_ns; if (!kuid_has_mapping(s_user_ns, current_fsuid()) || !kgid_has_mapping(s_user_ns, current_fsgid())) return -EOVERFLOW; return inode_permission(dir, MAY_WRITE | MAY_EXEC); } /* * p1 and p2 should be directories on the same fs.
*/ struct dentry *lock_rename(struct dentry *p1, struct dentry *p2) { struct dentry *p; if (p1 == p2) { inode_lock_nested(p1->d_inode, I_MUTEX_PARENT); return NULL; } mutex_lock(&p1->d_sb->s_vfs_rename_mutex); p = d_ancestor(p2, p1); if (p) { inode_lock_nested(p2->d_inode, I_MUTEX_PARENT); inode_lock_nested(p1->d_inode, I_MUTEX_CHILD); return p; } p = d_ancestor(p1, p2); if (p) { inode_lock_nested(p1->d_inode, I_MUTEX_PARENT); inode_lock_nested(p2->d_inode, I_MUTEX_CHILD); return p; } inode_lock_nested(p1->d_inode, I_MUTEX_PARENT); inode_lock_nested(p2->d_inode, I_MUTEX_PARENT2); return NULL; } EXPORT_SYMBOL(lock_rename); void unlock_rename(struct dentry *p1, struct dentry *p2) { inode_unlock(p1->d_inode); if (p1 != p2) { inode_unlock(p2->d_inode); mutex_unlock(&p1->d_sb->s_vfs_rename_mutex); } } EXPORT_SYMBOL(unlock_rename); int vfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool want_excl) { int error = may_create(dir, dentry); if (error) return error; if (!dir->i_op->create) return -EACCES; /* shouldn't it be ENOSYS? */ mode &= S_IALLUGO; mode |= S_IFREG; error = security_inode_create(dir, dentry, mode); if (error) return error; error = dir->i_op->create(dir, dentry, mode, want_excl); if (!error) fsnotify_create(dir, dentry); return error; } EXPORT_SYMBOL(vfs_create); bool may_open_dev(const struct path *path) { return !(path->mnt->mnt_flags & MNT_NODEV) && !(path->mnt->mnt_sb->s_iflags & SB_I_NODEV); } static int may_open(const struct path *path, int acc_mode, int flag) { struct dentry *dentry = path->dentry; struct inode *inode = dentry->d_inode; int error; if (!inode) return -ENOENT; switch (inode->i_mode & S_IFMT) { case S_IFLNK: return -ELOOP; case S_IFDIR: if (acc_mode & MAY_WRITE) return -EISDIR; break; case S_IFBLK: case S_IFCHR: if (!may_open_dev(path)) return -EACCES; /*FALLTHRU*/ case S_IFIFO: case S_IFSOCK: flag &= ~O_TRUNC; break; } error = inode_permission(inode, MAY_OPEN | acc_mode); if (error) return error; /* * An append-only file must be opened in append mode for writing. */ if (IS_APPEND(inode)) { if ((flag & O_ACCMODE) != O_RDONLY && !(flag & O_APPEND)) return -EPERM; if (flag & O_TRUNC) return -EPERM; } /* O_NOATIME can only be set by the owner or superuser */ if (flag & O_NOATIME && !inode_owner_or_capable(inode)) return -EPERM; return 0; } static int handle_truncate(struct file *filp) { const struct path *path = &filp->f_path; struct inode *inode = path->dentry->d_inode; int error = get_write_access(inode); if (error) return error; /* * Refuse to truncate files with mandatory locks held on them. */ error = locks_verify_locked(filp); if (!error) error = security_path_truncate(path); if (!error) { error = do_truncate(path->dentry, 0, ATTR_MTIME|ATTR_CTIME|ATTR_OPEN, filp); } put_write_access(inode); return error; } static inline int open_to_namei_flags(int flag) { if ((flag & O_ACCMODE) == 3) flag--; return flag; } static int may_o_create(const struct path *dir, struct dentry *dentry, umode_t mode) { struct user_namespace *s_user_ns; int error = security_path_mknod(dir, dentry, mode, 0); if (error) return error; s_user_ns = dir->dentry->d_sb->s_user_ns; if (!kuid_has_mapping(s_user_ns, current_fsuid()) || !kgid_has_mapping(s_user_ns, current_fsgid())) return -EOVERFLOW; error = inode_permission(dir->dentry->d_inode, MAY_WRITE | MAY_EXEC); if (error) return error; return security_inode_create(dir->dentry->d_inode, dentry, mode); } /* * Attempt to atomically look up, create and open a file from a negative * dentry. * * Returns 0 if successful. 
The file will have been created and attached to * @file by the filesystem calling finish_open(). * * Returns 1 if the file was looked up only or didn't need creating. The * caller will need to perform the open themselves. @path will have been * updated to point to the new dentry. This may be negative. * * Returns an error code otherwise. */ static int atomic_open(struct nameidata *nd, struct dentry *dentry, struct path *path, struct file *file, const struct open_flags *op, int open_flag, umode_t mode, int *opened) { struct dentry *const DENTRY_NOT_SET = (void *) -1UL; struct inode *dir = nd->path.dentry->d_inode; int error; if (!(~open_flag & (O_EXCL | O_CREAT))) /* both O_EXCL and O_CREAT */ open_flag &= ~O_TRUNC; if (nd->flags & LOOKUP_DIRECTORY) open_flag |= O_DIRECTORY; file->f_path.dentry = DENTRY_NOT_SET; file->f_path.mnt = nd->path.mnt; error = dir->i_op->atomic_open(dir, dentry, file, open_to_namei_flags(open_flag), mode, opened); d_lookup_done(dentry); if (!error) { /* * We didn't have the inode before the open, so check open * permission here. */ int acc_mode = op->acc_mode; if (*opened & FILE_CREATED) { WARN_ON(!(open_flag & O_CREAT)); fsnotify_create(dir, dentry); acc_mode = 0; } error = may_open(&file->f_path, acc_mode, open_flag); if (WARN_ON(error > 0)) error = -EINVAL; } else if (error > 0) { if (WARN_ON(file->f_path.dentry == DENTRY_NOT_SET)) { error = -EIO; } else { if (file->f_path.dentry) { dput(dentry); dentry = file->f_path.dentry; } if (*opened & FILE_CREATED) fsnotify_create(dir, dentry); if (unlikely(d_is_negative(dentry))) { error = -ENOENT; } else { path->dentry = dentry; path->mnt = nd->path.mnt; return 1; } } } dput(dentry); return error; } /* * Look up and maybe create and open the last component. * * Must be called with i_mutex held on parent. * * Returns 0 if the file was successfully atomically created (if necessary) and * opened. In this case the file will be returned attached to @file. * * Returns 1 if the file was not completely opened at this time, though lookups * and creations will have been performed and the dentry returned in @path will * be positive upon return if O_CREAT was specified. If O_CREAT wasn't * specified then a negative dentry may be returned. * * An error code is returned otherwise. * * FILE_CREATE will be set in @*opened if the dentry was created and will be * cleared otherwise prior to returning. */ static int lookup_open(struct nameidata *nd, struct path *path, struct file *file, const struct open_flags *op, bool got_write, int *opened) { struct dentry *dir = nd->path.dentry; struct inode *dir_inode = dir->d_inode; int open_flag = op->open_flag; struct dentry *dentry; int error, create_error = 0; umode_t mode = op->mode; DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); if (unlikely(IS_DEADDIR(dir_inode))) return -ENOENT; *opened &= ~FILE_CREATED; dentry = d_lookup(dir, &nd->last); for (;;) { if (!dentry) { dentry = d_alloc_parallel(dir, &nd->last, &wq); if (IS_ERR(dentry)) return PTR_ERR(dentry); } if (d_in_lookup(dentry)) break; error = d_revalidate(dentry, nd->flags); if (likely(error > 0)) break; if (error) goto out_dput; d_invalidate(dentry); dput(dentry); dentry = NULL; } if (dentry->d_inode) { /* Cached positive dentry: will open in f_op->open */ goto out_no_open; } /* * Checking write permission is tricky, bacuse we don't know if we are * going to actually need it: O_CREAT opens should work as long as the * file exists. But checking existence breaks atomicity. 
The trick is * to check access and if not granted clear O_CREAT from the flags. * * Another problem is returning the "right" error value (e.g. for an * O_EXCL open we want to return EEXIST not EROFS). */ if (open_flag & O_CREAT) { if (!IS_POSIXACL(dir->d_inode)) mode &= ~current_umask(); if (unlikely(!got_write)) { create_error = -EROFS; open_flag &= ~O_CREAT; if (open_flag & (O_EXCL | O_TRUNC)) goto no_open; /* No side effects, safe to clear O_CREAT */ } else { create_error = may_o_create(&nd->path, dentry, mode); if (create_error) { open_flag &= ~O_CREAT; if (open_flag & O_EXCL) goto no_open; } } } else if ((open_flag & (O_TRUNC|O_WRONLY|O_RDWR)) && unlikely(!got_write)) { /* * No O_CREAT -> atomicity not a requirement -> fall * back to lookup + open */ goto no_open; } if (dir_inode->i_op->atomic_open) { error = atomic_open(nd, dentry, path, file, op, open_flag, mode, opened); if (unlikely(error == -ENOENT) && create_error) error = create_error; return error; } no_open: if (d_in_lookup(dentry)) { struct dentry *res = dir_inode->i_op->lookup(dir_inode, dentry, nd->flags); d_lookup_done(dentry); if (unlikely(res)) { if (IS_ERR(res)) { error = PTR_ERR(res); goto out_dput; } dput(dentry); dentry = res; } } /* Negative dentry, just create the file */ if (!dentry->d_inode && (open_flag & O_CREAT)) { *opened |= FILE_CREATED; audit_inode_child(dir_inode, dentry, AUDIT_TYPE_CHILD_CREATE); if (!dir_inode->i_op->create) { error = -EACCES; goto out_dput; } error = dir_inode->i_op->create(dir_inode, dentry, mode, open_flag & O_EXCL); if (error) goto out_dput; fsnotify_create(dir_inode, dentry); } if (unlikely(create_error) && !dentry->d_inode) { error = create_error; goto out_dput; } out_no_open: path->dentry = dentry; path->mnt = nd->path.mnt; return 1; out_dput: dput(dentry); return error; } /* * Handle the last step of open() */ static int do_last(struct nameidata *nd, struct file *file, const struct open_flags *op, int *opened) { struct dentry *dir = nd->path.dentry; int open_flag = op->open_flag; bool will_truncate = (open_flag & O_TRUNC) != 0; bool got_write = false; int acc_mode = op->acc_mode; unsigned seq; struct inode *inode; struct path path; int error; nd->flags &= ~LOOKUP_PARENT; nd->flags |= op->intent; if (nd->last_type != LAST_NORM) { error = handle_dots(nd, nd->last_type); if (unlikely(error)) return error; goto finish_open; } if (!(open_flag & O_CREAT)) { if (nd->last.name[nd->last.len]) nd->flags |= LOOKUP_FOLLOW | LOOKUP_DIRECTORY; /* we _can_ be in RCU mode here */ error = lookup_fast(nd, &path, &inode, &seq); if (likely(error > 0)) goto finish_lookup; if (error < 0) return error; BUG_ON(nd->inode != dir->d_inode); BUG_ON(nd->flags & LOOKUP_RCU); } else { /* create side of things */ /* * This will *only* deal with leaving RCU mode - LOOKUP_JUMPED * has been cleared when we got to the last component we are * about to look up */ error = complete_walk(nd); if (error) return error; audit_inode(nd->name, dir, LOOKUP_PARENT); /* trailing slashes? */ if (unlikely(nd->last.name[nd->last.len])) return -EISDIR; } if (open_flag & (O_CREAT | O_TRUNC | O_WRONLY | O_RDWR)) { error = mnt_want_write(nd->path.mnt); if (!error) got_write = true; /* * do _not_ fail yet - we might not need that or fail with * a different error; let lookup_open() decide; we'll be * dropping this one anyway.
*/ } if (open_flag & O_CREAT) inode_lock(dir->d_inode); else inode_lock_shared(dir->d_inode); error = lookup_open(nd, &path, file, op, got_write, opened); if (open_flag & O_CREAT) inode_unlock(dir->d_inode); else inode_unlock_shared(dir->d_inode); if (error <= 0) { if (error) goto out; if ((*opened & FILE_CREATED) || !S_ISREG(file_inode(file)->i_mode)) will_truncate = false; audit_inode(nd->name, file->f_path.dentry, 0); goto opened; } if (*opened & FILE_CREATED) { /* Don't check for write permission, don't truncate */ open_flag &= ~O_TRUNC; will_truncate = false; acc_mode = 0; path_to_nameidata(&path, nd); goto finish_open_created; } /* * If atomic_open() acquired write access it is dropped now due to * possible mount and symlink following (this might be optimized away if * necessary...) */ if (got_write) { mnt_drop_write(nd->path.mnt); got_write = false; } error = follow_managed(&path, nd); if (unlikely(error < 0)) return error; if (unlikely(d_is_negative(path.dentry))) { path_to_nameidata(&path, nd); return -ENOENT; } /* * create/update audit record if it already exists. */ audit_inode(nd->name, path.dentry, 0); if (unlikely((open_flag & (O_EXCL | O_CREAT)) == (O_EXCL | O_CREAT))) { path_to_nameidata(&path, nd); return -EEXIST; } seq = 0; /* out of RCU mode, so the value doesn't matter */ inode = d_backing_inode(path.dentry); finish_lookup: error = step_into(nd, &path, 0, inode, seq); if (unlikely(error)) return error; finish_open: /* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... */ error = complete_walk(nd); if (error) return error; audit_inode(nd->name, nd->path.dentry, 0); error = -EISDIR; if ((open_flag & O_CREAT) && d_is_dir(nd->path.dentry)) goto out; error = -ENOTDIR; if ((nd->flags & LOOKUP_DIRECTORY) && !d_can_lookup(nd->path.dentry)) goto out; if (!d_is_reg(nd->path.dentry)) will_truncate = false; if (will_truncate) { error = mnt_want_write(nd->path.mnt); if (error) goto out; got_write = true; } finish_open_created: error = may_open(&nd->path, acc_mode, open_flag); if (error) goto out; BUG_ON(*opened & FILE_OPENED); /* once it's opened, it's opened */ error = vfs_open(&nd->path, file, current_cred()); if (error) goto out; *opened |= FILE_OPENED; opened: error = open_check_o_direct(file); if (!error) error = ima_file_check(file, op->acc_mode, *opened); if (!error && will_truncate) error = handle_truncate(file); out: if (unlikely(error) && (*opened & FILE_OPENED)) fput(file); if (unlikely(error > 0)) { WARN_ON(1); error = -EINVAL; } if (got_write) mnt_drop_write(nd->path.mnt); return error; } struct dentry *vfs_tmpfile(struct dentry *dentry, umode_t mode, int open_flag) { static const struct qstr name = QSTR_INIT("/", 1); struct dentry *child = NULL; struct inode *dir = dentry->d_inode; struct inode *inode; int error; /* we want directory to be writable */ error = inode_permission(dir, MAY_WRITE | MAY_EXEC); if (error) goto out_err; error = -EOPNOTSUPP; if (!dir->i_op->tmpfile) goto out_err; error = -ENOMEM; child = d_alloc(dentry, &name); if (unlikely(!child)) goto out_err; error = dir->i_op->tmpfile(dir, child, mode); if (error) goto out_err; error = -ENOENT; inode = child->d_inode; if (unlikely(!inode)) goto out_err; if (!(open_flag & O_EXCL)) { spin_lock(&inode->i_lock); inode->i_state |= I_LINKABLE; spin_unlock(&inode->i_lock); } return child; out_err: dput(child); return ERR_PTR(error); } EXPORT_SYMBOL(vfs_tmpfile); static int do_tmpfile(struct nameidata *nd, unsigned flags, const struct open_flags *op, struct file *file, int *opened) { struct dentry 
*child; struct path path; int error = path_lookupat(nd, flags | LOOKUP_DIRECTORY, &path); if (unlikely(error)) return error; error = mnt_want_write(path.mnt); if (unlikely(error)) goto out; child = vfs_tmpfile(path.dentry, op->mode, op->open_flag); error = PTR_ERR(child); if (unlikely(IS_ERR(child))) goto out2; dput(path.dentry); path.dentry = child; audit_inode(nd->name, child, 0); /* Don't check for other permissions, the inode was just created */ error = may_open(&path, 0, op->open_flag); if (error) goto out2; file->f_path.mnt = path.mnt; error = finish_open(file, child, NULL, opened); if (error) goto out2; error = open_check_o_direct(file); if (error) fput(file); out2: mnt_drop_write(path.mnt); out: path_put(&path); return error; } static int do_o_path(struct nameidata *nd, unsigned flags, struct file *file) { struct path path; int error = path_lookupat(nd, flags, &path); if (!error) { audit_inode(nd->name, path.dentry, 0); error = vfs_open(&path, file, current_cred()); path_put(&path); } return error; } static struct file *path_openat(struct nameidata *nd, const struct open_flags *op, unsigned flags) { const char *s; struct file *file; int opened = 0; int error; file = get_empty_filp(); if (IS_ERR(file)) return file; file->f_flags = op->open_flag; if (unlikely(file->f_flags & __O_TMPFILE)) { error = do_tmpfile(nd, flags, op, file, &opened); goto out2; } if (unlikely(file->f_flags & O_PATH)) { error = do_o_path(nd, flags, file); if (!error) opened |= FILE_OPENED; goto out2; } s = path_init(nd, flags); if (IS_ERR(s)) { put_filp(file); return ERR_CAST(s); } while (!(error = link_path_walk(s, nd)) && (error = do_last(nd, file, op, &opened)) > 0) { nd->flags &= ~(LOOKUP_OPEN|LOOKUP_CREATE|LOOKUP_EXCL); s = trailing_symlink(nd); if (IS_ERR(s)) { error = PTR_ERR(s); break; } } terminate_walk(nd); out2: if (!(opened & FILE_OPENED)) { BUG_ON(!error); put_filp(file); } if (unlikely(error)) { if (error == -EOPENSTALE) { if (flags & LOOKUP_RCU) error = -ECHILD; else error = -ESTALE; } file = ERR_PTR(error); } return file; } struct file *do_filp_open(int dfd, struct filename *pathname, const struct open_flags *op) { struct nameidata nd; int flags = op->lookup_flags; struct file *filp; set_nameidata(&nd, dfd, pathname); filp = path_openat(&nd, op, flags | LOOKUP_RCU); if (unlikely(filp == ERR_PTR(-ECHILD))) filp = path_openat(&nd, op, flags); if (unlikely(filp == ERR_PTR(-ESTALE))) filp = path_openat(&nd, op, flags | LOOKUP_REVAL); restore_nameidata(); return filp; } struct file *do_file_open_root(struct dentry *dentry, struct vfsmount *mnt, const char *name, const struct open_flags *op) { struct nameidata nd; struct file *file; struct filename *filename; int flags = op->lookup_flags | LOOKUP_ROOT; nd.root.mnt = mnt; nd.root.dentry = dentry; if (d_is_symlink(dentry) && op->intent & LOOKUP_OPEN) return ERR_PTR(-ELOOP); filename = getname_kernel(name); if (IS_ERR(filename)) return ERR_CAST(filename); set_nameidata(&nd, -1, filename); file = path_openat(&nd, op, flags | LOOKUP_RCU); if (unlikely(file == ERR_PTR(-ECHILD))) file = path_openat(&nd, op, flags); if (unlikely(file == ERR_PTR(-ESTALE))) file = path_openat(&nd, op, flags | LOOKUP_REVAL); restore_nameidata(); putname(filename); return file; } static struct dentry *filename_create(int dfd, struct filename *name, struct path *path, unsigned int lookup_flags) { struct dentry *dentry = ERR_PTR(-EEXIST); struct qstr last; int type; int err2; int error; bool is_dir = (lookup_flags & LOOKUP_DIRECTORY); /* * Note that only LOOKUP_REVAL and 
LOOKUP_DIRECTORY matter here. Any * other flags passed in are ignored! */ lookup_flags &= LOOKUP_REVAL; name = filename_parentat(dfd, name, lookup_flags, path, &last, &type); if (IS_ERR(name)) return ERR_CAST(name); /* * Yucky last component or no last component at all? * (foo/., foo/.., /////) */ if (unlikely(type != LAST_NORM)) goto out; /* don't fail immediately if it's r/o, at least try to report other errors */ err2 = mnt_want_write(path->mnt); /* * Do the final lookup. */ lookup_flags |= LOOKUP_CREATE | LOOKUP_EXCL; inode_lock_nested(path->dentry->d_inode, I_MUTEX_PARENT); dentry = __lookup_hash(&last, path->dentry, lookup_flags); if (IS_ERR(dentry)) goto unlock; error = -EEXIST; if (d_is_positive(dentry)) goto fail; /* * Special case - lookup gave negative, but... we had foo/bar/ * From the vfs_mknod() POV we just have a negative dentry - * all is fine. Let's be bastards - you had / on the end, you've * been asking for (non-existent) directory. -ENOENT for you. */ if (unlikely(!is_dir && last.name[last.len])) { error = -ENOENT; goto fail; } if (unlikely(err2)) { error = err2; goto fail; } putname(name); return dentry; fail: dput(dentry); dentry = ERR_PTR(error); unlock: inode_unlock(path->dentry->d_inode); if (!err2) mnt_drop_write(path->mnt); out: path_put(path); putname(name); return dentry; } struct dentry *kern_path_create(int dfd, const char *pathname, struct path *path, unsigned int lookup_flags) { return filename_create(dfd, getname_kernel(pathname), path, lookup_flags); } EXPORT_SYMBOL(kern_path_create); void done_path_create(struct path *path, struct dentry *dentry) { dput(dentry); inode_unlock(path->dentry->d_inode); mnt_drop_write(path->mnt); path_put(path); } EXPORT_SYMBOL(done_path_create); inline struct dentry *user_path_create(int dfd, const char __user *pathname, struct path *path, unsigned int lookup_flags) { return filename_create(dfd, getname(pathname), path, lookup_flags); } EXPORT_SYMBOL(user_path_create); int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev) { int error = may_create(dir, dentry); if (error) return error; if ((S_ISCHR(mode) || S_ISBLK(mode)) && !capable(CAP_MKNOD)) return -EPERM; if (!dir->i_op->mknod) return -EPERM; error = devcgroup_inode_mknod(mode, dev); if (error) return error; error = security_inode_mknod(dir, dentry, mode, dev); if (error) return error; error = dir->i_op->mknod(dir, dentry, mode, dev); if (!error) fsnotify_create(dir, dentry); return error; } EXPORT_SYMBOL(vfs_mknod); static int may_mknod(umode_t mode) { switch (mode & S_IFMT) { case S_IFREG: case S_IFCHR: case S_IFBLK: case S_IFIFO: case S_IFSOCK: case 0: /* zero mode translates to S_IFREG */ return 0; case S_IFDIR: return -EPERM; default: return -EINVAL; } } SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode, unsigned, dev) { struct dentry *dentry; struct path path; int error; unsigned int lookup_flags = 0; error = may_mknod(mode); if (error) return error; retry: dentry = user_path_create(dfd, filename, &path, lookup_flags); if (IS_ERR(dentry)) return PTR_ERR(dentry); if (!IS_POSIXACL(path.dentry->d_inode)) mode &= ~current_umask(); error = security_path_mknod(&path, dentry, mode, dev); if (error) goto out; switch (mode & S_IFMT) { case 0: case S_IFREG: error = vfs_create(path.dentry->d_inode,dentry,mode,true); if (!error) ima_post_path_mknod(dentry); break; case S_IFCHR: case S_IFBLK: error = vfs_mknod(path.dentry->d_inode,dentry,mode, new_decode_dev(dev)); break; case S_IFIFO: case S_IFSOCK: error = 
vfs_mknod(path.dentry->d_inode,dentry,mode,0); break; } out: done_path_create(&path, dentry); if (retry_estale(error, lookup_flags)) { lookup_flags |= LOOKUP_REVAL; goto retry; } return error; } SYSCALL_DEFINE3(mknod, const char __user *, filename, umode_t, mode, unsigned, dev) { return sys_mknodat(AT_FDCWD, filename, mode, dev); } int vfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) { int error = may_create(dir, dentry); unsigned max_links = dir->i_sb->s_max_links; if (error) return error; if (!dir->i_op->mkdir) return -EPERM; mode &= (S_IRWXUGO|S_ISVTX); error = security_inode_mkdir(dir, dentry, mode); if (error) return error; if (max_links && dir->i_nlink >= max_links) return -EMLINK; error = dir->i_op->mkdir(dir, dentry, mode); if (!error) fsnotify_mkdir(dir, dentry); return error; } EXPORT_SYMBOL(vfs_mkdir); SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, umode_t, mode) { struct dentry *dentry; struct path path; int error; unsigned int lookup_flags = LOOKUP_DIRECTORY; retry: dentry = user_path_create(dfd, pathname, &path, lookup_flags); if (IS_ERR(dentry)) return PTR_ERR(dentry); if (!IS_POSIXACL(path.dentry->d_inode)) mode &= ~current_umask(); error = security_path_mkdir(&path, dentry, mode); if (!error) error = vfs_mkdir(path.dentry->d_inode, dentry, mode); done_path_create(&path, dentry); if (retry_estale(error, lookup_flags)) { lookup_flags |= LOOKUP_REVAL; goto retry; } return error; } SYSCALL_DEFINE2(mkdir, const char __user *, pathname, umode_t, mode) { return sys_mkdirat(AT_FDCWD, pathname, mode); } int vfs_rmdir(struct inode *dir, struct dentry *dentry) { int error = may_delete(dir, dentry, 1); if (error) return error; if (!dir->i_op->rmdir) return -EPERM; dget(dentry); inode_lock(dentry->d_inode); error = -EBUSY; if (is_local_mountpoint(dentry)) goto out; error = security_inode_rmdir(dir, dentry); if (error) goto out; shrink_dcache_parent(dentry); error = dir->i_op->rmdir(dir, dentry); if (error) goto out; dentry->d_inode->i_flags |= S_DEAD; dont_mount(dentry); detach_mounts(dentry); out: inode_unlock(dentry->d_inode); dput(dentry); if (!error) d_delete(dentry); return error; } EXPORT_SYMBOL(vfs_rmdir); static long do_rmdir(int dfd, const char __user *pathname) { int error = 0; struct filename *name; struct dentry *dentry; struct path path; struct qstr last; int type; unsigned int lookup_flags = 0; retry: name = filename_parentat(dfd, getname(pathname), lookup_flags, &path, &last, &type); if (IS_ERR(name)) return PTR_ERR(name); switch (type) { case LAST_DOTDOT: error = -ENOTEMPTY; goto exit1; case LAST_DOT: error = -EINVAL; goto exit1; case LAST_ROOT: error = -EBUSY; goto exit1; } error = mnt_want_write(path.mnt); if (error) goto exit1; inode_lock_nested(path.dentry->d_inode, I_MUTEX_PARENT); dentry = __lookup_hash(&last, path.dentry, lookup_flags); error = PTR_ERR(dentry); if (IS_ERR(dentry)) goto exit2; if (!dentry->d_inode) { error = -ENOENT; goto exit3; } error = security_path_rmdir(&path, dentry); if (error) goto exit3; error = vfs_rmdir(path.dentry->d_inode, dentry); exit3: dput(dentry); exit2: inode_unlock(path.dentry->d_inode); mnt_drop_write(path.mnt); exit1: path_put(&path); putname(name); if (retry_estale(error, lookup_flags)) { lookup_flags |= LOOKUP_REVAL; goto retry; } return error; } SYSCALL_DEFINE1(rmdir, const char __user *, pathname) { return do_rmdir(AT_FDCWD, pathname); } /** * vfs_unlink - unlink a filesystem object * @dir: parent directory * @dentry: victim * @delegated_inode: returns victim inode, if the inode is 
delegated. * * The caller must hold dir->i_mutex. * * If vfs_unlink discovers a delegation, it will return -EWOULDBLOCK and * return a reference to the inode in delegated_inode. The caller * should then break the delegation on that inode and retry. Because * breaking a delegation may take a long time, the caller should drop * dir->i_mutex before doing so. * * Alternatively, a caller may pass NULL for delegated_inode. This may * be appropriate for callers that expect the underlying filesystem not * to be NFS exported. */ int vfs_unlink(struct inode *dir, struct dentry *dentry, struct inode **delegated_inode) { struct inode *target = dentry->d_inode; int error = may_delete(dir, dentry, 0); if (error) return error; if (!dir->i_op->unlink) return -EPERM; inode_lock(target); if (is_local_mountpoint(dentry)) error = -EBUSY; else { error = security_inode_unlink(dir, dentry); if (!error) { error = try_break_deleg(target, delegated_inode); if (error) goto out; error = dir->i_op->unlink(dir, dentry); if (!error) { dont_mount(dentry); detach_mounts(dentry); } } } out: inode_unlock(target); /* We don't d_delete() NFS sillyrenamed files--they still exist. */ if (!error && !(dentry->d_flags & DCACHE_NFSFS_RENAMED)) { fsnotify_link_count(target); d_delete(dentry); } return error; } EXPORT_SYMBOL(vfs_unlink); /* * Make sure that the actual truncation of the file will occur outside its * directory's i_mutex. Truncate can take a long time if there is a lot of * writeout happening, and we don't want to prevent access to the directory * while waiting on the I/O. */ static long do_unlinkat(int dfd, const char __user *pathname) { int error; struct filename *name; struct dentry *dentry; struct path path; struct qstr last; int type; struct inode *inode = NULL; struct inode *delegated_inode = NULL; unsigned int lookup_flags = 0; retry: name = filename_parentat(dfd, getname(pathname), lookup_flags, &path, &last, &type); if (IS_ERR(name)) return PTR_ERR(name); error = -EISDIR; if (type != LAST_NORM) goto exit1; error = mnt_want_write(path.mnt); if (error) goto exit1; retry_deleg: inode_lock_nested(path.dentry->d_inode, I_MUTEX_PARENT); dentry = __lookup_hash(&last, path.dentry, lookup_flags); error = PTR_ERR(dentry); if (!IS_ERR(dentry)) { /* Why not before? 
Because we want correct error value */ if (last.name[last.len]) goto slashes; inode = dentry->d_inode; if (d_is_negative(dentry)) goto slashes; ihold(inode); error = security_path_unlink(&path, dentry); if (error) goto exit2; error = vfs_unlink(path.dentry->d_inode, dentry, &delegated_inode); exit2: dput(dentry); } inode_unlock(path.dentry->d_inode); if (inode) iput(inode); /* truncate the inode here */ inode = NULL; if (delegated_inode) { error = break_deleg_wait(&delegated_inode); if (!error) goto retry_deleg; } mnt_drop_write(path.mnt); exit1: path_put(&path); putname(name); if (retry_estale(error, lookup_flags)) { lookup_flags |= LOOKUP_REVAL; inode = NULL; goto retry; } return error; slashes: if (d_is_negative(dentry)) error = -ENOENT; else if (d_is_dir(dentry)) error = -EISDIR; else error = -ENOTDIR; goto exit2; } SYSCALL_DEFINE3(unlinkat, int, dfd, const char __user *, pathname, int, flag) { if ((flag & ~AT_REMOVEDIR) != 0) return -EINVAL; if (flag & AT_REMOVEDIR) return do_rmdir(dfd, pathname); return do_unlinkat(dfd, pathname); } SYSCALL_DEFINE1(unlink, const char __user *, pathname) { return do_unlinkat(AT_FDCWD, pathname); } int vfs_symlink(struct inode *dir, struct dentry *dentry, const char *oldname) { int error = may_create(dir, dentry); if (error) return error; if (!dir->i_op->symlink) return -EPERM; error = security_inode_symlink(dir, dentry, oldname); if (error) return error; error = dir->i_op->symlink(dir, dentry, oldname); if (!error) fsnotify_create(dir, dentry); return error; } EXPORT_SYMBOL(vfs_symlink); SYSCALL_DEFINE3(symlinkat, const char __user *, oldname, int, newdfd, const char __user *, newname) { int error; struct filename *from; struct dentry *dentry; struct path path; unsigned int lookup_flags = 0; from = getname(oldname); if (IS_ERR(from)) return PTR_ERR(from); retry: dentry = user_path_create(newdfd, newname, &path, lookup_flags); error = PTR_ERR(dentry); if (IS_ERR(dentry)) goto out_putname; error = security_path_symlink(&path, dentry, from->name); if (!error) error = vfs_symlink(path.dentry->d_inode, dentry, from->name); done_path_create(&path, dentry); if (retry_estale(error, lookup_flags)) { lookup_flags |= LOOKUP_REVAL; goto retry; } out_putname: putname(from); return error; } SYSCALL_DEFINE2(symlink, const char __user *, oldname, const char __user *, newname) { return sys_symlinkat(oldname, AT_FDCWD, newname); } /** * vfs_link - create a new link * @old_dentry: object to be linked * @dir: new parent * @new_dentry: where to create the new link * @delegated_inode: returns inode needing a delegation break * * The caller must hold dir->i_mutex * * If vfs_link discovers a delegation on the to-be-linked file in need * of breaking, it will return -EWOULDBLOCK and return a reference to the * inode in delegated_inode. The caller should then break the delegation * and retry. Because breaking a delegation may take a long time, the * caller should drop the i_mutex before doing so. * * Alternatively, a caller may pass NULL for delegated_inode. This may * be appropriate for callers that expect the underlying filesystem not * to be NFS exported. */ int vfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry, struct inode **delegated_inode) { struct inode *inode = old_dentry->d_inode; unsigned max_links = dir->i_sb->s_max_links; int error; if (!inode) return -ENOENT; error = may_create(dir, new_dentry); if (error) return error; if (dir->i_sb != inode->i_sb) return -EXDEV; /* * A link to an append-only or immutable file cannot be created. 
*/ if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) return -EPERM; /* * Updating the link count will likely cause i_uid and i_gid to * be written back improperly if their true value is unknown to * the vfs. */ if (HAS_UNMAPPED_ID(inode)) return -EPERM; if (!dir->i_op->link) return -EPERM; if (S_ISDIR(inode->i_mode)) return -EPERM; error = security_inode_link(old_dentry, dir, new_dentry); if (error) return error; inode_lock(inode); /* Make sure we don't allow creating hardlink to an unlinked file */ if (inode->i_nlink == 0 && !(inode->i_state & I_LINKABLE)) error = -ENOENT; else if (max_links && inode->i_nlink >= max_links) error = -EMLINK; else { error = try_break_deleg(inode, delegated_inode); if (!error) error = dir->i_op->link(old_dentry, dir, new_dentry); } if (!error && (inode->i_state & I_LINKABLE)) { spin_lock(&inode->i_lock); inode->i_state &= ~I_LINKABLE; spin_unlock(&inode->i_lock); } inode_unlock(inode); if (!error) fsnotify_link(dir, inode, new_dentry); return error; } EXPORT_SYMBOL(vfs_link); /* * Hardlinks are often used in delicate situations. We avoid * security-related surprises by not following symlinks on the * newname. --KAB * * We don't follow them on the oldname either to be compatible * with linux 2.0, and to avoid hard-linking to directories * and other special files. --ADM */ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname, int, newdfd, const char __user *, newname, int, flags) { struct dentry *new_dentry; struct path old_path, new_path; struct inode *delegated_inode = NULL; int how = 0; int error; if ((flags & ~(AT_SYMLINK_FOLLOW | AT_EMPTY_PATH)) != 0) return -EINVAL; /* * To use null names we require CAP_DAC_READ_SEARCH * This ensures that not everyone will be able to create * a hardlink using the passed file descriptor. */ if (flags & AT_EMPTY_PATH) { if (!capable(CAP_DAC_READ_SEARCH)) return -ENOENT; how = LOOKUP_EMPTY; } if (flags & AT_SYMLINK_FOLLOW) how |= LOOKUP_FOLLOW; retry: error = user_path_at(olddfd, oldname, how, &old_path); if (error) return error; new_dentry = user_path_create(newdfd, newname, &new_path, (how & LOOKUP_REVAL)); error = PTR_ERR(new_dentry); if (IS_ERR(new_dentry)) goto out; error = -EXDEV; if (old_path.mnt != new_path.mnt) goto out_dput; error = may_linkat(&old_path); if (unlikely(error)) goto out_dput; error = security_path_link(old_path.dentry, &new_path, new_dentry); if (error) goto out_dput; error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry, &delegated_inode); out_dput: done_path_create(&new_path, new_dentry); if (delegated_inode) { error = break_deleg_wait(&delegated_inode); if (!error) { path_put(&old_path); goto retry; } } if (retry_estale(error, how)) { path_put(&old_path); how |= LOOKUP_REVAL; goto retry; } out: path_put(&old_path); return error; } SYSCALL_DEFINE2(link, const char __user *, oldname, const char __user *, newname) { return sys_linkat(AT_FDCWD, oldname, AT_FDCWD, newname, 0); } /** * vfs_rename - rename a filesystem object * @old_dir: parent of source * @old_dentry: source * @new_dir: parent of destination * @new_dentry: destination * @delegated_inode: returns an inode needing a delegation break * @flags: rename flags * * The caller must hold multiple mutexes--see lock_rename(). * * If vfs_rename discovers a delegation in need of breaking at either * the source or destination, it will return -EWOULDBLOCK and return a * reference to the inode in delegated_inode. The caller should then * break the delegation and retry.
Because breaking a delegation may * take a long time, the caller should drop all locks before doing * so. * * Alternatively, a caller may pass NULL for delegated_inode. This may * be appropriate for callers that expect the underlying filesystem not * to be NFS exported. * * The worst of all namespace operations - renaming directory. "Perverted" * doesn't even start to describe it. Somebody in UCB had a heck of a trip... * Problems: * a) we can get into loop creation. * b) race potential - two innocent renames can create a loop together. * That's where 4.4 screws up. Current fix: serialization on * sb->s_vfs_rename_mutex. We might be more accurate, but that's another * story. * c) we have to lock _four_ objects - parents and victim (if it exists), * and source (if it is not a directory). * And that - after we got ->i_mutex on parents (until then we don't know * whether the target exists). Solution: try to be smart with locking * order for inodes. We rely on the fact that tree topology may change * only under ->s_vfs_rename_mutex _and_ that parent of the object we * move will be locked. Thus we can rank directories by the tree * (ancestors first) and rank all non-directories after them. * That works since everybody except rename does "lock parent, lookup, * lock child" and rename is under ->s_vfs_rename_mutex. * HOWEVER, it relies on the assumption that any object with ->lookup() * has no more than 1 dentry. If "hybrid" objects will ever appear, * we'd better make sure that there's no link(2) for them. * d) conversion from fhandle to dentry may come in the wrong moment - when * we are removing the target. Solution: we will have to grab ->i_mutex * in the fhandle_to_dentry code. [FIXME - current nfsfh.c relies on * ->i_mutex on parents, which works but leads to some truly excessive * locking]. */ int vfs_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, struct inode **delegated_inode, unsigned int flags) { int error; bool is_dir = d_is_dir(old_dentry); const unsigned char *old_name; struct inode *source = old_dentry->d_inode; struct inode *target = new_dentry->d_inode; bool new_is_dir = false; unsigned max_links = new_dir->i_sb->s_max_links; if (source == target) return 0; error = may_delete(old_dir, old_dentry, is_dir); if (error) return error; if (!target) { error = may_create(new_dir, new_dentry); } else { new_is_dir = d_is_dir(new_dentry); if (!(flags & RENAME_EXCHANGE)) error = may_delete(new_dir, new_dentry, is_dir); else error = may_delete(new_dir, new_dentry, new_is_dir); } if (error) return error; if (!old_dir->i_op->rename) return -EPERM; /* * If we are going to change the parent - check write permissions, * we'll need to flip '..'. 
*/ if (new_dir != old_dir) { if (is_dir) { error = inode_permission(source, MAY_WRITE); if (error) return error; } if ((flags & RENAME_EXCHANGE) && new_is_dir) { error = inode_permission(target, MAY_WRITE); if (error) return error; } } error = security_inode_rename(old_dir, old_dentry, new_dir, new_dentry, flags); if (error) return error; old_name = fsnotify_oldname_init(old_dentry->d_name.name); dget(new_dentry); if (!is_dir || (flags & RENAME_EXCHANGE)) lock_two_nondirectories(source, target); else if (target) inode_lock(target); error = -EBUSY; if (is_local_mountpoint(old_dentry) || is_local_mountpoint(new_dentry)) goto out; if (max_links && new_dir != old_dir) { error = -EMLINK; if (is_dir && !new_is_dir && new_dir->i_nlink >= max_links) goto out; if ((flags & RENAME_EXCHANGE) && !is_dir && new_is_dir && old_dir->i_nlink >= max_links) goto out; } if (is_dir && !(flags & RENAME_EXCHANGE) && target) shrink_dcache_parent(new_dentry); if (!is_dir) { error = try_break_deleg(source, delegated_inode); if (error) goto out; } if (target && !new_is_dir) { error = try_break_deleg(target, delegated_inode); if (error) goto out; } error = old_dir->i_op->rename(old_dir, old_dentry, new_dir, new_dentry, flags); if (error) goto out; if (!(flags & RENAME_EXCHANGE) && target) { if (is_dir) target->i_flags |= S_DEAD; dont_mount(new_dentry); detach_mounts(new_dentry); } if (!(old_dir->i_sb->s_type->fs_flags & FS_RENAME_DOES_D_MOVE)) { if (!(flags & RENAME_EXCHANGE)) d_move(old_dentry, new_dentry); else d_exchange(old_dentry, new_dentry); } out: if (!is_dir || (flags & RENAME_EXCHANGE)) unlock_two_nondirectories(source, target); else if (target) inode_unlock(target); dput(new_dentry); if (!error) { fsnotify_move(old_dir, new_dir, old_name, is_dir, !(flags & RENAME_EXCHANGE) ? 
target : NULL, old_dentry); if (flags & RENAME_EXCHANGE) { fsnotify_move(new_dir, old_dir, old_dentry->d_name.name, new_is_dir, NULL, new_dentry); } } fsnotify_oldname_free(old_name); return error; } EXPORT_SYMBOL(vfs_rename); SYSCALL_DEFINE5(renameat2, int, olddfd, const char __user *, oldname, int, newdfd, const char __user *, newname, unsigned int, flags) { struct dentry *old_dentry, *new_dentry; struct dentry *trap; struct path old_path, new_path; struct qstr old_last, new_last; int old_type, new_type; struct inode *delegated_inode = NULL; struct filename *from; struct filename *to; unsigned int lookup_flags = 0, target_flags = LOOKUP_RENAME_TARGET; bool should_retry = false; int error; if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT)) return -EINVAL; if ((flags & (RENAME_NOREPLACE | RENAME_WHITEOUT)) && (flags & RENAME_EXCHANGE)) return -EINVAL; if ((flags & RENAME_WHITEOUT) && !capable(CAP_MKNOD)) return -EPERM; if (flags & RENAME_EXCHANGE) target_flags = 0; retry: from = filename_parentat(olddfd, getname(oldname), lookup_flags, &old_path, &old_last, &old_type); if (IS_ERR(from)) { error = PTR_ERR(from); goto exit; } to = filename_parentat(newdfd, getname(newname), lookup_flags, &new_path, &new_last, &new_type); if (IS_ERR(to)) { error = PTR_ERR(to); goto exit1; } error = -EXDEV; if (old_path.mnt != new_path.mnt) goto exit2; error = -EBUSY; if (old_type != LAST_NORM) goto exit2; if (flags & RENAME_NOREPLACE) error = -EEXIST; if (new_type != LAST_NORM) goto exit2; error = mnt_want_write(old_path.mnt); if (error) goto exit2; retry_deleg: trap = lock_rename(new_path.dentry, old_path.dentry); old_dentry = __lookup_hash(&old_last, old_path.dentry, lookup_flags); error = PTR_ERR(old_dentry); if (IS_ERR(old_dentry)) goto exit3; /* source must exist */ error = -ENOENT; if (d_is_negative(old_dentry)) goto exit4; new_dentry = __lookup_hash(&new_last, new_path.dentry, lookup_flags | target_flags); error = PTR_ERR(new_dentry); if (IS_ERR(new_dentry)) goto exit4; error = -EEXIST; if ((flags & RENAME_NOREPLACE) && d_is_positive(new_dentry)) goto exit5; if (flags & RENAME_EXCHANGE) { error = -ENOENT; if (d_is_negative(new_dentry)) goto exit5; if (!d_is_dir(new_dentry)) { error = -ENOTDIR; if (new_last.name[new_last.len]) goto exit5; } } /* unless the source is a directory trailing slashes give -ENOTDIR */ if (!d_is_dir(old_dentry)) { error = -ENOTDIR; if (old_last.name[old_last.len]) goto exit5; if (!(flags & RENAME_EXCHANGE) && new_last.name[new_last.len]) goto exit5; } /* source should not be ancestor of target */ error = -EINVAL; if (old_dentry == trap) goto exit5; /* target should not be an ancestor of source */ if (!(flags & RENAME_EXCHANGE)) error = -ENOTEMPTY; if (new_dentry == trap) goto exit5; error = security_path_rename(&old_path, old_dentry, &new_path, new_dentry, flags); if (error) goto exit5; error = vfs_rename(old_path.dentry->d_inode, old_dentry, new_path.dentry->d_inode, new_dentry, &delegated_inode, flags); exit5: dput(new_dentry); exit4: dput(old_dentry); exit3: unlock_rename(new_path.dentry, old_path.dentry); if (delegated_inode) { error = break_deleg_wait(&delegated_inode); if (!error) goto retry_deleg; } mnt_drop_write(old_path.mnt); exit2: if (retry_estale(error, lookup_flags)) should_retry = true; path_put(&new_path); putname(to); exit1: path_put(&old_path); putname(from); if (should_retry) { should_retry = false; lookup_flags |= LOOKUP_REVAL; goto retry; } exit: return error; } SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname, int, 
newdfd, const char __user *, newname) { return sys_renameat2(olddfd, oldname, newdfd, newname, 0); } SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newname) { return sys_renameat2(AT_FDCWD, oldname, AT_FDCWD, newname, 0); } int vfs_whiteout(struct inode *dir, struct dentry *dentry) { int error = may_create(dir, dentry); if (error) return error; if (!dir->i_op->mknod) return -EPERM; return dir->i_op->mknod(dir, dentry, S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV); } EXPORT_SYMBOL(vfs_whiteout); int readlink_copy(char __user *buffer, int buflen, const char *link) { int len = PTR_ERR(link); if (IS_ERR(link)) goto out; len = strlen(link); if (len > (unsigned) buflen) len = buflen; if (copy_to_user(buffer, link, len)) len = -EFAULT; out: return len; } /* * A helper for ->readlink(). This should be used *ONLY* for symlinks that * have ->get_link() not calling nd_jump_link(). Using (or not using) it * for any given inode is up to filesystem. */ static int generic_readlink(struct dentry *dentry, char __user *buffer, int buflen) { DEFINE_DELAYED_CALL(done); struct inode *inode = d_inode(dentry); const char *link = inode->i_link; int res; if (!link) { link = inode->i_op->get_link(dentry, inode, &done); if (IS_ERR(link)) return PTR_ERR(link); } res = readlink_copy(buffer, buflen, link); do_delayed_call(&done); return res; } /** * vfs_readlink - copy symlink body into userspace buffer * @dentry: dentry on which to get symbolic link * @buffer: user memory pointer * @buflen: size of buffer * * Does not touch atime. That's up to the caller if necessary * * Does not call security hook. */ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen) { struct inode *inode = d_inode(dentry); if (unlikely(!(inode->i_opflags & IOP_DEFAULT_READLINK))) { if (unlikely(inode->i_op->readlink)) return inode->i_op->readlink(dentry, buffer, buflen); if (!d_is_symlink(dentry)) return -EINVAL; spin_lock(&inode->i_lock); inode->i_opflags |= IOP_DEFAULT_READLINK; spin_unlock(&inode->i_lock); } return generic_readlink(dentry, buffer, buflen); } EXPORT_SYMBOL(vfs_readlink); /** * vfs_get_link - get symlink body * @dentry: dentry on which to get symbolic link * @done: caller needs to free returned data with this * * Calls security hook and i_op->get_link() on the supplied inode. * * It does not touch atime. That's up to the caller if necessary. 
* * Does not work on "special" symlinks like /proc/$$/fd/N */ const char *vfs_get_link(struct dentry *dentry, struct delayed_call *done) { const char *res = ERR_PTR(-EINVAL); struct inode *inode = d_inode(dentry); if (d_is_symlink(dentry)) { res = ERR_PTR(security_inode_readlink(dentry)); if (!res) res = inode->i_op->get_link(dentry, inode, done); } return res; } EXPORT_SYMBOL(vfs_get_link); /* get the link contents into pagecache */ const char *page_get_link(struct dentry *dentry, struct inode *inode, struct delayed_call *callback) { char *kaddr; struct page *page; struct address_space *mapping = inode->i_mapping; if (!dentry) { page = find_get_page(mapping, 0); if (!page) return ERR_PTR(-ECHILD); if (!PageUptodate(page)) { put_page(page); return ERR_PTR(-ECHILD); } } else { page = read_mapping_page(mapping, 0, NULL); if (IS_ERR(page)) return (char*)page; } set_delayed_call(callback, page_put_link, page); BUG_ON(mapping_gfp_mask(mapping) & __GFP_HIGHMEM); kaddr = page_address(page); nd_terminate_link(kaddr, inode->i_size, PAGE_SIZE - 1); return kaddr; } EXPORT_SYMBOL(page_get_link); void page_put_link(void *arg) { put_page(arg); } EXPORT_SYMBOL(page_put_link); int page_readlink(struct dentry *dentry, char __user *buffer, int buflen) { DEFINE_DELAYED_CALL(done); int res = readlink_copy(buffer, buflen, page_get_link(dentry, d_inode(dentry), &done)); do_delayed_call(&done); return res; } EXPORT_SYMBOL(page_readlink); /* * The nofs argument instructs pagecache_write_begin to pass AOP_FLAG_NOFS */ int __page_symlink(struct inode *inode, const char *symname, int len, int nofs) { struct address_space *mapping = inode->i_mapping; struct page *page; void *fsdata; int err; unsigned int flags = 0; if (nofs) flags |= AOP_FLAG_NOFS; retry: err = pagecache_write_begin(NULL, mapping, 0, len-1, flags, &page, &fsdata); if (err) goto fail; memcpy(page_address(page), symname, len-1); err = pagecache_write_end(NULL, mapping, 0, len-1, len-1, page, fsdata); if (err < 0) goto fail; if (err < len-1) goto retry; mark_inode_dirty(inode); return 0; fail: return err; } EXPORT_SYMBOL(__page_symlink); int page_symlink(struct inode *inode, const char *symname, int len) { return __page_symlink(inode, symname, len, !mapping_gfp_constraint(inode->i_mapping, __GFP_FS)); } EXPORT_SYMBOL(page_symlink); const struct inode_operations page_symlink_inode_operations = { .get_link = page_get_link, }; EXPORT_SYMBOL(page_symlink_inode_operations);
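/*
 * A minimal sketch of how a filesystem might use lookup_one_len() above to
 * resolve a single child name under a directory it owns. The helper name
 * below is hypothetical; it only illustrates the requirements the kerneldoc
 * states: a single component (no '/' or NUL) and base->d_inode held locked
 * by the caller. lookup_one_len_unlocked() is the variant to use when the
 * caller does not already hold the lock.
 */
static struct dentry *__maybe_unused example_lookup_child(struct dentry *dir,
							   const char *name)
{
	struct dentry *child;

	inode_lock(d_inode(dir));
	child = lookup_one_len(name, dir, strlen(name));
	inode_unlock(d_inode(dir));

	/* ERR_PTR() on failure; the caller must dput() a valid result. */
	return child;
}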
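/*
 * A minimal sketch of the create-side helpers above (kern_path_create(),
 * vfs_mknod(), done_path_create()), roughly the pattern in-kernel users
 * such as devtmpfs follow. The function name and its arguments below are
 * hypothetical and error handling is reduced to the calling convention:
 * kern_path_create() resolves and locks the parent and takes write access
 * on the mount, and done_path_create() undoes all of that. @mode is
 * expected to carry the file type bits (e.g. S_IFCHR) as well as the
 * permission bits.
 */
static int __maybe_unused example_create_node(const char *pathname,
					      umode_t mode, dev_t dev)
{
	struct dentry *dentry;
	struct path path;
	int err;

	dentry = kern_path_create(AT_FDCWD, pathname, &path, 0);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	err = vfs_mknod(d_inode(path.dentry), dentry, mode, dev);

	done_path_create(&path, dentry);
	return err;
}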
./CrossVul/dataset_final_sorted/CWE-362/c/bad_3275_2
crossvul-cpp_data_good_677_1
/* * Interrupt descriptor table related code * * This file is licensed under the GPL V2 */ #include <linux/interrupt.h> #include <asm/traps.h> #include <asm/proto.h> #include <asm/desc.h> struct idt_data { unsigned int vector; unsigned int segment; struct idt_bits bits; const void *addr; }; #define DPL0 0x0 #define DPL3 0x3 #define DEFAULT_STACK 0 #define G(_vector, _addr, _ist, _type, _dpl, _segment) \ { \ .vector = _vector, \ .bits.ist = _ist, \ .bits.type = _type, \ .bits.dpl = _dpl, \ .bits.p = 1, \ .addr = _addr, \ .segment = _segment, \ } /* Interrupt gate */ #define INTG(_vector, _addr) \ G(_vector, _addr, DEFAULT_STACK, GATE_INTERRUPT, DPL0, __KERNEL_CS) /* System interrupt gate */ #define SYSG(_vector, _addr) \ G(_vector, _addr, DEFAULT_STACK, GATE_INTERRUPT, DPL3, __KERNEL_CS) /* Interrupt gate with interrupt stack */ #define ISTG(_vector, _addr, _ist) \ G(_vector, _addr, _ist, GATE_INTERRUPT, DPL0, __KERNEL_CS) /* System interrupt gate with interrupt stack */ #define SISTG(_vector, _addr, _ist) \ G(_vector, _addr, _ist, GATE_INTERRUPT, DPL3, __KERNEL_CS) /* Task gate */ #define TSKG(_vector, _gdt) \ G(_vector, NULL, DEFAULT_STACK, GATE_TASK, DPL0, _gdt << 3) /* * Early traps running on the DEFAULT_STACK because the other interrupt * stacks work only after cpu_init(). */ static const __initconst struct idt_data early_idts[] = { INTG(X86_TRAP_DB, debug), SYSG(X86_TRAP_BP, int3), #ifdef CONFIG_X86_32 INTG(X86_TRAP_PF, page_fault), #endif }; /* * The default IDT entries which are set up in trap_init() before * cpu_init() is invoked. Interrupt stacks cannot be used at that point and * the traps which use them are reinitialized with IST after cpu_init() has * set up TSS. */ static const __initconst struct idt_data def_idts[] = { INTG(X86_TRAP_DE, divide_error), INTG(X86_TRAP_NMI, nmi), INTG(X86_TRAP_BR, bounds), INTG(X86_TRAP_UD, invalid_op), INTG(X86_TRAP_NM, device_not_available), INTG(X86_TRAP_OLD_MF, coprocessor_segment_overrun), INTG(X86_TRAP_TS, invalid_TSS), INTG(X86_TRAP_NP, segment_not_present), INTG(X86_TRAP_SS, stack_segment), INTG(X86_TRAP_GP, general_protection), INTG(X86_TRAP_SPURIOUS, spurious_interrupt_bug), INTG(X86_TRAP_MF, coprocessor_error), INTG(X86_TRAP_AC, alignment_check), INTG(X86_TRAP_XF, simd_coprocessor_error), #ifdef CONFIG_X86_32 TSKG(X86_TRAP_DF, GDT_ENTRY_DOUBLEFAULT_TSS), #else INTG(X86_TRAP_DF, double_fault), #endif INTG(X86_TRAP_DB, debug), #ifdef CONFIG_X86_MCE INTG(X86_TRAP_MC, &machine_check), #endif SYSG(X86_TRAP_OF, overflow), #if defined(CONFIG_IA32_EMULATION) SYSG(IA32_SYSCALL_VECTOR, entry_INT80_compat), #elif defined(CONFIG_X86_32) SYSG(IA32_SYSCALL_VECTOR, entry_INT80_32), #endif }; /* * The APIC and SMP idt entries */ static const __initconst struct idt_data apic_idts[] = { #ifdef CONFIG_SMP INTG(RESCHEDULE_VECTOR, reschedule_interrupt), INTG(CALL_FUNCTION_VECTOR, call_function_interrupt), INTG(CALL_FUNCTION_SINGLE_VECTOR, call_function_single_interrupt), INTG(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt), INTG(REBOOT_VECTOR, reboot_interrupt), #endif #ifdef CONFIG_X86_THERMAL_VECTOR INTG(THERMAL_APIC_VECTOR, thermal_interrupt), #endif #ifdef CONFIG_X86_MCE_THRESHOLD INTG(THRESHOLD_APIC_VECTOR, threshold_interrupt), #endif #ifdef CONFIG_X86_MCE_AMD INTG(DEFERRED_ERROR_VECTOR, deferred_error_interrupt), #endif #ifdef CONFIG_X86_LOCAL_APIC INTG(LOCAL_TIMER_VECTOR, apic_timer_interrupt), INTG(X86_PLATFORM_IPI_VECTOR, x86_platform_ipi), # ifdef CONFIG_HAVE_KVM INTG(POSTED_INTR_VECTOR, kvm_posted_intr_ipi), 
INTG(POSTED_INTR_WAKEUP_VECTOR, kvm_posted_intr_wakeup_ipi), INTG(POSTED_INTR_NESTED_VECTOR, kvm_posted_intr_nested_ipi), # endif # ifdef CONFIG_IRQ_WORK INTG(IRQ_WORK_VECTOR, irq_work_interrupt), # endif INTG(SPURIOUS_APIC_VECTOR, spurious_interrupt), INTG(ERROR_APIC_VECTOR, error_interrupt), #endif }; #ifdef CONFIG_X86_64 /* * Early traps running on the DEFAULT_STACK because the other interrupt * stacks work only after cpu_init(). */ static const __initconst struct idt_data early_pf_idts[] = { INTG(X86_TRAP_PF, page_fault), }; /* * Override for the debug_idt. Same as the default, but with interrupt * stack set to DEFAULT_STACK (0). Required for NMI trap handling. */ static const __initconst struct idt_data dbg_idts[] = { INTG(X86_TRAP_DB, debug), }; #endif /* Must be page-aligned because the real IDT is used in a fixmap. */ gate_desc idt_table[IDT_ENTRIES] __page_aligned_bss; struct desc_ptr idt_descr __ro_after_init = { .size = (IDT_ENTRIES * 2 * sizeof(unsigned long)) - 1, .address = (unsigned long) idt_table, }; #ifdef CONFIG_X86_64 /* No need to be aligned, but done to keep all IDTs defined the same way. */ gate_desc debug_idt_table[IDT_ENTRIES] __page_aligned_bss; /* * The exceptions which use Interrupt stacks. They are setup after * cpu_init() when the TSS has been initialized. */ static const __initconst struct idt_data ist_idts[] = { ISTG(X86_TRAP_DB, debug, DEBUG_STACK), ISTG(X86_TRAP_NMI, nmi, NMI_STACK), ISTG(X86_TRAP_DF, double_fault, DOUBLEFAULT_STACK), #ifdef CONFIG_X86_MCE ISTG(X86_TRAP_MC, &machine_check, MCE_STACK), #endif }; /* * Override for the debug_idt. Same as the default, but with interrupt * stack set to DEFAULT_STACK (0). Required for NMI trap handling. */ const struct desc_ptr debug_idt_descr = { .size = IDT_ENTRIES * 16 - 1, .address = (unsigned long) debug_idt_table, }; #endif static inline void idt_init_desc(gate_desc *gate, const struct idt_data *d) { unsigned long addr = (unsigned long) d->addr; gate->offset_low = (u16) addr; gate->segment = (u16) d->segment; gate->bits = d->bits; gate->offset_middle = (u16) (addr >> 16); #ifdef CONFIG_X86_64 gate->offset_high = (u32) (addr >> 32); gate->reserved = 0; #endif } static void idt_setup_from_table(gate_desc *idt, const struct idt_data *t, int size, bool sys) { gate_desc desc; for (; size > 0; t++, size--) { idt_init_desc(&desc, t); write_idt_entry(idt, t->vector, &desc); if (sys) set_bit(t->vector, system_vectors); } } static void set_intr_gate(unsigned int n, const void *addr) { struct idt_data data; BUG_ON(n > 0xFF); memset(&data, 0, sizeof(data)); data.vector = n; data.addr = addr; data.segment = __KERNEL_CS; data.bits.type = GATE_INTERRUPT; data.bits.p = 1; idt_setup_from_table(idt_table, &data, 1, false); } /** * idt_setup_early_traps - Initialize the idt table with early traps * * On X8664 these traps do not use interrupt stacks as they can't work * before cpu_init() is invoked and sets up TSS. The IST variants are * installed after that. */ void __init idt_setup_early_traps(void) { idt_setup_from_table(idt_table, early_idts, ARRAY_SIZE(early_idts), true); load_idt(&idt_descr); } /** * idt_setup_traps - Initialize the idt table with default traps */ void __init idt_setup_traps(void) { idt_setup_from_table(idt_table, def_idts, ARRAY_SIZE(def_idts), true); } #ifdef CONFIG_X86_64 /** * idt_setup_early_pf - Initialize the idt table with early pagefault handler * * On X8664 this does not use interrupt stacks as they can't work before * cpu_init() is invoked and sets up TSS. 
The IST variant is installed * after that. * * FIXME: Why is 32bit and 64bit installing the PF handler at different * places in the early setup code? */ void __init idt_setup_early_pf(void) { idt_setup_from_table(idt_table, early_pf_idts, ARRAY_SIZE(early_pf_idts), true); } /** * idt_setup_ist_traps - Initialize the idt table with traps using IST */ void __init idt_setup_ist_traps(void) { idt_setup_from_table(idt_table, ist_idts, ARRAY_SIZE(ist_idts), true); } /** * idt_setup_debugidt_traps - Initialize the debug idt table with debug traps */ void __init idt_setup_debugidt_traps(void) { memcpy(&debug_idt_table, &idt_table, IDT_ENTRIES * 16); idt_setup_from_table(debug_idt_table, dbg_idts, ARRAY_SIZE(dbg_idts), false); } #endif /** * idt_setup_apic_and_irq_gates - Setup APIC/SMP and normal interrupt gates */ void __init idt_setup_apic_and_irq_gates(void) { int i = FIRST_EXTERNAL_VECTOR; void *entry; idt_setup_from_table(idt_table, apic_idts, ARRAY_SIZE(apic_idts), true); for_each_clear_bit_from(i, system_vectors, FIRST_SYSTEM_VECTOR) { entry = irq_entries_start + 8 * (i - FIRST_EXTERNAL_VECTOR); set_intr_gate(i, entry); } for_each_clear_bit_from(i, system_vectors, NR_VECTORS) { #ifdef CONFIG_X86_LOCAL_APIC set_bit(i, system_vectors); set_intr_gate(i, spurious_interrupt); #else entry = irq_entries_start + 8 * (i - FIRST_EXTERNAL_VECTOR); set_intr_gate(i, entry); #endif } } /** * idt_setup_early_handler - Initializes the idt table with early handlers */ void __init idt_setup_early_handler(void) { int i; for (i = 0; i < NUM_EXCEPTION_VECTORS; i++) set_intr_gate(i, early_idt_handler_array[i]); #ifdef CONFIG_X86_32 for ( ; i < NR_VECTORS; i++) set_intr_gate(i, early_ignore_irq); #endif load_idt(&idt_descr); } /** * idt_invalidate - Invalidate interrupt descriptor table * @addr: The virtual address of the 'invalid' IDT */ void idt_invalidate(void *addr) { struct desc_ptr idt = { .address = (unsigned long) addr, .size = 0 }; load_idt(&idt); } void __init update_intr_gate(unsigned int n, const void *addr) { if (WARN_ON_ONCE(!test_bit(n, system_vectors))) return; set_intr_gate(n, addr); } void alloc_intr_gate(unsigned int n, const void *addr) { BUG_ON(n < FIRST_SYSTEM_VECTOR); if (!test_and_set_bit(n, system_vectors)) set_intr_gate(n, addr); }
./CrossVul/dataset_final_sorted/CWE-362/c/good_677_1
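A minimal standalone sketch of the descriptor packing performed by idt_init_desc() in the entry above: a 64-bit handler address is split across offset_low, offset_middle and offset_high and can be reassembled losslessly. The demo_gate layout, the demo_* names and the sample address are illustrative assumptions, not the kernel's real gate_desc, and this is plain userspace C rather than kernel code.

/*
 * Sketch only: mirrors the address split done by idt_init_desc(), using a
 * simplified, assumed struct layout instead of the kernel's gate_desc.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct demo_gate {
	uint16_t offset_low;
	uint16_t segment;
	uint16_t bits;		/* ist/type/dpl/p would be packed here */
	uint16_t offset_middle;
	uint32_t offset_high;
	uint32_t reserved;
};

static void demo_pack(struct demo_gate *g, uint64_t addr, uint16_t segment)
{
	g->offset_low    = (uint16_t)addr;		/* bits  0..15 */
	g->segment       = segment;
	g->offset_middle = (uint16_t)(addr >> 16);	/* bits 16..31 */
	g->offset_high   = (uint32_t)(addr >> 32);	/* bits 32..63 */
	g->reserved      = 0;
}

static uint64_t demo_unpack(const struct demo_gate *g)
{
	return (uint64_t)g->offset_low |
	       ((uint64_t)g->offset_middle << 16) |
	       ((uint64_t)g->offset_high << 32);
}

int main(void)
{
	struct demo_gate g = { 0 };
	uint64_t addr = 0xffffffff81a01230ULL;	/* made-up handler address */

	demo_pack(&g, addr, 0x10 /* stand-in for __KERNEL_CS */);
	assert(demo_unpack(&g) == addr);
	printf("low=%#x mid=%#x high=%#x\n",
	       (unsigned)g.offset_low, (unsigned)g.offset_middle,
	       (unsigned)g.offset_high);
	return 0;
}

Built with any C99 compiler, the assert passes and the program prints the three fragments of the address, which is the round trip the real descriptor relies on.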
crossvul-cpp_data_bad_4963_0
/* * Timers abstract layer * Copyright (c) by Jaroslav Kysela <perex@perex.cz> * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/delay.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/time.h> #include <linux/mutex.h> #include <linux/device.h> #include <linux/module.h> #include <linux/string.h> #include <sound/core.h> #include <sound/timer.h> #include <sound/control.h> #include <sound/info.h> #include <sound/minors.h> #include <sound/initval.h> #include <linux/kmod.h> #if IS_ENABLED(CONFIG_SND_HRTIMER) #define DEFAULT_TIMER_LIMIT 4 #elif IS_ENABLED(CONFIG_SND_RTCTIMER) #define DEFAULT_TIMER_LIMIT 2 #else #define DEFAULT_TIMER_LIMIT 1 #endif static int timer_limit = DEFAULT_TIMER_LIMIT; static int timer_tstamp_monotonic = 1; MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>, Takashi Iwai <tiwai@suse.de>"); MODULE_DESCRIPTION("ALSA timer interface"); MODULE_LICENSE("GPL"); module_param(timer_limit, int, 0444); MODULE_PARM_DESC(timer_limit, "Maximum global timers in system."); module_param(timer_tstamp_monotonic, int, 0444); MODULE_PARM_DESC(timer_tstamp_monotonic, "Use posix monotonic clock source for timestamps (default)."); MODULE_ALIAS_CHARDEV(CONFIG_SND_MAJOR, SNDRV_MINOR_TIMER); MODULE_ALIAS("devname:snd/timer"); struct snd_timer_user { struct snd_timer_instance *timeri; int tread; /* enhanced read with timestamps and events */ unsigned long ticks; unsigned long overrun; int qhead; int qtail; int qused; int queue_size; struct snd_timer_read *queue; struct snd_timer_tread *tqueue; spinlock_t qlock; unsigned long last_resolution; unsigned int filter; struct timespec tstamp; /* trigger tstamp */ wait_queue_head_t qchange_sleep; struct fasync_struct *fasync; struct mutex tread_sem; }; /* list of timers */ static LIST_HEAD(snd_timer_list); /* list of slave instances */ static LIST_HEAD(snd_timer_slave_list); /* lock for slave active lists */ static DEFINE_SPINLOCK(slave_active_lock); static DEFINE_MUTEX(register_mutex); static int snd_timer_free(struct snd_timer *timer); static int snd_timer_dev_free(struct snd_device *device); static int snd_timer_dev_register(struct snd_device *device); static int snd_timer_dev_disconnect(struct snd_device *device); static void snd_timer_reschedule(struct snd_timer * timer, unsigned long ticks_left); /* * create a timer instance with the given owner string. * when timer is not NULL, increments the module counter */ static struct snd_timer_instance *snd_timer_instance_new(char *owner, struct snd_timer *timer) { struct snd_timer_instance *timeri; timeri = kzalloc(sizeof(*timeri), GFP_KERNEL); if (timeri == NULL) return NULL; timeri->owner = kstrdup(owner, GFP_KERNEL); if (! 
timeri->owner) { kfree(timeri); return NULL; } INIT_LIST_HEAD(&timeri->open_list); INIT_LIST_HEAD(&timeri->active_list); INIT_LIST_HEAD(&timeri->ack_list); INIT_LIST_HEAD(&timeri->slave_list_head); INIT_LIST_HEAD(&timeri->slave_active_head); timeri->timer = timer; if (timer && !try_module_get(timer->module)) { kfree(timeri->owner); kfree(timeri); return NULL; } return timeri; } /* * find a timer instance from the given timer id */ static struct snd_timer *snd_timer_find(struct snd_timer_id *tid) { struct snd_timer *timer = NULL; list_for_each_entry(timer, &snd_timer_list, device_list) { if (timer->tmr_class != tid->dev_class) continue; if ((timer->tmr_class == SNDRV_TIMER_CLASS_CARD || timer->tmr_class == SNDRV_TIMER_CLASS_PCM) && (timer->card == NULL || timer->card->number != tid->card)) continue; if (timer->tmr_device != tid->device) continue; if (timer->tmr_subdevice != tid->subdevice) continue; return timer; } return NULL; } #ifdef CONFIG_MODULES static void snd_timer_request(struct snd_timer_id *tid) { switch (tid->dev_class) { case SNDRV_TIMER_CLASS_GLOBAL: if (tid->device < timer_limit) request_module("snd-timer-%i", tid->device); break; case SNDRV_TIMER_CLASS_CARD: case SNDRV_TIMER_CLASS_PCM: if (tid->card < snd_ecards_limit) request_module("snd-card-%i", tid->card); break; default: break; } } #endif /* * look for a master instance matching with the slave id of the given slave. * when found, relink the open_link of the slave. * * call this with register_mutex down. */ static void snd_timer_check_slave(struct snd_timer_instance *slave) { struct snd_timer *timer; struct snd_timer_instance *master; /* FIXME: it's really dumb to look up all entries.. */ list_for_each_entry(timer, &snd_timer_list, device_list) { list_for_each_entry(master, &timer->open_list_head, open_list) { if (slave->slave_class == master->slave_class && slave->slave_id == master->slave_id) { list_move_tail(&slave->open_list, &master->slave_list_head); spin_lock_irq(&slave_active_lock); slave->master = master; slave->timer = master->timer; spin_unlock_irq(&slave_active_lock); return; } } } } /* * look for slave instances matching with the slave id of the given master. * when found, relink the open_link of slaves. * * call this with register_mutex down. */ static void snd_timer_check_master(struct snd_timer_instance *master) { struct snd_timer_instance *slave, *tmp; /* check all pending slaves */ list_for_each_entry_safe(slave, tmp, &snd_timer_slave_list, open_list) { if (slave->slave_class == master->slave_class && slave->slave_id == master->slave_id) { list_move_tail(&slave->open_list, &master->slave_list_head); spin_lock_irq(&slave_active_lock); slave->master = master; slave->timer = master->timer; if (slave->flags & SNDRV_TIMER_IFLG_RUNNING) list_add_tail(&slave->active_list, &master->slave_active_head); spin_unlock_irq(&slave_active_lock); } } } /* * open a timer instance * when opening a master, the slave id must be here given. 
*/ int snd_timer_open(struct snd_timer_instance **ti, char *owner, struct snd_timer_id *tid, unsigned int slave_id) { struct snd_timer *timer; struct snd_timer_instance *timeri = NULL; if (tid->dev_class == SNDRV_TIMER_CLASS_SLAVE) { /* open a slave instance */ if (tid->dev_sclass <= SNDRV_TIMER_SCLASS_NONE || tid->dev_sclass > SNDRV_TIMER_SCLASS_OSS_SEQUENCER) { pr_debug("ALSA: timer: invalid slave class %i\n", tid->dev_sclass); return -EINVAL; } mutex_lock(&register_mutex); timeri = snd_timer_instance_new(owner, NULL); if (!timeri) { mutex_unlock(&register_mutex); return -ENOMEM; } timeri->slave_class = tid->dev_sclass; timeri->slave_id = tid->device; timeri->flags |= SNDRV_TIMER_IFLG_SLAVE; list_add_tail(&timeri->open_list, &snd_timer_slave_list); snd_timer_check_slave(timeri); mutex_unlock(&register_mutex); *ti = timeri; return 0; } /* open a master instance */ mutex_lock(&register_mutex); timer = snd_timer_find(tid); #ifdef CONFIG_MODULES if (!timer) { mutex_unlock(&register_mutex); snd_timer_request(tid); mutex_lock(&register_mutex); timer = snd_timer_find(tid); } #endif if (!timer) { mutex_unlock(&register_mutex); return -ENODEV; } if (!list_empty(&timer->open_list_head)) { timeri = list_entry(timer->open_list_head.next, struct snd_timer_instance, open_list); if (timeri->flags & SNDRV_TIMER_IFLG_EXCLUSIVE) { mutex_unlock(&register_mutex); return -EBUSY; } } timeri = snd_timer_instance_new(owner, timer); if (!timeri) { mutex_unlock(&register_mutex); return -ENOMEM; } timeri->slave_class = tid->dev_sclass; timeri->slave_id = slave_id; if (list_empty(&timer->open_list_head) && timer->hw.open) timer->hw.open(timer); list_add_tail(&timeri->open_list, &timer->open_list_head); snd_timer_check_master(timeri); mutex_unlock(&register_mutex); *ti = timeri; return 0; } static int _snd_timer_stop(struct snd_timer_instance *timeri, int keep_flag, int event); /* * close a timer instance */ int snd_timer_close(struct snd_timer_instance *timeri) { struct snd_timer *timer = NULL; struct snd_timer_instance *slave, *tmp; if (snd_BUG_ON(!timeri)) return -ENXIO; /* force to stop the timer */ snd_timer_stop(timeri); if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) { /* wait, until the active callback is finished */ spin_lock_irq(&slave_active_lock); while (timeri->flags & SNDRV_TIMER_IFLG_CALLBACK) { spin_unlock_irq(&slave_active_lock); udelay(10); spin_lock_irq(&slave_active_lock); } spin_unlock_irq(&slave_active_lock); mutex_lock(&register_mutex); list_del(&timeri->open_list); mutex_unlock(&register_mutex); } else { timer = timeri->timer; if (snd_BUG_ON(!timer)) goto out; /* wait, until the active callback is finished */ spin_lock_irq(&timer->lock); while (timeri->flags & SNDRV_TIMER_IFLG_CALLBACK) { spin_unlock_irq(&timer->lock); udelay(10); spin_lock_irq(&timer->lock); } spin_unlock_irq(&timer->lock); mutex_lock(&register_mutex); list_del(&timeri->open_list); if (timer && list_empty(&timer->open_list_head) && timer->hw.close) timer->hw.close(timer); /* remove slave links */ list_for_each_entry_safe(slave, tmp, &timeri->slave_list_head, open_list) { spin_lock_irq(&slave_active_lock); _snd_timer_stop(slave, 1, SNDRV_TIMER_EVENT_RESOLUTION); list_move_tail(&slave->open_list, &snd_timer_slave_list); slave->master = NULL; slave->timer = NULL; spin_unlock_irq(&slave_active_lock); } mutex_unlock(&register_mutex); } out: if (timeri->private_free) timeri->private_free(timeri); kfree(timeri->owner); kfree(timeri); if (timer) module_put(timer->module); return 0; } unsigned long snd_timer_resolution(struct 
snd_timer_instance *timeri) { struct snd_timer * timer; if (timeri == NULL) return 0; if ((timer = timeri->timer) != NULL) { if (timer->hw.c_resolution) return timer->hw.c_resolution(timer); return timer->hw.resolution; } return 0; } static void snd_timer_notify1(struct snd_timer_instance *ti, int event) { struct snd_timer *timer; unsigned long flags; unsigned long resolution = 0; struct snd_timer_instance *ts; struct timespec tstamp; if (timer_tstamp_monotonic) ktime_get_ts(&tstamp); else getnstimeofday(&tstamp); if (snd_BUG_ON(event < SNDRV_TIMER_EVENT_START || event > SNDRV_TIMER_EVENT_PAUSE)) return; if (event == SNDRV_TIMER_EVENT_START || event == SNDRV_TIMER_EVENT_CONTINUE) resolution = snd_timer_resolution(ti); if (ti->ccallback) ti->ccallback(ti, event, &tstamp, resolution); if (ti->flags & SNDRV_TIMER_IFLG_SLAVE) return; timer = ti->timer; if (timer == NULL) return; if (timer->hw.flags & SNDRV_TIMER_HW_SLAVE) return; spin_lock_irqsave(&timer->lock, flags); list_for_each_entry(ts, &ti->slave_active_head, active_list) if (ts->ccallback) ts->ccallback(ti, event + 100, &tstamp, resolution); spin_unlock_irqrestore(&timer->lock, flags); } static int snd_timer_start1(struct snd_timer *timer, struct snd_timer_instance *timeri, unsigned long sticks) { list_move_tail(&timeri->active_list, &timer->active_list_head); if (timer->running) { if (timer->hw.flags & SNDRV_TIMER_HW_SLAVE) goto __start_now; timer->flags |= SNDRV_TIMER_FLG_RESCHED; timeri->flags |= SNDRV_TIMER_IFLG_START; return 1; /* delayed start */ } else { timer->sticks = sticks; timer->hw.start(timer); __start_now: timer->running++; timeri->flags |= SNDRV_TIMER_IFLG_RUNNING; return 0; } } static int snd_timer_start_slave(struct snd_timer_instance *timeri) { unsigned long flags; spin_lock_irqsave(&slave_active_lock, flags); timeri->flags |= SNDRV_TIMER_IFLG_RUNNING; if (timeri->master) list_add_tail(&timeri->active_list, &timeri->master->slave_active_head); spin_unlock_irqrestore(&slave_active_lock, flags); return 1; /* delayed start */ } /* * start the timer instance */ int snd_timer_start(struct snd_timer_instance *timeri, unsigned int ticks) { struct snd_timer *timer; int result = -EINVAL; unsigned long flags; if (timeri == NULL || ticks < 1) return -EINVAL; if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) { result = snd_timer_start_slave(timeri); snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START); return result; } timer = timeri->timer; if (timer == NULL) return -EINVAL; spin_lock_irqsave(&timer->lock, flags); timeri->ticks = timeri->cticks = ticks; timeri->pticks = 0; result = snd_timer_start1(timer, timeri, ticks); spin_unlock_irqrestore(&timer->lock, flags); snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START); return result; } static int _snd_timer_stop(struct snd_timer_instance * timeri, int keep_flag, int event) { struct snd_timer *timer; unsigned long flags; if (snd_BUG_ON(!timeri)) return -ENXIO; if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) { if (!keep_flag) { spin_lock_irqsave(&slave_active_lock, flags); timeri->flags &= ~SNDRV_TIMER_IFLG_RUNNING; spin_unlock_irqrestore(&slave_active_lock, flags); } goto __end; } timer = timeri->timer; if (!timer) return -EINVAL; spin_lock_irqsave(&timer->lock, flags); list_del_init(&timeri->ack_list); list_del_init(&timeri->active_list); if ((timeri->flags & SNDRV_TIMER_IFLG_RUNNING) && !(--timer->running)) { timer->hw.stop(timer); if (timer->flags & SNDRV_TIMER_FLG_RESCHED) { timer->flags &= ~SNDRV_TIMER_FLG_RESCHED; snd_timer_reschedule(timer, 0); if (timer->flags & SNDRV_TIMER_FLG_CHANGE) 
{ timer->flags &= ~SNDRV_TIMER_FLG_CHANGE; timer->hw.start(timer); } } } if (!keep_flag) timeri->flags &= ~(SNDRV_TIMER_IFLG_RUNNING | SNDRV_TIMER_IFLG_START); spin_unlock_irqrestore(&timer->lock, flags); __end: if (event != SNDRV_TIMER_EVENT_RESOLUTION) snd_timer_notify1(timeri, event); return 0; } /* * stop the timer instance. * * do not call this from the timer callback! */ int snd_timer_stop(struct snd_timer_instance *timeri) { struct snd_timer *timer; unsigned long flags; int err; err = _snd_timer_stop(timeri, 0, SNDRV_TIMER_EVENT_STOP); if (err < 0) return err; timer = timeri->timer; if (!timer) return -EINVAL; spin_lock_irqsave(&timer->lock, flags); timeri->cticks = timeri->ticks; timeri->pticks = 0; spin_unlock_irqrestore(&timer->lock, flags); return 0; } /* * start again.. the tick is kept. */ int snd_timer_continue(struct snd_timer_instance *timeri) { struct snd_timer *timer; int result = -EINVAL; unsigned long flags; if (timeri == NULL) return result; if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) return snd_timer_start_slave(timeri); timer = timeri->timer; if (! timer) return -EINVAL; spin_lock_irqsave(&timer->lock, flags); if (!timeri->cticks) timeri->cticks = 1; timeri->pticks = 0; result = snd_timer_start1(timer, timeri, timer->sticks); spin_unlock_irqrestore(&timer->lock, flags); snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_CONTINUE); return result; } /* * pause.. remember the ticks left */ int snd_timer_pause(struct snd_timer_instance * timeri) { return _snd_timer_stop(timeri, 0, SNDRV_TIMER_EVENT_PAUSE); } /* * reschedule the timer * * start pending instances and check the scheduling ticks. * when the scheduling ticks is changed set CHANGE flag to reprogram the timer. */ static void snd_timer_reschedule(struct snd_timer * timer, unsigned long ticks_left) { struct snd_timer_instance *ti; unsigned long ticks = ~0UL; list_for_each_entry(ti, &timer->active_list_head, active_list) { if (ti->flags & SNDRV_TIMER_IFLG_START) { ti->flags &= ~SNDRV_TIMER_IFLG_START; ti->flags |= SNDRV_TIMER_IFLG_RUNNING; timer->running++; } if (ti->flags & SNDRV_TIMER_IFLG_RUNNING) { if (ticks > ti->cticks) ticks = ti->cticks; } } if (ticks == ~0UL) { timer->flags &= ~SNDRV_TIMER_FLG_RESCHED; return; } if (ticks > timer->hw.ticks) ticks = timer->hw.ticks; if (ticks_left != ticks) timer->flags |= SNDRV_TIMER_FLG_CHANGE; timer->sticks = ticks; } /* * timer tasklet * */ static void snd_timer_tasklet(unsigned long arg) { struct snd_timer *timer = (struct snd_timer *) arg; struct snd_timer_instance *ti; struct list_head *p; unsigned long resolution, ticks; unsigned long flags; spin_lock_irqsave(&timer->lock, flags); /* now process all callbacks */ while (!list_empty(&timer->sack_list_head)) { p = timer->sack_list_head.next; /* get first item */ ti = list_entry(p, struct snd_timer_instance, ack_list); /* remove from ack_list and make empty */ list_del_init(p); ticks = ti->pticks; ti->pticks = 0; resolution = ti->resolution; ti->flags |= SNDRV_TIMER_IFLG_CALLBACK; spin_unlock(&timer->lock); if (ti->callback) ti->callback(ti, resolution, ticks); spin_lock(&timer->lock); ti->flags &= ~SNDRV_TIMER_IFLG_CALLBACK; } spin_unlock_irqrestore(&timer->lock, flags); } /* * timer interrupt * * ticks_left is usually equal to timer->sticks. 
* */ void snd_timer_interrupt(struct snd_timer * timer, unsigned long ticks_left) { struct snd_timer_instance *ti, *ts, *tmp; unsigned long resolution, ticks; struct list_head *p, *ack_list_head; unsigned long flags; int use_tasklet = 0; if (timer == NULL) return; spin_lock_irqsave(&timer->lock, flags); /* remember the current resolution */ if (timer->hw.c_resolution) resolution = timer->hw.c_resolution(timer); else resolution = timer->hw.resolution; /* loop for all active instances * Here we cannot use list_for_each_entry because the active_list of a * processed instance is relinked to done_list_head before the callback * is called. */ list_for_each_entry_safe(ti, tmp, &timer->active_list_head, active_list) { if (!(ti->flags & SNDRV_TIMER_IFLG_RUNNING)) continue; ti->pticks += ticks_left; ti->resolution = resolution; if (ti->cticks < ticks_left) ti->cticks = 0; else ti->cticks -= ticks_left; if (ti->cticks) /* not expired */ continue; if (ti->flags & SNDRV_TIMER_IFLG_AUTO) { ti->cticks = ti->ticks; } else { ti->flags &= ~SNDRV_TIMER_IFLG_RUNNING; if (--timer->running) list_del_init(&ti->active_list); } if ((timer->hw.flags & SNDRV_TIMER_HW_TASKLET) || (ti->flags & SNDRV_TIMER_IFLG_FAST)) ack_list_head = &timer->ack_list_head; else ack_list_head = &timer->sack_list_head; if (list_empty(&ti->ack_list)) list_add_tail(&ti->ack_list, ack_list_head); list_for_each_entry(ts, &ti->slave_active_head, active_list) { ts->pticks = ti->pticks; ts->resolution = resolution; if (list_empty(&ts->ack_list)) list_add_tail(&ts->ack_list, ack_list_head); } } if (timer->flags & SNDRV_TIMER_FLG_RESCHED) snd_timer_reschedule(timer, timer->sticks); if (timer->running) { if (timer->hw.flags & SNDRV_TIMER_HW_STOP) { timer->hw.stop(timer); timer->flags |= SNDRV_TIMER_FLG_CHANGE; } if (!(timer->hw.flags & SNDRV_TIMER_HW_AUTO) || (timer->flags & SNDRV_TIMER_FLG_CHANGE)) { /* restart timer */ timer->flags &= ~SNDRV_TIMER_FLG_CHANGE; timer->hw.start(timer); } } else { timer->hw.stop(timer); } /* now process all fast callbacks */ while (!list_empty(&timer->ack_list_head)) { p = timer->ack_list_head.next; /* get first item */ ti = list_entry(p, struct snd_timer_instance, ack_list); /* remove from ack_list and make empty */ list_del_init(p); ticks = ti->pticks; ti->pticks = 0; ti->flags |= SNDRV_TIMER_IFLG_CALLBACK; spin_unlock(&timer->lock); if (ti->callback) ti->callback(ti, resolution, ticks); spin_lock(&timer->lock); ti->flags &= ~SNDRV_TIMER_IFLG_CALLBACK; } /* do we have any slow callbacks? 
*/ use_tasklet = !list_empty(&timer->sack_list_head); spin_unlock_irqrestore(&timer->lock, flags); if (use_tasklet) tasklet_schedule(&timer->task_queue); } /* */ int snd_timer_new(struct snd_card *card, char *id, struct snd_timer_id *tid, struct snd_timer **rtimer) { struct snd_timer *timer; int err; static struct snd_device_ops ops = { .dev_free = snd_timer_dev_free, .dev_register = snd_timer_dev_register, .dev_disconnect = snd_timer_dev_disconnect, }; if (snd_BUG_ON(!tid)) return -EINVAL; if (rtimer) *rtimer = NULL; timer = kzalloc(sizeof(*timer), GFP_KERNEL); if (!timer) return -ENOMEM; timer->tmr_class = tid->dev_class; timer->card = card; timer->tmr_device = tid->device; timer->tmr_subdevice = tid->subdevice; if (id) strlcpy(timer->id, id, sizeof(timer->id)); INIT_LIST_HEAD(&timer->device_list); INIT_LIST_HEAD(&timer->open_list_head); INIT_LIST_HEAD(&timer->active_list_head); INIT_LIST_HEAD(&timer->ack_list_head); INIT_LIST_HEAD(&timer->sack_list_head); spin_lock_init(&timer->lock); tasklet_init(&timer->task_queue, snd_timer_tasklet, (unsigned long)timer); if (card != NULL) { timer->module = card->module; err = snd_device_new(card, SNDRV_DEV_TIMER, timer, &ops); if (err < 0) { snd_timer_free(timer); return err; } } if (rtimer) *rtimer = timer; return 0; } static int snd_timer_free(struct snd_timer *timer) { if (!timer) return 0; mutex_lock(&register_mutex); if (! list_empty(&timer->open_list_head)) { struct list_head *p, *n; struct snd_timer_instance *ti; pr_warn("ALSA: timer %p is busy?\n", timer); list_for_each_safe(p, n, &timer->open_list_head) { list_del_init(p); ti = list_entry(p, struct snd_timer_instance, open_list); ti->timer = NULL; } } list_del(&timer->device_list); mutex_unlock(&register_mutex); if (timer->private_free) timer->private_free(timer); kfree(timer); return 0; } static int snd_timer_dev_free(struct snd_device *device) { struct snd_timer *timer = device->device_data; return snd_timer_free(timer); } static int snd_timer_dev_register(struct snd_device *dev) { struct snd_timer *timer = dev->device_data; struct snd_timer *timer1; if (snd_BUG_ON(!timer || !timer->hw.start || !timer->hw.stop)) return -ENXIO; if (!(timer->hw.flags & SNDRV_TIMER_HW_SLAVE) && !timer->hw.resolution && timer->hw.c_resolution == NULL) return -EINVAL; mutex_lock(&register_mutex); list_for_each_entry(timer1, &snd_timer_list, device_list) { if (timer1->tmr_class > timer->tmr_class) break; if (timer1->tmr_class < timer->tmr_class) continue; if (timer1->card && timer->card) { if (timer1->card->number > timer->card->number) break; if (timer1->card->number < timer->card->number) continue; } if (timer1->tmr_device > timer->tmr_device) break; if (timer1->tmr_device < timer->tmr_device) continue; if (timer1->tmr_subdevice > timer->tmr_subdevice) break; if (timer1->tmr_subdevice < timer->tmr_subdevice) continue; /* conflicts.. */ mutex_unlock(&register_mutex); return -EBUSY; } list_add_tail(&timer->device_list, &timer1->device_list); mutex_unlock(&register_mutex); return 0; } static int snd_timer_dev_disconnect(struct snd_device *device) { struct snd_timer *timer = device->device_data; mutex_lock(&register_mutex); list_del_init(&timer->device_list); mutex_unlock(&register_mutex); return 0; } void snd_timer_notify(struct snd_timer *timer, int event, struct timespec *tstamp) { unsigned long flags; unsigned long resolution = 0; struct snd_timer_instance *ti, *ts; if (! 
(timer->hw.flags & SNDRV_TIMER_HW_SLAVE)) return; if (snd_BUG_ON(event < SNDRV_TIMER_EVENT_MSTART || event > SNDRV_TIMER_EVENT_MRESUME)) return; spin_lock_irqsave(&timer->lock, flags); if (event == SNDRV_TIMER_EVENT_MSTART || event == SNDRV_TIMER_EVENT_MCONTINUE || event == SNDRV_TIMER_EVENT_MRESUME) { if (timer->hw.c_resolution) resolution = timer->hw.c_resolution(timer); else resolution = timer->hw.resolution; } list_for_each_entry(ti, &timer->active_list_head, active_list) { if (ti->ccallback) ti->ccallback(ti, event, tstamp, resolution); list_for_each_entry(ts, &ti->slave_active_head, active_list) if (ts->ccallback) ts->ccallback(ts, event, tstamp, resolution); } spin_unlock_irqrestore(&timer->lock, flags); } /* * exported functions for global timers */ int snd_timer_global_new(char *id, int device, struct snd_timer **rtimer) { struct snd_timer_id tid; tid.dev_class = SNDRV_TIMER_CLASS_GLOBAL; tid.dev_sclass = SNDRV_TIMER_SCLASS_NONE; tid.card = -1; tid.device = device; tid.subdevice = 0; return snd_timer_new(NULL, id, &tid, rtimer); } int snd_timer_global_free(struct snd_timer *timer) { return snd_timer_free(timer); } int snd_timer_global_register(struct snd_timer *timer) { struct snd_device dev; memset(&dev, 0, sizeof(dev)); dev.device_data = timer; return snd_timer_dev_register(&dev); } /* * System timer */ struct snd_timer_system_private { struct timer_list tlist; unsigned long last_expires; unsigned long last_jiffies; unsigned long correction; }; static void snd_timer_s_function(unsigned long data) { struct snd_timer *timer = (struct snd_timer *)data; struct snd_timer_system_private *priv = timer->private_data; unsigned long jiff = jiffies; if (time_after(jiff, priv->last_expires)) priv->correction += (long)jiff - (long)priv->last_expires; snd_timer_interrupt(timer, (long)jiff - (long)priv->last_jiffies); } static int snd_timer_s_start(struct snd_timer * timer) { struct snd_timer_system_private *priv; unsigned long njiff; priv = (struct snd_timer_system_private *) timer->private_data; njiff = (priv->last_jiffies = jiffies); if (priv->correction > timer->sticks - 1) { priv->correction -= timer->sticks - 1; njiff++; } else { njiff += timer->sticks - priv->correction; priv->correction = 0; } priv->last_expires = priv->tlist.expires = njiff; add_timer(&priv->tlist); return 0; } static int snd_timer_s_stop(struct snd_timer * timer) { struct snd_timer_system_private *priv; unsigned long jiff; priv = (struct snd_timer_system_private *) timer->private_data; del_timer(&priv->tlist); jiff = jiffies; if (time_before(jiff, priv->last_expires)) timer->sticks = priv->last_expires - jiff; else timer->sticks = 1; priv->correction = 0; return 0; } static struct snd_timer_hardware snd_timer_system = { .flags = SNDRV_TIMER_HW_FIRST | SNDRV_TIMER_HW_TASKLET, .resolution = 1000000000L / HZ, .ticks = 10000000L, .start = snd_timer_s_start, .stop = snd_timer_s_stop }; static void snd_timer_free_system(struct snd_timer *timer) { kfree(timer->private_data); } static int snd_timer_register_system(void) { struct snd_timer *timer; struct snd_timer_system_private *priv; int err; err = snd_timer_global_new("system", SNDRV_TIMER_GLOBAL_SYSTEM, &timer); if (err < 0) return err; strcpy(timer->name, "system timer"); timer->hw = snd_timer_system; priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (priv == NULL) { snd_timer_free(timer); return -ENOMEM; } setup_timer(&priv->tlist, snd_timer_s_function, (unsigned long) timer); timer->private_data = priv; timer->private_free = snd_timer_free_system; return 
snd_timer_global_register(timer); } #ifdef CONFIG_SND_PROC_FS /* * Info interface */ static void snd_timer_proc_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct snd_timer *timer; struct snd_timer_instance *ti; mutex_lock(&register_mutex); list_for_each_entry(timer, &snd_timer_list, device_list) { switch (timer->tmr_class) { case SNDRV_TIMER_CLASS_GLOBAL: snd_iprintf(buffer, "G%i: ", timer->tmr_device); break; case SNDRV_TIMER_CLASS_CARD: snd_iprintf(buffer, "C%i-%i: ", timer->card->number, timer->tmr_device); break; case SNDRV_TIMER_CLASS_PCM: snd_iprintf(buffer, "P%i-%i-%i: ", timer->card->number, timer->tmr_device, timer->tmr_subdevice); break; default: snd_iprintf(buffer, "?%i-%i-%i-%i: ", timer->tmr_class, timer->card ? timer->card->number : -1, timer->tmr_device, timer->tmr_subdevice); } snd_iprintf(buffer, "%s :", timer->name); if (timer->hw.resolution) snd_iprintf(buffer, " %lu.%03luus (%lu ticks)", timer->hw.resolution / 1000, timer->hw.resolution % 1000, timer->hw.ticks); if (timer->hw.flags & SNDRV_TIMER_HW_SLAVE) snd_iprintf(buffer, " SLAVE"); snd_iprintf(buffer, "\n"); list_for_each_entry(ti, &timer->open_list_head, open_list) snd_iprintf(buffer, " Client %s : %s\n", ti->owner ? ti->owner : "unknown", ti->flags & (SNDRV_TIMER_IFLG_START | SNDRV_TIMER_IFLG_RUNNING) ? "running" : "stopped"); } mutex_unlock(&register_mutex); } static struct snd_info_entry *snd_timer_proc_entry; static void __init snd_timer_proc_init(void) { struct snd_info_entry *entry; entry = snd_info_create_module_entry(THIS_MODULE, "timers", NULL); if (entry != NULL) { entry->c.text.read = snd_timer_proc_read; if (snd_info_register(entry) < 0) { snd_info_free_entry(entry); entry = NULL; } } snd_timer_proc_entry = entry; } static void __exit snd_timer_proc_done(void) { snd_info_free_entry(snd_timer_proc_entry); } #else /* !CONFIG_SND_PROC_FS */ #define snd_timer_proc_init() #define snd_timer_proc_done() #endif /* * USER SPACE interface */ static void snd_timer_user_interrupt(struct snd_timer_instance *timeri, unsigned long resolution, unsigned long ticks) { struct snd_timer_user *tu = timeri->callback_data; struct snd_timer_read *r; int prev; spin_lock(&tu->qlock); if (tu->qused > 0) { prev = tu->qtail == 0 ? 
tu->queue_size - 1 : tu->qtail - 1; r = &tu->queue[prev]; if (r->resolution == resolution) { r->ticks += ticks; goto __wake; } } if (tu->qused >= tu->queue_size) { tu->overrun++; } else { r = &tu->queue[tu->qtail++]; tu->qtail %= tu->queue_size; r->resolution = resolution; r->ticks = ticks; tu->qused++; } __wake: spin_unlock(&tu->qlock); kill_fasync(&tu->fasync, SIGIO, POLL_IN); wake_up(&tu->qchange_sleep); } static void snd_timer_user_append_to_tqueue(struct snd_timer_user *tu, struct snd_timer_tread *tread) { if (tu->qused >= tu->queue_size) { tu->overrun++; } else { memcpy(&tu->tqueue[tu->qtail++], tread, sizeof(*tread)); tu->qtail %= tu->queue_size; tu->qused++; } } static void snd_timer_user_ccallback(struct snd_timer_instance *timeri, int event, struct timespec *tstamp, unsigned long resolution) { struct snd_timer_user *tu = timeri->callback_data; struct snd_timer_tread r1; unsigned long flags; if (event >= SNDRV_TIMER_EVENT_START && event <= SNDRV_TIMER_EVENT_PAUSE) tu->tstamp = *tstamp; if ((tu->filter & (1 << event)) == 0 || !tu->tread) return; r1.event = event; r1.tstamp = *tstamp; r1.val = resolution; spin_lock_irqsave(&tu->qlock, flags); snd_timer_user_append_to_tqueue(tu, &r1); spin_unlock_irqrestore(&tu->qlock, flags); kill_fasync(&tu->fasync, SIGIO, POLL_IN); wake_up(&tu->qchange_sleep); } static void snd_timer_user_tinterrupt(struct snd_timer_instance *timeri, unsigned long resolution, unsigned long ticks) { struct snd_timer_user *tu = timeri->callback_data; struct snd_timer_tread *r, r1; struct timespec tstamp; int prev, append = 0; memset(&tstamp, 0, sizeof(tstamp)); spin_lock(&tu->qlock); if ((tu->filter & ((1 << SNDRV_TIMER_EVENT_RESOLUTION) | (1 << SNDRV_TIMER_EVENT_TICK))) == 0) { spin_unlock(&tu->qlock); return; } if (tu->last_resolution != resolution || ticks > 0) { if (timer_tstamp_monotonic) ktime_get_ts(&tstamp); else getnstimeofday(&tstamp); } if ((tu->filter & (1 << SNDRV_TIMER_EVENT_RESOLUTION)) && tu->last_resolution != resolution) { r1.event = SNDRV_TIMER_EVENT_RESOLUTION; r1.tstamp = tstamp; r1.val = resolution; snd_timer_user_append_to_tqueue(tu, &r1); tu->last_resolution = resolution; append++; } if ((tu->filter & (1 << SNDRV_TIMER_EVENT_TICK)) == 0) goto __wake; if (ticks == 0) goto __wake; if (tu->qused > 0) { prev = tu->qtail == 0 ? 
tu->queue_size - 1 : tu->qtail - 1; r = &tu->tqueue[prev]; if (r->event == SNDRV_TIMER_EVENT_TICK) { r->tstamp = tstamp; r->val += ticks; append++; goto __wake; } } r1.event = SNDRV_TIMER_EVENT_TICK; r1.tstamp = tstamp; r1.val = ticks; snd_timer_user_append_to_tqueue(tu, &r1); append++; __wake: spin_unlock(&tu->qlock); if (append == 0) return; kill_fasync(&tu->fasync, SIGIO, POLL_IN); wake_up(&tu->qchange_sleep); } static int snd_timer_user_open(struct inode *inode, struct file *file) { struct snd_timer_user *tu; int err; err = nonseekable_open(inode, file); if (err < 0) return err; tu = kzalloc(sizeof(*tu), GFP_KERNEL); if (tu == NULL) return -ENOMEM; spin_lock_init(&tu->qlock); init_waitqueue_head(&tu->qchange_sleep); mutex_init(&tu->tread_sem); tu->ticks = 1; tu->queue_size = 128; tu->queue = kmalloc(tu->queue_size * sizeof(struct snd_timer_read), GFP_KERNEL); if (tu->queue == NULL) { kfree(tu); return -ENOMEM; } file->private_data = tu; return 0; } static int snd_timer_user_release(struct inode *inode, struct file *file) { struct snd_timer_user *tu; if (file->private_data) { tu = file->private_data; file->private_data = NULL; if (tu->timeri) snd_timer_close(tu->timeri); kfree(tu->queue); kfree(tu->tqueue); kfree(tu); } return 0; } static void snd_timer_user_zero_id(struct snd_timer_id *id) { id->dev_class = SNDRV_TIMER_CLASS_NONE; id->dev_sclass = SNDRV_TIMER_SCLASS_NONE; id->card = -1; id->device = -1; id->subdevice = -1; } static void snd_timer_user_copy_id(struct snd_timer_id *id, struct snd_timer *timer) { id->dev_class = timer->tmr_class; id->dev_sclass = SNDRV_TIMER_SCLASS_NONE; id->card = timer->card ? timer->card->number : -1; id->device = timer->tmr_device; id->subdevice = timer->tmr_subdevice; } static int snd_timer_user_next_device(struct snd_timer_id __user *_tid) { struct snd_timer_id id; struct snd_timer *timer; struct list_head *p; if (copy_from_user(&id, _tid, sizeof(id))) return -EFAULT; mutex_lock(&register_mutex); if (id.dev_class < 0) { /* first item */ if (list_empty(&snd_timer_list)) snd_timer_user_zero_id(&id); else { timer = list_entry(snd_timer_list.next, struct snd_timer, device_list); snd_timer_user_copy_id(&id, timer); } } else { switch (id.dev_class) { case SNDRV_TIMER_CLASS_GLOBAL: id.device = id.device < 0 ? 
0 : id.device + 1; list_for_each(p, &snd_timer_list) { timer = list_entry(p, struct snd_timer, device_list); if (timer->tmr_class > SNDRV_TIMER_CLASS_GLOBAL) { snd_timer_user_copy_id(&id, timer); break; } if (timer->tmr_device >= id.device) { snd_timer_user_copy_id(&id, timer); break; } } if (p == &snd_timer_list) snd_timer_user_zero_id(&id); break; case SNDRV_TIMER_CLASS_CARD: case SNDRV_TIMER_CLASS_PCM: if (id.card < 0) { id.card = 0; } else { if (id.card < 0) { id.card = 0; } else { if (id.device < 0) { id.device = 0; } else { if (id.subdevice < 0) { id.subdevice = 0; } else { id.subdevice++; } } } } list_for_each(p, &snd_timer_list) { timer = list_entry(p, struct snd_timer, device_list); if (timer->tmr_class > id.dev_class) { snd_timer_user_copy_id(&id, timer); break; } if (timer->tmr_class < id.dev_class) continue; if (timer->card->number > id.card) { snd_timer_user_copy_id(&id, timer); break; } if (timer->card->number < id.card) continue; if (timer->tmr_device > id.device) { snd_timer_user_copy_id(&id, timer); break; } if (timer->tmr_device < id.device) continue; if (timer->tmr_subdevice > id.subdevice) { snd_timer_user_copy_id(&id, timer); break; } if (timer->tmr_subdevice < id.subdevice) continue; snd_timer_user_copy_id(&id, timer); break; } if (p == &snd_timer_list) snd_timer_user_zero_id(&id); break; default: snd_timer_user_zero_id(&id); } } mutex_unlock(&register_mutex); if (copy_to_user(_tid, &id, sizeof(*_tid))) return -EFAULT; return 0; } static int snd_timer_user_ginfo(struct file *file, struct snd_timer_ginfo __user *_ginfo) { struct snd_timer_ginfo *ginfo; struct snd_timer_id tid; struct snd_timer *t; struct list_head *p; int err = 0; ginfo = memdup_user(_ginfo, sizeof(*ginfo)); if (IS_ERR(ginfo)) return PTR_ERR(ginfo); tid = ginfo->tid; memset(ginfo, 0, sizeof(*ginfo)); ginfo->tid = tid; mutex_lock(&register_mutex); t = snd_timer_find(&tid); if (t != NULL) { ginfo->card = t->card ? 
t->card->number : -1; if (t->hw.flags & SNDRV_TIMER_HW_SLAVE) ginfo->flags |= SNDRV_TIMER_FLG_SLAVE; strlcpy(ginfo->id, t->id, sizeof(ginfo->id)); strlcpy(ginfo->name, t->name, sizeof(ginfo->name)); ginfo->resolution = t->hw.resolution; if (t->hw.resolution_min > 0) { ginfo->resolution_min = t->hw.resolution_min; ginfo->resolution_max = t->hw.resolution_max; } list_for_each(p, &t->open_list_head) { ginfo->clients++; } } else { err = -ENODEV; } mutex_unlock(&register_mutex); if (err >= 0 && copy_to_user(_ginfo, ginfo, sizeof(*ginfo))) err = -EFAULT; kfree(ginfo); return err; } static int snd_timer_user_gparams(struct file *file, struct snd_timer_gparams __user *_gparams) { struct snd_timer_gparams gparams; struct snd_timer *t; int err; if (copy_from_user(&gparams, _gparams, sizeof(gparams))) return -EFAULT; mutex_lock(&register_mutex); t = snd_timer_find(&gparams.tid); if (!t) { err = -ENODEV; goto _error; } if (!list_empty(&t->open_list_head)) { err = -EBUSY; goto _error; } if (!t->hw.set_period) { err = -ENOSYS; goto _error; } err = t->hw.set_period(t, gparams.period_num, gparams.period_den); _error: mutex_unlock(&register_mutex); return err; } static int snd_timer_user_gstatus(struct file *file, struct snd_timer_gstatus __user *_gstatus) { struct snd_timer_gstatus gstatus; struct snd_timer_id tid; struct snd_timer *t; int err = 0; if (copy_from_user(&gstatus, _gstatus, sizeof(gstatus))) return -EFAULT; tid = gstatus.tid; memset(&gstatus, 0, sizeof(gstatus)); gstatus.tid = tid; mutex_lock(&register_mutex); t = snd_timer_find(&tid); if (t != NULL) { if (t->hw.c_resolution) gstatus.resolution = t->hw.c_resolution(t); else gstatus.resolution = t->hw.resolution; if (t->hw.precise_resolution) { t->hw.precise_resolution(t, &gstatus.resolution_num, &gstatus.resolution_den); } else { gstatus.resolution_num = gstatus.resolution; gstatus.resolution_den = 1000000000uL; } } else { err = -ENODEV; } mutex_unlock(&register_mutex); if (err >= 0 && copy_to_user(_gstatus, &gstatus, sizeof(gstatus))) err = -EFAULT; return err; } static int snd_timer_user_tselect(struct file *file, struct snd_timer_select __user *_tselect) { struct snd_timer_user *tu; struct snd_timer_select tselect; char str[32]; int err = 0; tu = file->private_data; mutex_lock(&tu->tread_sem); if (tu->timeri) { snd_timer_close(tu->timeri); tu->timeri = NULL; } if (copy_from_user(&tselect, _tselect, sizeof(tselect))) { err = -EFAULT; goto __err; } sprintf(str, "application %i", current->pid); if (tselect.id.dev_class != SNDRV_TIMER_CLASS_SLAVE) tselect.id.dev_sclass = SNDRV_TIMER_SCLASS_APPLICATION; err = snd_timer_open(&tu->timeri, str, &tselect.id, current->pid); if (err < 0) goto __err; kfree(tu->queue); tu->queue = NULL; kfree(tu->tqueue); tu->tqueue = NULL; if (tu->tread) { tu->tqueue = kmalloc(tu->queue_size * sizeof(struct snd_timer_tread), GFP_KERNEL); if (tu->tqueue == NULL) err = -ENOMEM; } else { tu->queue = kmalloc(tu->queue_size * sizeof(struct snd_timer_read), GFP_KERNEL); if (tu->queue == NULL) err = -ENOMEM; } if (err < 0) { snd_timer_close(tu->timeri); tu->timeri = NULL; } else { tu->timeri->flags |= SNDRV_TIMER_IFLG_FAST; tu->timeri->callback = tu->tread ? 
snd_timer_user_tinterrupt : snd_timer_user_interrupt; tu->timeri->ccallback = snd_timer_user_ccallback; tu->timeri->callback_data = (void *)tu; } __err: mutex_unlock(&tu->tread_sem); return err; } static int snd_timer_user_info(struct file *file, struct snd_timer_info __user *_info) { struct snd_timer_user *tu; struct snd_timer_info *info; struct snd_timer *t; int err = 0; tu = file->private_data; if (!tu->timeri) return -EBADFD; t = tu->timeri->timer; if (!t) return -EBADFD; info = kzalloc(sizeof(*info), GFP_KERNEL); if (! info) return -ENOMEM; info->card = t->card ? t->card->number : -1; if (t->hw.flags & SNDRV_TIMER_HW_SLAVE) info->flags |= SNDRV_TIMER_FLG_SLAVE; strlcpy(info->id, t->id, sizeof(info->id)); strlcpy(info->name, t->name, sizeof(info->name)); info->resolution = t->hw.resolution; if (copy_to_user(_info, info, sizeof(*_info))) err = -EFAULT; kfree(info); return err; } static int snd_timer_user_params(struct file *file, struct snd_timer_params __user *_params) { struct snd_timer_user *tu; struct snd_timer_params params; struct snd_timer *t; struct snd_timer_read *tr; struct snd_timer_tread *ttr; int err; tu = file->private_data; if (!tu->timeri) return -EBADFD; t = tu->timeri->timer; if (!t) return -EBADFD; if (copy_from_user(&params, _params, sizeof(params))) return -EFAULT; if (!(t->hw.flags & SNDRV_TIMER_HW_SLAVE) && params.ticks < 1) { err = -EINVAL; goto _end; } if (params.queue_size > 0 && (params.queue_size < 32 || params.queue_size > 1024)) { err = -EINVAL; goto _end; } if (params.filter & ~((1<<SNDRV_TIMER_EVENT_RESOLUTION)| (1<<SNDRV_TIMER_EVENT_TICK)| (1<<SNDRV_TIMER_EVENT_START)| (1<<SNDRV_TIMER_EVENT_STOP)| (1<<SNDRV_TIMER_EVENT_CONTINUE)| (1<<SNDRV_TIMER_EVENT_PAUSE)| (1<<SNDRV_TIMER_EVENT_SUSPEND)| (1<<SNDRV_TIMER_EVENT_RESUME)| (1<<SNDRV_TIMER_EVENT_MSTART)| (1<<SNDRV_TIMER_EVENT_MSTOP)| (1<<SNDRV_TIMER_EVENT_MCONTINUE)| (1<<SNDRV_TIMER_EVENT_MPAUSE)| (1<<SNDRV_TIMER_EVENT_MSUSPEND)| (1<<SNDRV_TIMER_EVENT_MRESUME))) { err = -EINVAL; goto _end; } snd_timer_stop(tu->timeri); spin_lock_irq(&t->lock); tu->timeri->flags &= ~(SNDRV_TIMER_IFLG_AUTO| SNDRV_TIMER_IFLG_EXCLUSIVE| SNDRV_TIMER_IFLG_EARLY_EVENT); if (params.flags & SNDRV_TIMER_PSFLG_AUTO) tu->timeri->flags |= SNDRV_TIMER_IFLG_AUTO; if (params.flags & SNDRV_TIMER_PSFLG_EXCLUSIVE) tu->timeri->flags |= SNDRV_TIMER_IFLG_EXCLUSIVE; if (params.flags & SNDRV_TIMER_PSFLG_EARLY_EVENT) tu->timeri->flags |= SNDRV_TIMER_IFLG_EARLY_EVENT; spin_unlock_irq(&t->lock); if (params.queue_size > 0 && (unsigned int)tu->queue_size != params.queue_size) { if (tu->tread) { ttr = kmalloc(params.queue_size * sizeof(*ttr), GFP_KERNEL); if (ttr) { kfree(tu->tqueue); tu->queue_size = params.queue_size; tu->tqueue = ttr; } } else { tr = kmalloc(params.queue_size * sizeof(*tr), GFP_KERNEL); if (tr) { kfree(tu->queue); tu->queue_size = params.queue_size; tu->queue = tr; } } } tu->qhead = tu->qtail = tu->qused = 0; if (tu->timeri->flags & SNDRV_TIMER_IFLG_EARLY_EVENT) { if (tu->tread) { struct snd_timer_tread tread; tread.event = SNDRV_TIMER_EVENT_EARLY; tread.tstamp.tv_sec = 0; tread.tstamp.tv_nsec = 0; tread.val = 0; snd_timer_user_append_to_tqueue(tu, &tread); } else { struct snd_timer_read *r = &tu->queue[0]; r->resolution = 0; r->ticks = 0; tu->qused++; tu->qtail++; } } tu->filter = params.filter; tu->ticks = params.ticks; err = 0; _end: if (copy_to_user(_params, &params, sizeof(params))) return -EFAULT; return err; } static int snd_timer_user_status(struct file *file, struct snd_timer_status __user *_status) { struct snd_timer_user 
*tu; struct snd_timer_status status; tu = file->private_data; if (!tu->timeri) return -EBADFD; memset(&status, 0, sizeof(status)); status.tstamp = tu->tstamp; status.resolution = snd_timer_resolution(tu->timeri); status.lost = tu->timeri->lost; status.overrun = tu->overrun; spin_lock_irq(&tu->qlock); status.queue = tu->qused; spin_unlock_irq(&tu->qlock); if (copy_to_user(_status, &status, sizeof(status))) return -EFAULT; return 0; } static int snd_timer_user_start(struct file *file) { int err; struct snd_timer_user *tu; tu = file->private_data; if (!tu->timeri) return -EBADFD; snd_timer_stop(tu->timeri); tu->timeri->lost = 0; tu->last_resolution = 0; return (err = snd_timer_start(tu->timeri, tu->ticks)) < 0 ? err : 0; } static int snd_timer_user_stop(struct file *file) { int err; struct snd_timer_user *tu; tu = file->private_data; if (!tu->timeri) return -EBADFD; return (err = snd_timer_stop(tu->timeri)) < 0 ? err : 0; } static int snd_timer_user_continue(struct file *file) { int err; struct snd_timer_user *tu; tu = file->private_data; if (!tu->timeri) return -EBADFD; tu->timeri->lost = 0; return (err = snd_timer_continue(tu->timeri)) < 0 ? err : 0; } static int snd_timer_user_pause(struct file *file) { int err; struct snd_timer_user *tu; tu = file->private_data; if (!tu->timeri) return -EBADFD; return (err = snd_timer_pause(tu->timeri)) < 0 ? err : 0; } enum { SNDRV_TIMER_IOCTL_START_OLD = _IO('T', 0x20), SNDRV_TIMER_IOCTL_STOP_OLD = _IO('T', 0x21), SNDRV_TIMER_IOCTL_CONTINUE_OLD = _IO('T', 0x22), SNDRV_TIMER_IOCTL_PAUSE_OLD = _IO('T', 0x23), }; static long snd_timer_user_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct snd_timer_user *tu; void __user *argp = (void __user *)arg; int __user *p = argp; tu = file->private_data; switch (cmd) { case SNDRV_TIMER_IOCTL_PVERSION: return put_user(SNDRV_TIMER_VERSION, p) ? -EFAULT : 0; case SNDRV_TIMER_IOCTL_NEXT_DEVICE: return snd_timer_user_next_device(argp); case SNDRV_TIMER_IOCTL_TREAD: { int xarg; mutex_lock(&tu->tread_sem); if (tu->timeri) { /* too late */ mutex_unlock(&tu->tread_sem); return -EBUSY; } if (get_user(xarg, p)) { mutex_unlock(&tu->tread_sem); return -EFAULT; } tu->tread = xarg ? 
1 : 0; mutex_unlock(&tu->tread_sem); return 0; } case SNDRV_TIMER_IOCTL_GINFO: return snd_timer_user_ginfo(file, argp); case SNDRV_TIMER_IOCTL_GPARAMS: return snd_timer_user_gparams(file, argp); case SNDRV_TIMER_IOCTL_GSTATUS: return snd_timer_user_gstatus(file, argp); case SNDRV_TIMER_IOCTL_SELECT: return snd_timer_user_tselect(file, argp); case SNDRV_TIMER_IOCTL_INFO: return snd_timer_user_info(file, argp); case SNDRV_TIMER_IOCTL_PARAMS: return snd_timer_user_params(file, argp); case SNDRV_TIMER_IOCTL_STATUS: return snd_timer_user_status(file, argp); case SNDRV_TIMER_IOCTL_START: case SNDRV_TIMER_IOCTL_START_OLD: return snd_timer_user_start(file); case SNDRV_TIMER_IOCTL_STOP: case SNDRV_TIMER_IOCTL_STOP_OLD: return snd_timer_user_stop(file); case SNDRV_TIMER_IOCTL_CONTINUE: case SNDRV_TIMER_IOCTL_CONTINUE_OLD: return snd_timer_user_continue(file); case SNDRV_TIMER_IOCTL_PAUSE: case SNDRV_TIMER_IOCTL_PAUSE_OLD: return snd_timer_user_pause(file); } return -ENOTTY; } static int snd_timer_user_fasync(int fd, struct file * file, int on) { struct snd_timer_user *tu; tu = file->private_data; return fasync_helper(fd, file, on, &tu->fasync); } static ssize_t snd_timer_user_read(struct file *file, char __user *buffer, size_t count, loff_t *offset) { struct snd_timer_user *tu; long result = 0, unit; int err = 0; tu = file->private_data; unit = tu->tread ? sizeof(struct snd_timer_tread) : sizeof(struct snd_timer_read); spin_lock_irq(&tu->qlock); while ((long)count - result >= unit) { while (!tu->qused) { wait_queue_t wait; if ((file->f_flags & O_NONBLOCK) != 0 || result > 0) { err = -EAGAIN; break; } set_current_state(TASK_INTERRUPTIBLE); init_waitqueue_entry(&wait, current); add_wait_queue(&tu->qchange_sleep, &wait); spin_unlock_irq(&tu->qlock); schedule(); spin_lock_irq(&tu->qlock); remove_wait_queue(&tu->qchange_sleep, &wait); if (signal_pending(current)) { err = -ERESTARTSYS; break; } } spin_unlock_irq(&tu->qlock); if (err < 0) goto _error; if (tu->tread) { if (copy_to_user(buffer, &tu->tqueue[tu->qhead++], sizeof(struct snd_timer_tread))) { err = -EFAULT; goto _error; } } else { if (copy_to_user(buffer, &tu->queue[tu->qhead++], sizeof(struct snd_timer_read))) { err = -EFAULT; goto _error; } } tu->qhead %= tu->queue_size; result += unit; buffer += unit; spin_lock_irq(&tu->qlock); tu->qused--; } spin_unlock_irq(&tu->qlock); _error: return result > 0 ? 
result : err; } static unsigned int snd_timer_user_poll(struct file *file, poll_table * wait) { unsigned int mask; struct snd_timer_user *tu; tu = file->private_data; poll_wait(file, &tu->qchange_sleep, wait); mask = 0; if (tu->qused) mask |= POLLIN | POLLRDNORM; return mask; } #ifdef CONFIG_COMPAT #include "timer_compat.c" #else #define snd_timer_user_ioctl_compat NULL #endif static const struct file_operations snd_timer_f_ops = { .owner = THIS_MODULE, .read = snd_timer_user_read, .open = snd_timer_user_open, .release = snd_timer_user_release, .llseek = no_llseek, .poll = snd_timer_user_poll, .unlocked_ioctl = snd_timer_user_ioctl, .compat_ioctl = snd_timer_user_ioctl_compat, .fasync = snd_timer_user_fasync, }; /* unregister the system timer */ static void snd_timer_free_all(void) { struct snd_timer *timer, *n; list_for_each_entry_safe(timer, n, &snd_timer_list, device_list) snd_timer_free(timer); } static struct device timer_dev; /* * ENTRY functions */ static int __init alsa_timer_init(void) { int err; snd_device_initialize(&timer_dev, NULL); dev_set_name(&timer_dev, "timer"); #ifdef SNDRV_OSS_INFO_DEV_TIMERS snd_oss_info_register(SNDRV_OSS_INFO_DEV_TIMERS, SNDRV_CARDS - 1, "system timer"); #endif err = snd_timer_register_system(); if (err < 0) { pr_err("ALSA: unable to register system timer (%i)\n", err); put_device(&timer_dev); return err; } err = snd_register_device(SNDRV_DEVICE_TYPE_TIMER, NULL, 0, &snd_timer_f_ops, NULL, &timer_dev); if (err < 0) { pr_err("ALSA: unable to register timer device (%i)\n", err); snd_timer_free_all(); put_device(&timer_dev); return err; } snd_timer_proc_init(); return 0; } static void __exit alsa_timer_exit(void) { snd_unregister_device(&timer_dev); snd_timer_free_all(); put_device(&timer_dev); snd_timer_proc_done(); #ifdef SNDRV_OSS_INFO_DEV_TIMERS snd_oss_info_unregister(SNDRV_OSS_INFO_DEV_TIMERS, SNDRV_CARDS - 1); #endif } module_init(alsa_timer_init) module_exit(alsa_timer_exit) EXPORT_SYMBOL(snd_timer_open); EXPORT_SYMBOL(snd_timer_close); EXPORT_SYMBOL(snd_timer_resolution); EXPORT_SYMBOL(snd_timer_start); EXPORT_SYMBOL(snd_timer_stop); EXPORT_SYMBOL(snd_timer_continue); EXPORT_SYMBOL(snd_timer_pause); EXPORT_SYMBOL(snd_timer_new); EXPORT_SYMBOL(snd_timer_notify); EXPORT_SYMBOL(snd_timer_global_new); EXPORT_SYMBOL(snd_timer_global_free); EXPORT_SYMBOL(snd_timer_global_register); EXPORT_SYMBOL(snd_timer_interrupt);
./CrossVul/dataset_final_sorted/CWE-362/c/bad_4963_0
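A minimal userspace sketch of the bounded event queue that snd_timer_user_append_to_tqueue() and snd_timer_user_read() in the entry above maintain: qhead/qtail/qused indices, an overrun counter when the queue is full, and a lock held across the whole check-then-append step. Assumptions: POSIX threads stand in for the kernel spinlock, and the demo_* names, queue size and payload type are invented for illustration.

/*
 * Sketch only: a fixed-size ring queue with the same counter discipline as
 * the ALSA timer user queue, protected by a mutex instead of a spinlock.
 */
#include <pthread.h>
#include <stdio.h>

#define QUEUE_SIZE 8

struct demo_queue {
	pthread_mutex_t lock;
	int buf[QUEUE_SIZE];
	int qhead, qtail, qused;
	unsigned long overrun;
};

static void demo_append(struct demo_queue *q, int val)
{
	pthread_mutex_lock(&q->lock);
	if (q->qused >= QUEUE_SIZE) {
		q->overrun++;		/* full: drop and count, as the driver does */
	} else {
		q->buf[q->qtail] = val;
		q->qtail = (q->qtail + 1) % QUEUE_SIZE;
		q->qused++;
	}
	pthread_mutex_unlock(&q->lock);
}

static int demo_pop(struct demo_queue *q, int *val)
{
	int ok = 0;

	pthread_mutex_lock(&q->lock);
	if (q->qused > 0) {
		*val = q->buf[q->qhead];
		q->qhead = (q->qhead + 1) % QUEUE_SIZE;
		q->qused--;
		ok = 1;
	}
	pthread_mutex_unlock(&q->lock);
	return ok;
}

int main(void)
{
	static struct demo_queue q = { .lock = PTHREAD_MUTEX_INITIALIZER };
	int v, i;

	for (i = 0; i < 10; i++)	/* two more than fit */
		demo_append(&q, i);
	while (demo_pop(&q, &v))
		printf("%d ", v);
	printf("\noverruns: %lu\n", q.overrun);
	return 0;
}

Appending ten items to an eight-slot queue leaves overrun at 2, matching the driver's policy of dropping events rather than growing the queue; holding the lock across the test and the index update is what keeps the counters consistent against concurrent producers.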
crossvul-cpp_data_bad_2160_0
/* * 8253/8254 interval timer emulation * * Copyright (c) 2003-2004 Fabrice Bellard * Copyright (c) 2006 Intel Corporation * Copyright (c) 2007 Keir Fraser, XenSource Inc * Copyright (c) 2008 Intel Corporation * Copyright 2009 Red Hat, Inc. and/or its affiliates. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. * * Authors: * Sheng Yang <sheng.yang@intel.com> * Based on QEMU and Xen. */ #define pr_fmt(fmt) "pit: " fmt #include <linux/kvm_host.h> #include <linux/slab.h> #include "irq.h" #include "i8254.h" #include "x86.h" #ifndef CONFIG_X86_64 #define mod_64(x, y) ((x) - (y) * div64_u64(x, y)) #else #define mod_64(x, y) ((x) % (y)) #endif #define RW_STATE_LSB 1 #define RW_STATE_MSB 2 #define RW_STATE_WORD0 3 #define RW_STATE_WORD1 4 /* Compute with 96 bit intermediate result: (a*b)/c */ static u64 muldiv64(u64 a, u32 b, u32 c) { union { u64 ll; struct { u32 low, high; } l; } u, res; u64 rl, rh; u.ll = a; rl = (u64)u.l.low * (u64)b; rh = (u64)u.l.high * (u64)b; rh += (rl >> 32); res.l.high = div64_u64(rh, c); res.l.low = div64_u64(((mod_64(rh, c) << 32) + (rl & 0xffffffff)), c); return res.ll; } static void pit_set_gate(struct kvm *kvm, int channel, u32 val) { struct kvm_kpit_channel_state *c = &kvm->arch.vpit->pit_state.channels[channel]; WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock)); switch (c->mode) { default: case 0: case 4: /* XXX: just disable/enable counting */ break; case 1: case 2: case 3: case 5: /* Restart counting on rising edge. */ if (c->gate < val) c->count_load_time = ktime_get(); break; } c->gate = val; } static int pit_get_gate(struct kvm *kvm, int channel) { WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock)); return kvm->arch.vpit->pit_state.channels[channel].gate; } static s64 __kpit_elapsed(struct kvm *kvm) { s64 elapsed; ktime_t remaining; struct kvm_kpit_state *ps = &kvm->arch.vpit->pit_state; if (!ps->period) return 0; /* * The Counter does not stop when it reaches zero. In * Modes 0, 1, 4, and 5 the Counter ``wraps around'' to * the highest count, either FFFF hex for binary counting * or 9999 for BCD counting, and continues counting. * Modes 2 and 3 are periodic; the Counter reloads * itself with the initial count and continues counting * from there. 
*/ remaining = hrtimer_get_remaining(&ps->timer); elapsed = ps->period - ktime_to_ns(remaining); return elapsed; } static s64 kpit_elapsed(struct kvm *kvm, struct kvm_kpit_channel_state *c, int channel) { if (channel == 0) return __kpit_elapsed(kvm); return ktime_to_ns(ktime_sub(ktime_get(), c->count_load_time)); } static int pit_get_count(struct kvm *kvm, int channel) { struct kvm_kpit_channel_state *c = &kvm->arch.vpit->pit_state.channels[channel]; s64 d, t; int counter; WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock)); t = kpit_elapsed(kvm, c, channel); d = muldiv64(t, KVM_PIT_FREQ, NSEC_PER_SEC); switch (c->mode) { case 0: case 1: case 4: case 5: counter = (c->count - d) & 0xffff; break; case 3: /* XXX: may be incorrect for odd counts */ counter = c->count - (mod_64((2 * d), c->count)); break; default: counter = c->count - mod_64(d, c->count); break; } return counter; } static int pit_get_out(struct kvm *kvm, int channel) { struct kvm_kpit_channel_state *c = &kvm->arch.vpit->pit_state.channels[channel]; s64 d, t; int out; WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock)); t = kpit_elapsed(kvm, c, channel); d = muldiv64(t, KVM_PIT_FREQ, NSEC_PER_SEC); switch (c->mode) { default: case 0: out = (d >= c->count); break; case 1: out = (d < c->count); break; case 2: out = ((mod_64(d, c->count) == 0) && (d != 0)); break; case 3: out = (mod_64(d, c->count) < ((c->count + 1) >> 1)); break; case 4: case 5: out = (d == c->count); break; } return out; } static void pit_latch_count(struct kvm *kvm, int channel) { struct kvm_kpit_channel_state *c = &kvm->arch.vpit->pit_state.channels[channel]; WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock)); if (!c->count_latched) { c->latched_count = pit_get_count(kvm, channel); c->count_latched = c->rw_mode; } } static void pit_latch_status(struct kvm *kvm, int channel) { struct kvm_kpit_channel_state *c = &kvm->arch.vpit->pit_state.channels[channel]; WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock)); if (!c->status_latched) { /* TODO: Return NULL COUNT (bit 6). */ c->status = ((pit_get_out(kvm, channel) << 7) | (c->rw_mode << 4) | (c->mode << 1) | c->bcd); c->status_latched = 1; } } static void kvm_pit_ack_irq(struct kvm_irq_ack_notifier *kian) { struct kvm_kpit_state *ps = container_of(kian, struct kvm_kpit_state, irq_ack_notifier); int value; spin_lock(&ps->inject_lock); value = atomic_dec_return(&ps->pending); if (value < 0) /* spurious acks can be generated if, for example, the * PIC is being reset. Handle it gracefully here */ atomic_inc(&ps->pending); else if (value > 0) /* in this case, we had multiple outstanding pit interrupts * that we needed to inject. Reinject */ queue_kthread_work(&ps->pit->worker, &ps->pit->expired); ps->irq_ack = 1; spin_unlock(&ps->inject_lock); } void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu) { struct kvm_pit *pit = vcpu->kvm->arch.vpit; struct hrtimer *timer; if (!kvm_vcpu_is_bsp(vcpu) || !pit) return; timer = &pit->pit_state.timer; if (hrtimer_cancel(timer)) hrtimer_start_expires(timer, HRTIMER_MODE_ABS); } static void destroy_pit_timer(struct kvm_pit *pit) { hrtimer_cancel(&pit->pit_state.timer); flush_kthread_work(&pit->expired); } static void pit_do_work(struct kthread_work *work) { struct kvm_pit *pit = container_of(work, struct kvm_pit, expired); struct kvm *kvm = pit->kvm; struct kvm_vcpu *vcpu; int i; struct kvm_kpit_state *ps = &pit->pit_state; int inject = 0; /* Try to inject pending interrupts when * last one has been acked. 
*/ spin_lock(&ps->inject_lock); if (ps->irq_ack) { ps->irq_ack = 0; inject = 1; } spin_unlock(&ps->inject_lock); if (inject) { kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 1, false); kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 0, false); /* * Provides NMI watchdog support via Virtual Wire mode. * The route is: PIT -> PIC -> LVT0 in NMI mode. * * Note: Our Virtual Wire implementation is simplified, only * propagating PIT interrupts to all VCPUs when they have set * LVT0 to NMI delivery. Other PIC interrupts are just sent to * VCPU0, and only if its LVT0 is in EXTINT mode. */ if (kvm->arch.vapics_in_nmi_mode > 0) kvm_for_each_vcpu(i, vcpu, kvm) kvm_apic_nmi_wd_deliver(vcpu); } } static enum hrtimer_restart pit_timer_fn(struct hrtimer *data) { struct kvm_kpit_state *ps = container_of(data, struct kvm_kpit_state, timer); struct kvm_pit *pt = ps->kvm->arch.vpit; if (ps->reinject || !atomic_read(&ps->pending)) { atomic_inc(&ps->pending); queue_kthread_work(&pt->worker, &pt->expired); } if (ps->is_periodic) { hrtimer_add_expires_ns(&ps->timer, ps->period); return HRTIMER_RESTART; } else return HRTIMER_NORESTART; } static void create_pit_timer(struct kvm *kvm, u32 val, int is_period) { struct kvm_kpit_state *ps = &kvm->arch.vpit->pit_state; s64 interval; if (!irqchip_in_kernel(kvm) || ps->flags & KVM_PIT_FLAGS_HPET_LEGACY) return; interval = muldiv64(val, NSEC_PER_SEC, KVM_PIT_FREQ); pr_debug("create pit timer, interval is %llu nsec\n", interval); /* TODO The new value only affected after the retriggered */ hrtimer_cancel(&ps->timer); flush_kthread_work(&ps->pit->expired); ps->period = interval; ps->is_periodic = is_period; ps->timer.function = pit_timer_fn; ps->kvm = ps->pit->kvm; atomic_set(&ps->pending, 0); ps->irq_ack = 1; /* * Do not allow the guest to program periodic timers with small * interval, since the hrtimers are not throttled by the host * scheduler. */ if (ps->is_periodic) { s64 min_period = min_timer_period_us * 1000LL; if (ps->period < min_period) { pr_info_ratelimited( "kvm: requested %lld ns " "i8254 timer period limited to %lld ns\n", ps->period, min_period); ps->period = min_period; } } hrtimer_start(&ps->timer, ktime_add_ns(ktime_get(), interval), HRTIMER_MODE_ABS); } static void pit_load_count(struct kvm *kvm, int channel, u32 val) { struct kvm_kpit_state *ps = &kvm->arch.vpit->pit_state; WARN_ON(!mutex_is_locked(&ps->lock)); pr_debug("load_count val is %d, channel is %d\n", val, channel); /* * The largest possible initial count is 0; this is equivalent * to 2^16 for binary counting and 10^4 for BCD counting.
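 * (Worked example, for illustration only: a programmed count of 0 is
 *  converted to 0x10000 = 65536 just below; at the canonical 1.193182 MHz
 *  PIT input clock (KVM_PIT_FREQ) that count gives a period of
 *  65536 / 1193182 Hz ~= 54.9 ms, the traditional ~18.2 Hz PC timer tick.)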
*/ if (val == 0) val = 0x10000; ps->channels[channel].count = val; if (channel != 0) { ps->channels[channel].count_load_time = ktime_get(); return; } /* Two types of timer * mode 1 is one shot, mode 2 is period, otherwise del timer */ switch (ps->channels[0].mode) { case 0: case 1: /* FIXME: enhance mode 4 precision */ case 4: create_pit_timer(kvm, val, 0); break; case 2: case 3: create_pit_timer(kvm, val, 1); break; default: destroy_pit_timer(kvm->arch.vpit); } } void kvm_pit_load_count(struct kvm *kvm, int channel, u32 val, int hpet_legacy_start) { u8 saved_mode; if (hpet_legacy_start) { /* save existing mode for later reenablement */ saved_mode = kvm->arch.vpit->pit_state.channels[0].mode; kvm->arch.vpit->pit_state.channels[0].mode = 0xff; /* disable timer */ pit_load_count(kvm, channel, val); kvm->arch.vpit->pit_state.channels[0].mode = saved_mode; } else { pit_load_count(kvm, channel, val); } } static inline struct kvm_pit *dev_to_pit(struct kvm_io_device *dev) { return container_of(dev, struct kvm_pit, dev); } static inline struct kvm_pit *speaker_to_pit(struct kvm_io_device *dev) { return container_of(dev, struct kvm_pit, speaker_dev); } static inline int pit_in_range(gpa_t addr) { return ((addr >= KVM_PIT_BASE_ADDRESS) && (addr < KVM_PIT_BASE_ADDRESS + KVM_PIT_MEM_LENGTH)); } static int pit_ioport_write(struct kvm_io_device *this, gpa_t addr, int len, const void *data) { struct kvm_pit *pit = dev_to_pit(this); struct kvm_kpit_state *pit_state = &pit->pit_state; struct kvm *kvm = pit->kvm; int channel, access; struct kvm_kpit_channel_state *s; u32 val = *(u32 *) data; if (!pit_in_range(addr)) return -EOPNOTSUPP; val &= 0xff; addr &= KVM_PIT_CHANNEL_MASK; mutex_lock(&pit_state->lock); if (val != 0) pr_debug("write addr is 0x%x, len is %d, val is 0x%x\n", (unsigned int)addr, len, val); if (addr == 3) { channel = val >> 6; if (channel == 3) { /* Read-Back Command. */ for (channel = 0; channel < 3; channel++) { s = &pit_state->channels[channel]; if (val & (2 << channel)) { if (!(val & 0x20)) pit_latch_count(kvm, channel); if (!(val & 0x10)) pit_latch_status(kvm, channel); } } } else { /* Select Counter <channel>. */ s = &pit_state->channels[channel]; access = (val >> 4) & KVM_PIT_CHANNEL_MASK; if (access == 0) { pit_latch_count(kvm, channel); } else { s->rw_mode = access; s->read_state = access; s->write_state = access; s->mode = (val >> 1) & 7; if (s->mode > 5) s->mode -= 4; s->bcd = val & 1; } } } else { /* Write Count. 
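 * (Illustrative trace of the lobyte/hibyte sequence handled below: in
 *  RW_STATE_WORD0 the guest's first write, say 0x34, is parked in
 *  write_latch; the follow-up write, say 0x12, arrives in RW_STATE_WORD1
 *  and loads the full 16-bit count 0x1234 via pit_load_count().)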
*/ s = &pit_state->channels[addr]; switch (s->write_state) { default: case RW_STATE_LSB: pit_load_count(kvm, addr, val); break; case RW_STATE_MSB: pit_load_count(kvm, addr, val << 8); break; case RW_STATE_WORD0: s->write_latch = val; s->write_state = RW_STATE_WORD1; break; case RW_STATE_WORD1: pit_load_count(kvm, addr, s->write_latch | (val << 8)); s->write_state = RW_STATE_WORD0; break; } } mutex_unlock(&pit_state->lock); return 0; } static int pit_ioport_read(struct kvm_io_device *this, gpa_t addr, int len, void *data) { struct kvm_pit *pit = dev_to_pit(this); struct kvm_kpit_state *pit_state = &pit->pit_state; struct kvm *kvm = pit->kvm; int ret, count; struct kvm_kpit_channel_state *s; if (!pit_in_range(addr)) return -EOPNOTSUPP; addr &= KVM_PIT_CHANNEL_MASK; if (addr == 3) return 0; s = &pit_state->channels[addr]; mutex_lock(&pit_state->lock); if (s->status_latched) { s->status_latched = 0; ret = s->status; } else if (s->count_latched) { switch (s->count_latched) { default: case RW_STATE_LSB: ret = s->latched_count & 0xff; s->count_latched = 0; break; case RW_STATE_MSB: ret = s->latched_count >> 8; s->count_latched = 0; break; case RW_STATE_WORD0: ret = s->latched_count & 0xff; s->count_latched = RW_STATE_MSB; break; } } else { switch (s->read_state) { default: case RW_STATE_LSB: count = pit_get_count(kvm, addr); ret = count & 0xff; break; case RW_STATE_MSB: count = pit_get_count(kvm, addr); ret = (count >> 8) & 0xff; break; case RW_STATE_WORD0: count = pit_get_count(kvm, addr); ret = count & 0xff; s->read_state = RW_STATE_WORD1; break; case RW_STATE_WORD1: count = pit_get_count(kvm, addr); ret = (count >> 8) & 0xff; s->read_state = RW_STATE_WORD0; break; } } if (len > sizeof(ret)) len = sizeof(ret); memcpy(data, (char *)&ret, len); mutex_unlock(&pit_state->lock); return 0; } static int speaker_ioport_write(struct kvm_io_device *this, gpa_t addr, int len, const void *data) { struct kvm_pit *pit = speaker_to_pit(this); struct kvm_kpit_state *pit_state = &pit->pit_state; struct kvm *kvm = pit->kvm; u32 val = *(u32 *) data; if (addr != KVM_SPEAKER_BASE_ADDRESS) return -EOPNOTSUPP; mutex_lock(&pit_state->lock); pit_state->speaker_data_on = (val >> 1) & 1; pit_set_gate(kvm, 2, val & 1); mutex_unlock(&pit_state->lock); return 0; } static int speaker_ioport_read(struct kvm_io_device *this, gpa_t addr, int len, void *data) { struct kvm_pit *pit = speaker_to_pit(this); struct kvm_kpit_state *pit_state = &pit->pit_state; struct kvm *kvm = pit->kvm; unsigned int refresh_clock; int ret; if (addr != KVM_SPEAKER_BASE_ADDRESS) return -EOPNOTSUPP; /* Refresh clock toggles at about 15us. We approximate as 2^14ns. 
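 * (Arithmetic behind the approximation below: 2^14 ns = 16384 ns, about
 *  16.4 us, so bit 14 of the nanosecond clock flips roughly every 16.4 us,
 *  close to the ~15 us refresh toggle of real hardware.)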
*/ refresh_clock = ((unsigned int)ktime_to_ns(ktime_get()) >> 14) & 1; mutex_lock(&pit_state->lock); ret = ((pit_state->speaker_data_on << 1) | pit_get_gate(kvm, 2) | (pit_get_out(kvm, 2) << 5) | (refresh_clock << 4)); if (len > sizeof(ret)) len = sizeof(ret); memcpy(data, (char *)&ret, len); mutex_unlock(&pit_state->lock); return 0; } void kvm_pit_reset(struct kvm_pit *pit) { int i; struct kvm_kpit_channel_state *c; mutex_lock(&pit->pit_state.lock); pit->pit_state.flags = 0; for (i = 0; i < 3; i++) { c = &pit->pit_state.channels[i]; c->mode = 0xff; c->gate = (i != 2); pit_load_count(pit->kvm, i, 0); } mutex_unlock(&pit->pit_state.lock); atomic_set(&pit->pit_state.pending, 0); pit->pit_state.irq_ack = 1; } static void pit_mask_notifer(struct kvm_irq_mask_notifier *kimn, bool mask) { struct kvm_pit *pit = container_of(kimn, struct kvm_pit, mask_notifier); if (!mask) { atomic_set(&pit->pit_state.pending, 0); pit->pit_state.irq_ack = 1; } } static const struct kvm_io_device_ops pit_dev_ops = { .read = pit_ioport_read, .write = pit_ioport_write, }; static const struct kvm_io_device_ops speaker_dev_ops = { .read = speaker_ioport_read, .write = speaker_ioport_write, }; /* Caller must hold slots_lock */ struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags) { struct kvm_pit *pit; struct kvm_kpit_state *pit_state; struct pid *pid; pid_t pid_nr; int ret; pit = kzalloc(sizeof(struct kvm_pit), GFP_KERNEL); if (!pit) return NULL; pit->irq_source_id = kvm_request_irq_source_id(kvm); if (pit->irq_source_id < 0) { kfree(pit); return NULL; } mutex_init(&pit->pit_state.lock); mutex_lock(&pit->pit_state.lock); spin_lock_init(&pit->pit_state.inject_lock); pid = get_pid(task_tgid(current)); pid_nr = pid_vnr(pid); put_pid(pid); init_kthread_worker(&pit->worker); pit->worker_task = kthread_run(kthread_worker_fn, &pit->worker, "kvm-pit/%d", pid_nr); if (IS_ERR(pit->worker_task)) { mutex_unlock(&pit->pit_state.lock); kvm_free_irq_source_id(kvm, pit->irq_source_id); kfree(pit); return NULL; } init_kthread_work(&pit->expired, pit_do_work); kvm->arch.vpit = pit; pit->kvm = kvm; pit_state = &pit->pit_state; pit_state->pit = pit; hrtimer_init(&pit_state->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); pit_state->irq_ack_notifier.gsi = 0; pit_state->irq_ack_notifier.irq_acked = kvm_pit_ack_irq; kvm_register_irq_ack_notifier(kvm, &pit_state->irq_ack_notifier); pit_state->reinject = true; mutex_unlock(&pit->pit_state.lock); kvm_pit_reset(pit); pit->mask_notifier.func = pit_mask_notifer; kvm_register_irq_mask_notifier(kvm, 0, &pit->mask_notifier); kvm_iodevice_init(&pit->dev, &pit_dev_ops); ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS, KVM_PIT_BASE_ADDRESS, KVM_PIT_MEM_LENGTH, &pit->dev); if (ret < 0) goto fail; if (flags & KVM_PIT_SPEAKER_DUMMY) { kvm_iodevice_init(&pit->speaker_dev, &speaker_dev_ops); ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS, KVM_SPEAKER_BASE_ADDRESS, 4, &pit->speaker_dev); if (ret < 0) goto fail_unregister; } return pit; fail_unregister: kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &pit->dev); fail: kvm_unregister_irq_mask_notifier(kvm, 0, &pit->mask_notifier); kvm_unregister_irq_ack_notifier(kvm, &pit_state->irq_ack_notifier); kvm_free_irq_source_id(kvm, pit->irq_source_id); kthread_stop(pit->worker_task); kfree(pit); return NULL; } void kvm_free_pit(struct kvm *kvm) { struct hrtimer *timer; if (kvm->arch.vpit) { kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &kvm->arch.vpit->dev); kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &kvm->arch.vpit->speaker_dev); kvm_unregister_irq_mask_notifier(kvm, 0, 
&kvm->arch.vpit->mask_notifier); kvm_unregister_irq_ack_notifier(kvm, &kvm->arch.vpit->pit_state.irq_ack_notifier); mutex_lock(&kvm->arch.vpit->pit_state.lock); timer = &kvm->arch.vpit->pit_state.timer; hrtimer_cancel(timer); flush_kthread_work(&kvm->arch.vpit->expired); kthread_stop(kvm->arch.vpit->worker_task); kvm_free_irq_source_id(kvm, kvm->arch.vpit->irq_source_id); mutex_unlock(&kvm->arch.vpit->pit_state.lock); kfree(kvm->arch.vpit); } }
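/*
 * Illustrative, standalone user-space sketch (not part of the i8254.c file
 * above): it redoes the count -> period conversion that create_pit_timer()
 * performs with muldiv64(val, NSEC_PER_SEC, KVM_PIT_FREQ). Plain 64-bit
 * arithmetic is sufficient for this particular input, since 0x10000 * 10^9
 * still fits in a u64; the kernel's muldiv64() exists for the general case
 * where the product needs a 96-bit intermediate. PIT_FREQ_HZ mirrors the
 * KVM_PIT_FREQ constant (1193182 Hz).
 */
#include <stdio.h>
#include <stdint.h>

#define PIT_FREQ_HZ   1193182ULL
#define NSEC_PER_SEC  1000000000ULL

int main(void)
{
	/* A programmed count of 0 is treated as the maximum, 0x10000. */
	uint64_t count = 0x10000;

	/* Same computation as create_pit_timer(): count -> timer period. */
	uint64_t interval_ns = count * NSEC_PER_SEC / PIT_FREQ_HZ;

	printf("count 0x%llx -> period %llu ns (~%.2f ms, ~%.2f Hz)\n",
	       (unsigned long long)count,
	       (unsigned long long)interval_ns,
	       interval_ns / 1e6, 1e9 / interval_ns);
	return 0;
}
/* Expected result: period 54925401 ns, about 54.93 ms (~18.21 Hz). */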
./CrossVul/dataset_final_sorted/CWE-362/c/bad_2160_0
crossvul-cpp_data_good_1819_4
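/*
 * Illustrative, standalone user-space sketch placed ahead of the ext4
 * super.c source that follows. It only demonstrates the layout idea used
 * by ext4_superblock_csum(): checksum every byte of the structure that
 * precedes the checksum field, locating that boundary with offsetof().
 * The toy struct, the local bitwise crc32c() helper and the ~0 seed are
 * stand-ins for this sketch; no claim is made that the result is
 * bit-for-bit identical to what the kernel's crc32c driver produces.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>
#include <string.h>

/* Bitwise CRC-32C (Castagnoli), reflected polynomial 0x82F63B78. */
static uint32_t crc32c(uint32_t crc, const void *buf, size_t len)
{
	const uint8_t *p = buf;

	while (len--) {
		crc ^= *p++;
		for (int k = 0; k < 8; k++)
			crc = (crc & 1) ? (crc >> 1) ^ 0x82F63B78u : (crc >> 1);
	}
	return crc;
}

/* Toy "superblock": only the field layout matters for the example. */
struct toy_super_block {
	uint8_t  data[60];	/* everything before the checksum */
	uint32_t s_checksum;	/* checksum covers the bytes above */
};

int main(void)
{
	struct toy_super_block sb;
	size_t covered;

	memset(&sb, 0xab, sizeof(sb));

	/* Checksum only the bytes preceding the checksum field itself,
	 * the same offsetof() trick used by ext4_superblock_csum(). */
	covered = offsetof(struct toy_super_block, s_checksum);
	sb.s_checksum = crc32c(~0u, &sb, covered);

	printf("covered %zu bytes, checksum 0x%08x\n",
	       covered, (unsigned)sb.s_checksum);
	return 0;
}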
/* * linux/fs/ext4/super.c * * Copyright (C) 1992, 1993, 1994, 1995 * Remy Card (card@masi.ibp.fr) * Laboratoire MASI - Institut Blaise Pascal * Universite Pierre et Marie Curie (Paris VI) * * from * * linux/fs/minix/inode.c * * Copyright (C) 1991, 1992 Linus Torvalds * * Big-endian to little-endian byte-swapping/bitmaps by * David S. Miller (davem@caip.rutgers.edu), 1995 */ #include <linux/module.h> #include <linux/string.h> #include <linux/fs.h> #include <linux/time.h> #include <linux/vmalloc.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/blkdev.h> #include <linux/backing-dev.h> #include <linux/parser.h> #include <linux/buffer_head.h> #include <linux/exportfs.h> #include <linux/vfs.h> #include <linux/random.h> #include <linux/mount.h> #include <linux/namei.h> #include <linux/quotaops.h> #include <linux/seq_file.h> #include <linux/ctype.h> #include <linux/log2.h> #include <linux/crc16.h> #include <linux/cleancache.h> #include <asm/uaccess.h> #include <linux/kthread.h> #include <linux/freezer.h> #include "ext4.h" #include "ext4_extents.h" /* Needed for trace points definition */ #include "ext4_jbd2.h" #include "xattr.h" #include "acl.h" #include "mballoc.h" #define CREATE_TRACE_POINTS #include <trace/events/ext4.h> static struct ext4_lazy_init *ext4_li_info; static struct mutex ext4_li_mtx; static int ext4_mballoc_ready; static struct ratelimit_state ext4_mount_msg_ratelimit; static int ext4_load_journal(struct super_block *, struct ext4_super_block *, unsigned long journal_devnum); static int ext4_show_options(struct seq_file *seq, struct dentry *root); static int ext4_commit_super(struct super_block *sb, int sync); static void ext4_mark_recovery_complete(struct super_block *sb, struct ext4_super_block *es); static void ext4_clear_journal_err(struct super_block *sb, struct ext4_super_block *es); static int ext4_sync_fs(struct super_block *sb, int wait); static int ext4_remount(struct super_block *sb, int *flags, char *data); static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf); static int ext4_unfreeze(struct super_block *sb); static int ext4_freeze(struct super_block *sb); static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data); static inline int ext2_feature_set_ok(struct super_block *sb); static inline int ext3_feature_set_ok(struct super_block *sb); static int ext4_feature_set_ok(struct super_block *sb, int readonly); static void ext4_destroy_lazyinit_thread(void); static void ext4_unregister_li_request(struct super_block *sb); static void ext4_clear_request_list(void); #if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT2) static struct file_system_type ext2_fs_type = { .owner = THIS_MODULE, .name = "ext2", .mount = ext4_mount, .kill_sb = kill_block_super, .fs_flags = FS_REQUIRES_DEV, }; MODULE_ALIAS_FS("ext2"); MODULE_ALIAS("ext2"); #define IS_EXT2_SB(sb) ((sb)->s_bdev->bd_holder == &ext2_fs_type) #else #define IS_EXT2_SB(sb) (0) #endif static struct file_system_type ext3_fs_type = { .owner = THIS_MODULE, .name = "ext3", .mount = ext4_mount, .kill_sb = kill_block_super, .fs_flags = FS_REQUIRES_DEV, }; MODULE_ALIAS_FS("ext3"); MODULE_ALIAS("ext3"); #define IS_EXT3_SB(sb) ((sb)->s_bdev->bd_holder == &ext3_fs_type) static int ext4_verify_csum_type(struct super_block *sb, struct ext4_super_block *es) { if (!ext4_has_feature_metadata_csum(sb)) return 1; return es->s_checksum_type == EXT4_CRC32C_CHKSUM; } static __le32 ext4_superblock_csum(struct 
super_block *sb, struct ext4_super_block *es) { struct ext4_sb_info *sbi = EXT4_SB(sb); int offset = offsetof(struct ext4_super_block, s_checksum); __u32 csum; csum = ext4_chksum(sbi, ~0, (char *)es, offset); return cpu_to_le32(csum); } static int ext4_superblock_csum_verify(struct super_block *sb, struct ext4_super_block *es) { if (!ext4_has_metadata_csum(sb)) return 1; return es->s_checksum == ext4_superblock_csum(sb, es); } void ext4_superblock_csum_set(struct super_block *sb) { struct ext4_super_block *es = EXT4_SB(sb)->s_es; if (!ext4_has_metadata_csum(sb)) return; es->s_checksum = ext4_superblock_csum(sb, es); } void *ext4_kvmalloc(size_t size, gfp_t flags) { void *ret; ret = kmalloc(size, flags | __GFP_NOWARN); if (!ret) ret = __vmalloc(size, flags, PAGE_KERNEL); return ret; } void *ext4_kvzalloc(size_t size, gfp_t flags) { void *ret; ret = kzalloc(size, flags | __GFP_NOWARN); if (!ret) ret = __vmalloc(size, flags | __GFP_ZERO, PAGE_KERNEL); return ret; } ext4_fsblk_t ext4_block_bitmap(struct super_block *sb, struct ext4_group_desc *bg) { return le32_to_cpu(bg->bg_block_bitmap_lo) | (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ? (ext4_fsblk_t)le32_to_cpu(bg->bg_block_bitmap_hi) << 32 : 0); } ext4_fsblk_t ext4_inode_bitmap(struct super_block *sb, struct ext4_group_desc *bg) { return le32_to_cpu(bg->bg_inode_bitmap_lo) | (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ? (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_bitmap_hi) << 32 : 0); } ext4_fsblk_t ext4_inode_table(struct super_block *sb, struct ext4_group_desc *bg) { return le32_to_cpu(bg->bg_inode_table_lo) | (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ? (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_table_hi) << 32 : 0); } __u32 ext4_free_group_clusters(struct super_block *sb, struct ext4_group_desc *bg) { return le16_to_cpu(bg->bg_free_blocks_count_lo) | (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ? (__u32)le16_to_cpu(bg->bg_free_blocks_count_hi) << 16 : 0); } __u32 ext4_free_inodes_count(struct super_block *sb, struct ext4_group_desc *bg) { return le16_to_cpu(bg->bg_free_inodes_count_lo) | (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ? (__u32)le16_to_cpu(bg->bg_free_inodes_count_hi) << 16 : 0); } __u32 ext4_used_dirs_count(struct super_block *sb, struct ext4_group_desc *bg) { return le16_to_cpu(bg->bg_used_dirs_count_lo) | (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ? (__u32)le16_to_cpu(bg->bg_used_dirs_count_hi) << 16 : 0); } __u32 ext4_itable_unused_count(struct super_block *sb, struct ext4_group_desc *bg) { return le16_to_cpu(bg->bg_itable_unused_lo) | (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ? 
(__u32)le16_to_cpu(bg->bg_itable_unused_hi) << 16 : 0); } void ext4_block_bitmap_set(struct super_block *sb, struct ext4_group_desc *bg, ext4_fsblk_t blk) { bg->bg_block_bitmap_lo = cpu_to_le32((u32)blk); if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT) bg->bg_block_bitmap_hi = cpu_to_le32(blk >> 32); } void ext4_inode_bitmap_set(struct super_block *sb, struct ext4_group_desc *bg, ext4_fsblk_t blk) { bg->bg_inode_bitmap_lo = cpu_to_le32((u32)blk); if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT) bg->bg_inode_bitmap_hi = cpu_to_le32(blk >> 32); } void ext4_inode_table_set(struct super_block *sb, struct ext4_group_desc *bg, ext4_fsblk_t blk) { bg->bg_inode_table_lo = cpu_to_le32((u32)blk); if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT) bg->bg_inode_table_hi = cpu_to_le32(blk >> 32); } void ext4_free_group_clusters_set(struct super_block *sb, struct ext4_group_desc *bg, __u32 count) { bg->bg_free_blocks_count_lo = cpu_to_le16((__u16)count); if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT) bg->bg_free_blocks_count_hi = cpu_to_le16(count >> 16); } void ext4_free_inodes_set(struct super_block *sb, struct ext4_group_desc *bg, __u32 count) { bg->bg_free_inodes_count_lo = cpu_to_le16((__u16)count); if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT) bg->bg_free_inodes_count_hi = cpu_to_le16(count >> 16); } void ext4_used_dirs_set(struct super_block *sb, struct ext4_group_desc *bg, __u32 count) { bg->bg_used_dirs_count_lo = cpu_to_le16((__u16)count); if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT) bg->bg_used_dirs_count_hi = cpu_to_le16(count >> 16); } void ext4_itable_unused_set(struct super_block *sb, struct ext4_group_desc *bg, __u32 count) { bg->bg_itable_unused_lo = cpu_to_le16((__u16)count); if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT) bg->bg_itable_unused_hi = cpu_to_le16(count >> 16); } static void __save_error_info(struct super_block *sb, const char *func, unsigned int line) { struct ext4_super_block *es = EXT4_SB(sb)->s_es; EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS; if (bdev_read_only(sb->s_bdev)) return; es->s_state |= cpu_to_le16(EXT4_ERROR_FS); es->s_last_error_time = cpu_to_le32(get_seconds()); strncpy(es->s_last_error_func, func, sizeof(es->s_last_error_func)); es->s_last_error_line = cpu_to_le32(line); if (!es->s_first_error_time) { es->s_first_error_time = es->s_last_error_time; strncpy(es->s_first_error_func, func, sizeof(es->s_first_error_func)); es->s_first_error_line = cpu_to_le32(line); es->s_first_error_ino = es->s_last_error_ino; es->s_first_error_block = es->s_last_error_block; } /* * Start the daily error reporting function if it hasn't been * started already */ if (!es->s_error_count) mod_timer(&EXT4_SB(sb)->s_err_report, jiffies + 24*60*60*HZ); le32_add_cpu(&es->s_error_count, 1); } static void save_error_info(struct super_block *sb, const char *func, unsigned int line) { __save_error_info(sb, func, line); ext4_commit_super(sb, 1); } /* * The del_gendisk() function uninitializes the disk-specific data * structures, including the bdi structure, without telling anyone * else. Once this happens, any attempt to call mark_buffer_dirty() * (for example, by ext4_commit_super), will cause a kernel OOPS. * This is a kludge to prevent these oops until we can put in a proper * hook in del_gendisk() to inform the VFS and file system layers. 
*/ static int block_device_ejected(struct super_block *sb) { struct inode *bd_inode = sb->s_bdev->bd_inode; struct backing_dev_info *bdi = inode_to_bdi(bd_inode); return bdi->dev == NULL; } static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn) { struct super_block *sb = journal->j_private; struct ext4_sb_info *sbi = EXT4_SB(sb); int error = is_journal_aborted(journal); struct ext4_journal_cb_entry *jce; BUG_ON(txn->t_state == T_FINISHED); spin_lock(&sbi->s_md_lock); while (!list_empty(&txn->t_private_list)) { jce = list_entry(txn->t_private_list.next, struct ext4_journal_cb_entry, jce_list); list_del_init(&jce->jce_list); spin_unlock(&sbi->s_md_lock); jce->jce_func(sb, jce, error); spin_lock(&sbi->s_md_lock); } spin_unlock(&sbi->s_md_lock); } /* Deal with the reporting of failure conditions on a filesystem such as * inconsistencies detected or read IO failures. * * On ext2, we can store the error state of the filesystem in the * superblock. That is not possible on ext4, because we may have other * write ordering constraints on the superblock which prevent us from * writing it out straight away; and given that the journal is about to * be aborted, we can't rely on the current, or future, transactions to * write out the superblock safely. * * We'll just use the jbd2_journal_abort() error code to record an error in * the journal instead. On recovery, the journal will complain about * that error until we've noted it down and cleared it. */ static void ext4_handle_error(struct super_block *sb) { if (sb->s_flags & MS_RDONLY) return; if (!test_opt(sb, ERRORS_CONT)) { journal_t *journal = EXT4_SB(sb)->s_journal; EXT4_SB(sb)->s_mount_flags |= EXT4_MF_FS_ABORTED; if (journal) jbd2_journal_abort(journal, -EIO); } if (test_opt(sb, ERRORS_RO)) { ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only"); /* * Make sure updated value of ->s_mount_flags will be visible * before ->s_flags update */ smp_wmb(); sb->s_flags |= MS_RDONLY; } if (test_opt(sb, ERRORS_PANIC)) { if (EXT4_SB(sb)->s_journal && !(EXT4_SB(sb)->s_journal->j_flags & JBD2_REC_ERR)) return; panic("EXT4-fs (device %s): panic forced after error\n", sb->s_id); } } #define ext4_error_ratelimit(sb) \ ___ratelimit(&(EXT4_SB(sb)->s_err_ratelimit_state), \ "EXT4-fs error") void __ext4_error(struct super_block *sb, const char *function, unsigned int line, const char *fmt, ...) { struct va_format vaf; va_list args; if (ext4_error_ratelimit(sb)) { va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: comm %s: %pV\n", sb->s_id, function, line, current->comm, &vaf); va_end(args); } save_error_info(sb, function, line); ext4_handle_error(sb); } void __ext4_error_inode(struct inode *inode, const char *function, unsigned int line, ext4_fsblk_t block, const char *fmt, ...) 
{ va_list args; struct va_format vaf; struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es; es->s_last_error_ino = cpu_to_le32(inode->i_ino); es->s_last_error_block = cpu_to_le64(block); if (ext4_error_ratelimit(inode->i_sb)) { va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; if (block) printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: " "inode #%lu: block %llu: comm %s: %pV\n", inode->i_sb->s_id, function, line, inode->i_ino, block, current->comm, &vaf); else printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: " "inode #%lu: comm %s: %pV\n", inode->i_sb->s_id, function, line, inode->i_ino, current->comm, &vaf); va_end(args); } save_error_info(inode->i_sb, function, line); ext4_handle_error(inode->i_sb); } void __ext4_error_file(struct file *file, const char *function, unsigned int line, ext4_fsblk_t block, const char *fmt, ...) { va_list args; struct va_format vaf; struct ext4_super_block *es; struct inode *inode = file_inode(file); char pathname[80], *path; es = EXT4_SB(inode->i_sb)->s_es; es->s_last_error_ino = cpu_to_le32(inode->i_ino); if (ext4_error_ratelimit(inode->i_sb)) { path = file_path(file, pathname, sizeof(pathname)); if (IS_ERR(path)) path = "(unknown)"; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; if (block) printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: inode #%lu: " "block %llu: comm %s: path %s: %pV\n", inode->i_sb->s_id, function, line, inode->i_ino, block, current->comm, path, &vaf); else printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: inode #%lu: " "comm %s: path %s: %pV\n", inode->i_sb->s_id, function, line, inode->i_ino, current->comm, path, &vaf); va_end(args); } save_error_info(inode->i_sb, function, line); ext4_handle_error(inode->i_sb); } const char *ext4_decode_error(struct super_block *sb, int errno, char nbuf[16]) { char *errstr = NULL; switch (errno) { case -EFSCORRUPTED: errstr = "Corrupt filesystem"; break; case -EFSBADCRC: errstr = "Filesystem failed CRC"; break; case -EIO: errstr = "IO failure"; break; case -ENOMEM: errstr = "Out of memory"; break; case -EROFS: if (!sb || (EXT4_SB(sb)->s_journal && EXT4_SB(sb)->s_journal->j_flags & JBD2_ABORT)) errstr = "Journal has aborted"; else errstr = "Readonly filesystem"; break; default: /* If the caller passed in an extra buffer for unknown * errors, textualise them now. Else we just return * NULL. */ if (nbuf) { /* Check for truncated error codes... */ if (snprintf(nbuf, 16, "error %d", -errno) >= 0) errstr = nbuf; } break; } return errstr; } /* __ext4_std_error decodes expected errors from journaling functions * automatically and invokes the appropriate error response. */ void __ext4_std_error(struct super_block *sb, const char *function, unsigned int line, int errno) { char nbuf[16]; const char *errstr; /* Special case: if the error is EROFS, and we're not already * inside a transaction, then there's really no point in logging * an error. */ if (errno == -EROFS && journal_current_handle() == NULL && (sb->s_flags & MS_RDONLY)) return; if (ext4_error_ratelimit(sb)) { errstr = ext4_decode_error(sb, errno, nbuf); printk(KERN_CRIT "EXT4-fs error (device %s) in %s:%d: %s\n", sb->s_id, function, line, errstr); } save_error_info(sb, function, line); ext4_handle_error(sb); } /* * ext4_abort is a much stronger failure handler than ext4_error. The * abort function may be used to deal with unrecoverable failures such * as journal IO errors or ENOMEM at a critical moment in log management. 
* * We unconditionally force the filesystem into an ABORT|READONLY state, * unless the error response on the fs has been set to panic in which * case we take the easy way out and panic immediately. */ void __ext4_abort(struct super_block *sb, const char *function, unsigned int line, const char *fmt, ...) { va_list args; save_error_info(sb, function, line); va_start(args, fmt); printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: ", sb->s_id, function, line); vprintk(fmt, args); printk("\n"); va_end(args); if ((sb->s_flags & MS_RDONLY) == 0) { ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only"); EXT4_SB(sb)->s_mount_flags |= EXT4_MF_FS_ABORTED; /* * Make sure updated value of ->s_mount_flags will be visible * before ->s_flags update */ smp_wmb(); sb->s_flags |= MS_RDONLY; if (EXT4_SB(sb)->s_journal) jbd2_journal_abort(EXT4_SB(sb)->s_journal, -EIO); save_error_info(sb, function, line); } if (test_opt(sb, ERRORS_PANIC)) { if (EXT4_SB(sb)->s_journal && !(EXT4_SB(sb)->s_journal->j_flags & JBD2_REC_ERR)) return; panic("EXT4-fs panic from previous error\n"); } } void __ext4_msg(struct super_block *sb, const char *prefix, const char *fmt, ...) { struct va_format vaf; va_list args; if (!___ratelimit(&(EXT4_SB(sb)->s_msg_ratelimit_state), "EXT4-fs")) return; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; printk("%sEXT4-fs (%s): %pV\n", prefix, sb->s_id, &vaf); va_end(args); } #define ext4_warning_ratelimit(sb) \ ___ratelimit(&(EXT4_SB(sb)->s_warning_ratelimit_state), \ "EXT4-fs warning") void __ext4_warning(struct super_block *sb, const char *function, unsigned int line, const char *fmt, ...) { struct va_format vaf; va_list args; if (!ext4_warning_ratelimit(sb)) return; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: %pV\n", sb->s_id, function, line, &vaf); va_end(args); } void __ext4_warning_inode(const struct inode *inode, const char *function, unsigned int line, const char *fmt, ...) { struct va_format vaf; va_list args; if (!ext4_warning_ratelimit(inode->i_sb)) return; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: " "inode #%lu: comm %s: %pV\n", inode->i_sb->s_id, function, line, inode->i_ino, current->comm, &vaf); va_end(args); } void __ext4_grp_locked_error(const char *function, unsigned int line, struct super_block *sb, ext4_group_t grp, unsigned long ino, ext4_fsblk_t block, const char *fmt, ...) __releases(bitlock) __acquires(bitlock) { struct va_format vaf; va_list args; struct ext4_super_block *es = EXT4_SB(sb)->s_es; es->s_last_error_ino = cpu_to_le32(ino); es->s_last_error_block = cpu_to_le64(block); __save_error_info(sb, function, line); if (ext4_error_ratelimit(sb)) { va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: group %u, ", sb->s_id, function, line, grp); if (ino) printk(KERN_CONT "inode %lu: ", ino); if (block) printk(KERN_CONT "block %llu:", (unsigned long long) block); printk(KERN_CONT "%pV\n", &vaf); va_end(args); } if (test_opt(sb, ERRORS_CONT)) { ext4_commit_super(sb, 0); return; } ext4_unlock_group(sb, grp); ext4_handle_error(sb); /* * We only get here in the ERRORS_RO case; relocking the group * may be dangerous, but nothing bad will happen since the * filesystem will have already been marked read/only and the * journal has been aborted. 
We return 1 as a hint to callers * who might what to use the return value from * ext4_grp_locked_error() to distinguish between the * ERRORS_CONT and ERRORS_RO case, and perhaps return more * aggressively from the ext4 function in question, with a * more appropriate error code. */ ext4_lock_group(sb, grp); return; } void ext4_update_dynamic_rev(struct super_block *sb) { struct ext4_super_block *es = EXT4_SB(sb)->s_es; if (le32_to_cpu(es->s_rev_level) > EXT4_GOOD_OLD_REV) return; ext4_warning(sb, "updating to rev %d because of new feature flag, " "running e2fsck is recommended", EXT4_DYNAMIC_REV); es->s_first_ino = cpu_to_le32(EXT4_GOOD_OLD_FIRST_INO); es->s_inode_size = cpu_to_le16(EXT4_GOOD_OLD_INODE_SIZE); es->s_rev_level = cpu_to_le32(EXT4_DYNAMIC_REV); /* leave es->s_feature_*compat flags alone */ /* es->s_uuid will be set by e2fsck if empty */ /* * The rest of the superblock fields should be zero, and if not it * means they are likely already in use, so leave them alone. We * can leave it up to e2fsck to clean up any inconsistencies there. */ } /* * Open the external journal device */ static struct block_device *ext4_blkdev_get(dev_t dev, struct super_block *sb) { struct block_device *bdev; char b[BDEVNAME_SIZE]; bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, sb); if (IS_ERR(bdev)) goto fail; return bdev; fail: ext4_msg(sb, KERN_ERR, "failed to open journal device %s: %ld", __bdevname(dev, b), PTR_ERR(bdev)); return NULL; } /* * Release the journal device */ static void ext4_blkdev_put(struct block_device *bdev) { blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); } static void ext4_blkdev_remove(struct ext4_sb_info *sbi) { struct block_device *bdev; bdev = sbi->journal_bdev; if (bdev) { ext4_blkdev_put(bdev); sbi->journal_bdev = NULL; } } static inline struct inode *orphan_list_entry(struct list_head *l) { return &list_entry(l, struct ext4_inode_info, i_orphan)->vfs_inode; } static void dump_orphan_list(struct super_block *sb, struct ext4_sb_info *sbi) { struct list_head *l; ext4_msg(sb, KERN_ERR, "sb orphan head is %d", le32_to_cpu(sbi->s_es->s_last_orphan)); printk(KERN_ERR "sb_info orphan list:\n"); list_for_each(l, &sbi->s_orphan) { struct inode *inode = orphan_list_entry(l); printk(KERN_ERR " " "inode %s:%lu at %p: mode %o, nlink %d, next %d\n", inode->i_sb->s_id, inode->i_ino, inode, inode->i_mode, inode->i_nlink, NEXT_ORPHAN(inode)); } } static void ext4_put_super(struct super_block *sb) { struct ext4_sb_info *sbi = EXT4_SB(sb); struct ext4_super_block *es = sbi->s_es; int i, err; ext4_unregister_li_request(sb); dquot_disable(sb, -1, DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED); flush_workqueue(sbi->rsv_conversion_wq); destroy_workqueue(sbi->rsv_conversion_wq); if (sbi->s_journal) { err = jbd2_journal_destroy(sbi->s_journal); sbi->s_journal = NULL; if (err < 0) ext4_abort(sb, "Couldn't clean up the journal"); } ext4_unregister_sysfs(sb); ext4_es_unregister_shrinker(sbi); del_timer_sync(&sbi->s_err_report); ext4_release_system_zone(sb); ext4_mb_release(sb); ext4_ext_release(sb); ext4_xattr_put_super(sb); if (!(sb->s_flags & MS_RDONLY)) { ext4_clear_feature_journal_needs_recovery(sb); es->s_state = cpu_to_le16(sbi->s_mount_state); } if (!(sb->s_flags & MS_RDONLY)) ext4_commit_super(sb, 1); for (i = 0; i < sbi->s_gdb_count; i++) brelse(sbi->s_group_desc[i]); kvfree(sbi->s_group_desc); kvfree(sbi->s_flex_groups); percpu_counter_destroy(&sbi->s_freeclusters_counter); percpu_counter_destroy(&sbi->s_freeinodes_counter); percpu_counter_destroy(&sbi->s_dirs_counter); 
percpu_counter_destroy(&sbi->s_dirtyclusters_counter); brelse(sbi->s_sbh); #ifdef CONFIG_QUOTA for (i = 0; i < EXT4_MAXQUOTAS; i++) kfree(sbi->s_qf_names[i]); #endif /* Debugging code just in case the in-memory inode orphan list * isn't empty. The on-disk one can be non-empty if we've * detected an error and taken the fs readonly, but the * in-memory list had better be clean by this point. */ if (!list_empty(&sbi->s_orphan)) dump_orphan_list(sb, sbi); J_ASSERT(list_empty(&sbi->s_orphan)); sync_blockdev(sb->s_bdev); invalidate_bdev(sb->s_bdev); if (sbi->journal_bdev && sbi->journal_bdev != sb->s_bdev) { /* * Invalidate the journal device's buffers. We don't want them * floating about in memory - the physical journal device may * hotswapped, and it breaks the `ro-after' testing code. */ sync_blockdev(sbi->journal_bdev); invalidate_bdev(sbi->journal_bdev); ext4_blkdev_remove(sbi); } if (sbi->s_mb_cache) { ext4_xattr_destroy_cache(sbi->s_mb_cache); sbi->s_mb_cache = NULL; } if (sbi->s_mmp_tsk) kthread_stop(sbi->s_mmp_tsk); sb->s_fs_info = NULL; /* * Now that we are completely done shutting down the * superblock, we need to actually destroy the kobject. */ kobject_put(&sbi->s_kobj); wait_for_completion(&sbi->s_kobj_unregister); if (sbi->s_chksum_driver) crypto_free_shash(sbi->s_chksum_driver); kfree(sbi->s_blockgroup_lock); kfree(sbi); } static struct kmem_cache *ext4_inode_cachep; /* * Called inside transaction, so use GFP_NOFS */ static struct inode *ext4_alloc_inode(struct super_block *sb) { struct ext4_inode_info *ei; ei = kmem_cache_alloc(ext4_inode_cachep, GFP_NOFS); if (!ei) return NULL; ei->vfs_inode.i_version = 1; spin_lock_init(&ei->i_raw_lock); INIT_LIST_HEAD(&ei->i_prealloc_list); spin_lock_init(&ei->i_prealloc_lock); ext4_es_init_tree(&ei->i_es_tree); rwlock_init(&ei->i_es_lock); INIT_LIST_HEAD(&ei->i_es_list); ei->i_es_all_nr = 0; ei->i_es_shk_nr = 0; ei->i_es_shrink_lblk = 0; ei->i_reserved_data_blocks = 0; ei->i_reserved_meta_blocks = 0; ei->i_allocated_meta_blocks = 0; ei->i_da_metadata_calc_len = 0; ei->i_da_metadata_calc_last_lblock = 0; spin_lock_init(&(ei->i_block_reservation_lock)); #ifdef CONFIG_QUOTA ei->i_reserved_quota = 0; memset(&ei->i_dquot, 0, sizeof(ei->i_dquot)); #endif ei->jinode = NULL; INIT_LIST_HEAD(&ei->i_rsv_conversion_list); spin_lock_init(&ei->i_completed_io_lock); ei->i_sync_tid = 0; ei->i_datasync_tid = 0; atomic_set(&ei->i_ioend_count, 0); atomic_set(&ei->i_unwritten, 0); INIT_WORK(&ei->i_rsv_conversion_work, ext4_end_io_rsv_work); #ifdef CONFIG_EXT4_FS_ENCRYPTION ei->i_crypt_info = NULL; #endif return &ei->vfs_inode; } static int ext4_drop_inode(struct inode *inode) { int drop = generic_drop_inode(inode); trace_ext4_drop_inode(inode, drop); return drop; } static void ext4_i_callback(struct rcu_head *head) { struct inode *inode = container_of(head, struct inode, i_rcu); kmem_cache_free(ext4_inode_cachep, EXT4_I(inode)); } static void ext4_destroy_inode(struct inode *inode) { if (!list_empty(&(EXT4_I(inode)->i_orphan))) { ext4_msg(inode->i_sb, KERN_ERR, "Inode %lu (%p): orphan list check failed!", inode->i_ino, EXT4_I(inode)); print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 4, EXT4_I(inode), sizeof(struct ext4_inode_info), true); dump_stack(); } call_rcu(&inode->i_rcu, ext4_i_callback); } static void init_once(void *foo) { struct ext4_inode_info *ei = (struct ext4_inode_info *) foo; INIT_LIST_HEAD(&ei->i_orphan); init_rwsem(&ei->xattr_sem); init_rwsem(&ei->i_data_sem); init_rwsem(&ei->i_mmap_sem); inode_init_once(&ei->vfs_inode); } static int 
__init init_inodecache(void) { ext4_inode_cachep = kmem_cache_create("ext4_inode_cache", sizeof(struct ext4_inode_info), 0, (SLAB_RECLAIM_ACCOUNT| SLAB_MEM_SPREAD), init_once); if (ext4_inode_cachep == NULL) return -ENOMEM; return 0; } static void destroy_inodecache(void) { /* * Make sure all delayed rcu free inodes are flushed before we * destroy cache. */ rcu_barrier(); kmem_cache_destroy(ext4_inode_cachep); } void ext4_clear_inode(struct inode *inode) { invalidate_inode_buffers(inode); clear_inode(inode); dquot_drop(inode); ext4_discard_preallocations(inode); ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS); if (EXT4_I(inode)->jinode) { jbd2_journal_release_jbd_inode(EXT4_JOURNAL(inode), EXT4_I(inode)->jinode); jbd2_free_inode(EXT4_I(inode)->jinode); EXT4_I(inode)->jinode = NULL; } #ifdef CONFIG_EXT4_FS_ENCRYPTION if (EXT4_I(inode)->i_crypt_info) ext4_free_encryption_info(inode, EXT4_I(inode)->i_crypt_info); #endif } static struct inode *ext4_nfs_get_inode(struct super_block *sb, u64 ino, u32 generation) { struct inode *inode; if (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO) return ERR_PTR(-ESTALE); if (ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count)) return ERR_PTR(-ESTALE); /* iget isn't really right if the inode is currently unallocated!! * * ext4_read_inode will return a bad_inode if the inode had been * deleted, so we should be safe. * * Currently we don't know the generation for parent directory, so * a generation of 0 means "accept any" */ inode = ext4_iget_normal(sb, ino); if (IS_ERR(inode)) return ERR_CAST(inode); if (generation && inode->i_generation != generation) { iput(inode); return ERR_PTR(-ESTALE); } return inode; } static struct dentry *ext4_fh_to_dentry(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { return generic_fh_to_dentry(sb, fid, fh_len, fh_type, ext4_nfs_get_inode); } static struct dentry *ext4_fh_to_parent(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { return generic_fh_to_parent(sb, fid, fh_len, fh_type, ext4_nfs_get_inode); } /* * Try to release metadata pages (indirect blocks, directories) which are * mapped via the block device. Since these pages could have journal heads * which would prevent try_to_free_buffers() from freeing them, we must use * jbd2 layer's try_to_free_buffers() function to release them. */ static int bdev_try_to_free_page(struct super_block *sb, struct page *page, gfp_t wait) { journal_t *journal = EXT4_SB(sb)->s_journal; WARN_ON(PageChecked(page)); if (!page_has_buffers(page)) return 0; if (journal) return jbd2_journal_try_to_free_buffers(journal, page, wait & ~__GFP_DIRECT_RECLAIM); return try_to_free_buffers(page); } #ifdef CONFIG_QUOTA #define QTYPE2NAME(t) ((t) == USRQUOTA ? 
"user" : "group") #define QTYPE2MOPT(on, t) ((t) == USRQUOTA?((on)##USRJQUOTA):((on)##GRPJQUOTA)) static int ext4_write_dquot(struct dquot *dquot); static int ext4_acquire_dquot(struct dquot *dquot); static int ext4_release_dquot(struct dquot *dquot); static int ext4_mark_dquot_dirty(struct dquot *dquot); static int ext4_write_info(struct super_block *sb, int type); static int ext4_quota_on(struct super_block *sb, int type, int format_id, struct path *path); static int ext4_quota_off(struct super_block *sb, int type); static int ext4_quota_on_mount(struct super_block *sb, int type); static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data, size_t len, loff_t off); static ssize_t ext4_quota_write(struct super_block *sb, int type, const char *data, size_t len, loff_t off); static int ext4_quota_enable(struct super_block *sb, int type, int format_id, unsigned int flags); static int ext4_enable_quotas(struct super_block *sb); static struct dquot **ext4_get_dquots(struct inode *inode) { return EXT4_I(inode)->i_dquot; } static const struct dquot_operations ext4_quota_operations = { .get_reserved_space = ext4_get_reserved_space, .write_dquot = ext4_write_dquot, .acquire_dquot = ext4_acquire_dquot, .release_dquot = ext4_release_dquot, .mark_dirty = ext4_mark_dquot_dirty, .write_info = ext4_write_info, .alloc_dquot = dquot_alloc, .destroy_dquot = dquot_destroy, }; static const struct quotactl_ops ext4_qctl_operations = { .quota_on = ext4_quota_on, .quota_off = ext4_quota_off, .quota_sync = dquot_quota_sync, .get_state = dquot_get_state, .set_info = dquot_set_dqinfo, .get_dqblk = dquot_get_dqblk, .set_dqblk = dquot_set_dqblk }; #endif static const struct super_operations ext4_sops = { .alloc_inode = ext4_alloc_inode, .destroy_inode = ext4_destroy_inode, .write_inode = ext4_write_inode, .dirty_inode = ext4_dirty_inode, .drop_inode = ext4_drop_inode, .evict_inode = ext4_evict_inode, .put_super = ext4_put_super, .sync_fs = ext4_sync_fs, .freeze_fs = ext4_freeze, .unfreeze_fs = ext4_unfreeze, .statfs = ext4_statfs, .remount_fs = ext4_remount, .show_options = ext4_show_options, #ifdef CONFIG_QUOTA .quota_read = ext4_quota_read, .quota_write = ext4_quota_write, .get_dquots = ext4_get_dquots, #endif .bdev_try_to_free_page = bdev_try_to_free_page, }; static const struct export_operations ext4_export_ops = { .fh_to_dentry = ext4_fh_to_dentry, .fh_to_parent = ext4_fh_to_parent, .get_parent = ext4_get_parent, }; enum { Opt_bsd_df, Opt_minix_df, Opt_grpid, Opt_nogrpid, Opt_resgid, Opt_resuid, Opt_sb, Opt_err_cont, Opt_err_panic, Opt_err_ro, Opt_nouid32, Opt_debug, Opt_removed, Opt_user_xattr, Opt_nouser_xattr, Opt_acl, Opt_noacl, Opt_auto_da_alloc, Opt_noauto_da_alloc, Opt_noload, Opt_commit, Opt_min_batch_time, Opt_max_batch_time, Opt_journal_dev, Opt_journal_path, Opt_journal_checksum, Opt_journal_async_commit, Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback, Opt_data_err_abort, Opt_data_err_ignore, Opt_test_dummy_encryption, Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota, Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota, Opt_noquota, Opt_barrier, Opt_nobarrier, Opt_err, Opt_usrquota, Opt_grpquota, Opt_i_version, Opt_dax, Opt_stripe, Opt_delalloc, Opt_nodelalloc, Opt_mblk_io_submit, Opt_lazytime, Opt_nolazytime, Opt_nomblk_io_submit, Opt_block_validity, Opt_noblock_validity, Opt_inode_readahead_blks, Opt_journal_ioprio, Opt_dioread_nolock, Opt_dioread_lock, Opt_discard, Opt_nodiscard, Opt_init_itable, Opt_noinit_itable, Opt_max_dir_size_kb, 
Opt_nojournal_checksum, }; static const match_table_t tokens = { {Opt_bsd_df, "bsddf"}, {Opt_minix_df, "minixdf"}, {Opt_grpid, "grpid"}, {Opt_grpid, "bsdgroups"}, {Opt_nogrpid, "nogrpid"}, {Opt_nogrpid, "sysvgroups"}, {Opt_resgid, "resgid=%u"}, {Opt_resuid, "resuid=%u"}, {Opt_sb, "sb=%u"}, {Opt_err_cont, "errors=continue"}, {Opt_err_panic, "errors=panic"}, {Opt_err_ro, "errors=remount-ro"}, {Opt_nouid32, "nouid32"}, {Opt_debug, "debug"}, {Opt_removed, "oldalloc"}, {Opt_removed, "orlov"}, {Opt_user_xattr, "user_xattr"}, {Opt_nouser_xattr, "nouser_xattr"}, {Opt_acl, "acl"}, {Opt_noacl, "noacl"}, {Opt_noload, "norecovery"}, {Opt_noload, "noload"}, {Opt_removed, "nobh"}, {Opt_removed, "bh"}, {Opt_commit, "commit=%u"}, {Opt_min_batch_time, "min_batch_time=%u"}, {Opt_max_batch_time, "max_batch_time=%u"}, {Opt_journal_dev, "journal_dev=%u"}, {Opt_journal_path, "journal_path=%s"}, {Opt_journal_checksum, "journal_checksum"}, {Opt_nojournal_checksum, "nojournal_checksum"}, {Opt_journal_async_commit, "journal_async_commit"}, {Opt_abort, "abort"}, {Opt_data_journal, "data=journal"}, {Opt_data_ordered, "data=ordered"}, {Opt_data_writeback, "data=writeback"}, {Opt_data_err_abort, "data_err=abort"}, {Opt_data_err_ignore, "data_err=ignore"}, {Opt_offusrjquota, "usrjquota="}, {Opt_usrjquota, "usrjquota=%s"}, {Opt_offgrpjquota, "grpjquota="}, {Opt_grpjquota, "grpjquota=%s"}, {Opt_jqfmt_vfsold, "jqfmt=vfsold"}, {Opt_jqfmt_vfsv0, "jqfmt=vfsv0"}, {Opt_jqfmt_vfsv1, "jqfmt=vfsv1"}, {Opt_grpquota, "grpquota"}, {Opt_noquota, "noquota"}, {Opt_quota, "quota"}, {Opt_usrquota, "usrquota"}, {Opt_barrier, "barrier=%u"}, {Opt_barrier, "barrier"}, {Opt_nobarrier, "nobarrier"}, {Opt_i_version, "i_version"}, {Opt_dax, "dax"}, {Opt_stripe, "stripe=%u"}, {Opt_delalloc, "delalloc"}, {Opt_lazytime, "lazytime"}, {Opt_nolazytime, "nolazytime"}, {Opt_nodelalloc, "nodelalloc"}, {Opt_removed, "mblk_io_submit"}, {Opt_removed, "nomblk_io_submit"}, {Opt_block_validity, "block_validity"}, {Opt_noblock_validity, "noblock_validity"}, {Opt_inode_readahead_blks, "inode_readahead_blks=%u"}, {Opt_journal_ioprio, "journal_ioprio=%u"}, {Opt_auto_da_alloc, "auto_da_alloc=%u"}, {Opt_auto_da_alloc, "auto_da_alloc"}, {Opt_noauto_da_alloc, "noauto_da_alloc"}, {Opt_dioread_nolock, "dioread_nolock"}, {Opt_dioread_lock, "dioread_lock"}, {Opt_discard, "discard"}, {Opt_nodiscard, "nodiscard"}, {Opt_init_itable, "init_itable=%u"}, {Opt_init_itable, "init_itable"}, {Opt_noinit_itable, "noinit_itable"}, {Opt_max_dir_size_kb, "max_dir_size_kb=%u"}, {Opt_test_dummy_encryption, "test_dummy_encryption"}, {Opt_removed, "check=none"}, /* mount option from ext2/3 */ {Opt_removed, "nocheck"}, /* mount option from ext2/3 */ {Opt_removed, "reservation"}, /* mount option from ext2/3 */ {Opt_removed, "noreservation"}, /* mount option from ext2/3 */ {Opt_removed, "journal=%u"}, /* mount option from ext2/3 */ {Opt_err, NULL}, }; static ext4_fsblk_t get_sb_block(void **data) { ext4_fsblk_t sb_block; char *options = (char *) *data; if (!options || strncmp(options, "sb=", 3) != 0) return 1; /* Default location */ options += 3; /* TODO: use simple_strtoll with >32bit ext4 */ sb_block = simple_strtoul(options, &options, 0); if (*options && *options != ',') { printk(KERN_ERR "EXT4-fs: Invalid sb specification: %s\n", (char *) *data); return 1; } if (*options == ',') options++; *data = (void *) options; return sb_block; } #define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3)) static char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n" "Contact 
linux-ext4@vger.kernel.org if you think we should keep it.\n"; #ifdef CONFIG_QUOTA static int set_qf_name(struct super_block *sb, int qtype, substring_t *args) { struct ext4_sb_info *sbi = EXT4_SB(sb); char *qname; int ret = -1; if (sb_any_quota_loaded(sb) && !sbi->s_qf_names[qtype]) { ext4_msg(sb, KERN_ERR, "Cannot change journaled " "quota options when quota turned on"); return -1; } if (ext4_has_feature_quota(sb)) { ext4_msg(sb, KERN_ERR, "Cannot set journaled quota options " "when QUOTA feature is enabled"); return -1; } qname = match_strdup(args); if (!qname) { ext4_msg(sb, KERN_ERR, "Not enough memory for storing quotafile name"); return -1; } if (sbi->s_qf_names[qtype]) { if (strcmp(sbi->s_qf_names[qtype], qname) == 0) ret = 1; else ext4_msg(sb, KERN_ERR, "%s quota file already specified", QTYPE2NAME(qtype)); goto errout; } if (strchr(qname, '/')) { ext4_msg(sb, KERN_ERR, "quotafile must be on filesystem root"); goto errout; } sbi->s_qf_names[qtype] = qname; set_opt(sb, QUOTA); return 1; errout: kfree(qname); return ret; } static int clear_qf_name(struct super_block *sb, int qtype) { struct ext4_sb_info *sbi = EXT4_SB(sb); if (sb_any_quota_loaded(sb) && sbi->s_qf_names[qtype]) { ext4_msg(sb, KERN_ERR, "Cannot change journaled quota options" " when quota turned on"); return -1; } kfree(sbi->s_qf_names[qtype]); sbi->s_qf_names[qtype] = NULL; return 1; } #endif #define MOPT_SET 0x0001 #define MOPT_CLEAR 0x0002 #define MOPT_NOSUPPORT 0x0004 #define MOPT_EXPLICIT 0x0008 #define MOPT_CLEAR_ERR 0x0010 #define MOPT_GTE0 0x0020 #ifdef CONFIG_QUOTA #define MOPT_Q 0 #define MOPT_QFMT 0x0040 #else #define MOPT_Q MOPT_NOSUPPORT #define MOPT_QFMT MOPT_NOSUPPORT #endif #define MOPT_DATAJ 0x0080 #define MOPT_NO_EXT2 0x0100 #define MOPT_NO_EXT3 0x0200 #define MOPT_EXT4_ONLY (MOPT_NO_EXT2 | MOPT_NO_EXT3) #define MOPT_STRING 0x0400 static const struct mount_opts { int token; int mount_opt; int flags; } ext4_mount_opts[] = { {Opt_minix_df, EXT4_MOUNT_MINIX_DF, MOPT_SET}, {Opt_bsd_df, EXT4_MOUNT_MINIX_DF, MOPT_CLEAR}, {Opt_grpid, EXT4_MOUNT_GRPID, MOPT_SET}, {Opt_nogrpid, EXT4_MOUNT_GRPID, MOPT_CLEAR}, {Opt_block_validity, EXT4_MOUNT_BLOCK_VALIDITY, MOPT_SET}, {Opt_noblock_validity, EXT4_MOUNT_BLOCK_VALIDITY, MOPT_CLEAR}, {Opt_dioread_nolock, EXT4_MOUNT_DIOREAD_NOLOCK, MOPT_EXT4_ONLY | MOPT_SET}, {Opt_dioread_lock, EXT4_MOUNT_DIOREAD_NOLOCK, MOPT_EXT4_ONLY | MOPT_CLEAR}, {Opt_discard, EXT4_MOUNT_DISCARD, MOPT_SET}, {Opt_nodiscard, EXT4_MOUNT_DISCARD, MOPT_CLEAR}, {Opt_delalloc, EXT4_MOUNT_DELALLOC, MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT}, {Opt_nodelalloc, EXT4_MOUNT_DELALLOC, MOPT_EXT4_ONLY | MOPT_CLEAR}, {Opt_nojournal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM, MOPT_EXT4_ONLY | MOPT_CLEAR}, {Opt_journal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM, MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT}, {Opt_journal_async_commit, (EXT4_MOUNT_JOURNAL_ASYNC_COMMIT | EXT4_MOUNT_JOURNAL_CHECKSUM), MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT}, {Opt_noload, EXT4_MOUNT_NOLOAD, MOPT_NO_EXT2 | MOPT_SET}, {Opt_err_panic, EXT4_MOUNT_ERRORS_PANIC, MOPT_SET | MOPT_CLEAR_ERR}, {Opt_err_ro, EXT4_MOUNT_ERRORS_RO, MOPT_SET | MOPT_CLEAR_ERR}, {Opt_err_cont, EXT4_MOUNT_ERRORS_CONT, MOPT_SET | MOPT_CLEAR_ERR}, {Opt_data_err_abort, EXT4_MOUNT_DATA_ERR_ABORT, MOPT_NO_EXT2 | MOPT_SET}, {Opt_data_err_ignore, EXT4_MOUNT_DATA_ERR_ABORT, MOPT_NO_EXT2 | MOPT_CLEAR}, {Opt_barrier, EXT4_MOUNT_BARRIER, MOPT_SET}, {Opt_nobarrier, EXT4_MOUNT_BARRIER, MOPT_CLEAR}, {Opt_noauto_da_alloc, EXT4_MOUNT_NO_AUTO_DA_ALLOC, MOPT_SET}, {Opt_auto_da_alloc, 
EXT4_MOUNT_NO_AUTO_DA_ALLOC, MOPT_CLEAR}, {Opt_noinit_itable, EXT4_MOUNT_INIT_INODE_TABLE, MOPT_CLEAR}, {Opt_commit, 0, MOPT_GTE0}, {Opt_max_batch_time, 0, MOPT_GTE0}, {Opt_min_batch_time, 0, MOPT_GTE0}, {Opt_inode_readahead_blks, 0, MOPT_GTE0}, {Opt_init_itable, 0, MOPT_GTE0}, {Opt_dax, EXT4_MOUNT_DAX, MOPT_SET}, {Opt_stripe, 0, MOPT_GTE0}, {Opt_resuid, 0, MOPT_GTE0}, {Opt_resgid, 0, MOPT_GTE0}, {Opt_journal_dev, 0, MOPT_NO_EXT2 | MOPT_GTE0}, {Opt_journal_path, 0, MOPT_NO_EXT2 | MOPT_STRING}, {Opt_journal_ioprio, 0, MOPT_NO_EXT2 | MOPT_GTE0}, {Opt_data_journal, EXT4_MOUNT_JOURNAL_DATA, MOPT_NO_EXT2 | MOPT_DATAJ}, {Opt_data_ordered, EXT4_MOUNT_ORDERED_DATA, MOPT_NO_EXT2 | MOPT_DATAJ}, {Opt_data_writeback, EXT4_MOUNT_WRITEBACK_DATA, MOPT_NO_EXT2 | MOPT_DATAJ}, {Opt_user_xattr, EXT4_MOUNT_XATTR_USER, MOPT_SET}, {Opt_nouser_xattr, EXT4_MOUNT_XATTR_USER, MOPT_CLEAR}, #ifdef CONFIG_EXT4_FS_POSIX_ACL {Opt_acl, EXT4_MOUNT_POSIX_ACL, MOPT_SET}, {Opt_noacl, EXT4_MOUNT_POSIX_ACL, MOPT_CLEAR}, #else {Opt_acl, 0, MOPT_NOSUPPORT}, {Opt_noacl, 0, MOPT_NOSUPPORT}, #endif {Opt_nouid32, EXT4_MOUNT_NO_UID32, MOPT_SET}, {Opt_debug, EXT4_MOUNT_DEBUG, MOPT_SET}, {Opt_quota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA, MOPT_SET | MOPT_Q}, {Opt_usrquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA, MOPT_SET | MOPT_Q}, {Opt_grpquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_GRPQUOTA, MOPT_SET | MOPT_Q}, {Opt_noquota, (EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA | EXT4_MOUNT_GRPQUOTA), MOPT_CLEAR | MOPT_Q}, {Opt_usrjquota, 0, MOPT_Q}, {Opt_grpjquota, 0, MOPT_Q}, {Opt_offusrjquota, 0, MOPT_Q}, {Opt_offgrpjquota, 0, MOPT_Q}, {Opt_jqfmt_vfsold, QFMT_VFS_OLD, MOPT_QFMT}, {Opt_jqfmt_vfsv0, QFMT_VFS_V0, MOPT_QFMT}, {Opt_jqfmt_vfsv1, QFMT_VFS_V1, MOPT_QFMT}, {Opt_max_dir_size_kb, 0, MOPT_GTE0}, {Opt_test_dummy_encryption, 0, MOPT_GTE0}, {Opt_err, 0, 0} }; static int handle_mount_opt(struct super_block *sb, char *opt, int token, substring_t *args, unsigned long *journal_devnum, unsigned int *journal_ioprio, int is_remount) { struct ext4_sb_info *sbi = EXT4_SB(sb); const struct mount_opts *m; kuid_t uid; kgid_t gid; int arg = 0; #ifdef CONFIG_QUOTA if (token == Opt_usrjquota) return set_qf_name(sb, USRQUOTA, &args[0]); else if (token == Opt_grpjquota) return set_qf_name(sb, GRPQUOTA, &args[0]); else if (token == Opt_offusrjquota) return clear_qf_name(sb, USRQUOTA); else if (token == Opt_offgrpjquota) return clear_qf_name(sb, GRPQUOTA); #endif switch (token) { case Opt_noacl: case Opt_nouser_xattr: ext4_msg(sb, KERN_WARNING, deprecated_msg, opt, "3.5"); break; case Opt_sb: return 1; /* handled by get_sb_block() */ case Opt_removed: ext4_msg(sb, KERN_WARNING, "Ignoring removed %s option", opt); return 1; case Opt_abort: sbi->s_mount_flags |= EXT4_MF_FS_ABORTED; return 1; case Opt_i_version: sb->s_flags |= MS_I_VERSION; return 1; case Opt_lazytime: sb->s_flags |= MS_LAZYTIME; return 1; case Opt_nolazytime: sb->s_flags &= ~MS_LAZYTIME; return 1; } for (m = ext4_mount_opts; m->token != Opt_err; m++) if (token == m->token) break; if (m->token == Opt_err) { ext4_msg(sb, KERN_ERR, "Unrecognized mount option \"%s\" " "or missing value", opt); return -1; } if ((m->flags & MOPT_NO_EXT2) && IS_EXT2_SB(sb)) { ext4_msg(sb, KERN_ERR, "Mount option \"%s\" incompatible with ext2", opt); return -1; } if ((m->flags & MOPT_NO_EXT3) && IS_EXT3_SB(sb)) { ext4_msg(sb, KERN_ERR, "Mount option \"%s\" incompatible with ext3", opt); return -1; } if (args->from && !(m->flags & MOPT_STRING) && match_int(args, &arg)) return -1; if (args->from && (m->flags & MOPT_GTE0) && (arg < 
0)) return -1; if (m->flags & MOPT_EXPLICIT) { if (m->mount_opt & EXT4_MOUNT_DELALLOC) { set_opt2(sb, EXPLICIT_DELALLOC); } else if (m->mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) { set_opt2(sb, EXPLICIT_JOURNAL_CHECKSUM); } else return -1; } if (m->flags & MOPT_CLEAR_ERR) clear_opt(sb, ERRORS_MASK); if (token == Opt_noquota && sb_any_quota_loaded(sb)) { ext4_msg(sb, KERN_ERR, "Cannot change quota " "options when quota turned on"); return -1; } if (m->flags & MOPT_NOSUPPORT) { ext4_msg(sb, KERN_ERR, "%s option not supported", opt); } else if (token == Opt_commit) { if (arg == 0) arg = JBD2_DEFAULT_MAX_COMMIT_AGE; sbi->s_commit_interval = HZ * arg; } else if (token == Opt_max_batch_time) { sbi->s_max_batch_time = arg; } else if (token == Opt_min_batch_time) { sbi->s_min_batch_time = arg; } else if (token == Opt_inode_readahead_blks) { if (arg && (arg > (1 << 30) || !is_power_of_2(arg))) { ext4_msg(sb, KERN_ERR, "EXT4-fs: inode_readahead_blks must be " "0 or a power of 2 smaller than 2^31"); return -1; } sbi->s_inode_readahead_blks = arg; } else if (token == Opt_init_itable) { set_opt(sb, INIT_INODE_TABLE); if (!args->from) arg = EXT4_DEF_LI_WAIT_MULT; sbi->s_li_wait_mult = arg; } else if (token == Opt_max_dir_size_kb) { sbi->s_max_dir_size_kb = arg; } else if (token == Opt_stripe) { sbi->s_stripe = arg; } else if (token == Opt_resuid) { uid = make_kuid(current_user_ns(), arg); if (!uid_valid(uid)) { ext4_msg(sb, KERN_ERR, "Invalid uid value %d", arg); return -1; } sbi->s_resuid = uid; } else if (token == Opt_resgid) { gid = make_kgid(current_user_ns(), arg); if (!gid_valid(gid)) { ext4_msg(sb, KERN_ERR, "Invalid gid value %d", arg); return -1; } sbi->s_resgid = gid; } else if (token == Opt_journal_dev) { if (is_remount) { ext4_msg(sb, KERN_ERR, "Cannot specify journal on remount"); return -1; } *journal_devnum = arg; } else if (token == Opt_journal_path) { char *journal_path; struct inode *journal_inode; struct path path; int error; if (is_remount) { ext4_msg(sb, KERN_ERR, "Cannot specify journal on remount"); return -1; } journal_path = match_strdup(&args[0]); if (!journal_path) { ext4_msg(sb, KERN_ERR, "error: could not dup " "journal device string"); return -1; } error = kern_path(journal_path, LOOKUP_FOLLOW, &path); if (error) { ext4_msg(sb, KERN_ERR, "error: could not find " "journal device path: error %d", error); kfree(journal_path); return -1; } journal_inode = d_inode(path.dentry); if (!S_ISBLK(journal_inode->i_mode)) { ext4_msg(sb, KERN_ERR, "error: journal path %s " "is not a block device", journal_path); path_put(&path); kfree(journal_path); return -1; } *journal_devnum = new_encode_dev(journal_inode->i_rdev); path_put(&path); kfree(journal_path); } else if (token == Opt_journal_ioprio) { if (arg > 7) { ext4_msg(sb, KERN_ERR, "Invalid journal IO priority" " (must be 0-7)"); return -1; } *journal_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, arg); } else if (token == Opt_test_dummy_encryption) { #ifdef CONFIG_EXT4_FS_ENCRYPTION sbi->s_mount_flags |= EXT4_MF_TEST_DUMMY_ENCRYPTION; ext4_msg(sb, KERN_WARNING, "Test dummy encryption mode enabled"); #else ext4_msg(sb, KERN_WARNING, "Test dummy encryption mount option ignored"); #endif } else if (m->flags & MOPT_DATAJ) { if (is_remount) { if (!sbi->s_journal) ext4_msg(sb, KERN_WARNING, "Remounting file system with no journal so ignoring journalled data option"); else if (test_opt(sb, DATA_FLAGS) != m->mount_opt) { ext4_msg(sb, KERN_ERR, "Cannot change data mode on remount"); return -1; } } else { clear_opt(sb, DATA_FLAGS); sbi->s_mount_opt 
|= m->mount_opt; } #ifdef CONFIG_QUOTA } else if (m->flags & MOPT_QFMT) { if (sb_any_quota_loaded(sb) && sbi->s_jquota_fmt != m->mount_opt) { ext4_msg(sb, KERN_ERR, "Cannot change journaled " "quota options when quota turned on"); return -1; } if (ext4_has_feature_quota(sb)) { ext4_msg(sb, KERN_ERR, "Cannot set journaled quota options " "when QUOTA feature is enabled"); return -1; } sbi->s_jquota_fmt = m->mount_opt; #endif } else if (token == Opt_dax) { #ifdef CONFIG_FS_DAX ext4_msg(sb, KERN_WARNING, "DAX enabled. Warning: EXPERIMENTAL, use at your own risk"); sbi->s_mount_opt |= m->mount_opt; #else ext4_msg(sb, KERN_INFO, "dax option not supported"); return -1; #endif } else { if (!args->from) arg = 1; if (m->flags & MOPT_CLEAR) arg = !arg; else if (unlikely(!(m->flags & MOPT_SET))) { ext4_msg(sb, KERN_WARNING, "buggy handling of option %s", opt); WARN_ON(1); return -1; } if (arg != 0) sbi->s_mount_opt |= m->mount_opt; else sbi->s_mount_opt &= ~m->mount_opt; } return 1; } static int parse_options(char *options, struct super_block *sb, unsigned long *journal_devnum, unsigned int *journal_ioprio, int is_remount) { struct ext4_sb_info *sbi = EXT4_SB(sb); char *p; substring_t args[MAX_OPT_ARGS]; int token; if (!options) return 1; while ((p = strsep(&options, ",")) != NULL) { if (!*p) continue; /* * Initialize args struct so we know whether arg was * found; some options take optional arguments. */ args[0].to = args[0].from = NULL; token = match_token(p, tokens, args); if (handle_mount_opt(sb, p, token, args, journal_devnum, journal_ioprio, is_remount) < 0) return 0; } #ifdef CONFIG_QUOTA if (ext4_has_feature_quota(sb) && (test_opt(sb, USRQUOTA) || test_opt(sb, GRPQUOTA))) { ext4_msg(sb, KERN_ERR, "Cannot set quota options when QUOTA " "feature is enabled"); return 0; } if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) { if (test_opt(sb, USRQUOTA) && sbi->s_qf_names[USRQUOTA]) clear_opt(sb, USRQUOTA); if (test_opt(sb, GRPQUOTA) && sbi->s_qf_names[GRPQUOTA]) clear_opt(sb, GRPQUOTA); if (test_opt(sb, GRPQUOTA) || test_opt(sb, USRQUOTA)) { ext4_msg(sb, KERN_ERR, "old and new quota " "format mixing"); return 0; } if (!sbi->s_jquota_fmt) { ext4_msg(sb, KERN_ERR, "journaled quota format " "not specified"); return 0; } } #endif if (test_opt(sb, DIOREAD_NOLOCK)) { int blocksize = BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size); if (blocksize < PAGE_CACHE_SIZE) { ext4_msg(sb, KERN_ERR, "can't mount with " "dioread_nolock if block size != PAGE_SIZE"); return 0; } } if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA && test_opt(sb, JOURNAL_ASYNC_COMMIT)) { ext4_msg(sb, KERN_ERR, "can't mount with journal_async_commit " "in data=ordered mode"); return 0; } return 1; } static inline void ext4_show_quota_options(struct seq_file *seq, struct super_block *sb) { #if defined(CONFIG_QUOTA) struct ext4_sb_info *sbi = EXT4_SB(sb); if (sbi->s_jquota_fmt) { char *fmtname = ""; switch (sbi->s_jquota_fmt) { case QFMT_VFS_OLD: fmtname = "vfsold"; break; case QFMT_VFS_V0: fmtname = "vfsv0"; break; case QFMT_VFS_V1: fmtname = "vfsv1"; break; } seq_printf(seq, ",jqfmt=%s", fmtname); } if (sbi->s_qf_names[USRQUOTA]) seq_show_option(seq, "usrjquota", sbi->s_qf_names[USRQUOTA]); if (sbi->s_qf_names[GRPQUOTA]) seq_show_option(seq, "grpjquota", sbi->s_qf_names[GRPQUOTA]); #endif } static const char *token2str(int token) { const struct match_token *t; for (t = tokens; t->token != Opt_err; t++) if (t->token == token && !strchr(t->pattern, '=')) break; return t->pattern; } /* * Show an option if * - it's set 
to a non-default value OR * - if the per-sb default is different from the global default */ static int _ext4_show_options(struct seq_file *seq, struct super_block *sb, int nodefs) { struct ext4_sb_info *sbi = EXT4_SB(sb); struct ext4_super_block *es = sbi->s_es; int def_errors, def_mount_opt = nodefs ? 0 : sbi->s_def_mount_opt; const struct mount_opts *m; char sep = nodefs ? '\n' : ','; #define SEQ_OPTS_PUTS(str) seq_printf(seq, "%c" str, sep) #define SEQ_OPTS_PRINT(str, arg) seq_printf(seq, "%c" str, sep, arg) if (sbi->s_sb_block != 1) SEQ_OPTS_PRINT("sb=%llu", sbi->s_sb_block); for (m = ext4_mount_opts; m->token != Opt_err; m++) { int want_set = m->flags & MOPT_SET; if (((m->flags & (MOPT_SET|MOPT_CLEAR)) == 0) || (m->flags & MOPT_CLEAR_ERR)) continue; if (!(m->mount_opt & (sbi->s_mount_opt ^ def_mount_opt))) continue; /* skip if same as the default */ if ((want_set && (sbi->s_mount_opt & m->mount_opt) != m->mount_opt) || (!want_set && (sbi->s_mount_opt & m->mount_opt))) continue; /* select Opt_noFoo vs Opt_Foo */ SEQ_OPTS_PRINT("%s", token2str(m->token)); } if (nodefs || !uid_eq(sbi->s_resuid, make_kuid(&init_user_ns, EXT4_DEF_RESUID)) || le16_to_cpu(es->s_def_resuid) != EXT4_DEF_RESUID) SEQ_OPTS_PRINT("resuid=%u", from_kuid_munged(&init_user_ns, sbi->s_resuid)); if (nodefs || !gid_eq(sbi->s_resgid, make_kgid(&init_user_ns, EXT4_DEF_RESGID)) || le16_to_cpu(es->s_def_resgid) != EXT4_DEF_RESGID) SEQ_OPTS_PRINT("resgid=%u", from_kgid_munged(&init_user_ns, sbi->s_resgid)); def_errors = nodefs ? -1 : le16_to_cpu(es->s_errors); if (test_opt(sb, ERRORS_RO) && def_errors != EXT4_ERRORS_RO) SEQ_OPTS_PUTS("errors=remount-ro"); if (test_opt(sb, ERRORS_CONT) && def_errors != EXT4_ERRORS_CONTINUE) SEQ_OPTS_PUTS("errors=continue"); if (test_opt(sb, ERRORS_PANIC) && def_errors != EXT4_ERRORS_PANIC) SEQ_OPTS_PUTS("errors=panic"); if (nodefs || sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ) SEQ_OPTS_PRINT("commit=%lu", sbi->s_commit_interval / HZ); if (nodefs || sbi->s_min_batch_time != EXT4_DEF_MIN_BATCH_TIME) SEQ_OPTS_PRINT("min_batch_time=%u", sbi->s_min_batch_time); if (nodefs || sbi->s_max_batch_time != EXT4_DEF_MAX_BATCH_TIME) SEQ_OPTS_PRINT("max_batch_time=%u", sbi->s_max_batch_time); if (sb->s_flags & MS_I_VERSION) SEQ_OPTS_PUTS("i_version"); if (nodefs || sbi->s_stripe) SEQ_OPTS_PRINT("stripe=%lu", sbi->s_stripe); if (EXT4_MOUNT_DATA_FLAGS & (sbi->s_mount_opt ^ def_mount_opt)) { if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) SEQ_OPTS_PUTS("data=journal"); else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA) SEQ_OPTS_PUTS("data=ordered"); else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA) SEQ_OPTS_PUTS("data=writeback"); } if (nodefs || sbi->s_inode_readahead_blks != EXT4_DEF_INODE_READAHEAD_BLKS) SEQ_OPTS_PRINT("inode_readahead_blks=%u", sbi->s_inode_readahead_blks); if (nodefs || (test_opt(sb, INIT_INODE_TABLE) && (sbi->s_li_wait_mult != EXT4_DEF_LI_WAIT_MULT))) SEQ_OPTS_PRINT("init_itable=%u", sbi->s_li_wait_mult); if (nodefs || sbi->s_max_dir_size_kb) SEQ_OPTS_PRINT("max_dir_size_kb=%u", sbi->s_max_dir_size_kb); ext4_show_quota_options(seq, sb); return 0; } static int ext4_show_options(struct seq_file *seq, struct dentry *root) { return _ext4_show_options(seq, root->d_sb, 0); } int ext4_seq_options_show(struct seq_file *seq, void *offset) { struct super_block *sb = seq->private; int rc; seq_puts(seq, (sb->s_flags & MS_RDONLY) ? 
"ro" : "rw"); rc = _ext4_show_options(seq, sb, 1); seq_puts(seq, "\n"); return rc; } static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es, int read_only) { struct ext4_sb_info *sbi = EXT4_SB(sb); int res = 0; if (le32_to_cpu(es->s_rev_level) > EXT4_MAX_SUPP_REV) { ext4_msg(sb, KERN_ERR, "revision level too high, " "forcing read-only mode"); res = MS_RDONLY; } if (read_only) goto done; if (!(sbi->s_mount_state & EXT4_VALID_FS)) ext4_msg(sb, KERN_WARNING, "warning: mounting unchecked fs, " "running e2fsck is recommended"); else if (sbi->s_mount_state & EXT4_ERROR_FS) ext4_msg(sb, KERN_WARNING, "warning: mounting fs with errors, " "running e2fsck is recommended"); else if ((__s16) le16_to_cpu(es->s_max_mnt_count) > 0 && le16_to_cpu(es->s_mnt_count) >= (unsigned short) (__s16) le16_to_cpu(es->s_max_mnt_count)) ext4_msg(sb, KERN_WARNING, "warning: maximal mount count reached, " "running e2fsck is recommended"); else if (le32_to_cpu(es->s_checkinterval) && (le32_to_cpu(es->s_lastcheck) + le32_to_cpu(es->s_checkinterval) <= get_seconds())) ext4_msg(sb, KERN_WARNING, "warning: checktime reached, " "running e2fsck is recommended"); if (!sbi->s_journal) es->s_state &= cpu_to_le16(~EXT4_VALID_FS); if (!(__s16) le16_to_cpu(es->s_max_mnt_count)) es->s_max_mnt_count = cpu_to_le16(EXT4_DFL_MAX_MNT_COUNT); le16_add_cpu(&es->s_mnt_count, 1); es->s_mtime = cpu_to_le32(get_seconds()); ext4_update_dynamic_rev(sb); if (sbi->s_journal) ext4_set_feature_journal_needs_recovery(sb); ext4_commit_super(sb, 1); done: if (test_opt(sb, DEBUG)) printk(KERN_INFO "[EXT4 FS bs=%lu, gc=%u, " "bpg=%lu, ipg=%lu, mo=%04x, mo2=%04x]\n", sb->s_blocksize, sbi->s_groups_count, EXT4_BLOCKS_PER_GROUP(sb), EXT4_INODES_PER_GROUP(sb), sbi->s_mount_opt, sbi->s_mount_opt2); cleancache_init_fs(sb); return res; } int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup) { struct ext4_sb_info *sbi = EXT4_SB(sb); struct flex_groups *new_groups; int size; if (!sbi->s_log_groups_per_flex) return 0; size = ext4_flex_group(sbi, ngroup - 1) + 1; if (size <= sbi->s_flex_groups_allocated) return 0; size = roundup_pow_of_two(size * sizeof(struct flex_groups)); new_groups = ext4_kvzalloc(size, GFP_KERNEL); if (!new_groups) { ext4_msg(sb, KERN_ERR, "not enough memory for %d flex groups", size / (int) sizeof(struct flex_groups)); return -ENOMEM; } if (sbi->s_flex_groups) { memcpy(new_groups, sbi->s_flex_groups, (sbi->s_flex_groups_allocated * sizeof(struct flex_groups))); kvfree(sbi->s_flex_groups); } sbi->s_flex_groups = new_groups; sbi->s_flex_groups_allocated = size / sizeof(struct flex_groups); return 0; } static int ext4_fill_flex_info(struct super_block *sb) { struct ext4_sb_info *sbi = EXT4_SB(sb); struct ext4_group_desc *gdp = NULL; ext4_group_t flex_group; int i, err; sbi->s_log_groups_per_flex = sbi->s_es->s_log_groups_per_flex; if (sbi->s_log_groups_per_flex < 1 || sbi->s_log_groups_per_flex > 31) { sbi->s_log_groups_per_flex = 0; return 1; } err = ext4_alloc_flex_bg_array(sb, sbi->s_groups_count); if (err) goto failed; for (i = 0; i < sbi->s_groups_count; i++) { gdp = ext4_get_group_desc(sb, i, NULL); flex_group = ext4_flex_group(sbi, i); atomic_add(ext4_free_inodes_count(sb, gdp), &sbi->s_flex_groups[flex_group].free_inodes); atomic64_add(ext4_free_group_clusters(sb, gdp), &sbi->s_flex_groups[flex_group].free_clusters); atomic_add(ext4_used_dirs_count(sb, gdp), &sbi->s_flex_groups[flex_group].used_dirs); } return 1; failed: return 0; } static __le16 ext4_group_desc_csum(struct super_block *sb, 
__u32 block_group, struct ext4_group_desc *gdp) { int offset; __u16 crc = 0; __le32 le_group = cpu_to_le32(block_group); struct ext4_sb_info *sbi = EXT4_SB(sb); if (ext4_has_metadata_csum(sbi->s_sb)) { /* Use new metadata_csum algorithm */ __le16 save_csum; __u32 csum32; save_csum = gdp->bg_checksum; gdp->bg_checksum = 0; csum32 = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&le_group, sizeof(le_group)); csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp, sbi->s_desc_size); gdp->bg_checksum = save_csum; crc = csum32 & 0xFFFF; goto out; } /* old crc16 code */ if (!ext4_has_feature_gdt_csum(sb)) return 0; offset = offsetof(struct ext4_group_desc, bg_checksum); crc = crc16(~0, sbi->s_es->s_uuid, sizeof(sbi->s_es->s_uuid)); crc = crc16(crc, (__u8 *)&le_group, sizeof(le_group)); crc = crc16(crc, (__u8 *)gdp, offset); offset += sizeof(gdp->bg_checksum); /* skip checksum */ /* for checksum of struct ext4_group_desc do the rest...*/ if (ext4_has_feature_64bit(sb) && offset < le16_to_cpu(sbi->s_es->s_desc_size)) crc = crc16(crc, (__u8 *)gdp + offset, le16_to_cpu(sbi->s_es->s_desc_size) - offset); out: return cpu_to_le16(crc); } int ext4_group_desc_csum_verify(struct super_block *sb, __u32 block_group, struct ext4_group_desc *gdp) { if (ext4_has_group_desc_csum(sb) && (gdp->bg_checksum != ext4_group_desc_csum(sb, block_group, gdp))) return 0; return 1; } void ext4_group_desc_csum_set(struct super_block *sb, __u32 block_group, struct ext4_group_desc *gdp) { if (!ext4_has_group_desc_csum(sb)) return; gdp->bg_checksum = ext4_group_desc_csum(sb, block_group, gdp); } /* Called at mount-time, super-block is locked */ static int ext4_check_descriptors(struct super_block *sb, ext4_group_t *first_not_zeroed) { struct ext4_sb_info *sbi = EXT4_SB(sb); ext4_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block); ext4_fsblk_t last_block; ext4_fsblk_t block_bitmap; ext4_fsblk_t inode_bitmap; ext4_fsblk_t inode_table; int flexbg_flag = 0; ext4_group_t i, grp = sbi->s_groups_count; if (ext4_has_feature_flex_bg(sb)) flexbg_flag = 1; ext4_debug("Checking group descriptors"); for (i = 0; i < sbi->s_groups_count; i++) { struct ext4_group_desc *gdp = ext4_get_group_desc(sb, i, NULL); if (i == sbi->s_groups_count - 1 || flexbg_flag) last_block = ext4_blocks_count(sbi->s_es) - 1; else last_block = first_block + (EXT4_BLOCKS_PER_GROUP(sb) - 1); if ((grp == sbi->s_groups_count) && !(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED))) grp = i; block_bitmap = ext4_block_bitmap(sb, gdp); if (block_bitmap < first_block || block_bitmap > last_block) { ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " "Block bitmap for group %u not in group " "(block %llu)!", i, block_bitmap); return 0; } inode_bitmap = ext4_inode_bitmap(sb, gdp); if (inode_bitmap < first_block || inode_bitmap > last_block) { ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " "Inode bitmap for group %u not in group " "(block %llu)!", i, inode_bitmap); return 0; } inode_table = ext4_inode_table(sb, gdp); if (inode_table < first_block || inode_table + sbi->s_itb_per_group - 1 > last_block) { ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " "Inode table for group %u not in group " "(block %llu)!", i, inode_table); return 0; } ext4_lock_group(sb, i); if (!ext4_group_desc_csum_verify(sb, i, gdp)) { ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " "Checksum for group %u failed (%u!=%u)", i, le16_to_cpu(ext4_group_desc_csum(sb, i, gdp)), le16_to_cpu(gdp->bg_checksum)); if (!(sb->s_flags & MS_RDONLY)) { ext4_unlock_group(sb, i); return 0; } } ext4_unlock_group(sb, 
i); if (!flexbg_flag) first_block += EXT4_BLOCKS_PER_GROUP(sb); } if (NULL != first_not_zeroed) *first_not_zeroed = grp; return 1; } /* ext4_orphan_cleanup() walks a singly-linked list of inodes (starting at * the superblock) which were deleted from all directories, but held open by * a process at the time of a crash. We walk the list and try to delete these * inodes at recovery time (only with a read-write filesystem). * * In order to keep the orphan inode chain consistent during traversal (in * case of crash during recovery), we link each inode into the superblock * orphan list_head and handle it the same way as an inode deletion during * normal operation (which journals the operations for us). * * We only do an iget() and an iput() on each inode, which is very safe if we * accidentally point at an in-use or already deleted inode. The worst that * can happen in this case is that we get a "bit already cleared" message from * ext4_free_inode(). The only reason we would point at a wrong inode is if * e2fsck was run on this filesystem, and it must have already done the orphan * inode cleanup for us, so we can safely abort without any further action. */ static void ext4_orphan_cleanup(struct super_block *sb, struct ext4_super_block *es) { unsigned int s_flags = sb->s_flags; int nr_orphans = 0, nr_truncates = 0; #ifdef CONFIG_QUOTA int i; #endif if (!es->s_last_orphan) { jbd_debug(4, "no orphan inodes to clean up\n"); return; } if (bdev_read_only(sb->s_bdev)) { ext4_msg(sb, KERN_ERR, "write access " "unavailable, skipping orphan cleanup"); return; } /* Check if feature set would not allow a r/w mount */ if (!ext4_feature_set_ok(sb, 0)) { ext4_msg(sb, KERN_INFO, "Skipping orphan cleanup due to " "unknown ROCOMPAT features"); return; } if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) { /* don't clear list on RO mount w/ errors */ if (es->s_last_orphan && !(s_flags & MS_RDONLY)) { ext4_msg(sb, KERN_INFO, "Errors on filesystem, " "clearing orphan list.\n"); es->s_last_orphan = 0; } jbd_debug(1, "Skipping orphan recovery on fs with errors.\n"); return; } if (s_flags & MS_RDONLY) { ext4_msg(sb, KERN_INFO, "orphan cleanup on readonly fs"); sb->s_flags &= ~MS_RDONLY; } #ifdef CONFIG_QUOTA /* Needed for iput() to work correctly and not trash data */ sb->s_flags |= MS_ACTIVE; /* Turn on quotas so that they are updated correctly */ for (i = 0; i < EXT4_MAXQUOTAS; i++) { if (EXT4_SB(sb)->s_qf_names[i]) { int ret = ext4_quota_on_mount(sb, i); if (ret < 0) ext4_msg(sb, KERN_ERR, "Cannot turn on journaled " "quota: error %d", ret); } } #endif while (es->s_last_orphan) { struct inode *inode; inode = ext4_orphan_get(sb, le32_to_cpu(es->s_last_orphan)); if (IS_ERR(inode)) { es->s_last_orphan = 0; break; } list_add(&EXT4_I(inode)->i_orphan, &EXT4_SB(sb)->s_orphan); dquot_initialize(inode); if (inode->i_nlink) { if (test_opt(sb, DEBUG)) ext4_msg(sb, KERN_DEBUG, "%s: truncating inode %lu to %lld bytes", __func__, inode->i_ino, inode->i_size); jbd_debug(2, "truncating inode %lu to %lld bytes\n", inode->i_ino, inode->i_size); mutex_lock(&inode->i_mutex); truncate_inode_pages(inode->i_mapping, inode->i_size); ext4_truncate(inode); mutex_unlock(&inode->i_mutex); nr_truncates++; } else { if (test_opt(sb, DEBUG)) ext4_msg(sb, KERN_DEBUG, "%s: deleting unreferenced inode %lu", __func__, inode->i_ino); jbd_debug(2, "deleting unreferenced inode %lu\n", inode->i_ino); nr_orphans++; } iput(inode); /* The delete magic happens here! */ } #define PLURAL(x) (x), ((x) == 1) ? 
"" : "s" if (nr_orphans) ext4_msg(sb, KERN_INFO, "%d orphan inode%s deleted", PLURAL(nr_orphans)); if (nr_truncates) ext4_msg(sb, KERN_INFO, "%d truncate%s cleaned up", PLURAL(nr_truncates)); #ifdef CONFIG_QUOTA /* Turn quotas off */ for (i = 0; i < EXT4_MAXQUOTAS; i++) { if (sb_dqopt(sb)->files[i]) dquot_quota_off(sb, i); } #endif sb->s_flags = s_flags; /* Restore MS_RDONLY status */ } /* * Maximal extent format file size. * Resulting logical blkno at s_maxbytes must fit in our on-disk * extent format containers, within a sector_t, and within i_blocks * in the vfs. ext4 inode has 48 bits of i_block in fsblock units, * so that won't be a limiting factor. * * However there is other limiting factor. We do store extents in the form * of starting block and length, hence the resulting length of the extent * covering maximum file size must fit into on-disk format containers as * well. Given that length is always by 1 unit bigger than max unit (because * we count 0 as well) we have to lower the s_maxbytes by one fs block. * * Note, this does *not* consider any metadata overhead for vfs i_blocks. */ static loff_t ext4_max_size(int blkbits, int has_huge_files) { loff_t res; loff_t upper_limit = MAX_LFS_FILESIZE; /* small i_blocks in vfs inode? */ if (!has_huge_files || sizeof(blkcnt_t) < sizeof(u64)) { /* * CONFIG_LBDAF is not enabled implies the inode * i_block represent total blocks in 512 bytes * 32 == size of vfs inode i_blocks * 8 */ upper_limit = (1LL << 32) - 1; /* total blocks in file system block size */ upper_limit >>= (blkbits - 9); upper_limit <<= blkbits; } /* * 32-bit extent-start container, ee_block. We lower the maxbytes * by one fs block, so ee_len can cover the extent of maximum file * size */ res = (1LL << 32) - 1; res <<= blkbits; /* Sanity check against vm- & vfs- imposed limits */ if (res > upper_limit) res = upper_limit; return res; } /* * Maximal bitmap file size. There is a direct, and {,double-,triple-}indirect * block limit, and also a limit of (2^48 - 1) 512-byte sectors in i_blocks. * We need to be 1 filesystem block less than the 2^48 sector limit. */ static loff_t ext4_max_bitmap_size(int bits, int has_huge_files) { loff_t res = EXT4_NDIR_BLOCKS; int meta_blocks; loff_t upper_limit; /* This is calculated to be the largest file size for a dense, block * mapped file such that the file's total number of 512-byte sectors, * including data and all indirect blocks, does not exceed (2^48 - 1). * * __u32 i_blocks_lo and _u16 i_blocks_high represent the total * number of 512-byte sectors of the file. 
*/ if (!has_huge_files || sizeof(blkcnt_t) < sizeof(u64)) { /* * !has_huge_files or CONFIG_LBDAF not enabled implies that * the inode i_block field represents total file blocks in * 2^32 512-byte sectors == size of vfs inode i_blocks * 8 */ upper_limit = (1LL << 32) - 1; /* total blocks in file system block size */ upper_limit >>= (bits - 9); } else { /* * We use 48 bit ext4_inode i_blocks * With EXT4_HUGE_FILE_FL set the i_blocks * represent total number of blocks in * file system block size */ upper_limit = (1LL << 48) - 1; } /* indirect blocks */ meta_blocks = 1; /* double indirect blocks */ meta_blocks += 1 + (1LL << (bits-2)); /* tripple indirect blocks */ meta_blocks += 1 + (1LL << (bits-2)) + (1LL << (2*(bits-2))); upper_limit -= meta_blocks; upper_limit <<= bits; res += 1LL << (bits-2); res += 1LL << (2*(bits-2)); res += 1LL << (3*(bits-2)); res <<= bits; if (res > upper_limit) res = upper_limit; if (res > MAX_LFS_FILESIZE) res = MAX_LFS_FILESIZE; return res; } static ext4_fsblk_t descriptor_loc(struct super_block *sb, ext4_fsblk_t logical_sb_block, int nr) { struct ext4_sb_info *sbi = EXT4_SB(sb); ext4_group_t bg, first_meta_bg; int has_super = 0; first_meta_bg = le32_to_cpu(sbi->s_es->s_first_meta_bg); if (!ext4_has_feature_meta_bg(sb) || nr < first_meta_bg) return logical_sb_block + nr + 1; bg = sbi->s_desc_per_block * nr; if (ext4_bg_has_super(sb, bg)) has_super = 1; /* * If we have a meta_bg fs with 1k blocks, group 0's GDT is at * block 2, not 1. If s_first_data_block == 0 (bigalloc is enabled * on modern mke2fs or blksize > 1k on older mke2fs) then we must * compensate. */ if (sb->s_blocksize == 1024 && nr == 0 && le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block) == 0) has_super++; return (has_super + ext4_group_first_block_no(sb, bg)); } /** * ext4_get_stripe_size: Get the stripe size. * @sbi: In memory super block info * * If we have specified it via mount option, then * use the mount option value. If the value specified at mount time is * greater than the blocks per group use the super block value. * If the super block value is greater than blocks per group return 0. * Allocator needs it be less than blocks per group. * */ static unsigned long ext4_get_stripe_size(struct ext4_sb_info *sbi) { unsigned long stride = le16_to_cpu(sbi->s_es->s_raid_stride); unsigned long stripe_width = le32_to_cpu(sbi->s_es->s_raid_stripe_width); int ret; if (sbi->s_stripe && sbi->s_stripe <= sbi->s_blocks_per_group) ret = sbi->s_stripe; else if (stripe_width <= sbi->s_blocks_per_group) ret = stripe_width; else if (stride <= sbi->s_blocks_per_group) ret = stride; else ret = 0; /* * If the stripe width is 1, this makes no sense and * we set it to 0 to turn off stripe handling code. */ if (ret <= 1) ret = 0; return ret; } /* * Check whether this filesystem can be mounted based on * the features present and the RDONLY/RDWR mount requested. * Returns 1 if this filesystem can be mounted as requested, * 0 if it cannot be. 
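 *
 * In short: unknown incompat features always refuse the mount, unknown
 * ro_compat features only refuse a read-write mount (a read-only mount
 * is still allowed), and a filesystem carrying the readonly feature is
 * quietly forced to MS_RDONLY rather than rejected.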
*/ static int ext4_feature_set_ok(struct super_block *sb, int readonly) { if (ext4_has_unknown_ext4_incompat_features(sb)) { ext4_msg(sb, KERN_ERR, "Couldn't mount because of " "unsupported optional features (%x)", (le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_incompat) & ~EXT4_FEATURE_INCOMPAT_SUPP)); return 0; } if (readonly) return 1; if (ext4_has_feature_readonly(sb)) { ext4_msg(sb, KERN_INFO, "filesystem is read-only"); sb->s_flags |= MS_RDONLY; return 1; } /* Check that feature set is OK for a read-write mount */ if (ext4_has_unknown_ext4_ro_compat_features(sb)) { ext4_msg(sb, KERN_ERR, "couldn't mount RDWR because of " "unsupported optional features (%x)", (le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_ro_compat) & ~EXT4_FEATURE_RO_COMPAT_SUPP)); return 0; } /* * Large file size enabled file system can only be mounted * read-write on 32-bit systems if kernel is built with CONFIG_LBDAF */ if (ext4_has_feature_huge_file(sb)) { if (sizeof(blkcnt_t) < sizeof(u64)) { ext4_msg(sb, KERN_ERR, "Filesystem with huge files " "cannot be mounted RDWR without " "CONFIG_LBDAF"); return 0; } } if (ext4_has_feature_bigalloc(sb) && !ext4_has_feature_extents(sb)) { ext4_msg(sb, KERN_ERR, "Can't support bigalloc feature without " "extents feature\n"); return 0; } #ifndef CONFIG_QUOTA if (ext4_has_feature_quota(sb) && !readonly) { ext4_msg(sb, KERN_ERR, "Filesystem with quota feature cannot be mounted RDWR " "without CONFIG_QUOTA"); return 0; } #endif /* CONFIG_QUOTA */ return 1; } /* * This function is called once a day if we have errors logged * on the file system */ static void print_daily_error_info(unsigned long arg) { struct super_block *sb = (struct super_block *) arg; struct ext4_sb_info *sbi; struct ext4_super_block *es; sbi = EXT4_SB(sb); es = sbi->s_es; if (es->s_error_count) /* fsck newer than v1.41.13 is needed to clean this condition. 
*/ ext4_msg(sb, KERN_NOTICE, "error count since last fsck: %u", le32_to_cpu(es->s_error_count)); if (es->s_first_error_time) { printk(KERN_NOTICE "EXT4-fs (%s): initial error at time %u: %.*s:%d", sb->s_id, le32_to_cpu(es->s_first_error_time), (int) sizeof(es->s_first_error_func), es->s_first_error_func, le32_to_cpu(es->s_first_error_line)); if (es->s_first_error_ino) printk(": inode %u", le32_to_cpu(es->s_first_error_ino)); if (es->s_first_error_block) printk(": block %llu", (unsigned long long) le64_to_cpu(es->s_first_error_block)); printk("\n"); } if (es->s_last_error_time) { printk(KERN_NOTICE "EXT4-fs (%s): last error at time %u: %.*s:%d", sb->s_id, le32_to_cpu(es->s_last_error_time), (int) sizeof(es->s_last_error_func), es->s_last_error_func, le32_to_cpu(es->s_last_error_line)); if (es->s_last_error_ino) printk(": inode %u", le32_to_cpu(es->s_last_error_ino)); if (es->s_last_error_block) printk(": block %llu", (unsigned long long) le64_to_cpu(es->s_last_error_block)); printk("\n"); } mod_timer(&sbi->s_err_report, jiffies + 24*60*60*HZ); /* Once a day */ } /* Find next suitable group and run ext4_init_inode_table */ static int ext4_run_li_request(struct ext4_li_request *elr) { struct ext4_group_desc *gdp = NULL; ext4_group_t group, ngroups; struct super_block *sb; unsigned long timeout = 0; int ret = 0; sb = elr->lr_super; ngroups = EXT4_SB(sb)->s_groups_count; sb_start_write(sb); for (group = elr->lr_next_group; group < ngroups; group++) { gdp = ext4_get_group_desc(sb, group, NULL); if (!gdp) { ret = 1; break; } if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED))) break; } if (group >= ngroups) ret = 1; if (!ret) { timeout = jiffies; ret = ext4_init_inode_table(sb, group, elr->lr_timeout ? 0 : 1); if (elr->lr_timeout == 0) { timeout = (jiffies - timeout) * elr->lr_sbi->s_li_wait_mult; elr->lr_timeout = timeout; } elr->lr_next_sched = jiffies + elr->lr_timeout; elr->lr_next_group = group + 1; } sb_end_write(sb); return ret; } /* * Remove lr_request from the list_request and free the * request structure. Should be called with li_list_mtx held */ static void ext4_remove_li_request(struct ext4_li_request *elr) { struct ext4_sb_info *sbi; if (!elr) return; sbi = elr->lr_sbi; list_del(&elr->lr_request); sbi->s_li_request = NULL; kfree(elr); } static void ext4_unregister_li_request(struct super_block *sb) { mutex_lock(&ext4_li_mtx); if (!ext4_li_info) { mutex_unlock(&ext4_li_mtx); return; } mutex_lock(&ext4_li_info->li_list_mtx); ext4_remove_li_request(EXT4_SB(sb)->s_li_request); mutex_unlock(&ext4_li_info->li_list_mtx); mutex_unlock(&ext4_li_mtx); } static struct task_struct *ext4_lazyinit_task; /* * This is the function where ext4lazyinit thread lives. It walks * through the request list searching for next scheduled filesystem. * When such a fs is found, run the lazy initialization request * (ext4_rn_li_request) and keep track of the time spend in this * function. Based on that time we compute next schedule time of * the request. When walking through the list is complete, compute * next waking time and put itself into sleep. 
*/ static int ext4_lazyinit_thread(void *arg) { struct ext4_lazy_init *eli = (struct ext4_lazy_init *)arg; struct list_head *pos, *n; struct ext4_li_request *elr; unsigned long next_wakeup, cur; BUG_ON(NULL == eli); cont_thread: while (true) { next_wakeup = MAX_JIFFY_OFFSET; mutex_lock(&eli->li_list_mtx); if (list_empty(&eli->li_request_list)) { mutex_unlock(&eli->li_list_mtx); goto exit_thread; } list_for_each_safe(pos, n, &eli->li_request_list) { elr = list_entry(pos, struct ext4_li_request, lr_request); if (time_after_eq(jiffies, elr->lr_next_sched)) { if (ext4_run_li_request(elr) != 0) { /* error, remove the lazy_init job */ ext4_remove_li_request(elr); continue; } } if (time_before(elr->lr_next_sched, next_wakeup)) next_wakeup = elr->lr_next_sched; } mutex_unlock(&eli->li_list_mtx); try_to_freeze(); cur = jiffies; if ((time_after_eq(cur, next_wakeup)) || (MAX_JIFFY_OFFSET == next_wakeup)) { cond_resched(); continue; } schedule_timeout_interruptible(next_wakeup - cur); if (kthread_should_stop()) { ext4_clear_request_list(); goto exit_thread; } } exit_thread: /* * It looks like the request list is empty, but we need * to check it under the li_list_mtx lock, to prevent any * additions into it, and of course we should lock ext4_li_mtx * to atomically free the list and ext4_li_info, because at * this point another ext4 filesystem could be registering * new one. */ mutex_lock(&ext4_li_mtx); mutex_lock(&eli->li_list_mtx); if (!list_empty(&eli->li_request_list)) { mutex_unlock(&eli->li_list_mtx); mutex_unlock(&ext4_li_mtx); goto cont_thread; } mutex_unlock(&eli->li_list_mtx); kfree(ext4_li_info); ext4_li_info = NULL; mutex_unlock(&ext4_li_mtx); return 0; } static void ext4_clear_request_list(void) { struct list_head *pos, *n; struct ext4_li_request *elr; mutex_lock(&ext4_li_info->li_list_mtx); list_for_each_safe(pos, n, &ext4_li_info->li_request_list) { elr = list_entry(pos, struct ext4_li_request, lr_request); ext4_remove_li_request(elr); } mutex_unlock(&ext4_li_info->li_list_mtx); } static int ext4_run_lazyinit_thread(void) { ext4_lazyinit_task = kthread_run(ext4_lazyinit_thread, ext4_li_info, "ext4lazyinit"); if (IS_ERR(ext4_lazyinit_task)) { int err = PTR_ERR(ext4_lazyinit_task); ext4_clear_request_list(); kfree(ext4_li_info); ext4_li_info = NULL; printk(KERN_CRIT "EXT4-fs: error %d creating inode table " "initialization thread\n", err); return err; } ext4_li_info->li_state |= EXT4_LAZYINIT_RUNNING; return 0; } /* * Check whether it make sense to run itable init. thread or not. * If there is at least one uninitialized inode table, return * corresponding group number, else the loop goes through all * groups and return total number of groups. 
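 *
 * A return value equal to s_groups_count therefore means every group's
 * inode table is already marked EXT4_BG_INODE_ZEROED;
 * ext4_register_li_request() treats such a first_not_zeroed value as
 * "nothing to do" and skips queueing a lazy-init request.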
*/ static ext4_group_t ext4_has_uninit_itable(struct super_block *sb) { ext4_group_t group, ngroups = EXT4_SB(sb)->s_groups_count; struct ext4_group_desc *gdp = NULL; for (group = 0; group < ngroups; group++) { gdp = ext4_get_group_desc(sb, group, NULL); if (!gdp) continue; if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED))) break; } return group; } static int ext4_li_info_new(void) { struct ext4_lazy_init *eli = NULL; eli = kzalloc(sizeof(*eli), GFP_KERNEL); if (!eli) return -ENOMEM; INIT_LIST_HEAD(&eli->li_request_list); mutex_init(&eli->li_list_mtx); eli->li_state |= EXT4_LAZYINIT_QUIT; ext4_li_info = eli; return 0; } static struct ext4_li_request *ext4_li_request_new(struct super_block *sb, ext4_group_t start) { struct ext4_sb_info *sbi = EXT4_SB(sb); struct ext4_li_request *elr; elr = kzalloc(sizeof(*elr), GFP_KERNEL); if (!elr) return NULL; elr->lr_super = sb; elr->lr_sbi = sbi; elr->lr_next_group = start; /* * Randomize first schedule time of the request to * spread the inode table initialization requests * better. */ elr->lr_next_sched = jiffies + (prandom_u32() % (EXT4_DEF_LI_MAX_START_DELAY * HZ)); return elr; } int ext4_register_li_request(struct super_block *sb, ext4_group_t first_not_zeroed) { struct ext4_sb_info *sbi = EXT4_SB(sb); struct ext4_li_request *elr = NULL; ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count; int ret = 0; mutex_lock(&ext4_li_mtx); if (sbi->s_li_request != NULL) { /* * Reset timeout so it can be computed again, because * s_li_wait_mult might have changed. */ sbi->s_li_request->lr_timeout = 0; goto out; } if (first_not_zeroed == ngroups || (sb->s_flags & MS_RDONLY) || !test_opt(sb, INIT_INODE_TABLE)) goto out; elr = ext4_li_request_new(sb, first_not_zeroed); if (!elr) { ret = -ENOMEM; goto out; } if (NULL == ext4_li_info) { ret = ext4_li_info_new(); if (ret) goto out; } mutex_lock(&ext4_li_info->li_list_mtx); list_add(&elr->lr_request, &ext4_li_info->li_request_list); mutex_unlock(&ext4_li_info->li_list_mtx); sbi->s_li_request = elr; /* * set elr to NULL here since it has been inserted to * the request_list and the removal and free of it is * handled by ext4_clear_request_list from now on. */ elr = NULL; if (!(ext4_li_info->li_state & EXT4_LAZYINIT_RUNNING)) { ret = ext4_run_lazyinit_thread(); if (ret) goto out; } out: mutex_unlock(&ext4_li_mtx); if (ret) kfree(elr); return ret; } /* * We do not need to lock anything since this is called on * module unload. */ static void ext4_destroy_lazyinit_thread(void) { /* * If thread exited earlier * there's nothing to be done. 
*/ if (!ext4_li_info || !ext4_lazyinit_task) return; kthread_stop(ext4_lazyinit_task); } static int set_journal_csum_feature_set(struct super_block *sb) { int ret = 1; int compat, incompat; struct ext4_sb_info *sbi = EXT4_SB(sb); if (ext4_has_metadata_csum(sb)) { /* journal checksum v3 */ compat = 0; incompat = JBD2_FEATURE_INCOMPAT_CSUM_V3; } else { /* journal checksum v1 */ compat = JBD2_FEATURE_COMPAT_CHECKSUM; incompat = 0; } jbd2_journal_clear_features(sbi->s_journal, JBD2_FEATURE_COMPAT_CHECKSUM, 0, JBD2_FEATURE_INCOMPAT_CSUM_V3 | JBD2_FEATURE_INCOMPAT_CSUM_V2); if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) { ret = jbd2_journal_set_features(sbi->s_journal, compat, 0, JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT | incompat); } else if (test_opt(sb, JOURNAL_CHECKSUM)) { ret = jbd2_journal_set_features(sbi->s_journal, compat, 0, incompat); jbd2_journal_clear_features(sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT); } else { jbd2_journal_clear_features(sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT); } return ret; } /* * Note: calculating the overhead so we can be compatible with * historical BSD practice is quite difficult in the face of * clusters/bigalloc. This is because multiple metadata blocks from * different block group can end up in the same allocation cluster. * Calculating the exact overhead in the face of clustered allocation * requires either O(all block bitmaps) in memory or O(number of block * groups**2) in time. We will still calculate the superblock for * older file systems --- and if we come across with a bigalloc file * system with zero in s_overhead_clusters the estimate will be close to * correct especially for very large cluster sizes --- but for newer * file systems, it's better to calculate this figure once at mkfs * time, and store it in the superblock. If the superblock value is * present (even for non-bigalloc file systems), we will use it. 
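 *
 * For the common non-bigalloc case the per-group figure degenerates to
 * a simple sum that count_overhead() below returns directly: the
 * optional superblock backup, the group descriptor blocks, the two
 * bitmap blocks and the group's inode table blocks.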
*/ static int count_overhead(struct super_block *sb, ext4_group_t grp, char *buf) { struct ext4_sb_info *sbi = EXT4_SB(sb); struct ext4_group_desc *gdp; ext4_fsblk_t first_block, last_block, b; ext4_group_t i, ngroups = ext4_get_groups_count(sb); int s, j, count = 0; if (!ext4_has_feature_bigalloc(sb)) return (ext4_bg_has_super(sb, grp) + ext4_bg_num_gdb(sb, grp) + sbi->s_itb_per_group + 2); first_block = le32_to_cpu(sbi->s_es->s_first_data_block) + (grp * EXT4_BLOCKS_PER_GROUP(sb)); last_block = first_block + EXT4_BLOCKS_PER_GROUP(sb) - 1; for (i = 0; i < ngroups; i++) { gdp = ext4_get_group_desc(sb, i, NULL); b = ext4_block_bitmap(sb, gdp); if (b >= first_block && b <= last_block) { ext4_set_bit(EXT4_B2C(sbi, b - first_block), buf); count++; } b = ext4_inode_bitmap(sb, gdp); if (b >= first_block && b <= last_block) { ext4_set_bit(EXT4_B2C(sbi, b - first_block), buf); count++; } b = ext4_inode_table(sb, gdp); if (b >= first_block && b + sbi->s_itb_per_group <= last_block) for (j = 0; j < sbi->s_itb_per_group; j++, b++) { int c = EXT4_B2C(sbi, b - first_block); ext4_set_bit(c, buf); count++; } if (i != grp) continue; s = 0; if (ext4_bg_has_super(sb, grp)) { ext4_set_bit(s++, buf); count++; } for (j = ext4_bg_num_gdb(sb, grp); j > 0; j--) { ext4_set_bit(EXT4_B2C(sbi, s++), buf); count++; } } if (!count) return 0; return EXT4_CLUSTERS_PER_GROUP(sb) - ext4_count_free(buf, EXT4_CLUSTERS_PER_GROUP(sb) / 8); } /* * Compute the overhead and stash it in sbi->s_overhead */ int ext4_calculate_overhead(struct super_block *sb) { struct ext4_sb_info *sbi = EXT4_SB(sb); struct ext4_super_block *es = sbi->s_es; ext4_group_t i, ngroups = ext4_get_groups_count(sb); ext4_fsblk_t overhead = 0; char *buf = (char *) get_zeroed_page(GFP_NOFS); if (!buf) return -ENOMEM; /* * Compute the overhead (FS structures). This is constant * for a given filesystem unless the number of block groups * changes so we cache the previous value until it does. */ /* * All of the blocks before first_data_block are overhead */ overhead = EXT4_B2C(sbi, le32_to_cpu(es->s_first_data_block)); /* * Add the overhead found in each block group */ for (i = 0; i < ngroups; i++) { int blks; blks = count_overhead(sb, i, buf); overhead += blks; if (blks) memset(buf, 0, PAGE_SIZE); cond_resched(); } /* Add the internal journal blocks as well */ if (sbi->s_journal && !sbi->journal_bdev) overhead += EXT4_NUM_B2C(sbi, sbi->s_journal->j_maxlen); sbi->s_overhead = overhead; smp_wmb(); free_page((unsigned long) buf); return 0; } static void ext4_set_resv_clusters(struct super_block *sb) { ext4_fsblk_t resv_clusters; struct ext4_sb_info *sbi = EXT4_SB(sb); /* * There's no need to reserve anything when we aren't using extents. * The space estimates are exact, there are no unwritten extents, * hole punching doesn't need new metadata... This is needed especially * to keep ext2/3 backward compatibility. */ if (!ext4_has_feature_extents(sb)) return; /* * By default we reserve 2% or 4096 clusters, whichever is smaller. * This should cover the situations where we can not afford to run * out of space like for example punch hole, or converting * unwritten extents in delalloc path. In most cases such * allocation would require 1, or 2 blocks, higher numbers are * very rare. 
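 *
 * Worked example: a volume with 26,214,400 4KiB clusters (about 100GiB)
 * would get 26,214,400 / 50 = 524,288 clusters from the 2% rule, which
 * the min_t() below clamps to 4096 clusters, i.e. 16MiB of reserved
 * space on such a filesystem.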
*/ resv_clusters = (ext4_blocks_count(sbi->s_es) >> sbi->s_cluster_bits); do_div(resv_clusters, 50); resv_clusters = min_t(ext4_fsblk_t, resv_clusters, 4096); atomic64_set(&sbi->s_resv_clusters, resv_clusters); } static int ext4_fill_super(struct super_block *sb, void *data, int silent) { char *orig_data = kstrdup(data, GFP_KERNEL); struct buffer_head *bh; struct ext4_super_block *es = NULL; struct ext4_sb_info *sbi; ext4_fsblk_t block; ext4_fsblk_t sb_block = get_sb_block(&data); ext4_fsblk_t logical_sb_block; unsigned long offset = 0; unsigned long journal_devnum = 0; unsigned long def_mount_opts; struct inode *root; const char *descr; int ret = -ENOMEM; int blocksize, clustersize; unsigned int db_count; unsigned int i; int needs_recovery, has_huge_files, has_bigalloc; __u64 blocks_count; int err = 0; unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO; ext4_group_t first_not_zeroed; sbi = kzalloc(sizeof(*sbi), GFP_KERNEL); if (!sbi) goto out_free_orig; sbi->s_blockgroup_lock = kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL); if (!sbi->s_blockgroup_lock) { kfree(sbi); goto out_free_orig; } sb->s_fs_info = sbi; sbi->s_sb = sb; sbi->s_inode_readahead_blks = EXT4_DEF_INODE_READAHEAD_BLKS; sbi->s_sb_block = sb_block; if (sb->s_bdev->bd_part) sbi->s_sectors_written_start = part_stat_read(sb->s_bdev->bd_part, sectors[1]); /* Cleanup superblock name */ strreplace(sb->s_id, '/', '!'); /* -EINVAL is default */ ret = -EINVAL; blocksize = sb_min_blocksize(sb, EXT4_MIN_BLOCK_SIZE); if (!blocksize) { ext4_msg(sb, KERN_ERR, "unable to set blocksize"); goto out_fail; } /* * The ext4 superblock will not be buffer aligned for other than 1kB * block sizes. We need to calculate the offset from buffer start. */ if (blocksize != EXT4_MIN_BLOCK_SIZE) { logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE; offset = do_div(logical_sb_block, blocksize); } else { logical_sb_block = sb_block; } if (!(bh = sb_bread_unmovable(sb, logical_sb_block))) { ext4_msg(sb, KERN_ERR, "unable to read superblock"); goto out_fail; } /* * Note: s_es must be initialized as soon as possible because * some ext4 macro-instructions depend on its value */ es = (struct ext4_super_block *) (bh->b_data + offset); sbi->s_es = es; sb->s_magic = le16_to_cpu(es->s_magic); if (sb->s_magic != EXT4_SUPER_MAGIC) goto cantfind_ext4; sbi->s_kbytes_written = le64_to_cpu(es->s_kbytes_written); /* Warn if metadata_csum and gdt_csum are both set. */ if (ext4_has_feature_metadata_csum(sb) && ext4_has_feature_gdt_csum(sb)) ext4_warning(sb, "metadata_csum and uninit_bg are " "redundant flags; please run fsck."); /* Check for a known checksum algorithm */ if (!ext4_verify_csum_type(sb, es)) { ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with " "unknown checksum algorithm."); silent = 1; goto cantfind_ext4; } /* Load the checksum driver */ if (ext4_has_feature_metadata_csum(sb)) { sbi->s_chksum_driver = crypto_alloc_shash("crc32c", 0, 0); if (IS_ERR(sbi->s_chksum_driver)) { ext4_msg(sb, KERN_ERR, "Cannot load crc32c driver."); ret = PTR_ERR(sbi->s_chksum_driver); sbi->s_chksum_driver = NULL; goto failed_mount; } } /* Check superblock checksum */ if (!ext4_superblock_csum_verify(sb, es)) { ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with " "invalid superblock checksum. 
Run e2fsck?"); silent = 1; ret = -EFSBADCRC; goto cantfind_ext4; } /* Precompute checksum seed for all metadata */ if (ext4_has_feature_csum_seed(sb)) sbi->s_csum_seed = le32_to_cpu(es->s_checksum_seed); else if (ext4_has_metadata_csum(sb)) sbi->s_csum_seed = ext4_chksum(sbi, ~0, es->s_uuid, sizeof(es->s_uuid)); /* Set defaults before we parse the mount options */ def_mount_opts = le32_to_cpu(es->s_default_mount_opts); set_opt(sb, INIT_INODE_TABLE); if (def_mount_opts & EXT4_DEFM_DEBUG) set_opt(sb, DEBUG); if (def_mount_opts & EXT4_DEFM_BSDGROUPS) set_opt(sb, GRPID); if (def_mount_opts & EXT4_DEFM_UID16) set_opt(sb, NO_UID32); /* xattr user namespace & acls are now defaulted on */ set_opt(sb, XATTR_USER); #ifdef CONFIG_EXT4_FS_POSIX_ACL set_opt(sb, POSIX_ACL); #endif /* don't forget to enable journal_csum when metadata_csum is enabled. */ if (ext4_has_metadata_csum(sb)) set_opt(sb, JOURNAL_CHECKSUM); if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_DATA) set_opt(sb, JOURNAL_DATA); else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_ORDERED) set_opt(sb, ORDERED_DATA); else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_WBACK) set_opt(sb, WRITEBACK_DATA); if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_PANIC) set_opt(sb, ERRORS_PANIC); else if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_CONTINUE) set_opt(sb, ERRORS_CONT); else set_opt(sb, ERRORS_RO); /* block_validity enabled by default; disable with noblock_validity */ set_opt(sb, BLOCK_VALIDITY); if (def_mount_opts & EXT4_DEFM_DISCARD) set_opt(sb, DISCARD); sbi->s_resuid = make_kuid(&init_user_ns, le16_to_cpu(es->s_def_resuid)); sbi->s_resgid = make_kgid(&init_user_ns, le16_to_cpu(es->s_def_resgid)); sbi->s_commit_interval = JBD2_DEFAULT_MAX_COMMIT_AGE * HZ; sbi->s_min_batch_time = EXT4_DEF_MIN_BATCH_TIME; sbi->s_max_batch_time = EXT4_DEF_MAX_BATCH_TIME; if ((def_mount_opts & EXT4_DEFM_NOBARRIER) == 0) set_opt(sb, BARRIER); /* * enable delayed allocation by default * Use -o nodelalloc to turn it off */ if (!IS_EXT3_SB(sb) && !IS_EXT2_SB(sb) && ((def_mount_opts & EXT4_DEFM_NODELALLOC) == 0)) set_opt(sb, DELALLOC); /* * set default s_li_wait_mult for lazyinit, for the case there is * no mount option specified. */ sbi->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT; if (!parse_options((char *) sbi->s_es->s_mount_opts, sb, &journal_devnum, &journal_ioprio, 0)) { ext4_msg(sb, KERN_WARNING, "failed to parse options in superblock: %s", sbi->s_es->s_mount_opts); } sbi->s_def_mount_opt = sbi->s_mount_opt; if (!parse_options((char *) data, sb, &journal_devnum, &journal_ioprio, 0)) goto failed_mount; if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) { printk_once(KERN_WARNING "EXT4-fs: Warning: mounting " "with data=journal disables delayed " "allocation and O_DIRECT support!\n"); if (test_opt2(sb, EXPLICIT_DELALLOC)) { ext4_msg(sb, KERN_ERR, "can't mount with " "both data=journal and delalloc"); goto failed_mount; } if (test_opt(sb, DIOREAD_NOLOCK)) { ext4_msg(sb, KERN_ERR, "can't mount with " "both data=journal and dioread_nolock"); goto failed_mount; } if (test_opt(sb, DAX)) { ext4_msg(sb, KERN_ERR, "can't mount with " "both data=journal and dax"); goto failed_mount; } if (test_opt(sb, DELALLOC)) clear_opt(sb, DELALLOC); } else { sb->s_iflags |= SB_I_CGROUPWB; } sb->s_flags = (sb->s_flags & ~MS_POSIXACL) | (test_opt(sb, POSIX_ACL) ? 
MS_POSIXACL : 0); if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV && (ext4_has_compat_features(sb) || ext4_has_ro_compat_features(sb) || ext4_has_incompat_features(sb))) ext4_msg(sb, KERN_WARNING, "feature flags set on rev 0 fs, " "running e2fsck is recommended"); if (es->s_creator_os == cpu_to_le32(EXT4_OS_HURD)) { set_opt2(sb, HURD_COMPAT); if (ext4_has_feature_64bit(sb)) { ext4_msg(sb, KERN_ERR, "The Hurd can't support 64-bit file systems"); goto failed_mount; } } if (IS_EXT2_SB(sb)) { if (ext2_feature_set_ok(sb)) ext4_msg(sb, KERN_INFO, "mounting ext2 file system " "using the ext4 subsystem"); else { ext4_msg(sb, KERN_ERR, "couldn't mount as ext2 due " "to feature incompatibilities"); goto failed_mount; } } if (IS_EXT3_SB(sb)) { if (ext3_feature_set_ok(sb)) ext4_msg(sb, KERN_INFO, "mounting ext3 file system " "using the ext4 subsystem"); else { ext4_msg(sb, KERN_ERR, "couldn't mount as ext3 due " "to feature incompatibilities"); goto failed_mount; } } /* * Check feature flags regardless of the revision level, since we * previously didn't change the revision level when setting the flags, * so there is a chance incompat flags are set on a rev 0 filesystem. */ if (!ext4_feature_set_ok(sb, (sb->s_flags & MS_RDONLY))) goto failed_mount; blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size); if (blocksize < EXT4_MIN_BLOCK_SIZE || blocksize > EXT4_MAX_BLOCK_SIZE) { ext4_msg(sb, KERN_ERR, "Unsupported filesystem blocksize %d", blocksize); goto failed_mount; } if (sbi->s_mount_opt & EXT4_MOUNT_DAX) { if (blocksize != PAGE_SIZE) { ext4_msg(sb, KERN_ERR, "error: unsupported blocksize for dax"); goto failed_mount; } if (!sb->s_bdev->bd_disk->fops->direct_access) { ext4_msg(sb, KERN_ERR, "error: device does not support dax"); goto failed_mount; } } if (ext4_has_feature_encrypt(sb) && es->s_encryption_level) { ext4_msg(sb, KERN_ERR, "Unsupported encryption level %d", es->s_encryption_level); goto failed_mount; } if (sb->s_blocksize != blocksize) { /* Validate the filesystem blocksize */ if (!sb_set_blocksize(sb, blocksize)) { ext4_msg(sb, KERN_ERR, "bad block size %d", blocksize); goto failed_mount; } brelse(bh); logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE; offset = do_div(logical_sb_block, blocksize); bh = sb_bread_unmovable(sb, logical_sb_block); if (!bh) { ext4_msg(sb, KERN_ERR, "Can't read superblock on 2nd try"); goto failed_mount; } es = (struct ext4_super_block *)(bh->b_data + offset); sbi->s_es = es; if (es->s_magic != cpu_to_le16(EXT4_SUPER_MAGIC)) { ext4_msg(sb, KERN_ERR, "Magic mismatch, very weird!"); goto failed_mount; } } has_huge_files = ext4_has_feature_huge_file(sb); sbi->s_bitmap_maxbytes = ext4_max_bitmap_size(sb->s_blocksize_bits, has_huge_files); sb->s_maxbytes = ext4_max_size(sb->s_blocksize_bits, has_huge_files); if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV) { sbi->s_inode_size = EXT4_GOOD_OLD_INODE_SIZE; sbi->s_first_ino = EXT4_GOOD_OLD_FIRST_INO; } else { sbi->s_inode_size = le16_to_cpu(es->s_inode_size); sbi->s_first_ino = le32_to_cpu(es->s_first_ino); if ((sbi->s_inode_size < EXT4_GOOD_OLD_INODE_SIZE) || (!is_power_of_2(sbi->s_inode_size)) || (sbi->s_inode_size > blocksize)) { ext4_msg(sb, KERN_ERR, "unsupported inode size: %d", sbi->s_inode_size); goto failed_mount; } if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE) sb->s_time_gran = 1 << (EXT4_EPOCH_BITS - 2); } sbi->s_desc_size = le16_to_cpu(es->s_desc_size); if (ext4_has_feature_64bit(sb)) { if (sbi->s_desc_size < EXT4_MIN_DESC_SIZE_64BIT || sbi->s_desc_size > EXT4_MAX_DESC_SIZE || 
!is_power_of_2(sbi->s_desc_size)) { ext4_msg(sb, KERN_ERR, "unsupported descriptor size %lu", sbi->s_desc_size); goto failed_mount; } } else sbi->s_desc_size = EXT4_MIN_DESC_SIZE; sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group); sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group); if (EXT4_INODE_SIZE(sb) == 0 || EXT4_INODES_PER_GROUP(sb) == 0) goto cantfind_ext4; sbi->s_inodes_per_block = blocksize / EXT4_INODE_SIZE(sb); if (sbi->s_inodes_per_block == 0) goto cantfind_ext4; sbi->s_itb_per_group = sbi->s_inodes_per_group / sbi->s_inodes_per_block; sbi->s_desc_per_block = blocksize / EXT4_DESC_SIZE(sb); sbi->s_sbh = bh; sbi->s_mount_state = le16_to_cpu(es->s_state); sbi->s_addr_per_block_bits = ilog2(EXT4_ADDR_PER_BLOCK(sb)); sbi->s_desc_per_block_bits = ilog2(EXT4_DESC_PER_BLOCK(sb)); for (i = 0; i < 4; i++) sbi->s_hash_seed[i] = le32_to_cpu(es->s_hash_seed[i]); sbi->s_def_hash_version = es->s_def_hash_version; if (ext4_has_feature_dir_index(sb)) { i = le32_to_cpu(es->s_flags); if (i & EXT2_FLAGS_UNSIGNED_HASH) sbi->s_hash_unsigned = 3; else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) { #ifdef __CHAR_UNSIGNED__ if (!(sb->s_flags & MS_RDONLY)) es->s_flags |= cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH); sbi->s_hash_unsigned = 3; #else if (!(sb->s_flags & MS_RDONLY)) es->s_flags |= cpu_to_le32(EXT2_FLAGS_SIGNED_HASH); #endif } } /* Handle clustersize */ clustersize = BLOCK_SIZE << le32_to_cpu(es->s_log_cluster_size); has_bigalloc = ext4_has_feature_bigalloc(sb); if (has_bigalloc) { if (clustersize < blocksize) { ext4_msg(sb, KERN_ERR, "cluster size (%d) smaller than " "block size (%d)", clustersize, blocksize); goto failed_mount; } sbi->s_cluster_bits = le32_to_cpu(es->s_log_cluster_size) - le32_to_cpu(es->s_log_block_size); sbi->s_clusters_per_group = le32_to_cpu(es->s_clusters_per_group); if (sbi->s_clusters_per_group > blocksize * 8) { ext4_msg(sb, KERN_ERR, "#clusters per group too big: %lu", sbi->s_clusters_per_group); goto failed_mount; } if (sbi->s_blocks_per_group != (sbi->s_clusters_per_group * (clustersize / blocksize))) { ext4_msg(sb, KERN_ERR, "blocks per group (%lu) and " "clusters per group (%lu) inconsistent", sbi->s_blocks_per_group, sbi->s_clusters_per_group); goto failed_mount; } } else { if (clustersize != blocksize) { ext4_warning(sb, "fragment/cluster size (%d) != " "block size (%d)", clustersize, blocksize); clustersize = blocksize; } if (sbi->s_blocks_per_group > blocksize * 8) { ext4_msg(sb, KERN_ERR, "#blocks per group too big: %lu", sbi->s_blocks_per_group); goto failed_mount; } sbi->s_clusters_per_group = sbi->s_blocks_per_group; sbi->s_cluster_bits = 0; } sbi->s_cluster_ratio = clustersize / blocksize; if (sbi->s_inodes_per_group > blocksize * 8) { ext4_msg(sb, KERN_ERR, "#inodes per group too big: %lu", sbi->s_inodes_per_group); goto failed_mount; } /* Do we have standard group size of clustersize * 8 blocks ? */ if (sbi->s_blocks_per_group == clustersize << 3) set_opt2(sb, STD_GROUP_SIZE); /* * Test whether we have more sectors than will fit in sector_t, * and whether the max offset is addressable by the page cache. 
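 *
 * generic_check_addressable() is the common VFS helper for this pair of
 * checks; the CONFIG_LBDAF hint printed on failure below only applies
 * when sector_t is 32 bits wide.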
*/ err = generic_check_addressable(sb->s_blocksize_bits, ext4_blocks_count(es)); if (err) { ext4_msg(sb, KERN_ERR, "filesystem" " too large to mount safely on this system"); if (sizeof(sector_t) < 8) ext4_msg(sb, KERN_WARNING, "CONFIG_LBDAF not enabled"); goto failed_mount; } if (EXT4_BLOCKS_PER_GROUP(sb) == 0) goto cantfind_ext4; /* check blocks count against device size */ blocks_count = sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits; if (blocks_count && ext4_blocks_count(es) > blocks_count) { ext4_msg(sb, KERN_WARNING, "bad geometry: block count %llu " "exceeds size of device (%llu blocks)", ext4_blocks_count(es), blocks_count); goto failed_mount; } /* * It makes no sense for the first data block to be beyond the end * of the filesystem. */ if (le32_to_cpu(es->s_first_data_block) >= ext4_blocks_count(es)) { ext4_msg(sb, KERN_WARNING, "bad geometry: first data " "block %u is beyond end of filesystem (%llu)", le32_to_cpu(es->s_first_data_block), ext4_blocks_count(es)); goto failed_mount; } blocks_count = (ext4_blocks_count(es) - le32_to_cpu(es->s_first_data_block) + EXT4_BLOCKS_PER_GROUP(sb) - 1); do_div(blocks_count, EXT4_BLOCKS_PER_GROUP(sb)); if (blocks_count > ((uint64_t)1<<32) - EXT4_DESC_PER_BLOCK(sb)) { ext4_msg(sb, KERN_WARNING, "groups count too large: %u " "(block count %llu, first data block %u, " "blocks per group %lu)", sbi->s_groups_count, ext4_blocks_count(es), le32_to_cpu(es->s_first_data_block), EXT4_BLOCKS_PER_GROUP(sb)); goto failed_mount; } sbi->s_groups_count = blocks_count; sbi->s_blockfile_groups = min_t(ext4_group_t, sbi->s_groups_count, (EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb))); db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) / EXT4_DESC_PER_BLOCK(sb); sbi->s_group_desc = ext4_kvmalloc(db_count * sizeof(struct buffer_head *), GFP_KERNEL); if (sbi->s_group_desc == NULL) { ext4_msg(sb, KERN_ERR, "not enough memory"); ret = -ENOMEM; goto failed_mount; } bgl_lock_init(sbi->s_blockgroup_lock); for (i = 0; i < db_count; i++) { block = descriptor_loc(sb, logical_sb_block, i); sbi->s_group_desc[i] = sb_bread_unmovable(sb, block); if (!sbi->s_group_desc[i]) { ext4_msg(sb, KERN_ERR, "can't read group descriptor %d", i); db_count = i; goto failed_mount2; } } if (!ext4_check_descriptors(sb, &first_not_zeroed)) { ext4_msg(sb, KERN_ERR, "group descriptors corrupted!"); ret = -EFSCORRUPTED; goto failed_mount2; } sbi->s_gdb_count = db_count; get_random_bytes(&sbi->s_next_generation, sizeof(u32)); spin_lock_init(&sbi->s_next_gen_lock); setup_timer(&sbi->s_err_report, print_daily_error_info, (unsigned long) sb); /* Register extent status tree shrinker */ if (ext4_es_register_shrinker(sbi)) goto failed_mount3; sbi->s_stripe = ext4_get_stripe_size(sbi); sbi->s_extent_max_zeroout_kb = 32; /* * set up enough so that it can read an inode */ sb->s_op = &ext4_sops; sb->s_export_op = &ext4_export_ops; sb->s_xattr = ext4_xattr_handlers; #ifdef CONFIG_QUOTA sb->dq_op = &ext4_quota_operations; if (ext4_has_feature_quota(sb)) sb->s_qcop = &dquot_quotactl_sysfile_ops; else sb->s_qcop = &ext4_qctl_operations; sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP; #endif memcpy(sb->s_uuid, es->s_uuid, sizeof(es->s_uuid)); INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */ mutex_init(&sbi->s_orphan_lock); sb->s_root = NULL; needs_recovery = (es->s_last_orphan != 0 || ext4_has_feature_journal_needs_recovery(sb)); if (ext4_has_feature_mmp(sb) && !(sb->s_flags & MS_RDONLY)) if (ext4_multi_mount_protect(sb, le64_to_cpu(es->s_mmp_block))) goto failed_mount3a; 
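/*
 * From this point on, error paths have more state to unwind (the extent
 * status shrinker, the error-report timer and, if started, the MMP
 * kthread), which is why failures below bail out via failed_mount3a or
 * failed_mount_wq rather than the earlier labels.
 */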
/* * The first inode we look at is the journal inode. Don't try * root first: it may be modified in the journal! */ if (!test_opt(sb, NOLOAD) && ext4_has_feature_journal(sb)) { if (ext4_load_journal(sb, es, journal_devnum)) goto failed_mount3a; } else if (test_opt(sb, NOLOAD) && !(sb->s_flags & MS_RDONLY) && ext4_has_feature_journal_needs_recovery(sb)) { ext4_msg(sb, KERN_ERR, "required journal recovery " "suppressed and not mounted read-only"); goto failed_mount_wq; } else { /* Nojournal mode, all journal mount options are illegal */ if (test_opt2(sb, EXPLICIT_JOURNAL_CHECKSUM)) { ext4_msg(sb, KERN_ERR, "can't mount with " "journal_checksum, fs mounted w/o journal"); goto failed_mount_wq; } if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) { ext4_msg(sb, KERN_ERR, "can't mount with " "journal_async_commit, fs mounted w/o journal"); goto failed_mount_wq; } if (sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ) { ext4_msg(sb, KERN_ERR, "can't mount with " "commit=%lu, fs mounted w/o journal", sbi->s_commit_interval / HZ); goto failed_mount_wq; } if (EXT4_MOUNT_DATA_FLAGS & (sbi->s_mount_opt ^ sbi->s_def_mount_opt)) { ext4_msg(sb, KERN_ERR, "can't mount with " "data=, fs mounted w/o journal"); goto failed_mount_wq; } sbi->s_def_mount_opt &= EXT4_MOUNT_JOURNAL_CHECKSUM; clear_opt(sb, JOURNAL_CHECKSUM); clear_opt(sb, DATA_FLAGS); sbi->s_journal = NULL; needs_recovery = 0; goto no_journal; } if (ext4_has_feature_64bit(sb) && !jbd2_journal_set_features(EXT4_SB(sb)->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_64BIT)) { ext4_msg(sb, KERN_ERR, "Failed to set 64-bit journal feature"); goto failed_mount_wq; } if (!set_journal_csum_feature_set(sb)) { ext4_msg(sb, KERN_ERR, "Failed to set journal checksum " "feature set"); goto failed_mount_wq; } /* We have now updated the journal if required, so we can * validate the data journaling mode. */ switch (test_opt(sb, DATA_FLAGS)) { case 0: /* No mode set, assume a default based on the journal * capabilities: ORDERED_DATA if the journal can * cope, else JOURNAL_DATA */ if (jbd2_journal_check_available_features (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)) set_opt(sb, ORDERED_DATA); else set_opt(sb, JOURNAL_DATA); break; case EXT4_MOUNT_ORDERED_DATA: case EXT4_MOUNT_WRITEBACK_DATA: if (!jbd2_journal_check_available_features (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)) { ext4_msg(sb, KERN_ERR, "Journal does not support " "requested data journaling mode"); goto failed_mount_wq; } default: break; } set_task_ioprio(sbi->s_journal->j_task, journal_ioprio); sbi->s_journal->j_commit_callback = ext4_journal_commit_callback; no_journal: if (ext4_mballoc_ready) { sbi->s_mb_cache = ext4_xattr_create_cache(sb->s_id); if (!sbi->s_mb_cache) { ext4_msg(sb, KERN_ERR, "Failed to create an mb_cache"); goto failed_mount_wq; } } if ((DUMMY_ENCRYPTION_ENABLED(sbi) || ext4_has_feature_encrypt(sb)) && (blocksize != PAGE_CACHE_SIZE)) { ext4_msg(sb, KERN_ERR, "Unsupported blocksize for fs encryption"); goto failed_mount_wq; } if (DUMMY_ENCRYPTION_ENABLED(sbi) && !(sb->s_flags & MS_RDONLY) && !ext4_has_feature_encrypt(sb)) { ext4_set_feature_encrypt(sb); ext4_commit_super(sb, 1); } /* * Get the # of file system overhead blocks from the * superblock if present. */ if (es->s_overhead_clusters) sbi->s_overhead = le32_to_cpu(es->s_overhead_clusters); else { err = ext4_calculate_overhead(sb); if (err) goto failed_mount_wq; } /* * The maximum number of concurrent works can be high and * concurrency isn't really necessary. Limit it to 1. 
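 *
 * Hence max_active is passed as 1 to alloc_workqueue() below;
 * WQ_MEM_RECLAIM keeps a rescuer thread around so the conversion work
 * can still make progress under memory pressure, and WQ_UNBOUND is used
 * because the work is not tied to any particular CPU.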
*/ EXT4_SB(sb)->rsv_conversion_wq = alloc_workqueue("ext4-rsv-conversion", WQ_MEM_RECLAIM | WQ_UNBOUND, 1); if (!EXT4_SB(sb)->rsv_conversion_wq) { printk(KERN_ERR "EXT4-fs: failed to create workqueue\n"); ret = -ENOMEM; goto failed_mount4; } /* * The jbd2_journal_load will have done any necessary log recovery, * so we can safely mount the rest of the filesystem now. */ root = ext4_iget(sb, EXT4_ROOT_INO); if (IS_ERR(root)) { ext4_msg(sb, KERN_ERR, "get root inode failed"); ret = PTR_ERR(root); root = NULL; goto failed_mount4; } if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) { ext4_msg(sb, KERN_ERR, "corrupt root inode, run e2fsck"); iput(root); goto failed_mount4; } sb->s_root = d_make_root(root); if (!sb->s_root) { ext4_msg(sb, KERN_ERR, "get root dentry failed"); ret = -ENOMEM; goto failed_mount4; } if (ext4_setup_super(sb, es, sb->s_flags & MS_RDONLY)) sb->s_flags |= MS_RDONLY; /* determine the minimum size of new large inodes, if present */ if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE) { sbi->s_want_extra_isize = sizeof(struct ext4_inode) - EXT4_GOOD_OLD_INODE_SIZE; if (ext4_has_feature_extra_isize(sb)) { if (sbi->s_want_extra_isize < le16_to_cpu(es->s_want_extra_isize)) sbi->s_want_extra_isize = le16_to_cpu(es->s_want_extra_isize); if (sbi->s_want_extra_isize < le16_to_cpu(es->s_min_extra_isize)) sbi->s_want_extra_isize = le16_to_cpu(es->s_min_extra_isize); } } /* Check if enough inode space is available */ if (EXT4_GOOD_OLD_INODE_SIZE + sbi->s_want_extra_isize > sbi->s_inode_size) { sbi->s_want_extra_isize = sizeof(struct ext4_inode) - EXT4_GOOD_OLD_INODE_SIZE; ext4_msg(sb, KERN_INFO, "required extra inode space not" "available"); } ext4_set_resv_clusters(sb); err = ext4_setup_system_zone(sb); if (err) { ext4_msg(sb, KERN_ERR, "failed to initialize system " "zone (%d)", err); goto failed_mount4a; } ext4_ext_init(sb); err = ext4_mb_init(sb); if (err) { ext4_msg(sb, KERN_ERR, "failed to initialize mballoc (%d)", err); goto failed_mount5; } block = ext4_count_free_clusters(sb); ext4_free_blocks_count_set(sbi->s_es, EXT4_C2B(sbi, block)); err = percpu_counter_init(&sbi->s_freeclusters_counter, block, GFP_KERNEL); if (!err) { unsigned long freei = ext4_count_free_inodes(sb); sbi->s_es->s_free_inodes_count = cpu_to_le32(freei); err = percpu_counter_init(&sbi->s_freeinodes_counter, freei, GFP_KERNEL); } if (!err) err = percpu_counter_init(&sbi->s_dirs_counter, ext4_count_dirs(sb), GFP_KERNEL); if (!err) err = percpu_counter_init(&sbi->s_dirtyclusters_counter, 0, GFP_KERNEL); if (err) { ext4_msg(sb, KERN_ERR, "insufficient memory"); goto failed_mount6; } if (ext4_has_feature_flex_bg(sb)) if (!ext4_fill_flex_info(sb)) { ext4_msg(sb, KERN_ERR, "unable to initialize " "flex_bg meta info!"); goto failed_mount6; } err = ext4_register_li_request(sb, first_not_zeroed); if (err) goto failed_mount6; err = ext4_register_sysfs(sb); if (err) goto failed_mount7; #ifdef CONFIG_QUOTA /* Enable quota usage during mount. 
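 * (only when the journalled quota feature is present and the filesystem
 * is not mounted read-only).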
*/ if (ext4_has_feature_quota(sb) && !(sb->s_flags & MS_RDONLY)) { err = ext4_enable_quotas(sb); if (err) goto failed_mount8; } #endif /* CONFIG_QUOTA */ EXT4_SB(sb)->s_mount_state |= EXT4_ORPHAN_FS; ext4_orphan_cleanup(sb, es); EXT4_SB(sb)->s_mount_state &= ~EXT4_ORPHAN_FS; if (needs_recovery) { ext4_msg(sb, KERN_INFO, "recovery complete"); ext4_mark_recovery_complete(sb, es); } if (EXT4_SB(sb)->s_journal) { if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) descr = " journalled data mode"; else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA) descr = " ordered data mode"; else descr = " writeback data mode"; } else descr = "out journal"; if (test_opt(sb, DISCARD)) { struct request_queue *q = bdev_get_queue(sb->s_bdev); if (!blk_queue_discard(q)) ext4_msg(sb, KERN_WARNING, "mounting with \"discard\" option, but " "the device does not support discard"); } if (___ratelimit(&ext4_mount_msg_ratelimit, "EXT4-fs mount")) ext4_msg(sb, KERN_INFO, "mounted filesystem with%s. " "Opts: %s%s%s", descr, sbi->s_es->s_mount_opts, *sbi->s_es->s_mount_opts ? "; " : "", orig_data); if (es->s_error_count) mod_timer(&sbi->s_err_report, jiffies + 300*HZ); /* 5 minutes */ /* Enable message ratelimiting. Default is 10 messages per 5 secs. */ ratelimit_state_init(&sbi->s_err_ratelimit_state, 5 * HZ, 10); ratelimit_state_init(&sbi->s_warning_ratelimit_state, 5 * HZ, 10); ratelimit_state_init(&sbi->s_msg_ratelimit_state, 5 * HZ, 10); kfree(orig_data); return 0; cantfind_ext4: if (!silent) ext4_msg(sb, KERN_ERR, "VFS: Can't find ext4 filesystem"); goto failed_mount; #ifdef CONFIG_QUOTA failed_mount8: ext4_unregister_sysfs(sb); #endif failed_mount7: ext4_unregister_li_request(sb); failed_mount6: ext4_mb_release(sb); if (sbi->s_flex_groups) kvfree(sbi->s_flex_groups); percpu_counter_destroy(&sbi->s_freeclusters_counter); percpu_counter_destroy(&sbi->s_freeinodes_counter); percpu_counter_destroy(&sbi->s_dirs_counter); percpu_counter_destroy(&sbi->s_dirtyclusters_counter); failed_mount5: ext4_ext_release(sb); ext4_release_system_zone(sb); failed_mount4a: dput(sb->s_root); sb->s_root = NULL; failed_mount4: ext4_msg(sb, KERN_ERR, "mount failed"); if (EXT4_SB(sb)->rsv_conversion_wq) destroy_workqueue(EXT4_SB(sb)->rsv_conversion_wq); failed_mount_wq: if (sbi->s_journal) { jbd2_journal_destroy(sbi->s_journal); sbi->s_journal = NULL; } failed_mount3a: ext4_es_unregister_shrinker(sbi); failed_mount3: del_timer_sync(&sbi->s_err_report); if (sbi->s_mmp_tsk) kthread_stop(sbi->s_mmp_tsk); failed_mount2: for (i = 0; i < db_count; i++) brelse(sbi->s_group_desc[i]); kvfree(sbi->s_group_desc); failed_mount: if (sbi->s_chksum_driver) crypto_free_shash(sbi->s_chksum_driver); #ifdef CONFIG_QUOTA for (i = 0; i < EXT4_MAXQUOTAS; i++) kfree(sbi->s_qf_names[i]); #endif ext4_blkdev_remove(sbi); brelse(bh); out_fail: sb->s_fs_info = NULL; kfree(sbi->s_blockgroup_lock); kfree(sbi); out_free_orig: kfree(orig_data); return err ? err : ret; } /* * Setup any per-fs journal parameters now. We'll do this both on * initial mount, once the journal has been initialised but before we've * done any recovery; and again on any subsequent remount. 
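 * The parameters copied from the superblock info are the commit interval,
 * the min/max batch times, and the JBD2_BARRIER and
 * JBD2_ABORT_ON_SYNCDATA_ERR flags.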
*/ static void ext4_init_journal_params(struct super_block *sb, journal_t *journal) { struct ext4_sb_info *sbi = EXT4_SB(sb); journal->j_commit_interval = sbi->s_commit_interval; journal->j_min_batch_time = sbi->s_min_batch_time; journal->j_max_batch_time = sbi->s_max_batch_time; write_lock(&journal->j_state_lock); if (test_opt(sb, BARRIER)) journal->j_flags |= JBD2_BARRIER; else journal->j_flags &= ~JBD2_BARRIER; if (test_opt(sb, DATA_ERR_ABORT)) journal->j_flags |= JBD2_ABORT_ON_SYNCDATA_ERR; else journal->j_flags &= ~JBD2_ABORT_ON_SYNCDATA_ERR; write_unlock(&journal->j_state_lock); } static journal_t *ext4_get_journal(struct super_block *sb, unsigned int journal_inum) { struct inode *journal_inode; journal_t *journal; BUG_ON(!ext4_has_feature_journal(sb)); /* First, test for the existence of a valid inode on disk. Bad * things happen if we iget() an unused inode, as the subsequent * iput() will try to delete it. */ journal_inode = ext4_iget(sb, journal_inum); if (IS_ERR(journal_inode)) { ext4_msg(sb, KERN_ERR, "no journal found"); return NULL; } if (!journal_inode->i_nlink) { make_bad_inode(journal_inode); iput(journal_inode); ext4_msg(sb, KERN_ERR, "journal inode is deleted"); return NULL; } jbd_debug(2, "Journal inode found at %p: %lld bytes\n", journal_inode, journal_inode->i_size); if (!S_ISREG(journal_inode->i_mode)) { ext4_msg(sb, KERN_ERR, "invalid journal inode"); iput(journal_inode); return NULL; } journal = jbd2_journal_init_inode(journal_inode); if (!journal) { ext4_msg(sb, KERN_ERR, "Could not load journal inode"); iput(journal_inode); return NULL; } journal->j_private = sb; ext4_init_journal_params(sb, journal); return journal; } static journal_t *ext4_get_dev_journal(struct super_block *sb, dev_t j_dev) { struct buffer_head *bh; journal_t *journal; ext4_fsblk_t start; ext4_fsblk_t len; int hblock, blocksize; ext4_fsblk_t sb_block; unsigned long offset; struct ext4_super_block *es; struct block_device *bdev; BUG_ON(!ext4_has_feature_journal(sb)); bdev = ext4_blkdev_get(j_dev, sb); if (bdev == NULL) return NULL; blocksize = sb->s_blocksize; hblock = bdev_logical_block_size(bdev); if (blocksize < hblock) { ext4_msg(sb, KERN_ERR, "blocksize too small for journal device"); goto out_bdev; } sb_block = EXT4_MIN_BLOCK_SIZE / blocksize; offset = EXT4_MIN_BLOCK_SIZE % blocksize; set_blocksize(bdev, blocksize); if (!(bh = __bread(bdev, sb_block, blocksize))) { ext4_msg(sb, KERN_ERR, "couldn't read superblock of " "external journal"); goto out_bdev; } es = (struct ext4_super_block *) (bh->b_data + offset); if ((le16_to_cpu(es->s_magic) != EXT4_SUPER_MAGIC) || !(le32_to_cpu(es->s_feature_incompat) & EXT4_FEATURE_INCOMPAT_JOURNAL_DEV)) { ext4_msg(sb, KERN_ERR, "external journal has " "bad superblock"); brelse(bh); goto out_bdev; } if ((le32_to_cpu(es->s_feature_ro_compat) & EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) && es->s_checksum != ext4_superblock_csum(sb, es)) { ext4_msg(sb, KERN_ERR, "external journal has " "corrupt superblock"); brelse(bh); goto out_bdev; } if (memcmp(EXT4_SB(sb)->s_es->s_journal_uuid, es->s_uuid, 16)) { ext4_msg(sb, KERN_ERR, "journal UUID does not match"); brelse(bh); goto out_bdev; } len = ext4_blocks_count(es); start = sb_block + 1; brelse(bh); /* we're done with the superblock */ journal = jbd2_journal_init_dev(bdev, sb->s_bdev, start, len, blocksize); if (!journal) { ext4_msg(sb, KERN_ERR, "failed to create device journal"); goto out_bdev; } journal->j_private = sb; ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &journal->j_sb_buffer); 
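	/* Wait for the external journal superblock read issued above, then sanity-check it below. */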
wait_on_buffer(journal->j_sb_buffer); if (!buffer_uptodate(journal->j_sb_buffer)) { ext4_msg(sb, KERN_ERR, "I/O error on journal device"); goto out_journal; } if (be32_to_cpu(journal->j_superblock->s_nr_users) != 1) { ext4_msg(sb, KERN_ERR, "External journal has more than one " "user (unsupported) - %d", be32_to_cpu(journal->j_superblock->s_nr_users)); goto out_journal; } EXT4_SB(sb)->journal_bdev = bdev; ext4_init_journal_params(sb, journal); return journal; out_journal: jbd2_journal_destroy(journal); out_bdev: ext4_blkdev_put(bdev); return NULL; } static int ext4_load_journal(struct super_block *sb, struct ext4_super_block *es, unsigned long journal_devnum) { journal_t *journal; unsigned int journal_inum = le32_to_cpu(es->s_journal_inum); dev_t journal_dev; int err = 0; int really_read_only; BUG_ON(!ext4_has_feature_journal(sb)); if (journal_devnum && journal_devnum != le32_to_cpu(es->s_journal_dev)) { ext4_msg(sb, KERN_INFO, "external journal device major/minor " "numbers have changed"); journal_dev = new_decode_dev(journal_devnum); } else journal_dev = new_decode_dev(le32_to_cpu(es->s_journal_dev)); really_read_only = bdev_read_only(sb->s_bdev); /* * Are we loading a blank journal or performing recovery after a * crash? For recovery, we need to check in advance whether we * can get read-write access to the device. */ if (ext4_has_feature_journal_needs_recovery(sb)) { if (sb->s_flags & MS_RDONLY) { ext4_msg(sb, KERN_INFO, "INFO: recovery " "required on readonly filesystem"); if (really_read_only) { ext4_msg(sb, KERN_ERR, "write access " "unavailable, cannot proceed"); return -EROFS; } ext4_msg(sb, KERN_INFO, "write access will " "be enabled during recovery"); } } if (journal_inum && journal_dev) { ext4_msg(sb, KERN_ERR, "filesystem has both journal " "and inode journals!"); return -EINVAL; } if (journal_inum) { if (!(journal = ext4_get_journal(sb, journal_inum))) return -EINVAL; } else { if (!(journal = ext4_get_dev_journal(sb, journal_dev))) return -EINVAL; } if (!(journal->j_flags & JBD2_BARRIER)) ext4_msg(sb, KERN_INFO, "barriers disabled"); if (!ext4_has_feature_journal_needs_recovery(sb)) err = jbd2_journal_wipe(journal, !really_read_only); if (!err) { char *save = kmalloc(EXT4_S_ERR_LEN, GFP_KERNEL); if (save) memcpy(save, ((char *) es) + EXT4_S_ERR_START, EXT4_S_ERR_LEN); err = jbd2_journal_load(journal); if (save) memcpy(((char *) es) + EXT4_S_ERR_START, save, EXT4_S_ERR_LEN); kfree(save); } if (err) { ext4_msg(sb, KERN_ERR, "error loading journal"); jbd2_journal_destroy(journal); return err; } EXT4_SB(sb)->s_journal = journal; ext4_clear_journal_err(sb, es); if (!really_read_only && journal_devnum && journal_devnum != le32_to_cpu(es->s_journal_dev)) { es->s_journal_dev = cpu_to_le32(journal_devnum); /* Make sure we flush the recovery flag to disk. */ ext4_commit_super(sb, 1); } return 0; } static int ext4_commit_super(struct super_block *sb, int sync) { struct ext4_super_block *es = EXT4_SB(sb)->s_es; struct buffer_head *sbh = EXT4_SB(sb)->s_sbh; int error = 0; if (!sbh || block_device_ejected(sb)) return error; if (buffer_write_io_error(sbh)) { /* * Oh, dear. A previous attempt to write the * superblock failed. This could happen because the * USB device was yanked out. Or it could happen to * be a transient write error and maybe the block will * be remapped. Nothing we can do but to retry the * write and hope for the best. 
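		 * (The retry is what the code below does: clear the write-error
		 * bit, mark the buffer uptodate and dirty it again before writing.)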
*/ ext4_msg(sb, KERN_ERR, "previous I/O error to " "superblock detected"); clear_buffer_write_io_error(sbh); set_buffer_uptodate(sbh); } /* * If the file system is mounted read-only, don't update the * superblock write time. This avoids updating the superblock * write time when we are mounting the root file system * read/only but we need to replay the journal; at that point, * for people who are east of GMT and who make their clock * tick in localtime for Windows bug-for-bug compatibility, * the clock is set in the future, and this will cause e2fsck * to complain and force a full file system check. */ if (!(sb->s_flags & MS_RDONLY)) es->s_wtime = cpu_to_le32(get_seconds()); if (sb->s_bdev->bd_part) es->s_kbytes_written = cpu_to_le64(EXT4_SB(sb)->s_kbytes_written + ((part_stat_read(sb->s_bdev->bd_part, sectors[1]) - EXT4_SB(sb)->s_sectors_written_start) >> 1)); else es->s_kbytes_written = cpu_to_le64(EXT4_SB(sb)->s_kbytes_written); if (percpu_counter_initialized(&EXT4_SB(sb)->s_freeclusters_counter)) ext4_free_blocks_count_set(es, EXT4_C2B(EXT4_SB(sb), percpu_counter_sum_positive( &EXT4_SB(sb)->s_freeclusters_counter))); if (percpu_counter_initialized(&EXT4_SB(sb)->s_freeinodes_counter)) es->s_free_inodes_count = cpu_to_le32(percpu_counter_sum_positive( &EXT4_SB(sb)->s_freeinodes_counter)); BUFFER_TRACE(sbh, "marking dirty"); ext4_superblock_csum_set(sb); mark_buffer_dirty(sbh); if (sync) { error = __sync_dirty_buffer(sbh, test_opt(sb, BARRIER) ? WRITE_FUA : WRITE_SYNC); if (error) return error; error = buffer_write_io_error(sbh); if (error) { ext4_msg(sb, KERN_ERR, "I/O error while writing " "superblock"); clear_buffer_write_io_error(sbh); set_buffer_uptodate(sbh); } } return error; } /* * Have we just finished recovery? If so, and if we are mounting (or * remounting) the filesystem readonly, then we will end up with a * consistent fs on disk. Record that fact. */ static void ext4_mark_recovery_complete(struct super_block *sb, struct ext4_super_block *es) { journal_t *journal = EXT4_SB(sb)->s_journal; if (!ext4_has_feature_journal(sb)) { BUG_ON(journal != NULL); return; } jbd2_journal_lock_updates(journal); if (jbd2_journal_flush(journal) < 0) goto out; if (ext4_has_feature_journal_needs_recovery(sb) && sb->s_flags & MS_RDONLY) { ext4_clear_feature_journal_needs_recovery(sb); ext4_commit_super(sb, 1); } out: jbd2_journal_unlock_updates(journal); } /* * If we are mounting (or read-write remounting) a filesystem whose journal * has recorded an error from a previous lifetime, move that error to the * main filesystem now. */ static void ext4_clear_journal_err(struct super_block *sb, struct ext4_super_block *es) { journal_t *journal; int j_errno; const char *errstr; BUG_ON(!ext4_has_feature_journal(sb)); journal = EXT4_SB(sb)->s_journal; /* * Now check for any error status which may have been recorded in the * journal by a prior ext4_error() or ext4_abort() */ j_errno = jbd2_journal_errno(journal); if (j_errno) { char nbuf[16]; errstr = ext4_decode_error(sb, j_errno, nbuf); ext4_warning(sb, "Filesystem error recorded " "from previous mount: %s", errstr); ext4_warning(sb, "Marking fs in need of filesystem check."); EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS; es->s_state |= cpu_to_le16(EXT4_ERROR_FS); ext4_commit_super(sb, 1); jbd2_journal_clear_err(journal); jbd2_journal_update_sb_errno(journal); } } /* * Force the running and committing transactions to commit, * and wait on the commit. 
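 * This is a no-op on read-only mounts.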
*/ int ext4_force_commit(struct super_block *sb) { journal_t *journal; if (sb->s_flags & MS_RDONLY) return 0; journal = EXT4_SB(sb)->s_journal; return ext4_journal_force_commit(journal); } static int ext4_sync_fs(struct super_block *sb, int wait) { int ret = 0; tid_t target; bool needs_barrier = false; struct ext4_sb_info *sbi = EXT4_SB(sb); trace_ext4_sync_fs(sb, wait); flush_workqueue(sbi->rsv_conversion_wq); /* * Writeback quota in non-journalled quota case - journalled quota has * no dirty dquots */ dquot_writeback_dquots(sb, -1); /* * Data writeback is possible w/o journal transaction, so barrier must * being sent at the end of the function. But we can skip it if * transaction_commit will do it for us. */ if (sbi->s_journal) { target = jbd2_get_latest_transaction(sbi->s_journal); if (wait && sbi->s_journal->j_flags & JBD2_BARRIER && !jbd2_trans_will_send_data_barrier(sbi->s_journal, target)) needs_barrier = true; if (jbd2_journal_start_commit(sbi->s_journal, &target)) { if (wait) ret = jbd2_log_wait_commit(sbi->s_journal, target); } } else if (wait && test_opt(sb, BARRIER)) needs_barrier = true; if (needs_barrier) { int err; err = blkdev_issue_flush(sb->s_bdev, GFP_KERNEL, NULL); if (!ret) ret = err; } return ret; } /* * LVM calls this function before a (read-only) snapshot is created. This * gives us a chance to flush the journal completely and mark the fs clean. * * Note that only this function cannot bring a filesystem to be in a clean * state independently. It relies on upper layer to stop all data & metadata * modifications. */ static int ext4_freeze(struct super_block *sb) { int error = 0; journal_t *journal; if (sb->s_flags & MS_RDONLY) return 0; journal = EXT4_SB(sb)->s_journal; if (journal) { /* Now we set up the journal barrier. */ jbd2_journal_lock_updates(journal); /* * Don't clear the needs_recovery flag if we failed to * flush the journal. */ error = jbd2_journal_flush(journal); if (error < 0) goto out; /* Journal blocked and flushed, clear needs_recovery flag. */ ext4_clear_feature_journal_needs_recovery(sb); } error = ext4_commit_super(sb, 1); out: if (journal) /* we rely on upper layer to stop further updates */ jbd2_journal_unlock_updates(journal); return error; } /* * Called by LVM after the snapshot is done. We need to reset the RECOVER * flag here, even though the filesystem is not technically dirty yet. */ static int ext4_unfreeze(struct super_block *sb) { if (sb->s_flags & MS_RDONLY) return 0; if (EXT4_SB(sb)->s_journal) { /* Reset the needs_recovery flag before the fs is unlocked. 
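		 * (ext4_freeze() cleared it after flushing the journal.)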
*/ ext4_set_feature_journal_needs_recovery(sb); } ext4_commit_super(sb, 1); return 0; } /* * Structure to save mount options for ext4_remount's benefit */ struct ext4_mount_options { unsigned long s_mount_opt; unsigned long s_mount_opt2; kuid_t s_resuid; kgid_t s_resgid; unsigned long s_commit_interval; u32 s_min_batch_time, s_max_batch_time; #ifdef CONFIG_QUOTA int s_jquota_fmt; char *s_qf_names[EXT4_MAXQUOTAS]; #endif }; static int ext4_remount(struct super_block *sb, int *flags, char *data) { struct ext4_super_block *es; struct ext4_sb_info *sbi = EXT4_SB(sb); unsigned long old_sb_flags; struct ext4_mount_options old_opts; int enable_quota = 0; ext4_group_t g; unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO; int err = 0; #ifdef CONFIG_QUOTA int i, j; #endif char *orig_data = kstrdup(data, GFP_KERNEL); /* Store the original options */ old_sb_flags = sb->s_flags; old_opts.s_mount_opt = sbi->s_mount_opt; old_opts.s_mount_opt2 = sbi->s_mount_opt2; old_opts.s_resuid = sbi->s_resuid; old_opts.s_resgid = sbi->s_resgid; old_opts.s_commit_interval = sbi->s_commit_interval; old_opts.s_min_batch_time = sbi->s_min_batch_time; old_opts.s_max_batch_time = sbi->s_max_batch_time; #ifdef CONFIG_QUOTA old_opts.s_jquota_fmt = sbi->s_jquota_fmt; for (i = 0; i < EXT4_MAXQUOTAS; i++) if (sbi->s_qf_names[i]) { old_opts.s_qf_names[i] = kstrdup(sbi->s_qf_names[i], GFP_KERNEL); if (!old_opts.s_qf_names[i]) { for (j = 0; j < i; j++) kfree(old_opts.s_qf_names[j]); kfree(orig_data); return -ENOMEM; } } else old_opts.s_qf_names[i] = NULL; #endif if (sbi->s_journal && sbi->s_journal->j_task->io_context) journal_ioprio = sbi->s_journal->j_task->io_context->ioprio; if (!parse_options(data, sb, NULL, &journal_ioprio, 1)) { err = -EINVAL; goto restore_opts; } if ((old_opts.s_mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) ^ test_opt(sb, JOURNAL_CHECKSUM)) { ext4_msg(sb, KERN_ERR, "changing journal_checksum " "during remount not supported; ignoring"); sbi->s_mount_opt ^= EXT4_MOUNT_JOURNAL_CHECKSUM; } if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) { if (test_opt2(sb, EXPLICIT_DELALLOC)) { ext4_msg(sb, KERN_ERR, "can't mount with " "both data=journal and delalloc"); err = -EINVAL; goto restore_opts; } if (test_opt(sb, DIOREAD_NOLOCK)) { ext4_msg(sb, KERN_ERR, "can't mount with " "both data=journal and dioread_nolock"); err = -EINVAL; goto restore_opts; } if (test_opt(sb, DAX)) { ext4_msg(sb, KERN_ERR, "can't mount with " "both data=journal and dax"); err = -EINVAL; goto restore_opts; } } if ((sbi->s_mount_opt ^ old_opts.s_mount_opt) & EXT4_MOUNT_DAX) { ext4_msg(sb, KERN_WARNING, "warning: refusing change of " "dax flag with busy inodes while remounting"); sbi->s_mount_opt ^= EXT4_MOUNT_DAX; } if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED) ext4_abort(sb, "Abort forced by user"); sb->s_flags = (sb->s_flags & ~MS_POSIXACL) | (test_opt(sb, POSIX_ACL) ? 
MS_POSIXACL : 0); es = sbi->s_es; if (sbi->s_journal) { ext4_init_journal_params(sb, sbi->s_journal); set_task_ioprio(sbi->s_journal->j_task, journal_ioprio); } if (*flags & MS_LAZYTIME) sb->s_flags |= MS_LAZYTIME; if ((*flags & MS_RDONLY) != (sb->s_flags & MS_RDONLY)) { if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED) { err = -EROFS; goto restore_opts; } if (*flags & MS_RDONLY) { err = sync_filesystem(sb); if (err < 0) goto restore_opts; err = dquot_suspend(sb, -1); if (err < 0) goto restore_opts; /* * First of all, the unconditional stuff we have to do * to disable replay of the journal when we next remount */ sb->s_flags |= MS_RDONLY; /* * OK, test if we are remounting a valid rw partition * readonly, and if so set the rdonly flag and then * mark the partition as valid again. */ if (!(es->s_state & cpu_to_le16(EXT4_VALID_FS)) && (sbi->s_mount_state & EXT4_VALID_FS)) es->s_state = cpu_to_le16(sbi->s_mount_state); if (sbi->s_journal) ext4_mark_recovery_complete(sb, es); } else { /* Make sure we can mount this feature set readwrite */ if (ext4_has_feature_readonly(sb) || !ext4_feature_set_ok(sb, 0)) { err = -EROFS; goto restore_opts; } /* * Make sure the group descriptor checksums * are sane. If they aren't, refuse to remount r/w. */ for (g = 0; g < sbi->s_groups_count; g++) { struct ext4_group_desc *gdp = ext4_get_group_desc(sb, g, NULL); if (!ext4_group_desc_csum_verify(sb, g, gdp)) { ext4_msg(sb, KERN_ERR, "ext4_remount: Checksum for group %u failed (%u!=%u)", g, le16_to_cpu(ext4_group_desc_csum(sb, g, gdp)), le16_to_cpu(gdp->bg_checksum)); err = -EFSBADCRC; goto restore_opts; } } /* * If we have an unprocessed orphan list hanging * around from a previously readonly bdev mount, * require a full umount/remount for now. */ if (es->s_last_orphan) { ext4_msg(sb, KERN_WARNING, "Couldn't " "remount RDWR because of unprocessed " "orphan inode list. Please " "umount/remount instead"); err = -EINVAL; goto restore_opts; } /* * Mounting a RDONLY partition read-write, so reread * and store the current valid flag. (It may have * been changed by e2fsck since we originally mounted * the partition.) */ if (sbi->s_journal) ext4_clear_journal_err(sb, es); sbi->s_mount_state = le16_to_cpu(es->s_state); if (!ext4_setup_super(sb, es, 0)) sb->s_flags &= ~MS_RDONLY; if (ext4_has_feature_mmp(sb)) if (ext4_multi_mount_protect(sb, le64_to_cpu(es->s_mmp_block))) { err = -EROFS; goto restore_opts; } enable_quota = 1; } } /* * Reinitialize lazy itable initialization thread based on * current settings */ if ((sb->s_flags & MS_RDONLY) || !test_opt(sb, INIT_INODE_TABLE)) ext4_unregister_li_request(sb); else { ext4_group_t first_not_zeroed; first_not_zeroed = ext4_has_uninit_itable(sb); ext4_register_li_request(sb, first_not_zeroed); } ext4_setup_system_zone(sb); if (sbi->s_journal == NULL && !(old_sb_flags & MS_RDONLY)) ext4_commit_super(sb, 1); #ifdef CONFIG_QUOTA /* Release old quota file names */ for (i = 0; i < EXT4_MAXQUOTAS; i++) kfree(old_opts.s_qf_names[i]); if (enable_quota) { if (sb_any_quota_suspended(sb)) dquot_resume(sb, -1); else if (ext4_has_feature_quota(sb)) { err = ext4_enable_quotas(sb); if (err) goto restore_opts; } } #endif *flags = (*flags & ~MS_LAZYTIME) | (sb->s_flags & MS_LAZYTIME); ext4_msg(sb, KERN_INFO, "re-mounted. 
Opts: %s", orig_data); kfree(orig_data); return 0; restore_opts: sb->s_flags = old_sb_flags; sbi->s_mount_opt = old_opts.s_mount_opt; sbi->s_mount_opt2 = old_opts.s_mount_opt2; sbi->s_resuid = old_opts.s_resuid; sbi->s_resgid = old_opts.s_resgid; sbi->s_commit_interval = old_opts.s_commit_interval; sbi->s_min_batch_time = old_opts.s_min_batch_time; sbi->s_max_batch_time = old_opts.s_max_batch_time; #ifdef CONFIG_QUOTA sbi->s_jquota_fmt = old_opts.s_jquota_fmt; for (i = 0; i < EXT4_MAXQUOTAS; i++) { kfree(sbi->s_qf_names[i]); sbi->s_qf_names[i] = old_opts.s_qf_names[i]; } #endif kfree(orig_data); return err; } static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf) { struct super_block *sb = dentry->d_sb; struct ext4_sb_info *sbi = EXT4_SB(sb); struct ext4_super_block *es = sbi->s_es; ext4_fsblk_t overhead = 0, resv_blocks; u64 fsid; s64 bfree; resv_blocks = EXT4_C2B(sbi, atomic64_read(&sbi->s_resv_clusters)); if (!test_opt(sb, MINIX_DF)) overhead = sbi->s_overhead; buf->f_type = EXT4_SUPER_MAGIC; buf->f_bsize = sb->s_blocksize; buf->f_blocks = ext4_blocks_count(es) - EXT4_C2B(sbi, overhead); bfree = percpu_counter_sum_positive(&sbi->s_freeclusters_counter) - percpu_counter_sum_positive(&sbi->s_dirtyclusters_counter); /* prevent underflow in case that few free space is available */ buf->f_bfree = EXT4_C2B(sbi, max_t(s64, bfree, 0)); buf->f_bavail = buf->f_bfree - (ext4_r_blocks_count(es) + resv_blocks); if (buf->f_bfree < (ext4_r_blocks_count(es) + resv_blocks)) buf->f_bavail = 0; buf->f_files = le32_to_cpu(es->s_inodes_count); buf->f_ffree = percpu_counter_sum_positive(&sbi->s_freeinodes_counter); buf->f_namelen = EXT4_NAME_LEN; fsid = le64_to_cpup((void *)es->s_uuid) ^ le64_to_cpup((void *)es->s_uuid + sizeof(u64)); buf->f_fsid.val[0] = fsid & 0xFFFFFFFFUL; buf->f_fsid.val[1] = (fsid >> 32) & 0xFFFFFFFFUL; return 0; } /* Helper function for writing quotas on sync - we need to start transaction * before quota file is locked for write. 
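 * (i.e. the journal handle must always be taken before dqio_mutex).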
Otherwise the are possible deadlocks: * Process 1 Process 2 * ext4_create() quota_sync() * jbd2_journal_start() write_dquot() * dquot_initialize() down(dqio_mutex) * down(dqio_mutex) jbd2_journal_start() * */ #ifdef CONFIG_QUOTA static inline struct inode *dquot_to_inode(struct dquot *dquot) { return sb_dqopt(dquot->dq_sb)->files[dquot->dq_id.type]; } static int ext4_write_dquot(struct dquot *dquot) { int ret, err; handle_t *handle; struct inode *inode; inode = dquot_to_inode(dquot); handle = ext4_journal_start(inode, EXT4_HT_QUOTA, EXT4_QUOTA_TRANS_BLOCKS(dquot->dq_sb)); if (IS_ERR(handle)) return PTR_ERR(handle); ret = dquot_commit(dquot); err = ext4_journal_stop(handle); if (!ret) ret = err; return ret; } static int ext4_acquire_dquot(struct dquot *dquot) { int ret, err; handle_t *handle; handle = ext4_journal_start(dquot_to_inode(dquot), EXT4_HT_QUOTA, EXT4_QUOTA_INIT_BLOCKS(dquot->dq_sb)); if (IS_ERR(handle)) return PTR_ERR(handle); ret = dquot_acquire(dquot); err = ext4_journal_stop(handle); if (!ret) ret = err; return ret; } static int ext4_release_dquot(struct dquot *dquot) { int ret, err; handle_t *handle; handle = ext4_journal_start(dquot_to_inode(dquot), EXT4_HT_QUOTA, EXT4_QUOTA_DEL_BLOCKS(dquot->dq_sb)); if (IS_ERR(handle)) { /* Release dquot anyway to avoid endless cycle in dqput() */ dquot_release(dquot); return PTR_ERR(handle); } ret = dquot_release(dquot); err = ext4_journal_stop(handle); if (!ret) ret = err; return ret; } static int ext4_mark_dquot_dirty(struct dquot *dquot) { struct super_block *sb = dquot->dq_sb; struct ext4_sb_info *sbi = EXT4_SB(sb); /* Are we journaling quotas? */ if (ext4_has_feature_quota(sb) || sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) { dquot_mark_dquot_dirty(dquot); return ext4_write_dquot(dquot); } else { return dquot_mark_dquot_dirty(dquot); } } static int ext4_write_info(struct super_block *sb, int type) { int ret, err; handle_t *handle; /* Data block + inode block */ handle = ext4_journal_start(d_inode(sb->s_root), EXT4_HT_QUOTA, 2); if (IS_ERR(handle)) return PTR_ERR(handle); ret = dquot_commit_info(sb, type); err = ext4_journal_stop(handle); if (!ret) ret = err; return ret; } /* * Turn on quotas during mount time - we need to find * the quota file and such... */ static int ext4_quota_on_mount(struct super_block *sb, int type) { return dquot_quota_on_mount(sb, EXT4_SB(sb)->s_qf_names[type], EXT4_SB(sb)->s_jquota_fmt, type); } /* * Standard function to be called on quota_on */ static int ext4_quota_on(struct super_block *sb, int type, int format_id, struct path *path) { int err; if (!test_opt(sb, QUOTA)) return -EINVAL; /* Quotafile not on the same filesystem? */ if (path->dentry->d_sb != sb) return -EXDEV; /* Journaling quota? */ if (EXT4_SB(sb)->s_qf_names[type]) { /* Quotafile not in fs root? */ if (path->dentry->d_parent != sb->s_root) ext4_msg(sb, KERN_WARNING, "Quota file not on filesystem root. " "Journaled quota will not work"); } /* * When we journal data on quota file, we have to flush journal to see * all updates to the file when we bypass pagecache... */ if (EXT4_SB(sb)->s_journal && ext4_should_journal_data(d_inode(path->dentry))) { /* * We don't need to lock updates but journal_flush() could * otherwise be livelocked... 
*/ jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal); err = jbd2_journal_flush(EXT4_SB(sb)->s_journal); jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal); if (err) return err; } return dquot_quota_on(sb, type, format_id, path); } static int ext4_quota_enable(struct super_block *sb, int type, int format_id, unsigned int flags) { int err; struct inode *qf_inode; unsigned long qf_inums[EXT4_MAXQUOTAS] = { le32_to_cpu(EXT4_SB(sb)->s_es->s_usr_quota_inum), le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum) }; BUG_ON(!ext4_has_feature_quota(sb)); if (!qf_inums[type]) return -EPERM; qf_inode = ext4_iget(sb, qf_inums[type]); if (IS_ERR(qf_inode)) { ext4_error(sb, "Bad quota inode # %lu", qf_inums[type]); return PTR_ERR(qf_inode); } /* Don't account quota for quota files to avoid recursion */ qf_inode->i_flags |= S_NOQUOTA; err = dquot_enable(qf_inode, type, format_id, flags); iput(qf_inode); return err; } /* Enable usage tracking for all quota types. */ static int ext4_enable_quotas(struct super_block *sb) { int type, err = 0; unsigned long qf_inums[EXT4_MAXQUOTAS] = { le32_to_cpu(EXT4_SB(sb)->s_es->s_usr_quota_inum), le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum) }; sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE; for (type = 0; type < EXT4_MAXQUOTAS; type++) { if (qf_inums[type]) { err = ext4_quota_enable(sb, type, QFMT_VFS_V1, DQUOT_USAGE_ENABLED); if (err) { ext4_warning(sb, "Failed to enable quota tracking " "(type=%d, err=%d). Please run " "e2fsck to fix.", type, err); return err; } } } return 0; } static int ext4_quota_off(struct super_block *sb, int type) { struct inode *inode = sb_dqopt(sb)->files[type]; handle_t *handle; /* Force all delayed allocation blocks to be allocated. * Caller already holds s_umount sem */ if (test_opt(sb, DELALLOC)) sync_filesystem(sb); if (!inode) goto out; /* Update modification times of quota files when userspace can * start looking at them */ handle = ext4_journal_start(inode, EXT4_HT_QUOTA, 1); if (IS_ERR(handle)) goto out; inode->i_mtime = inode->i_ctime = CURRENT_TIME; ext4_mark_inode_dirty(handle, inode); ext4_journal_stop(handle); out: return dquot_quota_off(sb, type); } /* Read data from quotafile - avoid pagecache and such because we cannot afford * acquiring the locks... As quota files are never truncated and quota code * itself serializes the operations (and no one else should touch the files) * we don't have to be afraid of races */ static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data, size_t len, loff_t off) { struct inode *inode = sb_dqopt(sb)->files[type]; ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb); int offset = off & (sb->s_blocksize - 1); int tocopy; size_t toread; struct buffer_head *bh; loff_t i_size = i_size_read(inode); if (off > i_size) return 0; if (off+len > i_size) len = i_size-off; toread = len; while (toread > 0) { tocopy = sb->s_blocksize - offset < toread ? sb->s_blocksize - offset : toread; bh = ext4_bread(NULL, inode, blk, 0); if (IS_ERR(bh)) return PTR_ERR(bh); if (!bh) /* A hole? 
*/ memset(data, 0, tocopy); else memcpy(data, bh->b_data+offset, tocopy); brelse(bh); offset = 0; toread -= tocopy; data += tocopy; blk++; } return len; } /* Write to quotafile (we know the transaction is already started and has * enough credits) */ static ssize_t ext4_quota_write(struct super_block *sb, int type, const char *data, size_t len, loff_t off) { struct inode *inode = sb_dqopt(sb)->files[type]; ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb); int err, offset = off & (sb->s_blocksize - 1); int retries = 0; struct buffer_head *bh; handle_t *handle = journal_current_handle(); if (EXT4_SB(sb)->s_journal && !handle) { ext4_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)" " cancelled because transaction is not started", (unsigned long long)off, (unsigned long long)len); return -EIO; } /* * Since we account only one data block in transaction credits, * then it is impossible to cross a block boundary. */ if (sb->s_blocksize - offset < len) { ext4_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)" " cancelled because not block aligned", (unsigned long long)off, (unsigned long long)len); return -EIO; } do { bh = ext4_bread(handle, inode, blk, EXT4_GET_BLOCKS_CREATE | EXT4_GET_BLOCKS_METADATA_NOFAIL); } while (IS_ERR(bh) && (PTR_ERR(bh) == -ENOSPC) && ext4_should_retry_alloc(inode->i_sb, &retries)); if (IS_ERR(bh)) return PTR_ERR(bh); if (!bh) goto out; BUFFER_TRACE(bh, "get write access"); err = ext4_journal_get_write_access(handle, bh); if (err) { brelse(bh); return err; } lock_buffer(bh); memcpy(bh->b_data+offset, data, len); flush_dcache_page(bh->b_page); unlock_buffer(bh); err = ext4_handle_dirty_metadata(handle, NULL, bh); brelse(bh); out: if (inode->i_size < off + len) { i_size_write(inode, off + len); EXT4_I(inode)->i_disksize = inode->i_size; ext4_mark_inode_dirty(handle, inode); } return len; } #endif static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { return mount_bdev(fs_type, flags, dev_name, data, ext4_fill_super); } #if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT2) static inline void register_as_ext2(void) { int err = register_filesystem(&ext2_fs_type); if (err) printk(KERN_WARNING "EXT4-fs: Unable to register as ext2 (%d)\n", err); } static inline void unregister_as_ext2(void) { unregister_filesystem(&ext2_fs_type); } static inline int ext2_feature_set_ok(struct super_block *sb) { if (ext4_has_unknown_ext2_incompat_features(sb)) return 0; if (sb->s_flags & MS_RDONLY) return 1; if (ext4_has_unknown_ext2_ro_compat_features(sb)) return 0; return 1; } #else static inline void register_as_ext2(void) { } static inline void unregister_as_ext2(void) { } static inline int ext2_feature_set_ok(struct super_block *sb) { return 0; } #endif static inline void register_as_ext3(void) { int err = register_filesystem(&ext3_fs_type); if (err) printk(KERN_WARNING "EXT4-fs: Unable to register as ext3 (%d)\n", err); } static inline void unregister_as_ext3(void) { unregister_filesystem(&ext3_fs_type); } static inline int ext3_feature_set_ok(struct super_block *sb) { if (ext4_has_unknown_ext3_incompat_features(sb)) return 0; if (!ext4_has_feature_journal(sb)) return 0; if (sb->s_flags & MS_RDONLY) return 1; if (ext4_has_unknown_ext3_ro_compat_features(sb)) return 0; return 1; } static struct file_system_type ext4_fs_type = { .owner = THIS_MODULE, .name = "ext4", .mount = ext4_mount, .kill_sb = kill_block_super, .fs_flags = FS_REQUIRES_DEV, }; MODULE_ALIAS_FS("ext4"); /* 
Shared across all ext4 file systems */ wait_queue_head_t ext4__ioend_wq[EXT4_WQ_HASH_SZ]; struct mutex ext4__aio_mutex[EXT4_WQ_HASH_SZ]; static int __init ext4_init_fs(void) { int i, err; ratelimit_state_init(&ext4_mount_msg_ratelimit, 30 * HZ, 64); ext4_li_info = NULL; mutex_init(&ext4_li_mtx); /* Build-time check for flags consistency */ ext4_check_flag_values(); for (i = 0; i < EXT4_WQ_HASH_SZ; i++) { mutex_init(&ext4__aio_mutex[i]); init_waitqueue_head(&ext4__ioend_wq[i]); } err = ext4_init_es(); if (err) return err; err = ext4_init_pageio(); if (err) goto out5; err = ext4_init_system_zone(); if (err) goto out4; err = ext4_init_sysfs(); if (err) goto out3; err = ext4_init_mballoc(); if (err) goto out2; else ext4_mballoc_ready = 1; err = init_inodecache(); if (err) goto out1; register_as_ext3(); register_as_ext2(); err = register_filesystem(&ext4_fs_type); if (err) goto out; return 0; out: unregister_as_ext2(); unregister_as_ext3(); destroy_inodecache(); out1: ext4_mballoc_ready = 0; ext4_exit_mballoc(); out2: ext4_exit_sysfs(); out3: ext4_exit_system_zone(); out4: ext4_exit_pageio(); out5: ext4_exit_es(); return err; } static void __exit ext4_exit_fs(void) { ext4_exit_crypto(); ext4_destroy_lazyinit_thread(); unregister_as_ext2(); unregister_as_ext3(); unregister_filesystem(&ext4_fs_type); destroy_inodecache(); ext4_exit_mballoc(); ext4_exit_sysfs(); ext4_exit_system_zone(); ext4_exit_pageio(); ext4_exit_es(); } MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others"); MODULE_DESCRIPTION("Fourth Extended Filesystem"); MODULE_LICENSE("GPL"); module_init(ext4_init_fs) module_exit(ext4_exit_fs)
/* ./CrossVul/dataset_final_sorted/CWE-362/c/good_1819_4: end of sample */
/* crossvul-cpp_data_bad_3159_0: start of next sample */
/* * Performance events core code: * * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de> * Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com> * * For licensing details see kernel-base/COPYING */ #include <linux/fs.h> #include <linux/mm.h> #include <linux/cpu.h> #include <linux/smp.h> #include <linux/idr.h> #include <linux/file.h> #include <linux/poll.h> #include <linux/slab.h> #include <linux/hash.h> #include <linux/tick.h> #include <linux/sysfs.h> #include <linux/dcache.h> #include <linux/percpu.h> #include <linux/ptrace.h> #include <linux/reboot.h> #include <linux/vmstat.h> #include <linux/device.h> #include <linux/export.h> #include <linux/vmalloc.h> #include <linux/hardirq.h> #include <linux/rculist.h> #include <linux/uaccess.h> #include <linux/syscalls.h> #include <linux/anon_inodes.h> #include <linux/kernel_stat.h> #include <linux/cgroup.h> #include <linux/perf_event.h> #include <linux/trace_events.h> #include <linux/hw_breakpoint.h> #include <linux/mm_types.h> #include <linux/module.h> #include <linux/mman.h> #include <linux/compat.h> #include <linux/bpf.h> #include <linux/filter.h> #include <linux/namei.h> #include <linux/parser.h> #include "internal.h" #include <asm/irq_regs.h> typedef int (*remote_function_f)(void *); struct remote_function_call { struct task_struct *p; remote_function_f func; void *info; int ret; }; static void remote_function(void *data) { struct remote_function_call *tfc = data; struct task_struct *p = tfc->p; if (p) { /* -EAGAIN */ if (task_cpu(p) != smp_processor_id()) return; /* * Now that we're on right CPU with IRQs disabled, we can test * if we hit the right task without races. */ tfc->ret = -ESRCH; /* No such (running) process */ if (p != current) return; } tfc->ret = tfc->func(tfc->info); } /** * task_function_call - call a function on the cpu on which a task runs * @p: the task to evaluate * @func: the function to be called * @info: the function call argument * * Calls the function @func when the task is currently running. This might * be on the current CPU, which just calls the function directly * * returns: @func return value, or * -ESRCH - when the process isn't running * -EAGAIN - when the process moved away */ static int task_function_call(struct task_struct *p, remote_function_f func, void *info) { struct remote_function_call data = { .p = p, .func = func, .info = info, .ret = -EAGAIN, }; int ret; do { ret = smp_call_function_single(task_cpu(p), remote_function, &data, 1); if (!ret) ret = data.ret; } while (ret == -EAGAIN); return ret; } /** * cpu_function_call - call a function on the cpu * @func: the function to be called * @info: the function call argument * * Calls the function @func on the remote cpu. 
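 * The call is synchronous: @func runs in IPI context on the target cpu
 * before this function returns.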
* * returns: @func return value or -ENXIO when the cpu is offline */ static int cpu_function_call(int cpu, remote_function_f func, void *info) { struct remote_function_call data = { .p = NULL, .func = func, .info = info, .ret = -ENXIO, /* No such CPU */ }; smp_call_function_single(cpu, remote_function, &data, 1); return data.ret; } static inline struct perf_cpu_context * __get_cpu_context(struct perf_event_context *ctx) { return this_cpu_ptr(ctx->pmu->pmu_cpu_context); } static void perf_ctx_lock(struct perf_cpu_context *cpuctx, struct perf_event_context *ctx) { raw_spin_lock(&cpuctx->ctx.lock); if (ctx) raw_spin_lock(&ctx->lock); } static void perf_ctx_unlock(struct perf_cpu_context *cpuctx, struct perf_event_context *ctx) { if (ctx) raw_spin_unlock(&ctx->lock); raw_spin_unlock(&cpuctx->ctx.lock); } #define TASK_TOMBSTONE ((void *)-1L) static bool is_kernel_event(struct perf_event *event) { return READ_ONCE(event->owner) == TASK_TOMBSTONE; } /* * On task ctx scheduling... * * When !ctx->nr_events a task context will not be scheduled. This means * we can disable the scheduler hooks (for performance) without leaving * pending task ctx state. * * This however results in two special cases: * * - removing the last event from a task ctx; this is relatively straight * forward and is done in __perf_remove_from_context. * * - adding the first event to a task ctx; this is tricky because we cannot * rely on ctx->is_active and therefore cannot use event_function_call(). * See perf_install_in_context(). * * If ctx->nr_events, then ctx->is_active and cpuctx->task_ctx are set. */ typedef void (*event_f)(struct perf_event *, struct perf_cpu_context *, struct perf_event_context *, void *); struct event_function_struct { struct perf_event *event; event_f func; void *data; }; static int event_function(void *info) { struct event_function_struct *efs = info; struct perf_event *event = efs->event; struct perf_event_context *ctx = event->ctx; struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); struct perf_event_context *task_ctx = cpuctx->task_ctx; int ret = 0; WARN_ON_ONCE(!irqs_disabled()); perf_ctx_lock(cpuctx, task_ctx); /* * Since we do the IPI call without holding ctx->lock things can have * changed, double check we hit the task we set out to hit. */ if (ctx->task) { if (ctx->task != current) { ret = -ESRCH; goto unlock; } /* * We only use event_function_call() on established contexts, * and event_function() is only ever called when active (or * rather, we'll have bailed in task_function_call() or the * above ctx->task != current test), therefore we must have * ctx->is_active here. */ WARN_ON_ONCE(!ctx->is_active); /* * And since we have ctx->is_active, cpuctx->task_ctx must * match. */ WARN_ON_ONCE(task_ctx != ctx); } else { WARN_ON_ONCE(&cpuctx->ctx != ctx); } efs->func(event, cpuctx, ctx, efs->data); unlock: perf_ctx_unlock(cpuctx, task_ctx); return ret; } static void event_function_call(struct perf_event *event, event_f func, void *data) { struct perf_event_context *ctx = event->ctx; struct task_struct *task = READ_ONCE(ctx->task); /* verified in event_function */ struct event_function_struct efs = { .event = event, .func = func, .data = data, }; if (!event->parent) { /* * If this is a !child event, we must hold ctx::mutex to * stabilize the the event->ctx relation. See * perf_event_ctx_lock(). 
*/ lockdep_assert_held(&ctx->mutex); } if (!task) { cpu_function_call(event->cpu, event_function, &efs); return; } if (task == TASK_TOMBSTONE) return; again: if (!task_function_call(task, event_function, &efs)) return; raw_spin_lock_irq(&ctx->lock); /* * Reload the task pointer, it might have been changed by * a concurrent perf_event_context_sched_out(). */ task = ctx->task; if (task == TASK_TOMBSTONE) { raw_spin_unlock_irq(&ctx->lock); return; } if (ctx->is_active) { raw_spin_unlock_irq(&ctx->lock); goto again; } func(event, NULL, ctx, data); raw_spin_unlock_irq(&ctx->lock); } /* * Similar to event_function_call() + event_function(), but hard assumes IRQs * are already disabled and we're on the right CPU. */ static void event_function_local(struct perf_event *event, event_f func, void *data) { struct perf_event_context *ctx = event->ctx; struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); struct task_struct *task = READ_ONCE(ctx->task); struct perf_event_context *task_ctx = NULL; WARN_ON_ONCE(!irqs_disabled()); if (task) { if (task == TASK_TOMBSTONE) return; task_ctx = ctx; } perf_ctx_lock(cpuctx, task_ctx); task = ctx->task; if (task == TASK_TOMBSTONE) goto unlock; if (task) { /* * We must be either inactive or active and the right task, * otherwise we're screwed, since we cannot IPI to somewhere * else. */ if (ctx->is_active) { if (WARN_ON_ONCE(task != current)) goto unlock; if (WARN_ON_ONCE(cpuctx->task_ctx != ctx)) goto unlock; } } else { WARN_ON_ONCE(&cpuctx->ctx != ctx); } func(event, cpuctx, ctx, data); unlock: perf_ctx_unlock(cpuctx, task_ctx); } #define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\ PERF_FLAG_FD_OUTPUT |\ PERF_FLAG_PID_CGROUP |\ PERF_FLAG_FD_CLOEXEC) /* * branch priv levels that need permission checks */ #define PERF_SAMPLE_BRANCH_PERM_PLM \ (PERF_SAMPLE_BRANCH_KERNEL |\ PERF_SAMPLE_BRANCH_HV) enum event_type_t { EVENT_FLEXIBLE = 0x1, EVENT_PINNED = 0x2, EVENT_TIME = 0x4, EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED, }; /* * perf_sched_events : >0 events exist * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu */ static void perf_sched_delayed(struct work_struct *work); DEFINE_STATIC_KEY_FALSE(perf_sched_events); static DECLARE_DELAYED_WORK(perf_sched_work, perf_sched_delayed); static DEFINE_MUTEX(perf_sched_mutex); static atomic_t perf_sched_count; static DEFINE_PER_CPU(atomic_t, perf_cgroup_events); static DEFINE_PER_CPU(int, perf_sched_cb_usages); static DEFINE_PER_CPU(struct pmu_event_list, pmu_sb_events); static atomic_t nr_mmap_events __read_mostly; static atomic_t nr_comm_events __read_mostly; static atomic_t nr_task_events __read_mostly; static atomic_t nr_freq_events __read_mostly; static atomic_t nr_switch_events __read_mostly; static LIST_HEAD(pmus); static DEFINE_MUTEX(pmus_lock); static struct srcu_struct pmus_srcu; /* * perf event paranoia level: * -1 - not paranoid at all * 0 - disallow raw tracepoint access for unpriv * 1 - disallow cpu events for unpriv * 2 - disallow kernel profiling for unpriv */ int sysctl_perf_event_paranoid __read_mostly = 2; /* Minimum for 512 kiB + 1 user control page */ int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */ /* * max perf event sample rate */ #define DEFAULT_MAX_SAMPLE_RATE 100000 #define DEFAULT_SAMPLE_PERIOD_NS (NSEC_PER_SEC / DEFAULT_MAX_SAMPLE_RATE) #define DEFAULT_CPU_TIME_MAX_PERCENT 25 int sysctl_perf_event_sample_rate __read_mostly = DEFAULT_MAX_SAMPLE_RATE; static int max_samples_per_tick __read_mostly = DIV_ROUND_UP(DEFAULT_MAX_SAMPLE_RATE, HZ); 
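/*
 * perf_sample_allowed_ns is the per-sample CPU time budget derived from
 * sysctl_perf_cpu_time_max_percent; perf_sample_event_took() below lowers
 * the sample rate when the running average of sample times exceeds it.
 */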
static int perf_sample_period_ns __read_mostly = DEFAULT_SAMPLE_PERIOD_NS; static int perf_sample_allowed_ns __read_mostly = DEFAULT_SAMPLE_PERIOD_NS * DEFAULT_CPU_TIME_MAX_PERCENT / 100; static void update_perf_cpu_limits(void) { u64 tmp = perf_sample_period_ns; tmp *= sysctl_perf_cpu_time_max_percent; tmp = div_u64(tmp, 100); if (!tmp) tmp = 1; WRITE_ONCE(perf_sample_allowed_ns, tmp); } static int perf_rotate_context(struct perf_cpu_context *cpuctx); int perf_proc_update_handler(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); if (ret || !write) return ret; /* * If throttling is disabled don't allow the write: */ if (sysctl_perf_cpu_time_max_percent == 100 || sysctl_perf_cpu_time_max_percent == 0) return -EINVAL; max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ); perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate; update_perf_cpu_limits(); return 0; } int sysctl_perf_cpu_time_max_percent __read_mostly = DEFAULT_CPU_TIME_MAX_PERCENT; int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { int ret = proc_dointvec(table, write, buffer, lenp, ppos); if (ret || !write) return ret; if (sysctl_perf_cpu_time_max_percent == 100 || sysctl_perf_cpu_time_max_percent == 0) { printk(KERN_WARNING "perf: Dynamic interrupt throttling disabled, can hang your system!\n"); WRITE_ONCE(perf_sample_allowed_ns, 0); } else { update_perf_cpu_limits(); } return 0; } /* * perf samples are done in some very critical code paths (NMIs). * If they take too much CPU time, the system can lock up and not * get any real work done. This will drop the sample rate when * we detect that events are taking too long. */ #define NR_ACCUMULATED_SAMPLES 128 static DEFINE_PER_CPU(u64, running_sample_length); static u64 __report_avg; static u64 __report_allowed; static void perf_duration_warn(struct irq_work *w) { printk_ratelimited(KERN_INFO "perf: interrupt took too long (%lld > %lld), lowering " "kernel.perf_event_max_sample_rate to %d\n", __report_avg, __report_allowed, sysctl_perf_event_sample_rate); } static DEFINE_IRQ_WORK(perf_duration_work, perf_duration_warn); void perf_sample_event_took(u64 sample_len_ns) { u64 max_len = READ_ONCE(perf_sample_allowed_ns); u64 running_len; u64 avg_len; u32 max; if (max_len == 0) return; /* Decay the counter by 1 average sample. */ running_len = __this_cpu_read(running_sample_length); running_len -= running_len/NR_ACCUMULATED_SAMPLES; running_len += sample_len_ns; __this_cpu_write(running_sample_length, running_len); /* * Note: this will be biased artifically low until we have * seen NR_ACCUMULATED_SAMPLES. Doing it this way keeps us * from having to maintain a count. */ avg_len = running_len/NR_ACCUMULATED_SAMPLES; if (avg_len <= max_len) return; __report_avg = avg_len; __report_allowed = max_len; /* * Compute a throttle threshold 25% below the current duration. 
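	 * (inflate the measured average by 25%, then see how many such
	 * samples fit into this CPU's per-tick time budget).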
*/ avg_len += avg_len / 4; max = (TICK_NSEC / 100) * sysctl_perf_cpu_time_max_percent; if (avg_len < max) max /= (u32)avg_len; else max = 1; WRITE_ONCE(perf_sample_allowed_ns, avg_len); WRITE_ONCE(max_samples_per_tick, max); sysctl_perf_event_sample_rate = max * HZ; perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate; if (!irq_work_queue(&perf_duration_work)) { early_printk("perf: interrupt took too long (%lld > %lld), lowering " "kernel.perf_event_max_sample_rate to %d\n", __report_avg, __report_allowed, sysctl_perf_event_sample_rate); } } static atomic64_t perf_event_id; static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx, enum event_type_t event_type); static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx, enum event_type_t event_type, struct task_struct *task); static void update_context_time(struct perf_event_context *ctx); static u64 perf_event_time(struct perf_event *event); void __weak perf_event_print_debug(void) { } extern __weak const char *perf_pmu_name(void) { return "pmu"; } static inline u64 perf_clock(void) { return local_clock(); } static inline u64 perf_event_clock(struct perf_event *event) { return event->clock(); } #ifdef CONFIG_CGROUP_PERF static inline bool perf_cgroup_match(struct perf_event *event) { struct perf_event_context *ctx = event->ctx; struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); /* @event doesn't care about cgroup */ if (!event->cgrp) return true; /* wants specific cgroup scope but @cpuctx isn't associated with any */ if (!cpuctx->cgrp) return false; /* * Cgroup scoping is recursive. An event enabled for a cgroup is * also enabled for all its descendant cgroups. If @cpuctx's * cgroup is a descendant of @event's (the test covers identity * case), it's a match. */ return cgroup_is_descendant(cpuctx->cgrp->css.cgroup, event->cgrp->css.cgroup); } static inline void perf_detach_cgroup(struct perf_event *event) { css_put(&event->cgrp->css); event->cgrp = NULL; } static inline int is_cgroup_event(struct perf_event *event) { return event->cgrp != NULL; } static inline u64 perf_cgroup_event_time(struct perf_event *event) { struct perf_cgroup_info *t; t = per_cpu_ptr(event->cgrp->info, event->cpu); return t->time; } static inline void __update_cgrp_time(struct perf_cgroup *cgrp) { struct perf_cgroup_info *info; u64 now; now = perf_clock(); info = this_cpu_ptr(cgrp->info); info->time += now - info->timestamp; info->timestamp = now; } static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx) { struct perf_cgroup *cgrp_out = cpuctx->cgrp; if (cgrp_out) __update_cgrp_time(cgrp_out); } static inline void update_cgrp_time_from_event(struct perf_event *event) { struct perf_cgroup *cgrp; /* * ensure we access cgroup data only when needed and * when we know the cgroup is pinned (css_get) */ if (!is_cgroup_event(event)) return; cgrp = perf_cgroup_from_task(current, event->ctx); /* * Do not update time when cgroup is not active */ if (cgrp == event->cgrp) __update_cgrp_time(event->cgrp); } static inline void perf_cgroup_set_timestamp(struct task_struct *task, struct perf_event_context *ctx) { struct perf_cgroup *cgrp; struct perf_cgroup_info *info; /* * ctx->lock held by caller * ensure we do not access cgroup data * unless we have the cgroup pinned (css_get) */ if (!task || !ctx->nr_cgroups) return; cgrp = perf_cgroup_from_task(task, ctx); info = this_cpu_ptr(cgrp->info); info->timestamp = ctx->timestamp; } #define PERF_CGROUP_SWOUT 0x1 /* cgroup switch out every event */ #define PERF_CGROUP_SWIN 0x2 /* 
cgroup switch in events based on task */ /* * reschedule events based on the cgroup constraint of task. * * mode SWOUT : schedule out everything * mode SWIN : schedule in based on cgroup for next */ static void perf_cgroup_switch(struct task_struct *task, int mode) { struct perf_cpu_context *cpuctx; struct pmu *pmu; unsigned long flags; /* * disable interrupts to avoid geting nr_cgroup * changes via __perf_event_disable(). Also * avoids preemption. */ local_irq_save(flags); /* * we reschedule only in the presence of cgroup * constrained events. */ list_for_each_entry_rcu(pmu, &pmus, entry) { cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); if (cpuctx->unique_pmu != pmu) continue; /* ensure we process each cpuctx once */ /* * perf_cgroup_events says at least one * context on this CPU has cgroup events. * * ctx->nr_cgroups reports the number of cgroup * events for a context. */ if (cpuctx->ctx.nr_cgroups > 0) { perf_ctx_lock(cpuctx, cpuctx->task_ctx); perf_pmu_disable(cpuctx->ctx.pmu); if (mode & PERF_CGROUP_SWOUT) { cpu_ctx_sched_out(cpuctx, EVENT_ALL); /* * must not be done before ctxswout due * to event_filter_match() in event_sched_out() */ cpuctx->cgrp = NULL; } if (mode & PERF_CGROUP_SWIN) { WARN_ON_ONCE(cpuctx->cgrp); /* * set cgrp before ctxsw in to allow * event_filter_match() to not have to pass * task around * we pass the cpuctx->ctx to perf_cgroup_from_task() * because cgorup events are only per-cpu */ cpuctx->cgrp = perf_cgroup_from_task(task, &cpuctx->ctx); cpu_ctx_sched_in(cpuctx, EVENT_ALL, task); } perf_pmu_enable(cpuctx->ctx.pmu); perf_ctx_unlock(cpuctx, cpuctx->task_ctx); } } local_irq_restore(flags); } static inline void perf_cgroup_sched_out(struct task_struct *task, struct task_struct *next) { struct perf_cgroup *cgrp1; struct perf_cgroup *cgrp2 = NULL; rcu_read_lock(); /* * we come here when we know perf_cgroup_events > 0 * we do not need to pass the ctx here because we know * we are holding the rcu lock */ cgrp1 = perf_cgroup_from_task(task, NULL); cgrp2 = perf_cgroup_from_task(next, NULL); /* * only schedule out current cgroup events if we know * that we are switching to a different cgroup. Otherwise, * do no touch the cgroup events. */ if (cgrp1 != cgrp2) perf_cgroup_switch(task, PERF_CGROUP_SWOUT); rcu_read_unlock(); } static inline void perf_cgroup_sched_in(struct task_struct *prev, struct task_struct *task) { struct perf_cgroup *cgrp1; struct perf_cgroup *cgrp2 = NULL; rcu_read_lock(); /* * we come here when we know perf_cgroup_events > 0 * we do not need to pass the ctx here because we know * we are holding the rcu lock */ cgrp1 = perf_cgroup_from_task(task, NULL); cgrp2 = perf_cgroup_from_task(prev, NULL); /* * only need to schedule in cgroup events if we are changing * cgroup during ctxsw. Cgroup events were not scheduled * out of ctxsw out if that was not the case. 
*/ if (cgrp1 != cgrp2) perf_cgroup_switch(task, PERF_CGROUP_SWIN); rcu_read_unlock(); } static inline int perf_cgroup_connect(int fd, struct perf_event *event, struct perf_event_attr *attr, struct perf_event *group_leader) { struct perf_cgroup *cgrp; struct cgroup_subsys_state *css; struct fd f = fdget(fd); int ret = 0; if (!f.file) return -EBADF; css = css_tryget_online_from_dir(f.file->f_path.dentry, &perf_event_cgrp_subsys); if (IS_ERR(css)) { ret = PTR_ERR(css); goto out; } cgrp = container_of(css, struct perf_cgroup, css); event->cgrp = cgrp; /* * all events in a group must monitor * the same cgroup because a task belongs * to only one perf cgroup at a time */ if (group_leader && group_leader->cgrp != cgrp) { perf_detach_cgroup(event); ret = -EINVAL; } out: fdput(f); return ret; } static inline void perf_cgroup_set_shadow_time(struct perf_event *event, u64 now) { struct perf_cgroup_info *t; t = per_cpu_ptr(event->cgrp->info, event->cpu); event->shadow_ctx_time = now - t->timestamp; } static inline void perf_cgroup_defer_enabled(struct perf_event *event) { /* * when the current task's perf cgroup does not match * the event's, we need to remember to call the * perf_mark_enable() function the first time a task with * a matching perf cgroup is scheduled in. */ if (is_cgroup_event(event) && !perf_cgroup_match(event)) event->cgrp_defer_enabled = 1; } static inline void perf_cgroup_mark_enabled(struct perf_event *event, struct perf_event_context *ctx) { struct perf_event *sub; u64 tstamp = perf_event_time(event); if (!event->cgrp_defer_enabled) return; event->cgrp_defer_enabled = 0; event->tstamp_enabled = tstamp - event->total_time_enabled; list_for_each_entry(sub, &event->sibling_list, group_entry) { if (sub->state >= PERF_EVENT_STATE_INACTIVE) { sub->tstamp_enabled = tstamp - sub->total_time_enabled; sub->cgrp_defer_enabled = 0; } } } /* * Update cpuctx->cgrp so that it is set when first cgroup event is added and * cleared when last cgroup event is removed. */ static inline void list_update_cgroup_event(struct perf_event *event, struct perf_event_context *ctx, bool add) { struct perf_cpu_context *cpuctx; if (!is_cgroup_event(event)) return; if (add && ctx->nr_cgroups++) return; else if (!add && --ctx->nr_cgroups) return; /* * Because cgroup events are always per-cpu events, * this will always be called from the right CPU. */ cpuctx = __get_cpu_context(ctx); /* * cpuctx->cgrp is NULL until a cgroup event is sched in or * ctx->nr_cgroup == 0 . 
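 *
 * Note that only the 0 <-> 1 transitions of ctx->nr_cgroups get this
 * far; intermediate adds and removes return early above, so
 * cpuctx->cgrp is only touched for the first cgroup event added and
 * the last one removed.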
*/ if (add && perf_cgroup_from_task(current, ctx) == event->cgrp) cpuctx->cgrp = event->cgrp; else if (!add) cpuctx->cgrp = NULL; } #else /* !CONFIG_CGROUP_PERF */ static inline bool perf_cgroup_match(struct perf_event *event) { return true; } static inline void perf_detach_cgroup(struct perf_event *event) {} static inline int is_cgroup_event(struct perf_event *event) { return 0; } static inline u64 perf_cgroup_event_cgrp_time(struct perf_event *event) { return 0; } static inline void update_cgrp_time_from_event(struct perf_event *event) { } static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx) { } static inline void perf_cgroup_sched_out(struct task_struct *task, struct task_struct *next) { } static inline void perf_cgroup_sched_in(struct task_struct *prev, struct task_struct *task) { } static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event, struct perf_event_attr *attr, struct perf_event *group_leader) { return -EINVAL; } static inline void perf_cgroup_set_timestamp(struct task_struct *task, struct perf_event_context *ctx) { } void perf_cgroup_switch(struct task_struct *task, struct task_struct *next) { } static inline void perf_cgroup_set_shadow_time(struct perf_event *event, u64 now) { } static inline u64 perf_cgroup_event_time(struct perf_event *event) { return 0; } static inline void perf_cgroup_defer_enabled(struct perf_event *event) { } static inline void perf_cgroup_mark_enabled(struct perf_event *event, struct perf_event_context *ctx) { } static inline void list_update_cgroup_event(struct perf_event *event, struct perf_event_context *ctx, bool add) { } #endif /* * set default to be dependent on timer tick just * like original code */ #define PERF_CPU_HRTIMER (1000 / HZ) /* * function must be called with interrupts disbled */ static enum hrtimer_restart perf_mux_hrtimer_handler(struct hrtimer *hr) { struct perf_cpu_context *cpuctx; int rotations = 0; WARN_ON(!irqs_disabled()); cpuctx = container_of(hr, struct perf_cpu_context, hrtimer); rotations = perf_rotate_context(cpuctx); raw_spin_lock(&cpuctx->hrtimer_lock); if (rotations) hrtimer_forward_now(hr, cpuctx->hrtimer_interval); else cpuctx->hrtimer_active = 0; raw_spin_unlock(&cpuctx->hrtimer_lock); return rotations ? 
HRTIMER_RESTART : HRTIMER_NORESTART; } static void __perf_mux_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu) { struct hrtimer *timer = &cpuctx->hrtimer; struct pmu *pmu = cpuctx->ctx.pmu; u64 interval; /* no multiplexing needed for SW PMU */ if (pmu->task_ctx_nr == perf_sw_context) return; /* * check default is sane, if not set then force to * default interval (1/tick) */ interval = pmu->hrtimer_interval_ms; if (interval < 1) interval = pmu->hrtimer_interval_ms = PERF_CPU_HRTIMER; cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * interval); raw_spin_lock_init(&cpuctx->hrtimer_lock); hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED); timer->function = perf_mux_hrtimer_handler; } static int perf_mux_hrtimer_restart(struct perf_cpu_context *cpuctx) { struct hrtimer *timer = &cpuctx->hrtimer; struct pmu *pmu = cpuctx->ctx.pmu; unsigned long flags; /* not for SW PMU */ if (pmu->task_ctx_nr == perf_sw_context) return 0; raw_spin_lock_irqsave(&cpuctx->hrtimer_lock, flags); if (!cpuctx->hrtimer_active) { cpuctx->hrtimer_active = 1; hrtimer_forward_now(timer, cpuctx->hrtimer_interval); hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED); } raw_spin_unlock_irqrestore(&cpuctx->hrtimer_lock, flags); return 0; } void perf_pmu_disable(struct pmu *pmu) { int *count = this_cpu_ptr(pmu->pmu_disable_count); if (!(*count)++) pmu->pmu_disable(pmu); } void perf_pmu_enable(struct pmu *pmu) { int *count = this_cpu_ptr(pmu->pmu_disable_count); if (!--(*count)) pmu->pmu_enable(pmu); } static DEFINE_PER_CPU(struct list_head, active_ctx_list); /* * perf_event_ctx_activate(), perf_event_ctx_deactivate(), and * perf_event_task_tick() are fully serialized because they're strictly cpu * affine and perf_event_ctx{activate,deactivate} are called with IRQs * disabled, while perf_event_task_tick is called from IRQ context. */ static void perf_event_ctx_activate(struct perf_event_context *ctx) { struct list_head *head = this_cpu_ptr(&active_ctx_list); WARN_ON(!irqs_disabled()); WARN_ON(!list_empty(&ctx->active_ctx_list)); list_add(&ctx->active_ctx_list, head); } static void perf_event_ctx_deactivate(struct perf_event_context *ctx) { WARN_ON(!irqs_disabled()); WARN_ON(list_empty(&ctx->active_ctx_list)); list_del_init(&ctx->active_ctx_list); } static void get_ctx(struct perf_event_context *ctx) { WARN_ON(!atomic_inc_not_zero(&ctx->refcount)); } static void free_ctx(struct rcu_head *head) { struct perf_event_context *ctx; ctx = container_of(head, struct perf_event_context, rcu_head); kfree(ctx->task_ctx_data); kfree(ctx); } static void put_ctx(struct perf_event_context *ctx) { if (atomic_dec_and_test(&ctx->refcount)) { if (ctx->parent_ctx) put_ctx(ctx->parent_ctx); if (ctx->task && ctx->task != TASK_TOMBSTONE) put_task_struct(ctx->task); call_rcu(&ctx->rcu_head, free_ctx); } } /* * Because of perf_event::ctx migration in sys_perf_event_open::move_group and * perf_pmu_migrate_context() we need some magic. * * Those places that change perf_event::ctx will hold both * perf_event_ctx::mutex of the 'old' and 'new' ctx value. * * Lock ordering is by mutex address. 
There are two other sites where * perf_event_context::mutex nests and those are: * * - perf_event_exit_task_context() [ child , 0 ] * perf_event_exit_event() * put_event() [ parent, 1 ] * * - perf_event_init_context() [ parent, 0 ] * inherit_task_group() * inherit_group() * inherit_event() * perf_event_alloc() * perf_init_event() * perf_try_init_event() [ child , 1 ] * * While it appears there is an obvious deadlock here -- the parent and child * nesting levels are inverted between the two. This is in fact safe because * life-time rules separate them. That is an exiting task cannot fork, and a * spawning task cannot (yet) exit. * * But remember that that these are parent<->child context relations, and * migration does not affect children, therefore these two orderings should not * interact. * * The change in perf_event::ctx does not affect children (as claimed above) * because the sys_perf_event_open() case will install a new event and break * the ctx parent<->child relation, and perf_pmu_migrate_context() is only * concerned with cpuctx and that doesn't have children. * * The places that change perf_event::ctx will issue: * * perf_remove_from_context(); * synchronize_rcu(); * perf_install_in_context(); * * to affect the change. The remove_from_context() + synchronize_rcu() should * quiesce the event, after which we can install it in the new location. This * means that only external vectors (perf_fops, prctl) can perturb the event * while in transit. Therefore all such accessors should also acquire * perf_event_context::mutex to serialize against this. * * However; because event->ctx can change while we're waiting to acquire * ctx->mutex we must be careful and use the below perf_event_ctx_lock() * function. * * Lock order: * cred_guard_mutex * task_struct::perf_event_mutex * perf_event_context::mutex * perf_event::child_mutex; * perf_event_context::lock * perf_event::mmap_mutex * mmap_sem */ static struct perf_event_context * perf_event_ctx_lock_nested(struct perf_event *event, int nesting) { struct perf_event_context *ctx; again: rcu_read_lock(); ctx = ACCESS_ONCE(event->ctx); if (!atomic_inc_not_zero(&ctx->refcount)) { rcu_read_unlock(); goto again; } rcu_read_unlock(); mutex_lock_nested(&ctx->mutex, nesting); if (event->ctx != ctx) { mutex_unlock(&ctx->mutex); put_ctx(ctx); goto again; } return ctx; } static inline struct perf_event_context * perf_event_ctx_lock(struct perf_event *event) { return perf_event_ctx_lock_nested(event, 0); } static void perf_event_ctx_unlock(struct perf_event *event, struct perf_event_context *ctx) { mutex_unlock(&ctx->mutex); put_ctx(ctx); } /* * This must be done under the ctx->lock, such as to serialize against * context_equiv(), therefore we cannot call put_ctx() since that might end up * calling scheduler related locks and ctx->lock nests inside those. 
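 *
 * The detached parent context is handed back to the caller instead
 * (hence __must_check); the caller is expected to drop that reference
 * with put_ctx() once ctx->lock has been released.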
*/ static __must_check struct perf_event_context * unclone_ctx(struct perf_event_context *ctx) { struct perf_event_context *parent_ctx = ctx->parent_ctx; lockdep_assert_held(&ctx->lock); if (parent_ctx) ctx->parent_ctx = NULL; ctx->generation++; return parent_ctx; } static u32 perf_event_pid(struct perf_event *event, struct task_struct *p) { /* * only top level events have the pid namespace they were created in */ if (event->parent) event = event->parent; return task_tgid_nr_ns(p, event->ns); } static u32 perf_event_tid(struct perf_event *event, struct task_struct *p) { /* * only top level events have the pid namespace they were created in */ if (event->parent) event = event->parent; return task_pid_nr_ns(p, event->ns); } /* * If we inherit events we want to return the parent event id * to userspace. */ static u64 primary_event_id(struct perf_event *event) { u64 id = event->id; if (event->parent) id = event->parent->id; return id; } /* * Get the perf_event_context for a task and lock it. * * This has to cope with with the fact that until it is locked, * the context could get moved to another task. */ static struct perf_event_context * perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags) { struct perf_event_context *ctx; retry: /* * One of the few rules of preemptible RCU is that one cannot do * rcu_read_unlock() while holding a scheduler (or nested) lock when * part of the read side critical section was irqs-enabled -- see * rcu_read_unlock_special(). * * Since ctx->lock nests under rq->lock we must ensure the entire read * side critical section has interrupts disabled. */ local_irq_save(*flags); rcu_read_lock(); ctx = rcu_dereference(task->perf_event_ctxp[ctxn]); if (ctx) { /* * If this context is a clone of another, it might * get swapped for another underneath us by * perf_event_task_sched_out, though the * rcu_read_lock() protects us from any context * getting freed. Lock the context and check if it * got swapped before we could get the lock, and retry * if so. If we locked the right context, then it * can't get swapped on us any more. */ raw_spin_lock(&ctx->lock); if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) { raw_spin_unlock(&ctx->lock); rcu_read_unlock(); local_irq_restore(*flags); goto retry; } if (ctx->task == TASK_TOMBSTONE || !atomic_inc_not_zero(&ctx->refcount)) { raw_spin_unlock(&ctx->lock); ctx = NULL; } else { WARN_ON_ONCE(ctx->task != task); } } rcu_read_unlock(); if (!ctx) local_irq_restore(*flags); return ctx; } /* * Get the context for a task and increment its pin_count so it * can't get swapped to another task. This also increments its * reference count so that the context can't get freed. */ static struct perf_event_context * perf_pin_task_context(struct task_struct *task, int ctxn) { struct perf_event_context *ctx; unsigned long flags; ctx = perf_lock_task_context(task, ctxn, &flags); if (ctx) { ++ctx->pin_count; raw_spin_unlock_irqrestore(&ctx->lock, flags); } return ctx; } static void perf_unpin_context(struct perf_event_context *ctx) { unsigned long flags; raw_spin_lock_irqsave(&ctx->lock, flags); --ctx->pin_count; raw_spin_unlock_irqrestore(&ctx->lock, flags); } /* * Update the record of the current time in a context. 
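 *
 * ctx->time only advances by the delta since the last ctx->timestamp,
 * which is (re)armed when the context is scheduled in (see
 * ctx_sched_in()), so the context clock effectively stops while the
 * context is scheduled out.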
*/ static void update_context_time(struct perf_event_context *ctx) { u64 now = perf_clock(); ctx->time += now - ctx->timestamp; ctx->timestamp = now; } static u64 perf_event_time(struct perf_event *event) { struct perf_event_context *ctx = event->ctx; if (is_cgroup_event(event)) return perf_cgroup_event_time(event); return ctx ? ctx->time : 0; } /* * Update the total_time_enabled and total_time_running fields for a event. */ static void update_event_times(struct perf_event *event) { struct perf_event_context *ctx = event->ctx; u64 run_end; lockdep_assert_held(&ctx->lock); if (event->state < PERF_EVENT_STATE_INACTIVE || event->group_leader->state < PERF_EVENT_STATE_INACTIVE) return; /* * in cgroup mode, time_enabled represents * the time the event was enabled AND active * tasks were in the monitored cgroup. This is * independent of the activity of the context as * there may be a mix of cgroup and non-cgroup events. * * That is why we treat cgroup events differently * here. */ if (is_cgroup_event(event)) run_end = perf_cgroup_event_time(event); else if (ctx->is_active) run_end = ctx->time; else run_end = event->tstamp_stopped; event->total_time_enabled = run_end - event->tstamp_enabled; if (event->state == PERF_EVENT_STATE_INACTIVE) run_end = event->tstamp_stopped; else run_end = perf_event_time(event); event->total_time_running = run_end - event->tstamp_running; } /* * Update total_time_enabled and total_time_running for all events in a group. */ static void update_group_times(struct perf_event *leader) { struct perf_event *event; update_event_times(leader); list_for_each_entry(event, &leader->sibling_list, group_entry) update_event_times(event); } static struct list_head * ctx_group_list(struct perf_event *event, struct perf_event_context *ctx) { if (event->attr.pinned) return &ctx->pinned_groups; else return &ctx->flexible_groups; } /* * Add a event from the lists for its context. * Must be called with ctx->mutex and ctx->lock held. */ static void list_add_event(struct perf_event *event, struct perf_event_context *ctx) { lockdep_assert_held(&ctx->lock); WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT); event->attach_state |= PERF_ATTACH_CONTEXT; /* * If we're a stand alone event or group leader, we go to the context * list, group events are kept attached to the group so that * perf_group_detach can, at all times, locate all siblings. */ if (event->group_leader == event) { struct list_head *list; event->group_caps = event->event_caps; list = ctx_group_list(event, ctx); list_add_tail(&event->group_entry, list); } list_update_cgroup_event(event, ctx, true); list_add_rcu(&event->event_entry, &ctx->event_list); ctx->nr_events++; if (event->attr.inherit_stat) ctx->nr_stat++; ctx->generation++; } /* * Initialize event state based on the perf_event_attr::disabled. */ static inline void perf_event__state_init(struct perf_event *event) { event->state = event->attr.disabled ? 
PERF_EVENT_STATE_OFF : PERF_EVENT_STATE_INACTIVE; } static void __perf_event_read_size(struct perf_event *event, int nr_siblings) { int entry = sizeof(u64); /* value */ int size = 0; int nr = 1; if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) size += sizeof(u64); if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) size += sizeof(u64); if (event->attr.read_format & PERF_FORMAT_ID) entry += sizeof(u64); if (event->attr.read_format & PERF_FORMAT_GROUP) { nr += nr_siblings; size += sizeof(u64); } size += entry * nr; event->read_size = size; } static void __perf_event_header_size(struct perf_event *event, u64 sample_type) { struct perf_sample_data *data; u16 size = 0; if (sample_type & PERF_SAMPLE_IP) size += sizeof(data->ip); if (sample_type & PERF_SAMPLE_ADDR) size += sizeof(data->addr); if (sample_type & PERF_SAMPLE_PERIOD) size += sizeof(data->period); if (sample_type & PERF_SAMPLE_WEIGHT) size += sizeof(data->weight); if (sample_type & PERF_SAMPLE_READ) size += event->read_size; if (sample_type & PERF_SAMPLE_DATA_SRC) size += sizeof(data->data_src.val); if (sample_type & PERF_SAMPLE_TRANSACTION) size += sizeof(data->txn); event->header_size = size; } /* * Called at perf_event creation and when events are attached/detached from a * group. */ static void perf_event__header_size(struct perf_event *event) { __perf_event_read_size(event, event->group_leader->nr_siblings); __perf_event_header_size(event, event->attr.sample_type); } static void perf_event__id_header_size(struct perf_event *event) { struct perf_sample_data *data; u64 sample_type = event->attr.sample_type; u16 size = 0; if (sample_type & PERF_SAMPLE_TID) size += sizeof(data->tid_entry); if (sample_type & PERF_SAMPLE_TIME) size += sizeof(data->time); if (sample_type & PERF_SAMPLE_IDENTIFIER) size += sizeof(data->id); if (sample_type & PERF_SAMPLE_ID) size += sizeof(data->id); if (sample_type & PERF_SAMPLE_STREAM_ID) size += sizeof(data->stream_id); if (sample_type & PERF_SAMPLE_CPU) size += sizeof(data->cpu_entry); event->id_header_size = size; } static bool perf_event_validate_size(struct perf_event *event) { /* * The values computed here will be over-written when we actually * attach the event. */ __perf_event_read_size(event, event->group_leader->nr_siblings + 1); __perf_event_header_size(event, event->attr.sample_type & ~PERF_SAMPLE_READ); perf_event__id_header_size(event); /* * Sum the lot; should not exceed the 64k limit we have on records. * Conservative limit to allow for callchains and other variable fields. */ if (event->read_size + event->header_size + event->id_header_size + sizeof(struct perf_event_header) >= 16*1024) return false; return true; } static void perf_group_attach(struct perf_event *event) { struct perf_event *group_leader = event->group_leader, *pos; /* * We can have double attach due to group movement in perf_event_open. */ if (event->attach_state & PERF_ATTACH_GROUP) return; event->attach_state |= PERF_ATTACH_GROUP; if (group_leader == event) return; WARN_ON_ONCE(group_leader->ctx != event->ctx); group_leader->group_caps &= event->event_caps; list_add_tail(&event->group_entry, &group_leader->sibling_list); group_leader->nr_siblings++; perf_event__header_size(group_leader); list_for_each_entry(pos, &group_leader->sibling_list, group_entry) perf_event__header_size(pos); } /* * Remove a event from the lists for its context. * Must be called with ctx->mutex and ctx->lock held. 
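 *
 * This is the counterpart of list_add_event(); both bump
 * ctx->generation, which is what lets context_equiv() notice that a
 * cloned context has diverged from its parent.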
*/ static void list_del_event(struct perf_event *event, struct perf_event_context *ctx) { WARN_ON_ONCE(event->ctx != ctx); lockdep_assert_held(&ctx->lock); /* * We can have double detach due to exit/hot-unplug + close. */ if (!(event->attach_state & PERF_ATTACH_CONTEXT)) return; event->attach_state &= ~PERF_ATTACH_CONTEXT; list_update_cgroup_event(event, ctx, false); ctx->nr_events--; if (event->attr.inherit_stat) ctx->nr_stat--; list_del_rcu(&event->event_entry); if (event->group_leader == event) list_del_init(&event->group_entry); update_group_times(event); /* * If event was in error state, then keep it * that way, otherwise bogus counts will be * returned on read(). The only way to get out * of error state is by explicit re-enabling * of the event */ if (event->state > PERF_EVENT_STATE_OFF) event->state = PERF_EVENT_STATE_OFF; ctx->generation++; } static void perf_group_detach(struct perf_event *event) { struct perf_event *sibling, *tmp; struct list_head *list = NULL; /* * We can have double detach due to exit/hot-unplug + close. */ if (!(event->attach_state & PERF_ATTACH_GROUP)) return; event->attach_state &= ~PERF_ATTACH_GROUP; /* * If this is a sibling, remove it from its group. */ if (event->group_leader != event) { list_del_init(&event->group_entry); event->group_leader->nr_siblings--; goto out; } if (!list_empty(&event->group_entry)) list = &event->group_entry; /* * If this was a group event with sibling events then * upgrade the siblings to singleton events by adding them * to whatever list we are on. */ list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) { if (list) list_move_tail(&sibling->group_entry, list); sibling->group_leader = sibling; /* Inherit group flags from the previous leader */ sibling->group_caps = event->group_caps; WARN_ON_ONCE(sibling->ctx != event->ctx); } out: perf_event__header_size(event->group_leader); list_for_each_entry(tmp, &event->group_leader->sibling_list, group_entry) perf_event__header_size(tmp); } static bool is_orphaned_event(struct perf_event *event) { return event->state == PERF_EVENT_STATE_DEAD; } static inline int __pmu_filter_match(struct perf_event *event) { struct pmu *pmu = event->pmu; return pmu->filter_match ? pmu->filter_match(event) : 1; } /* * Check whether we should attempt to schedule an event group based on * PMU-specific filtering. 
An event group can consist of HW and SW events, * potentially with a SW leader, so we must check all the filters, to * determine whether a group is schedulable: */ static inline int pmu_filter_match(struct perf_event *event) { struct perf_event *child; if (!__pmu_filter_match(event)) return 0; list_for_each_entry(child, &event->sibling_list, group_entry) { if (!__pmu_filter_match(child)) return 0; } return 1; } static inline int event_filter_match(struct perf_event *event) { return (event->cpu == -1 || event->cpu == smp_processor_id()) && perf_cgroup_match(event) && pmu_filter_match(event); } static void event_sched_out(struct perf_event *event, struct perf_cpu_context *cpuctx, struct perf_event_context *ctx) { u64 tstamp = perf_event_time(event); u64 delta; WARN_ON_ONCE(event->ctx != ctx); lockdep_assert_held(&ctx->lock); /* * An event which could not be activated because of * filter mismatch still needs to have its timings * maintained, otherwise bogus information is return * via read() for time_enabled, time_running: */ if (event->state == PERF_EVENT_STATE_INACTIVE && !event_filter_match(event)) { delta = tstamp - event->tstamp_stopped; event->tstamp_running += delta; event->tstamp_stopped = tstamp; } if (event->state != PERF_EVENT_STATE_ACTIVE) return; perf_pmu_disable(event->pmu); event->tstamp_stopped = tstamp; event->pmu->del(event, 0); event->oncpu = -1; event->state = PERF_EVENT_STATE_INACTIVE; if (event->pending_disable) { event->pending_disable = 0; event->state = PERF_EVENT_STATE_OFF; } if (!is_software_event(event)) cpuctx->active_oncpu--; if (!--ctx->nr_active) perf_event_ctx_deactivate(ctx); if (event->attr.freq && event->attr.sample_freq) ctx->nr_freq--; if (event->attr.exclusive || !cpuctx->active_oncpu) cpuctx->exclusive = 0; perf_pmu_enable(event->pmu); } static void group_sched_out(struct perf_event *group_event, struct perf_cpu_context *cpuctx, struct perf_event_context *ctx) { struct perf_event *event; int state = group_event->state; perf_pmu_disable(ctx->pmu); event_sched_out(group_event, cpuctx, ctx); /* * Schedule out siblings (if any): */ list_for_each_entry(event, &group_event->sibling_list, group_entry) event_sched_out(event, cpuctx, ctx); perf_pmu_enable(ctx->pmu); if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive) cpuctx->exclusive = 0; } #define DETACH_GROUP 0x01UL /* * Cross CPU call to remove a performance event * * We disable the event on the hardware level first. After that we * remove it from the context list. */ static void __perf_remove_from_context(struct perf_event *event, struct perf_cpu_context *cpuctx, struct perf_event_context *ctx, void *info) { unsigned long flags = (unsigned long)info; event_sched_out(event, cpuctx, ctx); if (flags & DETACH_GROUP) perf_group_detach(event); list_del_event(event, ctx); if (!ctx->nr_events && ctx->is_active) { ctx->is_active = 0; if (ctx->task) { WARN_ON_ONCE(cpuctx->task_ctx != ctx); cpuctx->task_ctx = NULL; } } } /* * Remove the event from a task's (or a CPU's) list of events. * * If event->ctx is a cloned context, callers must make sure that * every task struct that event->ctx->task could possibly point to * remains valid. This is OK when called from perf_release since * that only calls us on the top-level context, which can't be a clone. * When called from perf_event_exit_task, it's OK because the * context has been detached from its task. 
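 *
 * The @flags argument is forwarded to __perf_remove_from_context();
 * passing DETACH_GROUP additionally unlinks the event from its group
 * leader via perf_group_detach().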
*/ static void perf_remove_from_context(struct perf_event *event, unsigned long flags) { lockdep_assert_held(&event->ctx->mutex); event_function_call(event, __perf_remove_from_context, (void *)flags); } /* * Cross CPU call to disable a performance event */ static void __perf_event_disable(struct perf_event *event, struct perf_cpu_context *cpuctx, struct perf_event_context *ctx, void *info) { if (event->state < PERF_EVENT_STATE_INACTIVE) return; update_context_time(ctx); update_cgrp_time_from_event(event); update_group_times(event); if (event == event->group_leader) group_sched_out(event, cpuctx, ctx); else event_sched_out(event, cpuctx, ctx); event->state = PERF_EVENT_STATE_OFF; } /* * Disable a event. * * If event->ctx is a cloned context, callers must make sure that * every task struct that event->ctx->task could possibly point to * remains valid. This condition is satisifed when called through * perf_event_for_each_child or perf_event_for_each because they * hold the top-level event's child_mutex, so any descendant that * goes to exit will block in perf_event_exit_event(). * * When called from perf_pending_event it's OK because event->ctx * is the current context on this CPU and preemption is disabled, * hence we can't get into perf_event_task_sched_out for this context. */ static void _perf_event_disable(struct perf_event *event) { struct perf_event_context *ctx = event->ctx; raw_spin_lock_irq(&ctx->lock); if (event->state <= PERF_EVENT_STATE_OFF) { raw_spin_unlock_irq(&ctx->lock); return; } raw_spin_unlock_irq(&ctx->lock); event_function_call(event, __perf_event_disable, NULL); } void perf_event_disable_local(struct perf_event *event) { event_function_local(event, __perf_event_disable, NULL); } /* * Strictly speaking kernel users cannot create groups and therefore this * interface does not need the perf_event_ctx_lock() magic. */ void perf_event_disable(struct perf_event *event) { struct perf_event_context *ctx; ctx = perf_event_ctx_lock(event); _perf_event_disable(event); perf_event_ctx_unlock(event, ctx); } EXPORT_SYMBOL_GPL(perf_event_disable); void perf_event_disable_inatomic(struct perf_event *event) { event->pending_disable = 1; irq_work_queue(&event->pending); } static void perf_set_shadow_time(struct perf_event *event, struct perf_event_context *ctx, u64 tstamp) { /* * use the correct time source for the time snapshot * * We could get by without this by leveraging the * fact that to get to this function, the caller * has most likely already called update_context_time() * and update_cgrp_time_xx() and thus both timestamp * are identical (or very close). Given that tstamp is, * already adjusted for cgroup, we could say that: * tstamp - ctx->timestamp * is equivalent to * tstamp - cgrp->timestamp. * * Then, in perf_output_read(), the calculation would * work with no changes because: * - event is guaranteed scheduled in * - no scheduled out in between * - thus the timestamp would be the same * * But this is a bit hairy. * * So instead, we have an explicit cgroup call to remain * within the time time source all along. We believe it * is cleaner and simpler to understand. 
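 *
 * Either way the value stored is an offset: context (or cgroup) time
 * minus the raw clock at schedule-in time. Adding the current clock
 * back later reconstructs an up-to-date context time without taking
 * ctx->lock, which is what makes the perf_output_read() calculation
 * mentioned above work.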
*/ if (is_cgroup_event(event)) perf_cgroup_set_shadow_time(event, tstamp); else event->shadow_ctx_time = tstamp - ctx->timestamp; } #define MAX_INTERRUPTS (~0ULL) static void perf_log_throttle(struct perf_event *event, int enable); static void perf_log_itrace_start(struct perf_event *event); static int event_sched_in(struct perf_event *event, struct perf_cpu_context *cpuctx, struct perf_event_context *ctx) { u64 tstamp = perf_event_time(event); int ret = 0; lockdep_assert_held(&ctx->lock); if (event->state <= PERF_EVENT_STATE_OFF) return 0; WRITE_ONCE(event->oncpu, smp_processor_id()); /* * Order event::oncpu write to happen before the ACTIVE state * is visible. */ smp_wmb(); WRITE_ONCE(event->state, PERF_EVENT_STATE_ACTIVE); /* * Unthrottle events, since we scheduled we might have missed several * ticks already, also for a heavily scheduling task there is little * guarantee it'll get a tick in a timely manner. */ if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) { perf_log_throttle(event, 1); event->hw.interrupts = 0; } /* * The new state must be visible before we turn it on in the hardware: */ smp_wmb(); perf_pmu_disable(event->pmu); perf_set_shadow_time(event, ctx, tstamp); perf_log_itrace_start(event); if (event->pmu->add(event, PERF_EF_START)) { event->state = PERF_EVENT_STATE_INACTIVE; event->oncpu = -1; ret = -EAGAIN; goto out; } event->tstamp_running += tstamp - event->tstamp_stopped; if (!is_software_event(event)) cpuctx->active_oncpu++; if (!ctx->nr_active++) perf_event_ctx_activate(ctx); if (event->attr.freq && event->attr.sample_freq) ctx->nr_freq++; if (event->attr.exclusive) cpuctx->exclusive = 1; out: perf_pmu_enable(event->pmu); return ret; } static int group_sched_in(struct perf_event *group_event, struct perf_cpu_context *cpuctx, struct perf_event_context *ctx) { struct perf_event *event, *partial_group = NULL; struct pmu *pmu = ctx->pmu; u64 now = ctx->time; bool simulate = false; if (group_event->state == PERF_EVENT_STATE_OFF) return 0; pmu->start_txn(pmu, PERF_PMU_TXN_ADD); if (event_sched_in(group_event, cpuctx, ctx)) { pmu->cancel_txn(pmu); perf_mux_hrtimer_restart(cpuctx); return -EAGAIN; } /* * Schedule in siblings as one group (if any): */ list_for_each_entry(event, &group_event->sibling_list, group_entry) { if (event_sched_in(event, cpuctx, ctx)) { partial_group = event; goto group_error; } } if (!pmu->commit_txn(pmu)) return 0; group_error: /* * Groups can be scheduled in as one unit only, so undo any * partial group before returning: * The events up to the failed event are scheduled out normally, * tstamp_stopped will be updated. * * The failed events and the remaining siblings need to have * their timings updated as if they had gone thru event_sched_in() * and event_sched_out(). This is required to get consistent timings * across the group. This also takes care of the case where the group * could never be scheduled by ensuring tstamp_stopped is set to mark * the time the event was actually stopped, such that time delta * calculation in update_event_times() is correct. */ list_for_each_entry(event, &group_event->sibling_list, group_entry) { if (event == partial_group) simulate = true; if (simulate) { event->tstamp_running += now - event->tstamp_stopped; event->tstamp_stopped = now; } else { event_sched_out(event, cpuctx, ctx); } } event_sched_out(group_event, cpuctx, ctx); pmu->cancel_txn(pmu); perf_mux_hrtimer_restart(cpuctx); return -EAGAIN; } /* * Work out whether we can put this event group on the CPU now. 
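 *
 * @can_add_hw is cleared by ctx_flexible_sched_in() as soon as one
 * hardware group fails to schedule, so the remaining flexible groups
 * on the list are not even attempted during that pass.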
*/ static int group_can_go_on(struct perf_event *event, struct perf_cpu_context *cpuctx, int can_add_hw) { /* * Groups consisting entirely of software events can always go on. */ if (event->group_caps & PERF_EV_CAP_SOFTWARE) return 1; /* * If an exclusive group is already on, no other hardware * events can go on. */ if (cpuctx->exclusive) return 0; /* * If this group is exclusive and there are already * events on the CPU, it can't go on. */ if (event->attr.exclusive && cpuctx->active_oncpu) return 0; /* * Otherwise, try to add it if all previous groups were able * to go on. */ return can_add_hw; } static void add_event_to_ctx(struct perf_event *event, struct perf_event_context *ctx) { u64 tstamp = perf_event_time(event); list_add_event(event, ctx); perf_group_attach(event); event->tstamp_enabled = tstamp; event->tstamp_running = tstamp; event->tstamp_stopped = tstamp; } static void ctx_sched_out(struct perf_event_context *ctx, struct perf_cpu_context *cpuctx, enum event_type_t event_type); static void ctx_sched_in(struct perf_event_context *ctx, struct perf_cpu_context *cpuctx, enum event_type_t event_type, struct task_struct *task); static void task_ctx_sched_out(struct perf_cpu_context *cpuctx, struct perf_event_context *ctx) { if (!cpuctx->task_ctx) return; if (WARN_ON_ONCE(ctx != cpuctx->task_ctx)) return; ctx_sched_out(ctx, cpuctx, EVENT_ALL); } static void perf_event_sched_in(struct perf_cpu_context *cpuctx, struct perf_event_context *ctx, struct task_struct *task) { cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task); if (ctx) ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task); cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task); if (ctx) ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task); } static void ctx_resched(struct perf_cpu_context *cpuctx, struct perf_event_context *task_ctx) { perf_pmu_disable(cpuctx->ctx.pmu); if (task_ctx) task_ctx_sched_out(cpuctx, task_ctx); cpu_ctx_sched_out(cpuctx, EVENT_ALL); perf_event_sched_in(cpuctx, task_ctx, current); perf_pmu_enable(cpuctx->ctx.pmu); } /* * Cross CPU call to install and enable a performance event * * Very similar to remote_function() + event_function() but cannot assume that * things like ctx->is_active and cpuctx->task_ctx are set. */ static int __perf_install_in_context(void *info) { struct perf_event *event = info; struct perf_event_context *ctx = event->ctx; struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); struct perf_event_context *task_ctx = cpuctx->task_ctx; bool reprogram = true; int ret = 0; raw_spin_lock(&cpuctx->ctx.lock); if (ctx->task) { raw_spin_lock(&ctx->lock); task_ctx = ctx; reprogram = (ctx->task == current); /* * If the task is running, it must be running on this CPU, * otherwise we cannot reprogram things. * * If its not running, we don't care, ctx->lock will * serialize against it becoming runnable. */ if (task_curr(ctx->task) && !reprogram) { ret = -ESRCH; goto unlock; } WARN_ON_ONCE(reprogram && cpuctx->task_ctx && cpuctx->task_ctx != ctx); } else if (task_ctx) { raw_spin_lock(&task_ctx->lock); } if (reprogram) { ctx_sched_out(ctx, cpuctx, EVENT_TIME); add_event_to_ctx(event, ctx); ctx_resched(cpuctx, task_ctx); } else { add_event_to_ctx(event, ctx); } unlock: perf_ctx_unlock(cpuctx, task_ctx); return ret; } /* * Attach a performance event to a context. * * Very similar to event_function_call, see comment there. 
*/ static void perf_install_in_context(struct perf_event_context *ctx, struct perf_event *event, int cpu) { struct task_struct *task = READ_ONCE(ctx->task); lockdep_assert_held(&ctx->mutex); if (event->cpu != -1) event->cpu = cpu; /* * Ensures that if we can observe event->ctx, both the event and ctx * will be 'complete'. See perf_iterate_sb_cpu(). */ smp_store_release(&event->ctx, ctx); if (!task) { cpu_function_call(cpu, __perf_install_in_context, event); return; } /* * Should not happen, we validate the ctx is still alive before calling. */ if (WARN_ON_ONCE(task == TASK_TOMBSTONE)) return; /* * Installing events is tricky because we cannot rely on ctx->is_active * to be set in case this is the nr_events 0 -> 1 transition. * * Instead we use task_curr(), which tells us if the task is running. * However, since we use task_curr() outside of rq::lock, we can race * against the actual state. This means the result can be wrong. * * If we get a false positive, we retry, this is harmless. * * If we get a false negative, things are complicated. If we are after * perf_event_context_sched_in() ctx::lock will serialize us, and the * value must be correct. If we're before, it doesn't matter since * perf_event_context_sched_in() will program the counter. * * However, this hinges on the remote context switch having observed * our task->perf_event_ctxp[] store, such that it will in fact take * ctx::lock in perf_event_context_sched_in(). * * We do this by task_function_call(), if the IPI fails to hit the task * we know any future context switch of task must see the * perf_event_ctpx[] store. */ /* * This smp_mb() orders the task->perf_event_ctxp[] store with the * task_cpu() load, such that if the IPI then does not find the task * running, a future context switch of that task must observe the * store. */ smp_mb(); again: if (!task_function_call(task, __perf_install_in_context, event)) return; raw_spin_lock_irq(&ctx->lock); task = ctx->task; if (WARN_ON_ONCE(task == TASK_TOMBSTONE)) { /* * Cannot happen because we already checked above (which also * cannot happen), and we hold ctx->mutex, which serializes us * against perf_event_exit_task_context(). */ raw_spin_unlock_irq(&ctx->lock); return; } /* * If the task is not running, ctx->lock will avoid it becoming so, * thus we can safely install the event. */ if (task_curr(task)) { raw_spin_unlock_irq(&ctx->lock); goto again; } add_event_to_ctx(event, ctx); raw_spin_unlock_irq(&ctx->lock); } /* * Put a event into inactive state and update time fields. * Enabling the leader of a group effectively enables all * the group members that aren't explicitly disabled, so we * have to update their ->tstamp_enabled also. * Note: this works for group members as well as group leaders * since the non-leader members' sibling_lists will be empty. 
*/ static void __perf_event_mark_enabled(struct perf_event *event) { struct perf_event *sub; u64 tstamp = perf_event_time(event); event->state = PERF_EVENT_STATE_INACTIVE; event->tstamp_enabled = tstamp - event->total_time_enabled; list_for_each_entry(sub, &event->sibling_list, group_entry) { if (sub->state >= PERF_EVENT_STATE_INACTIVE) sub->tstamp_enabled = tstamp - sub->total_time_enabled; } } /* * Cross CPU call to enable a performance event */ static void __perf_event_enable(struct perf_event *event, struct perf_cpu_context *cpuctx, struct perf_event_context *ctx, void *info) { struct perf_event *leader = event->group_leader; struct perf_event_context *task_ctx; if (event->state >= PERF_EVENT_STATE_INACTIVE || event->state <= PERF_EVENT_STATE_ERROR) return; if (ctx->is_active) ctx_sched_out(ctx, cpuctx, EVENT_TIME); __perf_event_mark_enabled(event); if (!ctx->is_active) return; if (!event_filter_match(event)) { if (is_cgroup_event(event)) perf_cgroup_defer_enabled(event); ctx_sched_in(ctx, cpuctx, EVENT_TIME, current); return; } /* * If the event is in a group and isn't the group leader, * then don't put it on unless the group is on. */ if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE) { ctx_sched_in(ctx, cpuctx, EVENT_TIME, current); return; } task_ctx = cpuctx->task_ctx; if (ctx->task) WARN_ON_ONCE(task_ctx != ctx); ctx_resched(cpuctx, task_ctx); } /* * Enable a event. * * If event->ctx is a cloned context, callers must make sure that * every task struct that event->ctx->task could possibly point to * remains valid. This condition is satisfied when called through * perf_event_for_each_child or perf_event_for_each as described * for perf_event_disable. */ static void _perf_event_enable(struct perf_event *event) { struct perf_event_context *ctx = event->ctx; raw_spin_lock_irq(&ctx->lock); if (event->state >= PERF_EVENT_STATE_INACTIVE || event->state < PERF_EVENT_STATE_ERROR) { raw_spin_unlock_irq(&ctx->lock); return; } /* * If the event is in error state, clear that first. * * That way, if we see the event in error state below, we know that it * has gone back into error state, as distinct from the task having * been scheduled away before the cross-call arrived. */ if (event->state == PERF_EVENT_STATE_ERROR) event->state = PERF_EVENT_STATE_OFF; raw_spin_unlock_irq(&ctx->lock); event_function_call(event, __perf_event_enable, NULL); } /* * See perf_event_disable(); */ void perf_event_enable(struct perf_event *event) { struct perf_event_context *ctx; ctx = perf_event_ctx_lock(event); _perf_event_enable(event); perf_event_ctx_unlock(event, ctx); } EXPORT_SYMBOL_GPL(perf_event_enable); struct stop_event_data { struct perf_event *event; unsigned int restart; }; static int __perf_event_stop(void *info) { struct stop_event_data *sd = info; struct perf_event *event = sd->event; /* if it's already INACTIVE, do nothing */ if (READ_ONCE(event->state) != PERF_EVENT_STATE_ACTIVE) return 0; /* matches smp_wmb() in event_sched_in() */ smp_rmb(); /* * There is a window with interrupts enabled before we get here, * so we need to check again lest we try to stop another CPU's event. */ if (READ_ONCE(event->oncpu) != smp_processor_id()) return -EAGAIN; event->pmu->stop(event, PERF_EF_UPDATE); /* * May race with the actual stop (through perf_pmu_output_stop()), * but it is only used for events with AUX ring buffer, and such * events will refuse to restart because of rb::aux_mmap_count==0, * see comments in perf_aux_output_begin(). 
* * Since this is happening on a event-local CPU, no trace is lost * while restarting. */ if (sd->restart) event->pmu->start(event, 0); return 0; } static int perf_event_stop(struct perf_event *event, int restart) { struct stop_event_data sd = { .event = event, .restart = restart, }; int ret = 0; do { if (READ_ONCE(event->state) != PERF_EVENT_STATE_ACTIVE) return 0; /* matches smp_wmb() in event_sched_in() */ smp_rmb(); /* * We only want to restart ACTIVE events, so if the event goes * inactive here (event->oncpu==-1), there's nothing more to do; * fall through with ret==-ENXIO. */ ret = cpu_function_call(READ_ONCE(event->oncpu), __perf_event_stop, &sd); } while (ret == -EAGAIN); return ret; } /* * In order to contain the amount of racy and tricky in the address filter * configuration management, it is a two part process: * * (p1) when userspace mappings change as a result of (1) or (2) or (3) below, * we update the addresses of corresponding vmas in * event::addr_filters_offs array and bump the event::addr_filters_gen; * (p2) when an event is scheduled in (pmu::add), it calls * perf_event_addr_filters_sync() which calls pmu::addr_filters_sync() * if the generation has changed since the previous call. * * If (p1) happens while the event is active, we restart it to force (p2). * * (1) perf_addr_filters_apply(): adjusting filters' offsets based on * pre-existing mappings, called once when new filters arrive via SET_FILTER * ioctl; * (2) perf_addr_filters_adjust(): adjusting filters' offsets based on newly * registered mapping, called for every new mmap(), with mm::mmap_sem down * for reading; * (3) perf_event_addr_filters_exec(): clearing filters' offsets in the process * of exec. */ void perf_event_addr_filters_sync(struct perf_event *event) { struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); if (!has_addr_filter(event)) return; raw_spin_lock(&ifh->lock); if (event->addr_filters_gen != event->hw.addr_filters_gen) { event->pmu->addr_filters_sync(event); event->hw.addr_filters_gen = event->addr_filters_gen; } raw_spin_unlock(&ifh->lock); } EXPORT_SYMBOL_GPL(perf_event_addr_filters_sync); static int _perf_event_refresh(struct perf_event *event, int refresh) { /* * not supported on inherited events */ if (event->attr.inherit || !is_sampling_event(event)) return -EINVAL; atomic_add(refresh, &event->event_limit); _perf_event_enable(event); return 0; } /* * See perf_event_disable() */ int perf_event_refresh(struct perf_event *event, int refresh) { struct perf_event_context *ctx; int ret; ctx = perf_event_ctx_lock(event); ret = _perf_event_refresh(event, refresh); perf_event_ctx_unlock(event, ctx); return ret; } EXPORT_SYMBOL_GPL(perf_event_refresh); static void ctx_sched_out(struct perf_event_context *ctx, struct perf_cpu_context *cpuctx, enum event_type_t event_type) { int is_active = ctx->is_active; struct perf_event *event; lockdep_assert_held(&ctx->lock); if (likely(!ctx->nr_events)) { /* * See __perf_remove_from_context(). */ WARN_ON_ONCE(ctx->is_active); if (ctx->task) WARN_ON_ONCE(cpuctx->task_ctx); return; } ctx->is_active &= ~event_type; if (!(ctx->is_active & EVENT_ALL)) ctx->is_active = 0; if (ctx->task) { WARN_ON_ONCE(cpuctx->task_ctx != ctx); if (!ctx->is_active) cpuctx->task_ctx = NULL; } /* * Always update time if it was set; not only when it changes. * Otherwise we can 'forget' to update time for any but the last * context we sched out. 
For example: * * ctx_sched_out(.event_type = EVENT_FLEXIBLE) * ctx_sched_out(.event_type = EVENT_PINNED) * * would only update time for the pinned events. */ if (is_active & EVENT_TIME) { /* update (and stop) ctx time */ update_context_time(ctx); update_cgrp_time_from_cpuctx(cpuctx); } is_active ^= ctx->is_active; /* changed bits */ if (!ctx->nr_active || !(is_active & EVENT_ALL)) return; perf_pmu_disable(ctx->pmu); if (is_active & EVENT_PINNED) { list_for_each_entry(event, &ctx->pinned_groups, group_entry) group_sched_out(event, cpuctx, ctx); } if (is_active & EVENT_FLEXIBLE) { list_for_each_entry(event, &ctx->flexible_groups, group_entry) group_sched_out(event, cpuctx, ctx); } perf_pmu_enable(ctx->pmu); } /* * Test whether two contexts are equivalent, i.e. whether they have both been * cloned from the same version of the same context. * * Equivalence is measured using a generation number in the context that is * incremented on each modification to it; see unclone_ctx(), list_add_event() * and list_del_event(). */ static int context_equiv(struct perf_event_context *ctx1, struct perf_event_context *ctx2) { lockdep_assert_held(&ctx1->lock); lockdep_assert_held(&ctx2->lock); /* Pinning disables the swap optimization */ if (ctx1->pin_count || ctx2->pin_count) return 0; /* If ctx1 is the parent of ctx2 */ if (ctx1 == ctx2->parent_ctx && ctx1->generation == ctx2->parent_gen) return 1; /* If ctx2 is the parent of ctx1 */ if (ctx1->parent_ctx == ctx2 && ctx1->parent_gen == ctx2->generation) return 1; /* * If ctx1 and ctx2 have the same parent; we flatten the parent * hierarchy, see perf_event_init_context(). */ if (ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx && ctx1->parent_gen == ctx2->parent_gen) return 1; /* Unmatched */ return 0; } static void __perf_event_sync_stat(struct perf_event *event, struct perf_event *next_event) { u64 value; if (!event->attr.inherit_stat) return; /* * Update the event value, we cannot use perf_event_read() * because we're in the middle of a context switch and have IRQs * disabled, which upsets smp_call_function_single(), however * we know the event must be on the current CPU, therefore we * don't need to use it. */ switch (event->state) { case PERF_EVENT_STATE_ACTIVE: event->pmu->read(event); /* fall-through */ case PERF_EVENT_STATE_INACTIVE: update_event_times(event); break; default: break; } /* * In order to keep per-task stats reliable we need to flip the event * values when we flip the contexts. */ value = local64_read(&next_event->count); value = local64_xchg(&event->count, value); local64_set(&next_event->count, value); swap(event->total_time_enabled, next_event->total_time_enabled); swap(event->total_time_running, next_event->total_time_running); /* * Since we swizzled the values, update the user visible data too. 
*/ perf_event_update_userpage(event); perf_event_update_userpage(next_event); } static void perf_event_sync_stat(struct perf_event_context *ctx, struct perf_event_context *next_ctx) { struct perf_event *event, *next_event; if (!ctx->nr_stat) return; update_context_time(ctx); event = list_first_entry(&ctx->event_list, struct perf_event, event_entry); next_event = list_first_entry(&next_ctx->event_list, struct perf_event, event_entry); while (&event->event_entry != &ctx->event_list && &next_event->event_entry != &next_ctx->event_list) { __perf_event_sync_stat(event, next_event); event = list_next_entry(event, event_entry); next_event = list_next_entry(next_event, event_entry); } } static void perf_event_context_sched_out(struct task_struct *task, int ctxn, struct task_struct *next) { struct perf_event_context *ctx = task->perf_event_ctxp[ctxn]; struct perf_event_context *next_ctx; struct perf_event_context *parent, *next_parent; struct perf_cpu_context *cpuctx; int do_switch = 1; if (likely(!ctx)) return; cpuctx = __get_cpu_context(ctx); if (!cpuctx->task_ctx) return; rcu_read_lock(); next_ctx = next->perf_event_ctxp[ctxn]; if (!next_ctx) goto unlock; parent = rcu_dereference(ctx->parent_ctx); next_parent = rcu_dereference(next_ctx->parent_ctx); /* If neither context have a parent context; they cannot be clones. */ if (!parent && !next_parent) goto unlock; if (next_parent == ctx || next_ctx == parent || next_parent == parent) { /* * Looks like the two contexts are clones, so we might be * able to optimize the context switch. We lock both * contexts and check that they are clones under the * lock (including re-checking that neither has been * uncloned in the meantime). It doesn't matter which * order we take the locks because no other cpu could * be trying to lock both of these tasks. */ raw_spin_lock(&ctx->lock); raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING); if (context_equiv(ctx, next_ctx)) { WRITE_ONCE(ctx->task, next); WRITE_ONCE(next_ctx->task, task); swap(ctx->task_ctx_data, next_ctx->task_ctx_data); /* * RCU_INIT_POINTER here is safe because we've not * modified the ctx and the above modification of * ctx->task and ctx->task_ctx_data are immaterial * since those values are always verified under * ctx->lock which we're now holding. */ RCU_INIT_POINTER(task->perf_event_ctxp[ctxn], next_ctx); RCU_INIT_POINTER(next->perf_event_ctxp[ctxn], ctx); do_switch = 0; perf_event_sync_stat(ctx, next_ctx); } raw_spin_unlock(&next_ctx->lock); raw_spin_unlock(&ctx->lock); } unlock: rcu_read_unlock(); if (do_switch) { raw_spin_lock(&ctx->lock); task_ctx_sched_out(cpuctx, ctx); raw_spin_unlock(&ctx->lock); } } static DEFINE_PER_CPU(struct list_head, sched_cb_list); void perf_sched_cb_dec(struct pmu *pmu) { struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); this_cpu_dec(perf_sched_cb_usages); if (!--cpuctx->sched_cb_usage) list_del(&cpuctx->sched_cb_entry); } void perf_sched_cb_inc(struct pmu *pmu) { struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); if (!cpuctx->sched_cb_usage++) list_add(&cpuctx->sched_cb_entry, this_cpu_ptr(&sched_cb_list)); this_cpu_inc(perf_sched_cb_usages); } /* * This function provides the context switch callback to the lower code * layer. It is invoked ONLY when the context switch callback is enabled. * * This callback is relevant even to per-cpu events; for example multi event * PEBS requires this to provide PID/TID information. This requires we flush * all queued PEBS records before we context switch to a new task. 
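 *
 * PMUs opt in via perf_sched_cb_inc()/perf_sched_cb_dec(); only the
 * cpuctxs linked on the per-cpu sched_cb_list are walked below, so the
 * common case with no interested PMU stays cheap.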
*/ static void perf_pmu_sched_task(struct task_struct *prev, struct task_struct *next, bool sched_in) { struct perf_cpu_context *cpuctx; struct pmu *pmu; if (prev == next) return; list_for_each_entry(cpuctx, this_cpu_ptr(&sched_cb_list), sched_cb_entry) { pmu = cpuctx->unique_pmu; /* software PMUs will not have sched_task */ if (WARN_ON_ONCE(!pmu->sched_task)) continue; perf_ctx_lock(cpuctx, cpuctx->task_ctx); perf_pmu_disable(pmu); pmu->sched_task(cpuctx->task_ctx, sched_in); perf_pmu_enable(pmu); perf_ctx_unlock(cpuctx, cpuctx->task_ctx); } } static void perf_event_switch(struct task_struct *task, struct task_struct *next_prev, bool sched_in); #define for_each_task_context_nr(ctxn) \ for ((ctxn) = 0; (ctxn) < perf_nr_task_contexts; (ctxn)++) /* * Called from scheduler to remove the events of the current task, * with interrupts disabled. * * We stop each event and update the event value in event->count. * * This does not protect us against NMI, but disable() * sets the disabled bit in the control field of event _before_ * accessing the event control register. If a NMI hits, then it will * not restart the event. */ void __perf_event_task_sched_out(struct task_struct *task, struct task_struct *next) { int ctxn; if (__this_cpu_read(perf_sched_cb_usages)) perf_pmu_sched_task(task, next, false); if (atomic_read(&nr_switch_events)) perf_event_switch(task, next, false); for_each_task_context_nr(ctxn) perf_event_context_sched_out(task, ctxn, next); /* * if cgroup events exist on this CPU, then we need * to check if we have to switch out PMU state. * cgroup event are system-wide mode only */ if (atomic_read(this_cpu_ptr(&perf_cgroup_events))) perf_cgroup_sched_out(task, next); } /* * Called with IRQs disabled */ static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx, enum event_type_t event_type) { ctx_sched_out(&cpuctx->ctx, cpuctx, event_type); } static void ctx_pinned_sched_in(struct perf_event_context *ctx, struct perf_cpu_context *cpuctx) { struct perf_event *event; list_for_each_entry(event, &ctx->pinned_groups, group_entry) { if (event->state <= PERF_EVENT_STATE_OFF) continue; if (!event_filter_match(event)) continue; /* may need to reset tstamp_enabled */ if (is_cgroup_event(event)) perf_cgroup_mark_enabled(event, ctx); if (group_can_go_on(event, cpuctx, 1)) group_sched_in(event, cpuctx, ctx); /* * If this pinned group hasn't been scheduled, * put it in error state. 
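 *
 * A pinned group is supposed to be on the PMU for as long as its
 * context is active; when that cannot be satisfied the group is parked
 * in ERROR state so the failure is visible to userspace instead of the
 * group silently under-counting. It stays there until explicitly
 * re-enabled, see _perf_event_enable() which first clears ERROR back
 * to OFF.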
*/ if (event->state == PERF_EVENT_STATE_INACTIVE) { update_group_times(event); event->state = PERF_EVENT_STATE_ERROR; } } } static void ctx_flexible_sched_in(struct perf_event_context *ctx, struct perf_cpu_context *cpuctx) { struct perf_event *event; int can_add_hw = 1; list_for_each_entry(event, &ctx->flexible_groups, group_entry) { /* Ignore events in OFF or ERROR state */ if (event->state <= PERF_EVENT_STATE_OFF) continue; /* * Listen to the 'cpu' scheduling filter constraint * of events: */ if (!event_filter_match(event)) continue; /* may need to reset tstamp_enabled */ if (is_cgroup_event(event)) perf_cgroup_mark_enabled(event, ctx); if (group_can_go_on(event, cpuctx, can_add_hw)) { if (group_sched_in(event, cpuctx, ctx)) can_add_hw = 0; } } } static void ctx_sched_in(struct perf_event_context *ctx, struct perf_cpu_context *cpuctx, enum event_type_t event_type, struct task_struct *task) { int is_active = ctx->is_active; u64 now; lockdep_assert_held(&ctx->lock); if (likely(!ctx->nr_events)) return; ctx->is_active |= (event_type | EVENT_TIME); if (ctx->task) { if (!is_active) cpuctx->task_ctx = ctx; else WARN_ON_ONCE(cpuctx->task_ctx != ctx); } is_active ^= ctx->is_active; /* changed bits */ if (is_active & EVENT_TIME) { /* start ctx time */ now = perf_clock(); ctx->timestamp = now; perf_cgroup_set_timestamp(task, ctx); } /* * First go through the list and put on any pinned groups * in order to give them the best chance of going on. */ if (is_active & EVENT_PINNED) ctx_pinned_sched_in(ctx, cpuctx); /* Then walk through the lower prio flexible groups */ if (is_active & EVENT_FLEXIBLE) ctx_flexible_sched_in(ctx, cpuctx); } static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx, enum event_type_t event_type, struct task_struct *task) { struct perf_event_context *ctx = &cpuctx->ctx; ctx_sched_in(ctx, cpuctx, event_type, task); } static void perf_event_context_sched_in(struct perf_event_context *ctx, struct task_struct *task) { struct perf_cpu_context *cpuctx; cpuctx = __get_cpu_context(ctx); if (cpuctx->task_ctx == ctx) return; perf_ctx_lock(cpuctx, ctx); perf_pmu_disable(ctx->pmu); /* * We want to keep the following priority order: * cpu pinned (that don't need to move), task pinned, * cpu flexible, task flexible. */ cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE); perf_event_sched_in(cpuctx, ctx, task); perf_pmu_enable(ctx->pmu); perf_ctx_unlock(cpuctx, ctx); } /* * Called from scheduler to add the events of the current task * with interrupts disabled. * * We restore the event value and then enable it. * * This does not protect us against NMI, but enable() * sets the enabled bit in the control field of event _before_ * accessing the event control register. If a NMI hits, then it will * keep the event running. */ void __perf_event_task_sched_in(struct task_struct *prev, struct task_struct *task) { struct perf_event_context *ctx; int ctxn; /* * If cgroup events exist on this CPU, then we need to check if we have * to switch in PMU state; cgroup event are system-wide mode only. * * Since cgroup events are CPU events, we must schedule these in before * we schedule in the task events. 
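 *
 * Note that this ordering is the mirror image of
 * __perf_event_task_sched_out(), which handles the cgroup switch last.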
*/ if (atomic_read(this_cpu_ptr(&perf_cgroup_events))) perf_cgroup_sched_in(prev, task); for_each_task_context_nr(ctxn) { ctx = task->perf_event_ctxp[ctxn]; if (likely(!ctx)) continue; perf_event_context_sched_in(ctx, task); } if (atomic_read(&nr_switch_events)) perf_event_switch(task, prev, true); if (__this_cpu_read(perf_sched_cb_usages)) perf_pmu_sched_task(prev, task, true); } static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count) { u64 frequency = event->attr.sample_freq; u64 sec = NSEC_PER_SEC; u64 divisor, dividend; int count_fls, nsec_fls, frequency_fls, sec_fls; count_fls = fls64(count); nsec_fls = fls64(nsec); frequency_fls = fls64(frequency); sec_fls = 30; /* * We got @count in @nsec, with a target of sample_freq HZ * the target period becomes: * * @count * 10^9 * period = ------------------- * @nsec * sample_freq * */ /* * Reduce accuracy by one bit such that @a and @b converge * to a similar magnitude. */ #define REDUCE_FLS(a, b) \ do { \ if (a##_fls > b##_fls) { \ a >>= 1; \ a##_fls--; \ } else { \ b >>= 1; \ b##_fls--; \ } \ } while (0) /* * Reduce accuracy until either term fits in a u64, then proceed with * the other, so that finally we can do a u64/u64 division. */ while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) { REDUCE_FLS(nsec, frequency); REDUCE_FLS(sec, count); } if (count_fls + sec_fls > 64) { divisor = nsec * frequency; while (count_fls + sec_fls > 64) { REDUCE_FLS(count, sec); divisor >>= 1; } dividend = count * sec; } else { dividend = count * sec; while (nsec_fls + frequency_fls > 64) { REDUCE_FLS(nsec, frequency); dividend >>= 1; } divisor = nsec * frequency; } if (!divisor) return dividend; return div64_u64(dividend, divisor); } static DEFINE_PER_CPU(int, perf_throttled_count); static DEFINE_PER_CPU(u64, perf_throttled_seq); static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable) { struct hw_perf_event *hwc = &event->hw; s64 period, sample_period; s64 delta; period = perf_calculate_period(event, nsec, count); delta = (s64)(period - hwc->sample_period); delta = (delta + 7) / 8; /* low pass filter */ sample_period = hwc->sample_period + delta; if (!sample_period) sample_period = 1; hwc->sample_period = sample_period; if (local64_read(&hwc->period_left) > 8*sample_period) { if (disable) event->pmu->stop(event, PERF_EF_UPDATE); local64_set(&hwc->period_left, 0); if (disable) event->pmu->start(event, PERF_EF_RELOAD); } } /* * combine freq adjustment with unthrottling to avoid two passes over the * events. At the same time, make sure, having freq events does not change * the rate of unthrottling as that would introduce bias. 
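 *
 * For reference, perf_calculate_period() above computes
 *
 *	period = (count * NSEC_PER_SEC) / (nsec * sample_freq)
 *
 * so, with purely illustrative numbers, an event that counted
 * 2,000,000 times over a 10ms tick at sample_freq = 4000 ends up with
 * a target period of 2e6 * 1e9 / (1e7 * 4e3) = 50,000 events per
 * sample; perf_adjust_period() then low-pass filters that into
 * hwc->sample_period.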
*/ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx, int needs_unthr) { struct perf_event *event; struct hw_perf_event *hwc; u64 now, period = TICK_NSEC; s64 delta; /* * only need to iterate over all events iff: * - context have events in frequency mode (needs freq adjust) * - there are events to unthrottle on this cpu */ if (!(ctx->nr_freq || needs_unthr)) return; raw_spin_lock(&ctx->lock); perf_pmu_disable(ctx->pmu); list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { if (event->state != PERF_EVENT_STATE_ACTIVE) continue; if (!event_filter_match(event)) continue; perf_pmu_disable(event->pmu); hwc = &event->hw; if (hwc->interrupts == MAX_INTERRUPTS) { hwc->interrupts = 0; perf_log_throttle(event, 1); event->pmu->start(event, 0); } if (!event->attr.freq || !event->attr.sample_freq) goto next; /* * stop the event and update event->count */ event->pmu->stop(event, PERF_EF_UPDATE); now = local64_read(&event->count); delta = now - hwc->freq_count_stamp; hwc->freq_count_stamp = now; /* * restart the event * reload only if value has changed * we have stopped the event so tell that * to perf_adjust_period() to avoid stopping it * twice. */ if (delta > 0) perf_adjust_period(event, period, delta, false); event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0); next: perf_pmu_enable(event->pmu); } perf_pmu_enable(ctx->pmu); raw_spin_unlock(&ctx->lock); } /* * Round-robin a context's events: */ static void rotate_ctx(struct perf_event_context *ctx) { /* * Rotate the first entry last of non-pinned groups. Rotation might be * disabled by the inheritance code. */ if (!ctx->rotate_disable) list_rotate_left(&ctx->flexible_groups); } static int perf_rotate_context(struct perf_cpu_context *cpuctx) { struct perf_event_context *ctx = NULL; int rotate = 0; if (cpuctx->ctx.nr_events) { if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active) rotate = 1; } ctx = cpuctx->task_ctx; if (ctx && ctx->nr_events) { if (ctx->nr_events != ctx->nr_active) rotate = 1; } if (!rotate) goto done; perf_ctx_lock(cpuctx, cpuctx->task_ctx); perf_pmu_disable(cpuctx->ctx.pmu); cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE); if (ctx) ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE); rotate_ctx(&cpuctx->ctx); if (ctx) rotate_ctx(ctx); perf_event_sched_in(cpuctx, ctx, current); perf_pmu_enable(cpuctx->ctx.pmu); perf_ctx_unlock(cpuctx, cpuctx->task_ctx); done: return rotate; } void perf_event_task_tick(void) { struct list_head *head = this_cpu_ptr(&active_ctx_list); struct perf_event_context *ctx, *tmp; int throttled; WARN_ON(!irqs_disabled()); __this_cpu_inc(perf_throttled_seq); throttled = __this_cpu_xchg(perf_throttled_count, 0); tick_dep_clear_cpu(smp_processor_id(), TICK_DEP_BIT_PERF_EVENTS); list_for_each_entry_safe(ctx, tmp, head, active_ctx_list) perf_adjust_freq_unthr_context(ctx, throttled); } static int event_enable_on_exec(struct perf_event *event, struct perf_event_context *ctx) { if (!event->attr.enable_on_exec) return 0; event->attr.enable_on_exec = 0; if (event->state >= PERF_EVENT_STATE_INACTIVE) return 0; __perf_event_mark_enabled(event); return 1; } /* * Enable all of a task's events that have been marked enable-on-exec. * This expects task == current. 
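 *
 * A typical (purely illustrative) user is a tool that opens counters
 * on itself with
 *
 *	attr.disabled       = 1;
 *	attr.enable_on_exec = 1;
 *
 * and then exec()s the workload, so that counting starts exactly at
 * the exec boundary.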
*/ static void perf_event_enable_on_exec(int ctxn) { struct perf_event_context *ctx, *clone_ctx = NULL; struct perf_cpu_context *cpuctx; struct perf_event *event; unsigned long flags; int enabled = 0; local_irq_save(flags); ctx = current->perf_event_ctxp[ctxn]; if (!ctx || !ctx->nr_events) goto out; cpuctx = __get_cpu_context(ctx); perf_ctx_lock(cpuctx, ctx); ctx_sched_out(ctx, cpuctx, EVENT_TIME); list_for_each_entry(event, &ctx->event_list, event_entry) enabled |= event_enable_on_exec(event, ctx); /* * Unclone and reschedule this context if we enabled any event. */ if (enabled) { clone_ctx = unclone_ctx(ctx); ctx_resched(cpuctx, ctx); } perf_ctx_unlock(cpuctx, ctx); out: local_irq_restore(flags); if (clone_ctx) put_ctx(clone_ctx); } struct perf_read_data { struct perf_event *event; bool group; int ret; }; static int find_cpu_to_read(struct perf_event *event, int local_cpu) { int event_cpu = event->oncpu; u16 local_pkg, event_pkg; if (event->group_caps & PERF_EV_CAP_READ_ACTIVE_PKG) { event_pkg = topology_physical_package_id(event_cpu); local_pkg = topology_physical_package_id(local_cpu); if (event_pkg == local_pkg) return local_cpu; } return event_cpu; } /* * Cross CPU call to read the hardware event */ static void __perf_event_read(void *info) { struct perf_read_data *data = info; struct perf_event *sub, *event = data->event; struct perf_event_context *ctx = event->ctx; struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); struct pmu *pmu = event->pmu; /* * If this is a task context, we need to check whether it is * the current task context of this cpu. If not it has been * scheduled out before the smp call arrived. In that case * event->count would have been updated to a recent sample * when the event was scheduled out. */ if (ctx->task && cpuctx->task_ctx != ctx) return; raw_spin_lock(&ctx->lock); if (ctx->is_active) { update_context_time(ctx); update_cgrp_time_from_event(event); } update_event_times(event); if (event->state != PERF_EVENT_STATE_ACTIVE) goto unlock; if (!data->group) { pmu->read(event); data->ret = 0; goto unlock; } pmu->start_txn(pmu, PERF_PMU_TXN_READ); pmu->read(event); list_for_each_entry(sub, &event->sibling_list, group_entry) { update_event_times(sub); if (sub->state == PERF_EVENT_STATE_ACTIVE) { /* * Use sibling's PMU rather than @event's since * sibling could be on different (eg: software) PMU. */ sub->pmu->read(sub); } } data->ret = pmu->commit_txn(pmu); unlock: raw_spin_unlock(&ctx->lock); } static inline u64 perf_event_count(struct perf_event *event) { if (event->pmu->count) return event->pmu->count(event); return __perf_event_count(event); } /* * NMI-safe method to read a local event, that is an event that * is: * - either for the current task, or for this CPU * - does not have inherit set, for inherited task events * will not be local and we cannot read them atomically * - must not have a pmu::count method */ u64 perf_event_read_local(struct perf_event *event) { unsigned long flags; u64 val; /* * Disabling interrupts avoids all counter scheduling (context * switches, timer based rotation and IPIs). */ local_irq_save(flags); /* If this is a per-task event, it must be for current */ WARN_ON_ONCE((event->attach_state & PERF_ATTACH_TASK) && event->hw.target != current); /* If this is a per-CPU event, it must be for this CPU */ WARN_ON_ONCE(!(event->attach_state & PERF_ATTACH_TASK) && event->cpu != smp_processor_id()); /* * It must not be an event with inherit set, we cannot read * all child counters from atomic context. 
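 *
 * These restrictions are what make it safe to call this from NMI and
 * tracing context, e.g. from bpf_perf_event_read(), where taking
 * ctx->lock or sending IPIs is not an option.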
*/ WARN_ON_ONCE(event->attr.inherit); /* * It must not have a pmu::count method, those are not * NMI safe. */ WARN_ON_ONCE(event->pmu->count); /* * If the event is currently on this CPU, its either a per-task event, * or local to this CPU. Furthermore it means its ACTIVE (otherwise * oncpu == -1). */ if (event->oncpu == smp_processor_id()) event->pmu->read(event); val = local64_read(&event->count); local_irq_restore(flags); return val; } static int perf_event_read(struct perf_event *event, bool group) { int ret = 0, cpu_to_read, local_cpu; /* * If event is enabled and currently active on a CPU, update the * value in the event structure: */ if (event->state == PERF_EVENT_STATE_ACTIVE) { struct perf_read_data data = { .event = event, .group = group, .ret = 0, }; local_cpu = get_cpu(); cpu_to_read = find_cpu_to_read(event, local_cpu); put_cpu(); /* * Purposely ignore the smp_call_function_single() return * value. * * If event->oncpu isn't a valid CPU it means the event got * scheduled out and that will have updated the event count. * * Therefore, either way, we'll have an up-to-date event count * after this. */ (void)smp_call_function_single(cpu_to_read, __perf_event_read, &data, 1); ret = data.ret; } else if (event->state == PERF_EVENT_STATE_INACTIVE) { struct perf_event_context *ctx = event->ctx; unsigned long flags; raw_spin_lock_irqsave(&ctx->lock, flags); /* * may read while context is not active * (e.g., thread is blocked), in that case * we cannot update context time */ if (ctx->is_active) { update_context_time(ctx); update_cgrp_time_from_event(event); } if (group) update_group_times(event); else update_event_times(event); raw_spin_unlock_irqrestore(&ctx->lock, flags); } return ret; } /* * Initialize the perf_event context in a task_struct: */ static void __perf_event_init_context(struct perf_event_context *ctx) { raw_spin_lock_init(&ctx->lock); mutex_init(&ctx->mutex); INIT_LIST_HEAD(&ctx->active_ctx_list); INIT_LIST_HEAD(&ctx->pinned_groups); INIT_LIST_HEAD(&ctx->flexible_groups); INIT_LIST_HEAD(&ctx->event_list); atomic_set(&ctx->refcount, 1); } static struct perf_event_context * alloc_perf_context(struct pmu *pmu, struct task_struct *task) { struct perf_event_context *ctx; ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL); if (!ctx) return NULL; __perf_event_init_context(ctx); if (task) { ctx->task = task; get_task_struct(task); } ctx->pmu = pmu; return ctx; } static struct task_struct * find_lively_task_by_vpid(pid_t vpid) { struct task_struct *task; rcu_read_lock(); if (!vpid) task = current; else task = find_task_by_vpid(vpid); if (task) get_task_struct(task); rcu_read_unlock(); if (!task) return ERR_PTR(-ESRCH); return task; } /* * Returns a matching context with refcount and pincount. */ static struct perf_event_context * find_get_context(struct pmu *pmu, struct task_struct *task, struct perf_event *event) { struct perf_event_context *ctx, *clone_ctx = NULL; struct perf_cpu_context *cpuctx; void *task_ctx_data = NULL; unsigned long flags; int ctxn, err; int cpu = event->cpu; if (!task) { /* Must be root to operate on a CPU event: */ if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN)) return ERR_PTR(-EACCES); /* * We could be clever and allow to attach a event to an * offline CPU and activate it when the CPU comes up, but * that's for later. 
*/ if (!cpu_online(cpu)) return ERR_PTR(-ENODEV); cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); ctx = &cpuctx->ctx; get_ctx(ctx); ++ctx->pin_count; return ctx; } err = -EINVAL; ctxn = pmu->task_ctx_nr; if (ctxn < 0) goto errout; if (event->attach_state & PERF_ATTACH_TASK_DATA) { task_ctx_data = kzalloc(pmu->task_ctx_size, GFP_KERNEL); if (!task_ctx_data) { err = -ENOMEM; goto errout; } } retry: ctx = perf_lock_task_context(task, ctxn, &flags); if (ctx) { clone_ctx = unclone_ctx(ctx); ++ctx->pin_count; if (task_ctx_data && !ctx->task_ctx_data) { ctx->task_ctx_data = task_ctx_data; task_ctx_data = NULL; } raw_spin_unlock_irqrestore(&ctx->lock, flags); if (clone_ctx) put_ctx(clone_ctx); } else { ctx = alloc_perf_context(pmu, task); err = -ENOMEM; if (!ctx) goto errout; if (task_ctx_data) { ctx->task_ctx_data = task_ctx_data; task_ctx_data = NULL; } err = 0; mutex_lock(&task->perf_event_mutex); /* * If it has already passed perf_event_exit_task(). * we must see PF_EXITING, it takes this mutex too. */ if (task->flags & PF_EXITING) err = -ESRCH; else if (task->perf_event_ctxp[ctxn]) err = -EAGAIN; else { get_ctx(ctx); ++ctx->pin_count; rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx); } mutex_unlock(&task->perf_event_mutex); if (unlikely(err)) { put_ctx(ctx); if (err == -EAGAIN) goto retry; goto errout; } } kfree(task_ctx_data); return ctx; errout: kfree(task_ctx_data); return ERR_PTR(err); } static void perf_event_free_filter(struct perf_event *event); static void perf_event_free_bpf_prog(struct perf_event *event); static void free_event_rcu(struct rcu_head *head) { struct perf_event *event; event = container_of(head, struct perf_event, rcu_head); if (event->ns) put_pid_ns(event->ns); perf_event_free_filter(event); kfree(event); } static void ring_buffer_attach(struct perf_event *event, struct ring_buffer *rb); static void detach_sb_event(struct perf_event *event) { struct pmu_event_list *pel = per_cpu_ptr(&pmu_sb_events, event->cpu); raw_spin_lock(&pel->lock); list_del_rcu(&event->sb_list); raw_spin_unlock(&pel->lock); } static bool is_sb_event(struct perf_event *event) { struct perf_event_attr *attr = &event->attr; if (event->parent) return false; if (event->attach_state & PERF_ATTACH_TASK) return false; if (attr->mmap || attr->mmap_data || attr->mmap2 || attr->comm || attr->comm_exec || attr->task || attr->context_switch) return true; return false; } static void unaccount_pmu_sb_event(struct perf_event *event) { if (is_sb_event(event)) detach_sb_event(event); } static void unaccount_event_cpu(struct perf_event *event, int cpu) { if (event->parent) return; if (is_cgroup_event(event)) atomic_dec(&per_cpu(perf_cgroup_events, cpu)); } #ifdef CONFIG_NO_HZ_FULL static DEFINE_SPINLOCK(nr_freq_lock); #endif static void unaccount_freq_event_nohz(void) { #ifdef CONFIG_NO_HZ_FULL spin_lock(&nr_freq_lock); if (atomic_dec_and_test(&nr_freq_events)) tick_nohz_dep_clear(TICK_DEP_BIT_PERF_EVENTS); spin_unlock(&nr_freq_lock); #endif } static void unaccount_freq_event(void) { if (tick_nohz_full_enabled()) unaccount_freq_event_nohz(); else atomic_dec(&nr_freq_events); } static void unaccount_event(struct perf_event *event) { bool dec = false; if (event->parent) return; if (event->attach_state & PERF_ATTACH_TASK) dec = true; if (event->attr.mmap || event->attr.mmap_data) atomic_dec(&nr_mmap_events); if (event->attr.comm) atomic_dec(&nr_comm_events); if (event->attr.task) atomic_dec(&nr_task_events); if (event->attr.freq) unaccount_freq_event(); if (event->attr.context_switch) { dec = true; 
atomic_dec(&nr_switch_events); } if (is_cgroup_event(event)) dec = true; if (has_branch_stack(event)) dec = true; if (dec) { if (!atomic_add_unless(&perf_sched_count, -1, 1)) schedule_delayed_work(&perf_sched_work, HZ); } unaccount_event_cpu(event, event->cpu); unaccount_pmu_sb_event(event); } static void perf_sched_delayed(struct work_struct *work) { mutex_lock(&perf_sched_mutex); if (atomic_dec_and_test(&perf_sched_count)) static_branch_disable(&perf_sched_events); mutex_unlock(&perf_sched_mutex); } /* * The following implement mutual exclusion of events on "exclusive" pmus * (PERF_PMU_CAP_EXCLUSIVE). Such pmus can only have one event scheduled * at a time, so we disallow creating events that might conflict, namely: * * 1) cpu-wide events in the presence of per-task events, * 2) per-task events in the presence of cpu-wide events, * 3) two matching events on the same context. * * The former two cases are handled in the allocation path (perf_event_alloc(), * _free_event()), the latter -- before the first perf_install_in_context(). */ static int exclusive_event_init(struct perf_event *event) { struct pmu *pmu = event->pmu; if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE)) return 0; /* * Prevent co-existence of per-task and cpu-wide events on the * same exclusive pmu. * * Negative pmu::exclusive_cnt means there are cpu-wide * events on this "exclusive" pmu, positive means there are * per-task events. * * Since this is called in perf_event_alloc() path, event::ctx * doesn't exist yet; it is, however, safe to use PERF_ATTACH_TASK * to mean "per-task event", because unlike other attach states it * never gets cleared. */ if (event->attach_state & PERF_ATTACH_TASK) { if (!atomic_inc_unless_negative(&pmu->exclusive_cnt)) return -EBUSY; } else { if (!atomic_dec_unless_positive(&pmu->exclusive_cnt)) return -EBUSY; } return 0; } static void exclusive_event_destroy(struct perf_event *event) { struct pmu *pmu = event->pmu; if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE)) return; /* see comment in exclusive_event_init() */ if (event->attach_state & PERF_ATTACH_TASK) atomic_dec(&pmu->exclusive_cnt); else atomic_inc(&pmu->exclusive_cnt); } static bool exclusive_event_match(struct perf_event *e1, struct perf_event *e2) { if ((e1->pmu == e2->pmu) && (e1->cpu == e2->cpu || e1->cpu == -1 || e2->cpu == -1)) return true; return false; } /* Called under the same ctx::mutex as perf_install_in_context() */ static bool exclusive_event_installable(struct perf_event *event, struct perf_event_context *ctx) { struct perf_event *iter_event; struct pmu *pmu = event->pmu; if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE)) return true; list_for_each_entry(iter_event, &ctx->event_list, event_entry) { if (exclusive_event_match(iter_event, event)) return false; } return true; } static void perf_addr_filters_splice(struct perf_event *event, struct list_head *head); static void _free_event(struct perf_event *event) { irq_work_sync(&event->pending); unaccount_event(event); if (event->rb) { /* * Can happen when we close an event with re-directed output. * * Since we have a 0 refcount, perf_mmap_close() will skip * over us; possibly making our ring_buffer_put() the last. 
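 *
 * Detaching under mmap_mutex serializes us against a concurrent
 * perf_mmap() or perf_event_set_output() installing a new buffer
 * while the event is being torn down.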
*/ mutex_lock(&event->mmap_mutex); ring_buffer_attach(event, NULL); mutex_unlock(&event->mmap_mutex); } if (is_cgroup_event(event)) perf_detach_cgroup(event); if (!event->parent) { if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) put_callchain_buffers(); } perf_event_free_bpf_prog(event); perf_addr_filters_splice(event, NULL); kfree(event->addr_filters_offs); if (event->destroy) event->destroy(event); if (event->ctx) put_ctx(event->ctx); exclusive_event_destroy(event); module_put(event->pmu->module); call_rcu(&event->rcu_head, free_event_rcu); } /* * Used to free events which have a known refcount of 1, such as in error paths * where the event isn't exposed yet and inherited events. */ static void free_event(struct perf_event *event) { if (WARN(atomic_long_cmpxchg(&event->refcount, 1, 0) != 1, "unexpected event refcount: %ld; ptr=%p\n", atomic_long_read(&event->refcount), event)) { /* leak to avoid use-after-free */ return; } _free_event(event); } /* * Remove user event from the owner task. */ static void perf_remove_from_owner(struct perf_event *event) { struct task_struct *owner; rcu_read_lock(); /* * Matches the smp_store_release() in perf_event_exit_task(). If we * observe !owner it means the list deletion is complete and we can * indeed free this event, otherwise we need to serialize on * owner->perf_event_mutex. */ owner = lockless_dereference(event->owner); if (owner) { /* * Since delayed_put_task_struct() also drops the last * task reference we can safely take a new reference * while holding the rcu_read_lock(). */ get_task_struct(owner); } rcu_read_unlock(); if (owner) { /* * If we're here through perf_event_exit_task() we're already * holding ctx->mutex which would be an inversion wrt. the * normal lock order. * * However we can safely take this lock because its the child * ctx->mutex. */ mutex_lock_nested(&owner->perf_event_mutex, SINGLE_DEPTH_NESTING); /* * We have to re-check the event->owner field, if it is cleared * we raced with perf_event_exit_task(), acquiring the mutex * ensured they're done, and we can proceed with freeing the * event. */ if (event->owner) { list_del_init(&event->owner_entry); smp_store_release(&event->owner, NULL); } mutex_unlock(&owner->perf_event_mutex); put_task_struct(owner); } } static void put_event(struct perf_event *event) { if (!atomic_long_dec_and_test(&event->refcount)) return; _free_event(event); } /* * Kill an event dead; while event:refcount will preserve the event * object, it will not preserve its functionality. Once the last 'user' * gives up the object, we'll destroy the thing. */ int perf_event_release_kernel(struct perf_event *event) { struct perf_event_context *ctx = event->ctx; struct perf_event *child, *tmp; /* * If we got here through err_file: fput(event_file); we will not have * attached to a context yet. */ if (!ctx) { WARN_ON_ONCE(event->attach_state & (PERF_ATTACH_CONTEXT|PERF_ATTACH_GROUP)); goto no_ctx; } if (!is_kernel_event(event)) perf_remove_from_owner(event); ctx = perf_event_ctx_lock(event); WARN_ON_ONCE(ctx->parent_ctx); perf_remove_from_context(event, DETACH_GROUP); raw_spin_lock_irq(&ctx->lock); /* * Mark this even as STATE_DEAD, there is no external reference to it * anymore. * * Anybody acquiring event->child_mutex after the below loop _must_ * also see this, most importantly inherit_event() which will avoid * placing more children on the list. * * Thus this guarantees that we will in fact observe and kill _ALL_ * child events. 
*/ event->state = PERF_EVENT_STATE_DEAD; raw_spin_unlock_irq(&ctx->lock); perf_event_ctx_unlock(event, ctx); again: mutex_lock(&event->child_mutex); list_for_each_entry(child, &event->child_list, child_list) { /* * Cannot change, child events are not migrated, see the * comment with perf_event_ctx_lock_nested(). */ ctx = lockless_dereference(child->ctx); /* * Since child_mutex nests inside ctx::mutex, we must jump * through hoops. We start by grabbing a reference on the ctx. * * Since the event cannot get freed while we hold the * child_mutex, the context must also exist and have a !0 * reference count. */ get_ctx(ctx); /* * Now that we have a ctx ref, we can drop child_mutex, and * acquire ctx::mutex without fear of it going away. Then we * can re-acquire child_mutex. */ mutex_unlock(&event->child_mutex); mutex_lock(&ctx->mutex); mutex_lock(&event->child_mutex); /* * Now that we hold ctx::mutex and child_mutex, revalidate our * state, if child is still the first entry, it didn't get freed * and we can continue doing so. */ tmp = list_first_entry_or_null(&event->child_list, struct perf_event, child_list); if (tmp == child) { perf_remove_from_context(child, DETACH_GROUP); list_del(&child->child_list); free_event(child); /* * This matches the refcount bump in inherit_event(); * this can't be the last reference. */ put_event(event); } mutex_unlock(&event->child_mutex); mutex_unlock(&ctx->mutex); put_ctx(ctx); goto again; } mutex_unlock(&event->child_mutex); no_ctx: put_event(event); /* Must be the 'last' reference */ return 0; } EXPORT_SYMBOL_GPL(perf_event_release_kernel); /* * Called when the last reference to the file is gone. */ static int perf_release(struct inode *inode, struct file *file) { perf_event_release_kernel(file->private_data); return 0; } u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running) { struct perf_event *child; u64 total = 0; *enabled = 0; *running = 0; mutex_lock(&event->child_mutex); (void)perf_event_read(event, false); total += perf_event_count(event); *enabled += event->total_time_enabled + atomic64_read(&event->child_total_time_enabled); *running += event->total_time_running + atomic64_read(&event->child_total_time_running); list_for_each_entry(child, &event->child_list, child_list) { (void)perf_event_read(child, false); total += perf_event_count(child); *enabled += child->total_time_enabled; *running += child->total_time_running; } mutex_unlock(&event->child_mutex); return total; } EXPORT_SYMBOL_GPL(perf_event_read_value); static int __perf_read_group_add(struct perf_event *leader, u64 read_format, u64 *values) { struct perf_event *sub; int n = 1; /* skip @nr */ int ret; ret = perf_event_read(leader, true); if (ret) return ret; /* * Since we co-schedule groups, {enabled,running} times of siblings * will be identical to those of the leader, so we only publish one * set. */ if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { values[n++] += leader->total_time_enabled + atomic64_read(&leader->child_total_time_enabled); } if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { values[n++] += leader->total_time_running + atomic64_read(&leader->child_total_time_running); } /* * Write {count,id} tuples for every sibling. 
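 *
 * The buffer that eventually reaches user space for a
 * PERF_FORMAT_GROUP read is laid out as described for read_format in
 * include/uapi/linux/perf_event.h, roughly:
 *
 *	{ u64		nr;
 *	  { u64		time_enabled; }	&& PERF_FORMAT_TOTAL_TIME_ENABLED
 *	  { u64		time_running; }	&& PERF_FORMAT_TOTAL_TIME_RUNNING
 *	  { u64		value;
 *	    { u64	id; }		&& PERF_FORMAT_ID
 *	  }		cntr[nr];
 *	}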
*/ values[n++] += perf_event_count(leader); if (read_format & PERF_FORMAT_ID) values[n++] = primary_event_id(leader); list_for_each_entry(sub, &leader->sibling_list, group_entry) { values[n++] += perf_event_count(sub); if (read_format & PERF_FORMAT_ID) values[n++] = primary_event_id(sub); } return 0; } static int perf_read_group(struct perf_event *event, u64 read_format, char __user *buf) { struct perf_event *leader = event->group_leader, *child; struct perf_event_context *ctx = leader->ctx; int ret; u64 *values; lockdep_assert_held(&ctx->mutex); values = kzalloc(event->read_size, GFP_KERNEL); if (!values) return -ENOMEM; values[0] = 1 + leader->nr_siblings; /* * By locking the child_mutex of the leader we effectively * lock the child list of all siblings.. XXX explain how. */ mutex_lock(&leader->child_mutex); ret = __perf_read_group_add(leader, read_format, values); if (ret) goto unlock; list_for_each_entry(child, &leader->child_list, child_list) { ret = __perf_read_group_add(child, read_format, values); if (ret) goto unlock; } mutex_unlock(&leader->child_mutex); ret = event->read_size; if (copy_to_user(buf, values, event->read_size)) ret = -EFAULT; goto out; unlock: mutex_unlock(&leader->child_mutex); out: kfree(values); return ret; } static int perf_read_one(struct perf_event *event, u64 read_format, char __user *buf) { u64 enabled, running; u64 values[4]; int n = 0; values[n++] = perf_event_read_value(event, &enabled, &running); if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) values[n++] = enabled; if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) values[n++] = running; if (read_format & PERF_FORMAT_ID) values[n++] = primary_event_id(event); if (copy_to_user(buf, values, n * sizeof(u64))) return -EFAULT; return n * sizeof(u64); } static bool is_event_hup(struct perf_event *event) { bool no_children; if (event->state > PERF_EVENT_STATE_EXIT) return false; mutex_lock(&event->child_mutex); no_children = list_empty(&event->child_list); mutex_unlock(&event->child_mutex); return no_children; } /* * Read the performance event - simple non blocking version for now */ static ssize_t __perf_read(struct perf_event *event, char __user *buf, size_t count) { u64 read_format = event->attr.read_format; int ret; /* * Return end-of-file for a read on a event that is in * error state (i.e. because it was pinned but it couldn't be * scheduled on to the CPU at some point). */ if (event->state == PERF_EVENT_STATE_ERROR) return 0; if (count < event->read_size) return -ENOSPC; WARN_ON_ONCE(event->ctx->parent_ctx); if (read_format & PERF_FORMAT_GROUP) ret = perf_read_group(event, read_format, buf); else ret = perf_read_one(event, read_format, buf); return ret; } static ssize_t perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { struct perf_event *event = file->private_data; struct perf_event_context *ctx; int ret; ctx = perf_event_ctx_lock(event); ret = __perf_read(event, buf, count); perf_event_ctx_unlock(event, ctx); return ret; } static unsigned int perf_poll(struct file *file, poll_table *wait) { struct perf_event *event = file->private_data; struct ring_buffer *rb; unsigned int events = POLLHUP; poll_wait(file, &event->waitq, wait); if (is_event_hup(event)) return events; /* * Pin the event->rb by taking event->mmap_mutex; otherwise * perf_event_set_output() can swizzle our rb and make us miss wakeups. 
*/ mutex_lock(&event->mmap_mutex); rb = event->rb; if (rb) events = atomic_xchg(&rb->poll, 0); mutex_unlock(&event->mmap_mutex); return events; } static void _perf_event_reset(struct perf_event *event) { (void)perf_event_read(event, false); local64_set(&event->count, 0); perf_event_update_userpage(event); } /* * Holding the top-level event's child_mutex means that any * descendant process that has inherited this event will block * in perf_event_exit_event() if it goes to exit, thus satisfying the * task existence requirements of perf_event_enable/disable. */ static void perf_event_for_each_child(struct perf_event *event, void (*func)(struct perf_event *)) { struct perf_event *child; WARN_ON_ONCE(event->ctx->parent_ctx); mutex_lock(&event->child_mutex); func(event); list_for_each_entry(child, &event->child_list, child_list) func(child); mutex_unlock(&event->child_mutex); } static void perf_event_for_each(struct perf_event *event, void (*func)(struct perf_event *)) { struct perf_event_context *ctx = event->ctx; struct perf_event *sibling; lockdep_assert_held(&ctx->mutex); event = event->group_leader; perf_event_for_each_child(event, func); list_for_each_entry(sibling, &event->sibling_list, group_entry) perf_event_for_each_child(sibling, func); } static void __perf_event_period(struct perf_event *event, struct perf_cpu_context *cpuctx, struct perf_event_context *ctx, void *info) { u64 value = *((u64 *)info); bool active; if (event->attr.freq) { event->attr.sample_freq = value; } else { event->attr.sample_period = value; event->hw.sample_period = value; } active = (event->state == PERF_EVENT_STATE_ACTIVE); if (active) { perf_pmu_disable(ctx->pmu); /* * We could be throttled; unthrottle now to avoid the tick * trying to unthrottle while we already re-started the event. 
*/ if (event->hw.interrupts == MAX_INTERRUPTS) { event->hw.interrupts = 0; perf_log_throttle(event, 1); } event->pmu->stop(event, PERF_EF_UPDATE); } local64_set(&event->hw.period_left, 0); if (active) { event->pmu->start(event, PERF_EF_RELOAD); perf_pmu_enable(ctx->pmu); } } static int perf_event_period(struct perf_event *event, u64 __user *arg) { u64 value; if (!is_sampling_event(event)) return -EINVAL; if (copy_from_user(&value, arg, sizeof(value))) return -EFAULT; if (!value) return -EINVAL; if (event->attr.freq && value > sysctl_perf_event_sample_rate) return -EINVAL; event_function_call(event, __perf_event_period, &value); return 0; } static const struct file_operations perf_fops; static inline int perf_fget_light(int fd, struct fd *p) { struct fd f = fdget(fd); if (!f.file) return -EBADF; if (f.file->f_op != &perf_fops) { fdput(f); return -EBADF; } *p = f; return 0; } static int perf_event_set_output(struct perf_event *event, struct perf_event *output_event); static int perf_event_set_filter(struct perf_event *event, void __user *arg); static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd); static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg) { void (*func)(struct perf_event *); u32 flags = arg; switch (cmd) { case PERF_EVENT_IOC_ENABLE: func = _perf_event_enable; break; case PERF_EVENT_IOC_DISABLE: func = _perf_event_disable; break; case PERF_EVENT_IOC_RESET: func = _perf_event_reset; break; case PERF_EVENT_IOC_REFRESH: return _perf_event_refresh(event, arg); case PERF_EVENT_IOC_PERIOD: return perf_event_period(event, (u64 __user *)arg); case PERF_EVENT_IOC_ID: { u64 id = primary_event_id(event); if (copy_to_user((void __user *)arg, &id, sizeof(id))) return -EFAULT; return 0; } case PERF_EVENT_IOC_SET_OUTPUT: { int ret; if (arg != -1) { struct perf_event *output_event; struct fd output; ret = perf_fget_light(arg, &output); if (ret) return ret; output_event = output.file->private_data; ret = perf_event_set_output(event, output_event); fdput(output); } else { ret = perf_event_set_output(event, NULL); } return ret; } case PERF_EVENT_IOC_SET_FILTER: return perf_event_set_filter(event, (void __user *)arg); case PERF_EVENT_IOC_SET_BPF: return perf_event_set_bpf_prog(event, arg); case PERF_EVENT_IOC_PAUSE_OUTPUT: { struct ring_buffer *rb; rcu_read_lock(); rb = rcu_dereference(event->rb); if (!rb || !rb->nr_pages) { rcu_read_unlock(); return -EINVAL; } rb_toggle_paused(rb, !!arg); rcu_read_unlock(); return 0; } default: return -ENOTTY; } if (flags & PERF_IOC_FLAG_GROUP) perf_event_for_each(event, func); else perf_event_for_each_child(event, func); return 0; } static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct perf_event *event = file->private_data; struct perf_event_context *ctx; long ret; ctx = perf_event_ctx_lock(event); ret = _perf_ioctl(event, cmd, arg); perf_event_ctx_unlock(event, ctx); return ret; } #ifdef CONFIG_COMPAT static long perf_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { switch (_IOC_NR(cmd)) { case _IOC_NR(PERF_EVENT_IOC_SET_FILTER): case _IOC_NR(PERF_EVENT_IOC_ID): /* Fix up pointer size (usually 4 -> 8 in 32-on-64-bit case */ if (_IOC_SIZE(cmd) == sizeof(compat_uptr_t)) { cmd &= ~IOCSIZE_MASK; cmd |= sizeof(void *) << IOCSIZE_SHIFT; } break; } return perf_ioctl(file, cmd, arg); } #else # define perf_compat_ioctl NULL #endif int perf_event_task_enable(void) { struct perf_event_context *ctx; struct perf_event *event; mutex_lock(&current->perf_event_mutex); 
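	/*
	 * Called via prctl(PR_TASK_PERF_EVENTS_ENABLE): walk every event
	 * owned by current and enable it together with all of its
	 * inherited children.
	 */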
list_for_each_entry(event, &current->perf_event_list, owner_entry) { ctx = perf_event_ctx_lock(event); perf_event_for_each_child(event, _perf_event_enable); perf_event_ctx_unlock(event, ctx); } mutex_unlock(&current->perf_event_mutex); return 0; } int perf_event_task_disable(void) { struct perf_event_context *ctx; struct perf_event *event; mutex_lock(&current->perf_event_mutex); list_for_each_entry(event, &current->perf_event_list, owner_entry) { ctx = perf_event_ctx_lock(event); perf_event_for_each_child(event, _perf_event_disable); perf_event_ctx_unlock(event, ctx); } mutex_unlock(&current->perf_event_mutex); return 0; } static int perf_event_index(struct perf_event *event) { if (event->hw.state & PERF_HES_STOPPED) return 0; if (event->state != PERF_EVENT_STATE_ACTIVE) return 0; return event->pmu->event_idx(event); } static void calc_timer_values(struct perf_event *event, u64 *now, u64 *enabled, u64 *running) { u64 ctx_time; *now = perf_clock(); ctx_time = event->shadow_ctx_time + *now; *enabled = ctx_time - event->tstamp_enabled; *running = ctx_time - event->tstamp_running; } static void perf_event_init_userpage(struct perf_event *event) { struct perf_event_mmap_page *userpg; struct ring_buffer *rb; rcu_read_lock(); rb = rcu_dereference(event->rb); if (!rb) goto unlock; userpg = rb->user_page; /* Allow new userspace to detect that bit 0 is deprecated */ userpg->cap_bit0_is_deprecated = 1; userpg->size = offsetof(struct perf_event_mmap_page, __reserved); userpg->data_offset = PAGE_SIZE; userpg->data_size = perf_data_size(rb); unlock: rcu_read_unlock(); } void __weak arch_perf_update_userpage( struct perf_event *event, struct perf_event_mmap_page *userpg, u64 now) { } /* * Callers need to ensure there can be no nesting of this function, otherwise * the seqlock logic goes bad. We can not serialize this because the arch * code calls this from NMI context. */ void perf_event_update_userpage(struct perf_event *event) { struct perf_event_mmap_page *userpg; struct ring_buffer *rb; u64 enabled, running, now; rcu_read_lock(); rb = rcu_dereference(event->rb); if (!rb) goto unlock; /* * compute total_time_enabled, total_time_running * based on snapshot values taken when the event * was last scheduled in. * * we cannot simply called update_context_time() * because of locking issue as we can be called in * NMI context */ calc_timer_values(event, &now, &enabled, &running); userpg = rb->user_page; /* * Disable preemption so as to not let the corresponding user-space * spin too long if we get preempted. 
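 *
 * User space is expected to treat ->lock as a seqcount and retry its
 * read if the value changed, roughly (a sketch along the lines of the
 * comment in include/uapi/linux/perf_event.h):
 *
 *	do {
 *		seq = pc->lock;
 *		barrier();
 *		idx = pc->index;
 *		count = pc->offset;
 *		...
 *		barrier();
 *	} while (pc->lock != seq);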
*/ preempt_disable(); ++userpg->lock; barrier(); userpg->index = perf_event_index(event); userpg->offset = perf_event_count(event); if (userpg->index) userpg->offset -= local64_read(&event->hw.prev_count); userpg->time_enabled = enabled + atomic64_read(&event->child_total_time_enabled); userpg->time_running = running + atomic64_read(&event->child_total_time_running); arch_perf_update_userpage(event, userpg, now); barrier(); ++userpg->lock; preempt_enable(); unlock: rcu_read_unlock(); } static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { struct perf_event *event = vma->vm_file->private_data; struct ring_buffer *rb; int ret = VM_FAULT_SIGBUS; if (vmf->flags & FAULT_FLAG_MKWRITE) { if (vmf->pgoff == 0) ret = 0; return ret; } rcu_read_lock(); rb = rcu_dereference(event->rb); if (!rb) goto unlock; if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE)) goto unlock; vmf->page = perf_mmap_to_page(rb, vmf->pgoff); if (!vmf->page) goto unlock; get_page(vmf->page); vmf->page->mapping = vma->vm_file->f_mapping; vmf->page->index = vmf->pgoff; ret = 0; unlock: rcu_read_unlock(); return ret; } static void ring_buffer_attach(struct perf_event *event, struct ring_buffer *rb) { struct ring_buffer *old_rb = NULL; unsigned long flags; if (event->rb) { /* * Should be impossible, we set this when removing * event->rb_entry and wait/clear when adding event->rb_entry. */ WARN_ON_ONCE(event->rcu_pending); old_rb = event->rb; spin_lock_irqsave(&old_rb->event_lock, flags); list_del_rcu(&event->rb_entry); spin_unlock_irqrestore(&old_rb->event_lock, flags); event->rcu_batches = get_state_synchronize_rcu(); event->rcu_pending = 1; } if (rb) { if (event->rcu_pending) { cond_synchronize_rcu(event->rcu_batches); event->rcu_pending = 0; } spin_lock_irqsave(&rb->event_lock, flags); list_add_rcu(&event->rb_entry, &rb->event_list); spin_unlock_irqrestore(&rb->event_lock, flags); } /* * Avoid racing with perf_mmap_close(AUX): stop the event * before swizzling the event::rb pointer; if it's getting * unmapped, its aux_mmap_count will be 0 and it won't * restart. See the comment in __perf_pmu_output_stop(). * * Data will inevitably be lost when set_output is done in * mid-air, but then again, whoever does it like this is * not in for the data anyway. */ if (has_aux(event)) perf_event_stop(event, 0); rcu_assign_pointer(event->rb, rb); if (old_rb) { ring_buffer_put(old_rb); /* * Since we detached before setting the new rb, so that we * could attach the new rb, we could have missed a wakeup. * Provide it now. 
*/ wake_up_all(&event->waitq); } } static void ring_buffer_wakeup(struct perf_event *event) { struct ring_buffer *rb; rcu_read_lock(); rb = rcu_dereference(event->rb); if (rb) { list_for_each_entry_rcu(event, &rb->event_list, rb_entry) wake_up_all(&event->waitq); } rcu_read_unlock(); } struct ring_buffer *ring_buffer_get(struct perf_event *event) { struct ring_buffer *rb; rcu_read_lock(); rb = rcu_dereference(event->rb); if (rb) { if (!atomic_inc_not_zero(&rb->refcount)) rb = NULL; } rcu_read_unlock(); return rb; } void ring_buffer_put(struct ring_buffer *rb) { if (!atomic_dec_and_test(&rb->refcount)) return; WARN_ON_ONCE(!list_empty(&rb->event_list)); call_rcu(&rb->rcu_head, rb_free_rcu); } static void perf_mmap_open(struct vm_area_struct *vma) { struct perf_event *event = vma->vm_file->private_data; atomic_inc(&event->mmap_count); atomic_inc(&event->rb->mmap_count); if (vma->vm_pgoff) atomic_inc(&event->rb->aux_mmap_count); if (event->pmu->event_mapped) event->pmu->event_mapped(event); } static void perf_pmu_output_stop(struct perf_event *event); /* * A buffer can be mmap()ed multiple times; either directly through the same * event, or through other events by use of perf_event_set_output(). * * In order to undo the VM accounting done by perf_mmap() we need to destroy * the buffer here, where we still have a VM context. This means we need * to detach all events redirecting to us. */ static void perf_mmap_close(struct vm_area_struct *vma) { struct perf_event *event = vma->vm_file->private_data; struct ring_buffer *rb = ring_buffer_get(event); struct user_struct *mmap_user = rb->mmap_user; int mmap_locked = rb->mmap_locked; unsigned long size = perf_data_size(rb); if (event->pmu->event_unmapped) event->pmu->event_unmapped(event); /* * rb->aux_mmap_count will always drop before rb->mmap_count and * event->mmap_count, so it is ok to use event->mmap_mutex to * serialize with perf_mmap here. */ if (rb_has_aux(rb) && vma->vm_pgoff == rb->aux_pgoff && atomic_dec_and_mutex_lock(&rb->aux_mmap_count, &event->mmap_mutex)) { /* * Stop all AUX events that are writing to this buffer, * so that we can free its AUX pages and corresponding PMU * data. Note that after rb::aux_mmap_count dropped to zero, * they won't start any more (see perf_aux_output_begin()). */ perf_pmu_output_stop(event); /* now it's safe to free the pages */ atomic_long_sub(rb->aux_nr_pages, &mmap_user->locked_vm); vma->vm_mm->pinned_vm -= rb->aux_mmap_locked; /* this has to be the last one */ rb_free_aux(rb); WARN_ON_ONCE(atomic_read(&rb->aux_refcount)); mutex_unlock(&event->mmap_mutex); } atomic_dec(&rb->mmap_count); if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) goto out_put; ring_buffer_attach(event, NULL); mutex_unlock(&event->mmap_mutex); /* If there's still other mmap()s of this buffer, we're done. */ if (atomic_read(&rb->mmap_count)) goto out_put; /* * No other mmap()s, detach from all other events that might redirect * into the now unreachable buffer. Somewhat complicated by the * fact that rb::event_lock otherwise nests inside mmap_mutex. */ again: rcu_read_lock(); list_for_each_entry_rcu(event, &rb->event_list, rb_entry) { if (!atomic_long_inc_not_zero(&event->refcount)) { /* * This event is en-route to free_event() which will * detach it and remove it from the list. */ continue; } rcu_read_unlock(); mutex_lock(&event->mmap_mutex); /* * Check we didn't race with perf_event_set_output() which can * swizzle the rb from under us while we were waiting to * acquire mmap_mutex. 
* * If we find a different rb; ignore this event, a next * iteration will no longer find it on the list. We have to * still restart the iteration to make sure we're not now * iterating the wrong list. */ if (event->rb == rb) ring_buffer_attach(event, NULL); mutex_unlock(&event->mmap_mutex); put_event(event); /* * Restart the iteration; either we're on the wrong list or * destroyed its integrity by doing a deletion. */ goto again; } rcu_read_unlock(); /* * It could be there's still a few 0-ref events on the list; they'll * get cleaned up by free_event() -- they'll also still have their * ref on the rb and will free it whenever they are done with it. * * Aside from that, this buffer is 'fully' detached and unmapped, * undo the VM accounting. */ atomic_long_sub((size >> PAGE_SHIFT) + 1, &mmap_user->locked_vm); vma->vm_mm->pinned_vm -= mmap_locked; free_uid(mmap_user); out_put: ring_buffer_put(rb); /* could be last */ } static const struct vm_operations_struct perf_mmap_vmops = { .open = perf_mmap_open, .close = perf_mmap_close, /* non mergable */ .fault = perf_mmap_fault, .page_mkwrite = perf_mmap_fault, }; static int perf_mmap(struct file *file, struct vm_area_struct *vma) { struct perf_event *event = file->private_data; unsigned long user_locked, user_lock_limit; struct user_struct *user = current_user(); unsigned long locked, lock_limit; struct ring_buffer *rb = NULL; unsigned long vma_size; unsigned long nr_pages; long user_extra = 0, extra = 0; int ret = 0, flags = 0; /* * Don't allow mmap() of inherited per-task counters. This would * create a performance issue due to all children writing to the * same rb. */ if (event->cpu == -1 && event->attr.inherit) return -EINVAL; if (!(vma->vm_flags & VM_SHARED)) return -EINVAL; vma_size = vma->vm_end - vma->vm_start; if (vma->vm_pgoff == 0) { nr_pages = (vma_size / PAGE_SIZE) - 1; } else { /* * AUX area mapping: if rb->aux_nr_pages != 0, it's already * mapped, all subsequent mappings should have the same size * and offset. Must be above the normal perf buffer. */ u64 aux_offset, aux_size; if (!event->rb) return -EINVAL; nr_pages = vma_size / PAGE_SIZE; mutex_lock(&event->mmap_mutex); ret = -EINVAL; rb = event->rb; if (!rb) goto aux_unlock; aux_offset = ACCESS_ONCE(rb->user_page->aux_offset); aux_size = ACCESS_ONCE(rb->user_page->aux_size); if (aux_offset < perf_data_size(rb) + PAGE_SIZE) goto aux_unlock; if (aux_offset != vma->vm_pgoff << PAGE_SHIFT) goto aux_unlock; /* already mapped with a different offset */ if (rb_has_aux(rb) && rb->aux_pgoff != vma->vm_pgoff) goto aux_unlock; if (aux_size != vma_size || aux_size != nr_pages * PAGE_SIZE) goto aux_unlock; /* already mapped with a different size */ if (rb_has_aux(rb) && rb->aux_nr_pages != nr_pages) goto aux_unlock; if (!is_power_of_2(nr_pages)) goto aux_unlock; if (!atomic_inc_not_zero(&rb->mmap_count)) goto aux_unlock; if (rb_has_aux(rb)) { atomic_inc(&rb->aux_mmap_count); ret = 0; goto unlock; } atomic_set(&rb->aux_mmap_count, 1); user_extra = nr_pages; goto accounting; } /* * If we have rb pages ensure they're a power-of-two number, so we * can do bitmasks instead of modulo. */ if (nr_pages != 0 && !is_power_of_2(nr_pages)) return -EINVAL; if (vma_size != PAGE_SIZE * (1 + nr_pages)) return -EINVAL; WARN_ON_ONCE(event->ctx->parent_ctx); again: mutex_lock(&event->mmap_mutex); if (event->rb) { if (event->rb->nr_pages != nr_pages) { ret = -EINVAL; goto unlock; } if (!atomic_inc_not_zero(&event->rb->mmap_count)) { /* * Raced against perf_mmap_close() through * perf_event_set_output(). 
Try again, hope for better * luck. */ mutex_unlock(&event->mmap_mutex); goto again; } goto unlock; } user_extra = nr_pages + 1; accounting: user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10); /* * Increase the limit linearly with more CPUs: */ user_lock_limit *= num_online_cpus(); user_locked = atomic_long_read(&user->locked_vm) + user_extra; if (user_locked > user_lock_limit) extra = user_locked - user_lock_limit; lock_limit = rlimit(RLIMIT_MEMLOCK); lock_limit >>= PAGE_SHIFT; locked = vma->vm_mm->pinned_vm + extra; if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() && !capable(CAP_IPC_LOCK)) { ret = -EPERM; goto unlock; } WARN_ON(!rb && event->rb); if (vma->vm_flags & VM_WRITE) flags |= RING_BUFFER_WRITABLE; if (!rb) { rb = rb_alloc(nr_pages, event->attr.watermark ? event->attr.wakeup_watermark : 0, event->cpu, flags); if (!rb) { ret = -ENOMEM; goto unlock; } atomic_set(&rb->mmap_count, 1); rb->mmap_user = get_current_user(); rb->mmap_locked = extra; ring_buffer_attach(event, rb); perf_event_init_userpage(event); perf_event_update_userpage(event); } else { ret = rb_alloc_aux(rb, event, vma->vm_pgoff, nr_pages, event->attr.aux_watermark, flags); if (!ret) rb->aux_mmap_locked = extra; } unlock: if (!ret) { atomic_long_add(user_extra, &user->locked_vm); vma->vm_mm->pinned_vm += extra; atomic_inc(&event->mmap_count); } else if (rb) { atomic_dec(&rb->mmap_count); } aux_unlock: mutex_unlock(&event->mmap_mutex); /* * Since pinned accounting is per vm we cannot allow fork() to copy our * vma. */ vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP; vma->vm_ops = &perf_mmap_vmops; if (event->pmu->event_mapped) event->pmu->event_mapped(event); return ret; } static int perf_fasync(int fd, struct file *filp, int on) { struct inode *inode = file_inode(filp); struct perf_event *event = filp->private_data; int retval; inode_lock(inode); retval = fasync_helper(fd, filp, on, &event->fasync); inode_unlock(inode); if (retval < 0) return retval; return 0; } static const struct file_operations perf_fops = { .llseek = no_llseek, .release = perf_release, .read = perf_read, .poll = perf_poll, .unlocked_ioctl = perf_ioctl, .compat_ioctl = perf_compat_ioctl, .mmap = perf_mmap, .fasync = perf_fasync, }; /* * Perf event wakeup * * If there's data, ensure we set the poll() state and publish everything * to user-space before waking everybody up. */ static inline struct fasync_struct **perf_event_fasync(struct perf_event *event) { /* only the parent has fasync state */ if (event->parent) event = event->parent; return &event->fasync; } void perf_event_wakeup(struct perf_event *event) { ring_buffer_wakeup(event); if (event->pending_kill) { kill_fasync(perf_event_fasync(event), SIGIO, event->pending_kill); event->pending_kill = 0; } } static void perf_pending_event(struct irq_work *entry) { struct perf_event *event = container_of(entry, struct perf_event, pending); int rctx; rctx = perf_swevent_get_recursion_context(); /* * If we 'fail' here, that's OK, it means recursion is already disabled * and we won't recurse 'further'. */ if (event->pending_disable) { event->pending_disable = 0; perf_event_disable_local(event); } if (event->pending_wakeup) { event->pending_wakeup = 0; perf_event_wakeup(event); } if (rctx >= 0) perf_swevent_put_recursion_context(rctx); } /* * We assume there is only KVM supporting the callbacks. * Later on, we might change it to a list if there is * another virtualization implementation supporting the callbacks. 
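 *
 * KVM registers these callbacks at init time so that samples taken
 * while a vCPU is running can be flagged as guest rather than host
 * hits and report the guest instruction pointer.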
*/ struct perf_guest_info_callbacks *perf_guest_cbs; int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs) { perf_guest_cbs = cbs; return 0; } EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks); int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs) { perf_guest_cbs = NULL; return 0; } EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks); static void perf_output_sample_regs(struct perf_output_handle *handle, struct pt_regs *regs, u64 mask) { int bit; DECLARE_BITMAP(_mask, 64); bitmap_from_u64(_mask, mask); for_each_set_bit(bit, _mask, sizeof(mask) * BITS_PER_BYTE) { u64 val; val = perf_reg_value(regs, bit); perf_output_put(handle, val); } } static void perf_sample_regs_user(struct perf_regs *regs_user, struct pt_regs *regs, struct pt_regs *regs_user_copy) { if (user_mode(regs)) { regs_user->abi = perf_reg_abi(current); regs_user->regs = regs; } else if (current->mm) { perf_get_regs_user(regs_user, regs, regs_user_copy); } else { regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE; regs_user->regs = NULL; } } static void perf_sample_regs_intr(struct perf_regs *regs_intr, struct pt_regs *regs) { regs_intr->regs = regs; regs_intr->abi = perf_reg_abi(current); } /* * Get remaining task size from user stack pointer. * * It'd be better to take stack vma map and limit this more * precisly, but there's no way to get it safely under interrupt, * so using TASK_SIZE as limit. */ static u64 perf_ustack_task_size(struct pt_regs *regs) { unsigned long addr = perf_user_stack_pointer(regs); if (!addr || addr >= TASK_SIZE) return 0; return TASK_SIZE - addr; } static u16 perf_sample_ustack_size(u16 stack_size, u16 header_size, struct pt_regs *regs) { u64 task_size; /* No regs, no stack pointer, no dump. */ if (!regs) return 0; /* * Check if we fit in with the requested stack size into the: * - TASK_SIZE * If we don't, we limit the size to the TASK_SIZE. * * - remaining sample size * If we don't, we customize the stack size to * fit in to the remaining sample size. */ task_size = min((u64) USHRT_MAX, perf_ustack_task_size(regs)); stack_size = min(stack_size, (u16) task_size); /* Current header size plus static size and dynamic size. */ header_size += 2 * sizeof(u64); /* Do we fit in with the current stack dump size? */ if ((u16) (header_size + stack_size) < header_size) { /* * If we overflow the maximum size for the sample, * we customize the stack dump size to fit in. */ stack_size = USHRT_MAX - header_size - sizeof(u64); stack_size = round_up(stack_size, sizeof(u64)); } return stack_size; } static void perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size, struct pt_regs *regs) { /* Case of a kernel thread, nothing to dump */ if (!regs) { u64 size = 0; perf_output_put(handle, size); } else { unsigned long sp; unsigned int rem; u64 dyn_size; /* * We dump: * static size * - the size requested by user or the best one we can fit * in to the sample max size * data * - user stack dump data * dynamic size * - the actual dumped size */ /* Static size. */ perf_output_put(handle, dump_size); /* Data. */ sp = perf_user_stack_pointer(regs); rem = __output_copy_user(handle, (void *) sp, dump_size); dyn_size = dump_size - rem; perf_output_skip(handle, rem); /* Dynamic size. 
*/ perf_output_put(handle, dyn_size); } } static void __perf_event_header__init_id(struct perf_event_header *header, struct perf_sample_data *data, struct perf_event *event) { u64 sample_type = event->attr.sample_type; data->type = sample_type; header->size += event->id_header_size; if (sample_type & PERF_SAMPLE_TID) { /* namespace issues */ data->tid_entry.pid = perf_event_pid(event, current); data->tid_entry.tid = perf_event_tid(event, current); } if (sample_type & PERF_SAMPLE_TIME) data->time = perf_event_clock(event); if (sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER)) data->id = primary_event_id(event); if (sample_type & PERF_SAMPLE_STREAM_ID) data->stream_id = event->id; if (sample_type & PERF_SAMPLE_CPU) { data->cpu_entry.cpu = raw_smp_processor_id(); data->cpu_entry.reserved = 0; } } void perf_event_header__init_id(struct perf_event_header *header, struct perf_sample_data *data, struct perf_event *event) { if (event->attr.sample_id_all) __perf_event_header__init_id(header, data, event); } static void __perf_event__output_id_sample(struct perf_output_handle *handle, struct perf_sample_data *data) { u64 sample_type = data->type; if (sample_type & PERF_SAMPLE_TID) perf_output_put(handle, data->tid_entry); if (sample_type & PERF_SAMPLE_TIME) perf_output_put(handle, data->time); if (sample_type & PERF_SAMPLE_ID) perf_output_put(handle, data->id); if (sample_type & PERF_SAMPLE_STREAM_ID) perf_output_put(handle, data->stream_id); if (sample_type & PERF_SAMPLE_CPU) perf_output_put(handle, data->cpu_entry); if (sample_type & PERF_SAMPLE_IDENTIFIER) perf_output_put(handle, data->id); } void perf_event__output_id_sample(struct perf_event *event, struct perf_output_handle *handle, struct perf_sample_data *sample) { if (event->attr.sample_id_all) __perf_event__output_id_sample(handle, sample); } static void perf_output_read_one(struct perf_output_handle *handle, struct perf_event *event, u64 enabled, u64 running) { u64 read_format = event->attr.read_format; u64 values[4]; int n = 0; values[n++] = perf_event_count(event); if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { values[n++] = enabled + atomic64_read(&event->child_total_time_enabled); } if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { values[n++] = running + atomic64_read(&event->child_total_time_running); } if (read_format & PERF_FORMAT_ID) values[n++] = primary_event_id(event); __output_copy(handle, values, n * sizeof(u64)); } /* * XXX PERF_FORMAT_GROUP vs inherited events seems difficult. 
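 * Inherited events keep their counts spread over the child list,
 * which cannot be walked from this (possibly NMI) output path, so
 * only the local group leader and siblings are read below.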
*/ static void perf_output_read_group(struct perf_output_handle *handle, struct perf_event *event, u64 enabled, u64 running) { struct perf_event *leader = event->group_leader, *sub; u64 read_format = event->attr.read_format; u64 values[5]; int n = 0; values[n++] = 1 + leader->nr_siblings; if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) values[n++] = enabled; if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) values[n++] = running; if (leader != event) leader->pmu->read(leader); values[n++] = perf_event_count(leader); if (read_format & PERF_FORMAT_ID) values[n++] = primary_event_id(leader); __output_copy(handle, values, n * sizeof(u64)); list_for_each_entry(sub, &leader->sibling_list, group_entry) { n = 0; if ((sub != event) && (sub->state == PERF_EVENT_STATE_ACTIVE)) sub->pmu->read(sub); values[n++] = perf_event_count(sub); if (read_format & PERF_FORMAT_ID) values[n++] = primary_event_id(sub); __output_copy(handle, values, n * sizeof(u64)); } } #define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\ PERF_FORMAT_TOTAL_TIME_RUNNING) static void perf_output_read(struct perf_output_handle *handle, struct perf_event *event) { u64 enabled = 0, running = 0, now; u64 read_format = event->attr.read_format; /* * compute total_time_enabled, total_time_running * based on snapshot values taken when the event * was last scheduled in. * * we cannot simply called update_context_time() * because of locking issue as we are called in * NMI context */ if (read_format & PERF_FORMAT_TOTAL_TIMES) calc_timer_values(event, &now, &enabled, &running); if (event->attr.read_format & PERF_FORMAT_GROUP) perf_output_read_group(handle, event, enabled, running); else perf_output_read_one(handle, event, enabled, running); } void perf_output_sample(struct perf_output_handle *handle, struct perf_event_header *header, struct perf_sample_data *data, struct perf_event *event) { u64 sample_type = data->type; perf_output_put(handle, *header); if (sample_type & PERF_SAMPLE_IDENTIFIER) perf_output_put(handle, data->id); if (sample_type & PERF_SAMPLE_IP) perf_output_put(handle, data->ip); if (sample_type & PERF_SAMPLE_TID) perf_output_put(handle, data->tid_entry); if (sample_type & PERF_SAMPLE_TIME) perf_output_put(handle, data->time); if (sample_type & PERF_SAMPLE_ADDR) perf_output_put(handle, data->addr); if (sample_type & PERF_SAMPLE_ID) perf_output_put(handle, data->id); if (sample_type & PERF_SAMPLE_STREAM_ID) perf_output_put(handle, data->stream_id); if (sample_type & PERF_SAMPLE_CPU) perf_output_put(handle, data->cpu_entry); if (sample_type & PERF_SAMPLE_PERIOD) perf_output_put(handle, data->period); if (sample_type & PERF_SAMPLE_READ) perf_output_read(handle, event); if (sample_type & PERF_SAMPLE_CALLCHAIN) { if (data->callchain) { int size = 1; if (data->callchain) size += data->callchain->nr; size *= sizeof(u64); __output_copy(handle, data->callchain, size); } else { u64 nr = 0; perf_output_put(handle, nr); } } if (sample_type & PERF_SAMPLE_RAW) { struct perf_raw_record *raw = data->raw; if (raw) { struct perf_raw_frag *frag = &raw->frag; perf_output_put(handle, raw->size); do { if (frag->copy) { __output_custom(handle, frag->copy, frag->data, frag->size); } else { __output_copy(handle, frag->data, frag->size); } if (perf_raw_frag_last(frag)) break; frag = frag->next; } while (1); if (frag->pad) __output_skip(handle, NULL, frag->pad); } else { struct { u32 size; u32 data; } raw = { .size = sizeof(u32), .data = 0, }; perf_output_put(handle, raw); } } if (sample_type & PERF_SAMPLE_BRANCH_STACK) { if 
(data->br_stack) { size_t size; size = data->br_stack->nr * sizeof(struct perf_branch_entry); perf_output_put(handle, data->br_stack->nr); perf_output_copy(handle, data->br_stack->entries, size); } else { /* * we always store at least the value of nr */ u64 nr = 0; perf_output_put(handle, nr); } } if (sample_type & PERF_SAMPLE_REGS_USER) { u64 abi = data->regs_user.abi; /* * If there are no regs to dump, notice it through * first u64 being zero (PERF_SAMPLE_REGS_ABI_NONE). */ perf_output_put(handle, abi); if (abi) { u64 mask = event->attr.sample_regs_user; perf_output_sample_regs(handle, data->regs_user.regs, mask); } } if (sample_type & PERF_SAMPLE_STACK_USER) { perf_output_sample_ustack(handle, data->stack_user_size, data->regs_user.regs); } if (sample_type & PERF_SAMPLE_WEIGHT) perf_output_put(handle, data->weight); if (sample_type & PERF_SAMPLE_DATA_SRC) perf_output_put(handle, data->data_src.val); if (sample_type & PERF_SAMPLE_TRANSACTION) perf_output_put(handle, data->txn); if (sample_type & PERF_SAMPLE_REGS_INTR) { u64 abi = data->regs_intr.abi; /* * If there are no regs to dump, notice it through * first u64 being zero (PERF_SAMPLE_REGS_ABI_NONE). */ perf_output_put(handle, abi); if (abi) { u64 mask = event->attr.sample_regs_intr; perf_output_sample_regs(handle, data->regs_intr.regs, mask); } } if (!event->attr.watermark) { int wakeup_events = event->attr.wakeup_events; if (wakeup_events) { struct ring_buffer *rb = handle->rb; int events = local_inc_return(&rb->events); if (events >= wakeup_events) { local_sub(wakeup_events, &rb->events); local_inc(&rb->wakeup); } } } } void perf_prepare_sample(struct perf_event_header *header, struct perf_sample_data *data, struct perf_event *event, struct pt_regs *regs) { u64 sample_type = event->attr.sample_type; header->type = PERF_RECORD_SAMPLE; header->size = sizeof(*header) + event->header_size; header->misc = 0; header->misc |= perf_misc_flags(regs); __perf_event_header__init_id(header, data, event); if (sample_type & PERF_SAMPLE_IP) data->ip = perf_instruction_pointer(regs); if (sample_type & PERF_SAMPLE_CALLCHAIN) { int size = 1; data->callchain = perf_callchain(event, regs); if (data->callchain) size += data->callchain->nr; header->size += size * sizeof(u64); } if (sample_type & PERF_SAMPLE_RAW) { struct perf_raw_record *raw = data->raw; int size; if (raw) { struct perf_raw_frag *frag = &raw->frag; u32 sum = 0; do { sum += frag->size; if (perf_raw_frag_last(frag)) break; frag = frag->next; } while (1); size = round_up(sum + sizeof(u32), sizeof(u64)); raw->size = size - sizeof(u32); frag->pad = raw->size - sum; } else { size = sizeof(u64); } header->size += size; } if (sample_type & PERF_SAMPLE_BRANCH_STACK) { int size = sizeof(u64); /* nr */ if (data->br_stack) { size += data->br_stack->nr * sizeof(struct perf_branch_entry); } header->size += size; } if (sample_type & (PERF_SAMPLE_REGS_USER | PERF_SAMPLE_STACK_USER)) perf_sample_regs_user(&data->regs_user, regs, &data->regs_user_copy); if (sample_type & PERF_SAMPLE_REGS_USER) { /* regs dump ABI info */ int size = sizeof(u64); if (data->regs_user.regs) { u64 mask = event->attr.sample_regs_user; size += hweight64(mask) * sizeof(u64); } header->size += size; } if (sample_type & PERF_SAMPLE_STACK_USER) { /* * Either we need PERF_SAMPLE_STACK_USER bit to be allways * processed as the last one or have additional check added * in case new sample type is added, because we could eat * up the rest of the sample size. 
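 * perf_sample_ustack_size() below sizes the dump against the
 * header->size accumulated so far, which is why the ordering matters.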
*/ u16 stack_size = event->attr.sample_stack_user; u16 size = sizeof(u64); stack_size = perf_sample_ustack_size(stack_size, header->size, data->regs_user.regs); /* * If there is something to dump, add space for the dump * itself and for the field that tells the dynamic size, * which is how many have been actually dumped. */ if (stack_size) size += sizeof(u64) + stack_size; data->stack_user_size = stack_size; header->size += size; } if (sample_type & PERF_SAMPLE_REGS_INTR) { /* regs dump ABI info */ int size = sizeof(u64); perf_sample_regs_intr(&data->regs_intr, regs); if (data->regs_intr.regs) { u64 mask = event->attr.sample_regs_intr; size += hweight64(mask) * sizeof(u64); } header->size += size; } } static void __always_inline __perf_event_output(struct perf_event *event, struct perf_sample_data *data, struct pt_regs *regs, int (*output_begin)(struct perf_output_handle *, struct perf_event *, unsigned int)) { struct perf_output_handle handle; struct perf_event_header header; /* protect the callchain buffers */ rcu_read_lock(); perf_prepare_sample(&header, data, event, regs); if (output_begin(&handle, event, header.size)) goto exit; perf_output_sample(&handle, &header, data, event); perf_output_end(&handle); exit: rcu_read_unlock(); } void perf_event_output_forward(struct perf_event *event, struct perf_sample_data *data, struct pt_regs *regs) { __perf_event_output(event, data, regs, perf_output_begin_forward); } void perf_event_output_backward(struct perf_event *event, struct perf_sample_data *data, struct pt_regs *regs) { __perf_event_output(event, data, regs, perf_output_begin_backward); } void perf_event_output(struct perf_event *event, struct perf_sample_data *data, struct pt_regs *regs) { __perf_event_output(event, data, regs, perf_output_begin); } /* * read event_id */ struct perf_read_event { struct perf_event_header header; u32 pid; u32 tid; }; static void perf_event_read_event(struct perf_event *event, struct task_struct *task) { struct perf_output_handle handle; struct perf_sample_data sample; struct perf_read_event read_event = { .header = { .type = PERF_RECORD_READ, .misc = 0, .size = sizeof(read_event) + event->read_size, }, .pid = perf_event_pid(event, task), .tid = perf_event_tid(event, task), }; int ret; perf_event_header__init_id(&read_event.header, &sample, event); ret = perf_output_begin(&handle, event, read_event.header.size); if (ret) return; perf_output_put(&handle, read_event); perf_output_read(&handle, event); perf_event__output_id_sample(event, &handle, &sample); perf_output_end(&handle); } typedef void (perf_iterate_f)(struct perf_event *event, void *data); static void perf_iterate_ctx(struct perf_event_context *ctx, perf_iterate_f output, void *data, bool all) { struct perf_event *event; list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { if (!all) { if (event->state < PERF_EVENT_STATE_INACTIVE) continue; if (!event_filter_match(event)) continue; } output(event, data); } } static void perf_iterate_sb_cpu(perf_iterate_f output, void *data) { struct pmu_event_list *pel = this_cpu_ptr(&pmu_sb_events); struct perf_event *event; list_for_each_entry_rcu(event, &pel->list, sb_list) { /* * Skip events that are not fully formed yet; ensure that * if we observe event->ctx, both event and ctx will be * complete enough. See perf_install_in_context(). 
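 * (The load-acquire below is assumed to pair with the store-release of
 * event->ctx performed in perf_install_in_context().)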
*/ if (!smp_load_acquire(&event->ctx)) continue; if (event->state < PERF_EVENT_STATE_INACTIVE) continue; if (!event_filter_match(event)) continue; output(event, data); } } /* * Iterate all events that need to receive side-band events. * * For new callers; ensure that account_pmu_sb_event() includes * your event, otherwise it might not get delivered. */ static void perf_iterate_sb(perf_iterate_f output, void *data, struct perf_event_context *task_ctx) { struct perf_event_context *ctx; int ctxn; rcu_read_lock(); preempt_disable(); /* * If we have task_ctx != NULL we only notify the task context itself. * The task_ctx is set only for EXIT events before releasing task * context. */ if (task_ctx) { perf_iterate_ctx(task_ctx, output, data, false); goto done; } perf_iterate_sb_cpu(output, data); for_each_task_context_nr(ctxn) { ctx = rcu_dereference(current->perf_event_ctxp[ctxn]); if (ctx) perf_iterate_ctx(ctx, output, data, false); } done: preempt_enable(); rcu_read_unlock(); } /* * Clear all file-based filters at exec, they'll have to be * re-instated when/if these objects are mmapped again. */ static void perf_event_addr_filters_exec(struct perf_event *event, void *data) { struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); struct perf_addr_filter *filter; unsigned int restart = 0, count = 0; unsigned long flags; if (!has_addr_filter(event)) return; raw_spin_lock_irqsave(&ifh->lock, flags); list_for_each_entry(filter, &ifh->list, entry) { if (filter->inode) { event->addr_filters_offs[count] = 0; restart++; } count++; } if (restart) event->addr_filters_gen++; raw_spin_unlock_irqrestore(&ifh->lock, flags); if (restart) perf_event_stop(event, 1); } void perf_event_exec(void) { struct perf_event_context *ctx; int ctxn; rcu_read_lock(); for_each_task_context_nr(ctxn) { ctx = current->perf_event_ctxp[ctxn]; if (!ctx) continue; perf_event_enable_on_exec(ctxn); perf_iterate_ctx(ctx, perf_event_addr_filters_exec, NULL, true); } rcu_read_unlock(); } struct remote_output { struct ring_buffer *rb; int err; }; static void __perf_event_output_stop(struct perf_event *event, void *data) { struct perf_event *parent = event->parent; struct remote_output *ro = data; struct ring_buffer *rb = ro->rb; struct stop_event_data sd = { .event = event, }; if (!has_aux(event)) return; if (!parent) parent = event; /* * In case of inheritance, it will be the parent that links to the * ring-buffer, but it will be the child that's actually using it. * * We are using event::rb to determine if the event should be stopped, * however this may race with ring_buffer_attach() (through set_output), * which will make us skip the event that actually needs to be stopped. * So ring_buffer_attach() has to stop an aux event before re-assigning * its rb pointer. 
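 * Hence the check below: the event is only stopped while its (parent's)
 * rb pointer still refers to the ring buffer we were handed.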
*/ if (rcu_dereference(parent->rb) == rb) ro->err = __perf_event_stop(&sd); } static int __perf_pmu_output_stop(void *info) { struct perf_event *event = info; struct pmu *pmu = event->pmu; struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); struct remote_output ro = { .rb = event->rb, }; rcu_read_lock(); perf_iterate_ctx(&cpuctx->ctx, __perf_event_output_stop, &ro, false); if (cpuctx->task_ctx) perf_iterate_ctx(cpuctx->task_ctx, __perf_event_output_stop, &ro, false); rcu_read_unlock(); return ro.err; } static void perf_pmu_output_stop(struct perf_event *event) { struct perf_event *iter; int err, cpu; restart: rcu_read_lock(); list_for_each_entry_rcu(iter, &event->rb->event_list, rb_entry) { /* * For per-CPU events, we need to make sure that neither they * nor their children are running; for cpu==-1 events it's * sufficient to stop the event itself if it's active, since * it can't have children. */ cpu = iter->cpu; if (cpu == -1) cpu = READ_ONCE(iter->oncpu); if (cpu == -1) continue; err = cpu_function_call(cpu, __perf_pmu_output_stop, event); if (err == -EAGAIN) { rcu_read_unlock(); goto restart; } } rcu_read_unlock(); } /* * task tracking -- fork/exit * * enabled by: attr.comm | attr.mmap | attr.mmap2 | attr.mmap_data | attr.task */ struct perf_task_event { struct task_struct *task; struct perf_event_context *task_ctx; struct { struct perf_event_header header; u32 pid; u32 ppid; u32 tid; u32 ptid; u64 time; } event_id; }; static int perf_event_task_match(struct perf_event *event) { return event->attr.comm || event->attr.mmap || event->attr.mmap2 || event->attr.mmap_data || event->attr.task; } static void perf_event_task_output(struct perf_event *event, void *data) { struct perf_task_event *task_event = data; struct perf_output_handle handle; struct perf_sample_data sample; struct task_struct *task = task_event->task; int ret, size = task_event->event_id.header.size; if (!perf_event_task_match(event)) return; perf_event_header__init_id(&task_event->event_id.header, &sample, event); ret = perf_output_begin(&handle, event, task_event->event_id.header.size); if (ret) goto out; task_event->event_id.pid = perf_event_pid(event, task); task_event->event_id.ppid = perf_event_pid(event, current); task_event->event_id.tid = perf_event_tid(event, task); task_event->event_id.ptid = perf_event_tid(event, current); task_event->event_id.time = perf_event_clock(event); perf_output_put(&handle, task_event->event_id); perf_event__output_id_sample(event, &handle, &sample); perf_output_end(&handle); out: task_event->event_id.header.size = size; } static void perf_event_task(struct task_struct *task, struct perf_event_context *task_ctx, int new) { struct perf_task_event task_event; if (!atomic_read(&nr_comm_events) && !atomic_read(&nr_mmap_events) && !atomic_read(&nr_task_events)) return; task_event = (struct perf_task_event){ .task = task, .task_ctx = task_ctx, .event_id = { .header = { .type = new ? 
PERF_RECORD_FORK : PERF_RECORD_EXIT, .misc = 0, .size = sizeof(task_event.event_id), }, /* .pid */ /* .ppid */ /* .tid */ /* .ptid */ /* .time */ }, }; perf_iterate_sb(perf_event_task_output, &task_event, task_ctx); } void perf_event_fork(struct task_struct *task) { perf_event_task(task, NULL, 1); } /* * comm tracking */ struct perf_comm_event { struct task_struct *task; char *comm; int comm_size; struct { struct perf_event_header header; u32 pid; u32 tid; } event_id; }; static int perf_event_comm_match(struct perf_event *event) { return event->attr.comm; } static void perf_event_comm_output(struct perf_event *event, void *data) { struct perf_comm_event *comm_event = data; struct perf_output_handle handle; struct perf_sample_data sample; int size = comm_event->event_id.header.size; int ret; if (!perf_event_comm_match(event)) return; perf_event_header__init_id(&comm_event->event_id.header, &sample, event); ret = perf_output_begin(&handle, event, comm_event->event_id.header.size); if (ret) goto out; comm_event->event_id.pid = perf_event_pid(event, comm_event->task); comm_event->event_id.tid = perf_event_tid(event, comm_event->task); perf_output_put(&handle, comm_event->event_id); __output_copy(&handle, comm_event->comm, comm_event->comm_size); perf_event__output_id_sample(event, &handle, &sample); perf_output_end(&handle); out: comm_event->event_id.header.size = size; } static void perf_event_comm_event(struct perf_comm_event *comm_event) { char comm[TASK_COMM_LEN]; unsigned int size; memset(comm, 0, sizeof(comm)); strlcpy(comm, comm_event->task->comm, sizeof(comm)); size = ALIGN(strlen(comm)+1, sizeof(u64)); comm_event->comm = comm; comm_event->comm_size = size; comm_event->event_id.header.size = sizeof(comm_event->event_id) + size; perf_iterate_sb(perf_event_comm_output, comm_event, NULL); } void perf_event_comm(struct task_struct *task, bool exec) { struct perf_comm_event comm_event; if (!atomic_read(&nr_comm_events)) return; comm_event = (struct perf_comm_event){ .task = task, /* .comm */ /* .comm_size */ .event_id = { .header = { .type = PERF_RECORD_COMM, .misc = exec ? 
PERF_RECORD_MISC_COMM_EXEC : 0, /* .size */ }, /* .pid */ /* .tid */ }, }; perf_event_comm_event(&comm_event); } /* * mmap tracking */ struct perf_mmap_event { struct vm_area_struct *vma; const char *file_name; int file_size; int maj, min; u64 ino; u64 ino_generation; u32 prot, flags; struct { struct perf_event_header header; u32 pid; u32 tid; u64 start; u64 len; u64 pgoff; } event_id; }; static int perf_event_mmap_match(struct perf_event *event, void *data) { struct perf_mmap_event *mmap_event = data; struct vm_area_struct *vma = mmap_event->vma; int executable = vma->vm_flags & VM_EXEC; return (!executable && event->attr.mmap_data) || (executable && (event->attr.mmap || event->attr.mmap2)); } static void perf_event_mmap_output(struct perf_event *event, void *data) { struct perf_mmap_event *mmap_event = data; struct perf_output_handle handle; struct perf_sample_data sample; int size = mmap_event->event_id.header.size; int ret; if (!perf_event_mmap_match(event, data)) return; if (event->attr.mmap2) { mmap_event->event_id.header.type = PERF_RECORD_MMAP2; mmap_event->event_id.header.size += sizeof(mmap_event->maj); mmap_event->event_id.header.size += sizeof(mmap_event->min); mmap_event->event_id.header.size += sizeof(mmap_event->ino); mmap_event->event_id.header.size += sizeof(mmap_event->ino_generation); mmap_event->event_id.header.size += sizeof(mmap_event->prot); mmap_event->event_id.header.size += sizeof(mmap_event->flags); } perf_event_header__init_id(&mmap_event->event_id.header, &sample, event); ret = perf_output_begin(&handle, event, mmap_event->event_id.header.size); if (ret) goto out; mmap_event->event_id.pid = perf_event_pid(event, current); mmap_event->event_id.tid = perf_event_tid(event, current); perf_output_put(&handle, mmap_event->event_id); if (event->attr.mmap2) { perf_output_put(&handle, mmap_event->maj); perf_output_put(&handle, mmap_event->min); perf_output_put(&handle, mmap_event->ino); perf_output_put(&handle, mmap_event->ino_generation); perf_output_put(&handle, mmap_event->prot); perf_output_put(&handle, mmap_event->flags); } __output_copy(&handle, mmap_event->file_name, mmap_event->file_size); perf_event__output_id_sample(event, &handle, &sample); perf_output_end(&handle); out: mmap_event->event_id.header.size = size; } static void perf_event_mmap_event(struct perf_mmap_event *mmap_event) { struct vm_area_struct *vma = mmap_event->vma; struct file *file = vma->vm_file; int maj = 0, min = 0; u64 ino = 0, gen = 0; u32 prot = 0, flags = 0; unsigned int size; char tmp[16]; char *buf = NULL; char *name; if (file) { struct inode *inode; dev_t dev; buf = kmalloc(PATH_MAX, GFP_KERNEL); if (!buf) { name = "//enomem"; goto cpy_name; } /* * d_path() works from the end of the rb backwards, so we * need to add enough zero bytes after the string to handle * the 64bit alignment we do later. 
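 * This is why file_path() below is passed PATH_MAX - sizeof(u64): the
 * tail of the string is zero-padded up to a u64 boundary under got_name.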
*/ name = file_path(file, buf, PATH_MAX - sizeof(u64)); if (IS_ERR(name)) { name = "//toolong"; goto cpy_name; } inode = file_inode(vma->vm_file); dev = inode->i_sb->s_dev; ino = inode->i_ino; gen = inode->i_generation; maj = MAJOR(dev); min = MINOR(dev); if (vma->vm_flags & VM_READ) prot |= PROT_READ; if (vma->vm_flags & VM_WRITE) prot |= PROT_WRITE; if (vma->vm_flags & VM_EXEC) prot |= PROT_EXEC; if (vma->vm_flags & VM_MAYSHARE) flags = MAP_SHARED; else flags = MAP_PRIVATE; if (vma->vm_flags & VM_DENYWRITE) flags |= MAP_DENYWRITE; if (vma->vm_flags & VM_MAYEXEC) flags |= MAP_EXECUTABLE; if (vma->vm_flags & VM_LOCKED) flags |= MAP_LOCKED; if (vma->vm_flags & VM_HUGETLB) flags |= MAP_HUGETLB; goto got_name; } else { if (vma->vm_ops && vma->vm_ops->name) { name = (char *) vma->vm_ops->name(vma); if (name) goto cpy_name; } name = (char *)arch_vma_name(vma); if (name) goto cpy_name; if (vma->vm_start <= vma->vm_mm->start_brk && vma->vm_end >= vma->vm_mm->brk) { name = "[heap]"; goto cpy_name; } if (vma->vm_start <= vma->vm_mm->start_stack && vma->vm_end >= vma->vm_mm->start_stack) { name = "[stack]"; goto cpy_name; } name = "//anon"; goto cpy_name; } cpy_name: strlcpy(tmp, name, sizeof(tmp)); name = tmp; got_name: /* * Since our buffer works in 8 byte units we need to align our string * size to a multiple of 8. However, we must guarantee the tail end is * zero'd out to avoid leaking random bits to userspace. */ size = strlen(name)+1; while (!IS_ALIGNED(size, sizeof(u64))) name[size++] = '\0'; mmap_event->file_name = name; mmap_event->file_size = size; mmap_event->maj = maj; mmap_event->min = min; mmap_event->ino = ino; mmap_event->ino_generation = gen; mmap_event->prot = prot; mmap_event->flags = flags; if (!(vma->vm_flags & VM_EXEC)) mmap_event->event_id.header.misc |= PERF_RECORD_MISC_MMAP_DATA; mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size; perf_iterate_sb(perf_event_mmap_output, mmap_event, NULL); kfree(buf); } /* * Check whether inode and address range match filter criteria. 
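 * A filter matches when it names the same inode and its
 * [offset, offset + size) range overlaps the mapped range being checked,
 * e.g. a filter at 0x1000/0x2000 overlaps a mapping of [0x0, 0x1800).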
*/ static bool perf_addr_filter_match(struct perf_addr_filter *filter, struct file *file, unsigned long offset, unsigned long size) { if (filter->inode != file_inode(file)) return false; if (filter->offset > offset + size) return false; if (filter->offset + filter->size < offset) return false; return true; } static void __perf_addr_filters_adjust(struct perf_event *event, void *data) { struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); struct vm_area_struct *vma = data; unsigned long off = vma->vm_pgoff << PAGE_SHIFT, flags; struct file *file = vma->vm_file; struct perf_addr_filter *filter; unsigned int restart = 0, count = 0; if (!has_addr_filter(event)) return; if (!file) return; raw_spin_lock_irqsave(&ifh->lock, flags); list_for_each_entry(filter, &ifh->list, entry) { if (perf_addr_filter_match(filter, file, off, vma->vm_end - vma->vm_start)) { event->addr_filters_offs[count] = vma->vm_start; restart++; } count++; } if (restart) event->addr_filters_gen++; raw_spin_unlock_irqrestore(&ifh->lock, flags); if (restart) perf_event_stop(event, 1); } /* * Adjust all task's events' filters to the new vma */ static void perf_addr_filters_adjust(struct vm_area_struct *vma) { struct perf_event_context *ctx; int ctxn; /* * Data tracing isn't supported yet and as such there is no need * to keep track of anything that isn't related to executable code: */ if (!(vma->vm_flags & VM_EXEC)) return; rcu_read_lock(); for_each_task_context_nr(ctxn) { ctx = rcu_dereference(current->perf_event_ctxp[ctxn]); if (!ctx) continue; perf_iterate_ctx(ctx, __perf_addr_filters_adjust, vma, true); } rcu_read_unlock(); } void perf_event_mmap(struct vm_area_struct *vma) { struct perf_mmap_event mmap_event; if (!atomic_read(&nr_mmap_events)) return; mmap_event = (struct perf_mmap_event){ .vma = vma, /* .file_name */ /* .file_size */ .event_id = { .header = { .type = PERF_RECORD_MMAP, .misc = PERF_RECORD_MISC_USER, /* .size */ }, /* .pid */ /* .tid */ .start = vma->vm_start, .len = vma->vm_end - vma->vm_start, .pgoff = (u64)vma->vm_pgoff << PAGE_SHIFT, }, /* .maj (attr_mmap2 only) */ /* .min (attr_mmap2 only) */ /* .ino (attr_mmap2 only) */ /* .ino_generation (attr_mmap2 only) */ /* .prot (attr_mmap2 only) */ /* .flags (attr_mmap2 only) */ }; perf_addr_filters_adjust(vma); perf_event_mmap_event(&mmap_event); } void perf_event_aux_event(struct perf_event *event, unsigned long head, unsigned long size, u64 flags) { struct perf_output_handle handle; struct perf_sample_data sample; struct perf_aux_event { struct perf_event_header header; u64 offset; u64 size; u64 flags; } rec = { .header = { .type = PERF_RECORD_AUX, .misc = 0, .size = sizeof(rec), }, .offset = head, .size = size, .flags = flags, }; int ret; perf_event_header__init_id(&rec.header, &sample, event); ret = perf_output_begin(&handle, event, rec.header.size); if (ret) return; perf_output_put(&handle, rec); perf_event__output_id_sample(event, &handle, &sample); perf_output_end(&handle); } /* * Lost/dropped samples logging */ void perf_log_lost_samples(struct perf_event *event, u64 lost) { struct perf_output_handle handle; struct perf_sample_data sample; int ret; struct { struct perf_event_header header; u64 lost; } lost_samples_event = { .header = { .type = PERF_RECORD_LOST_SAMPLES, .misc = 0, .size = sizeof(lost_samples_event), }, .lost = lost, }; perf_event_header__init_id(&lost_samples_event.header, &sample, event); ret = perf_output_begin(&handle, event, lost_samples_event.header.size); if (ret) return; perf_output_put(&handle, lost_samples_event); 
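	/* Append the sample_id_all fields, if the event requested them. */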
perf_event__output_id_sample(event, &handle, &sample); perf_output_end(&handle); } /* * context_switch tracking */ struct perf_switch_event { struct task_struct *task; struct task_struct *next_prev; struct { struct perf_event_header header; u32 next_prev_pid; u32 next_prev_tid; } event_id; }; static int perf_event_switch_match(struct perf_event *event) { return event->attr.context_switch; } static void perf_event_switch_output(struct perf_event *event, void *data) { struct perf_switch_event *se = data; struct perf_output_handle handle; struct perf_sample_data sample; int ret; if (!perf_event_switch_match(event)) return; /* Only CPU-wide events are allowed to see next/prev pid/tid */ if (event->ctx->task) { se->event_id.header.type = PERF_RECORD_SWITCH; se->event_id.header.size = sizeof(se->event_id.header); } else { se->event_id.header.type = PERF_RECORD_SWITCH_CPU_WIDE; se->event_id.header.size = sizeof(se->event_id); se->event_id.next_prev_pid = perf_event_pid(event, se->next_prev); se->event_id.next_prev_tid = perf_event_tid(event, se->next_prev); } perf_event_header__init_id(&se->event_id.header, &sample, event); ret = perf_output_begin(&handle, event, se->event_id.header.size); if (ret) return; if (event->ctx->task) perf_output_put(&handle, se->event_id.header); else perf_output_put(&handle, se->event_id); perf_event__output_id_sample(event, &handle, &sample); perf_output_end(&handle); } static void perf_event_switch(struct task_struct *task, struct task_struct *next_prev, bool sched_in) { struct perf_switch_event switch_event; /* N.B. caller checks nr_switch_events != 0 */ switch_event = (struct perf_switch_event){ .task = task, .next_prev = next_prev, .event_id = { .header = { /* .type */ .misc = sched_in ? 0 : PERF_RECORD_MISC_SWITCH_OUT, /* .size */ }, /* .next_prev_pid */ /* .next_prev_tid */ }, }; perf_iterate_sb(perf_event_switch_output, &switch_event, NULL); } /* * IRQ throttle logging */ static void perf_log_throttle(struct perf_event *event, int enable) { struct perf_output_handle handle; struct perf_sample_data sample; int ret; struct { struct perf_event_header header; u64 time; u64 id; u64 stream_id; } throttle_event = { .header = { .type = PERF_RECORD_THROTTLE, .misc = 0, .size = sizeof(throttle_event), }, .time = perf_event_clock(event), .id = primary_event_id(event), .stream_id = event->id, }; if (enable) throttle_event.header.type = PERF_RECORD_UNTHROTTLE; perf_event_header__init_id(&throttle_event.header, &sample, event); ret = perf_output_begin(&handle, event, throttle_event.header.size); if (ret) return; perf_output_put(&handle, throttle_event); perf_event__output_id_sample(event, &handle, &sample); perf_output_end(&handle); } static void perf_log_itrace_start(struct perf_event *event) { struct perf_output_handle handle; struct perf_sample_data sample; struct perf_aux_event { struct perf_event_header header; u32 pid; u32 tid; } rec; int ret; if (event->parent) event = event->parent; if (!(event->pmu->capabilities & PERF_PMU_CAP_ITRACE) || event->hw.itrace_started) return; rec.header.type = PERF_RECORD_ITRACE_START; rec.header.misc = 0; rec.header.size = sizeof(rec); rec.pid = perf_event_pid(event, current); rec.tid = perf_event_tid(event, current); perf_event_header__init_id(&rec.header, &sample, event); ret = perf_output_begin(&handle, event, rec.header.size); if (ret) return; perf_output_put(&handle, rec); perf_event__output_id_sample(event, &handle, &sample); perf_output_end(&handle); } /* * Generic event overflow handling, sampling. 
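 * __perf_event_overflow() below applies interrupt throttling
 * (max_samples_per_tick), optional frequency-based period adjustment and
 * the event_limit accounting before invoking the event's overflow handler.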
*/ static int __perf_event_overflow(struct perf_event *event, int throttle, struct perf_sample_data *data, struct pt_regs *regs) { int events = atomic_read(&event->event_limit); struct hw_perf_event *hwc = &event->hw; u64 seq; int ret = 0; /* * Non-sampling counters might still use the PMI to fold short * hardware counters, ignore those. */ if (unlikely(!is_sampling_event(event))) return 0; seq = __this_cpu_read(perf_throttled_seq); if (seq != hwc->interrupts_seq) { hwc->interrupts_seq = seq; hwc->interrupts = 1; } else { hwc->interrupts++; if (unlikely(throttle && hwc->interrupts >= max_samples_per_tick)) { __this_cpu_inc(perf_throttled_count); tick_dep_set_cpu(smp_processor_id(), TICK_DEP_BIT_PERF_EVENTS); hwc->interrupts = MAX_INTERRUPTS; perf_log_throttle(event, 0); ret = 1; } } if (event->attr.freq) { u64 now = perf_clock(); s64 delta = now - hwc->freq_time_stamp; hwc->freq_time_stamp = now; if (delta > 0 && delta < 2*TICK_NSEC) perf_adjust_period(event, delta, hwc->last_period, true); } /* * XXX event_limit might not quite work as expected on inherited * events */ event->pending_kill = POLL_IN; if (events && atomic_dec_and_test(&event->event_limit)) { ret = 1; event->pending_kill = POLL_HUP; perf_event_disable_inatomic(event); } READ_ONCE(event->overflow_handler)(event, data, regs); if (*perf_event_fasync(event) && event->pending_kill) { event->pending_wakeup = 1; irq_work_queue(&event->pending); } return ret; } int perf_event_overflow(struct perf_event *event, struct perf_sample_data *data, struct pt_regs *regs) { return __perf_event_overflow(event, 1, data, regs); } /* * Generic software event infrastructure */ struct swevent_htable { struct swevent_hlist *swevent_hlist; struct mutex hlist_mutex; int hlist_refcount; /* Recursion avoidance in each contexts */ int recursion[PERF_NR_CONTEXTS]; }; static DEFINE_PER_CPU(struct swevent_htable, swevent_htable); /* * We directly increment event->count and keep a second value in * event->hw.period_left to count intervals. This period event * is kept in the range [-sample_period, 0] so that we can use the * sign as trigger. */ u64 perf_swevent_set_period(struct perf_event *event) { struct hw_perf_event *hwc = &event->hw; u64 period = hwc->last_period; u64 nr, offset; s64 old, val; hwc->last_period = hwc->sample_period; again: old = val = local64_read(&hwc->period_left); if (val < 0) return 0; nr = div64_u64(period + val, period); offset = nr * period; val -= offset; if (local64_cmpxchg(&hwc->period_left, old, val) != old) goto again; return nr; } static void perf_swevent_overflow(struct perf_event *event, u64 overflow, struct perf_sample_data *data, struct pt_regs *regs) { struct hw_perf_event *hwc = &event->hw; int throttle = 0; if (!overflow) overflow = perf_swevent_set_period(event); if (hwc->interrupts == MAX_INTERRUPTS) return; for (; overflow; overflow--) { if (__perf_event_overflow(event, throttle, data, regs)) { /* * We inhibit the overflow from happening when * hwc->interrupts == MAX_INTERRUPTS. 
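 * (A non-zero return also covers the event_limit case, so the
 * remaining overflows are dropped either way.)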
*/ break; } throttle = 1; } } static void perf_swevent_event(struct perf_event *event, u64 nr, struct perf_sample_data *data, struct pt_regs *regs) { struct hw_perf_event *hwc = &event->hw; local64_add(nr, &event->count); if (!regs) return; if (!is_sampling_event(event)) return; if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) { data->period = nr; return perf_swevent_overflow(event, 1, data, regs); } else data->period = event->hw.last_period; if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq) return perf_swevent_overflow(event, 1, data, regs); if (local64_add_negative(nr, &hwc->period_left)) return; perf_swevent_overflow(event, 0, data, regs); } static int perf_exclude_event(struct perf_event *event, struct pt_regs *regs) { if (event->hw.state & PERF_HES_STOPPED) return 1; if (regs) { if (event->attr.exclude_user && user_mode(regs)) return 1; if (event->attr.exclude_kernel && !user_mode(regs)) return 1; } return 0; } static int perf_swevent_match(struct perf_event *event, enum perf_type_id type, u32 event_id, struct perf_sample_data *data, struct pt_regs *regs) { if (event->attr.type != type) return 0; if (event->attr.config != event_id) return 0; if (perf_exclude_event(event, regs)) return 0; return 1; } static inline u64 swevent_hash(u64 type, u32 event_id) { u64 val = event_id | (type << 32); return hash_64(val, SWEVENT_HLIST_BITS); } static inline struct hlist_head * __find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id) { u64 hash = swevent_hash(type, event_id); return &hlist->heads[hash]; } /* For the read side: events when they trigger */ static inline struct hlist_head * find_swevent_head_rcu(struct swevent_htable *swhash, u64 type, u32 event_id) { struct swevent_hlist *hlist; hlist = rcu_dereference(swhash->swevent_hlist); if (!hlist) return NULL; return __find_swevent_head(hlist, type, event_id); } /* For the event head insertion and removal in the hlist */ static inline struct hlist_head * find_swevent_head(struct swevent_htable *swhash, struct perf_event *event) { struct swevent_hlist *hlist; u32 event_id = event->attr.config; u64 type = event->attr.type; /* * Event scheduling is always serialized against hlist allocation * and release. Which makes the protected version suitable here. * The context lock guarantees that. 
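 * rcu_dereference_protected() below is annotated with
 * lockdep_is_held(&event->ctx->lock) to document exactly that.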
*/ hlist = rcu_dereference_protected(swhash->swevent_hlist, lockdep_is_held(&event->ctx->lock)); if (!hlist) return NULL; return __find_swevent_head(hlist, type, event_id); } static void do_perf_sw_event(enum perf_type_id type, u32 event_id, u64 nr, struct perf_sample_data *data, struct pt_regs *regs) { struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable); struct perf_event *event; struct hlist_head *head; rcu_read_lock(); head = find_swevent_head_rcu(swhash, type, event_id); if (!head) goto end; hlist_for_each_entry_rcu(event, head, hlist_entry) { if (perf_swevent_match(event, type, event_id, data, regs)) perf_swevent_event(event, nr, data, regs); } end: rcu_read_unlock(); } DEFINE_PER_CPU(struct pt_regs, __perf_regs[4]); int perf_swevent_get_recursion_context(void) { struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable); return get_recursion_context(swhash->recursion); } EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context); void perf_swevent_put_recursion_context(int rctx) { struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable); put_recursion_context(swhash->recursion, rctx); } void ___perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) { struct perf_sample_data data; if (WARN_ON_ONCE(!regs)) return; perf_sample_data_init(&data, addr, 0); do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs); } void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) { int rctx; preempt_disable_notrace(); rctx = perf_swevent_get_recursion_context(); if (unlikely(rctx < 0)) goto fail; ___perf_sw_event(event_id, nr, regs, addr); perf_swevent_put_recursion_context(rctx); fail: preempt_enable_notrace(); } static void perf_swevent_read(struct perf_event *event) { } static int perf_swevent_add(struct perf_event *event, int flags) { struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable); struct hw_perf_event *hwc = &event->hw; struct hlist_head *head; if (is_sampling_event(event)) { hwc->last_period = hwc->sample_period; perf_swevent_set_period(event); } hwc->state = !(flags & PERF_EF_START); head = find_swevent_head(swhash, event); if (WARN_ON_ONCE(!head)) return -EINVAL; hlist_add_head_rcu(&event->hlist_entry, head); perf_event_update_userpage(event); return 0; } static void perf_swevent_del(struct perf_event *event, int flags) { hlist_del_rcu(&event->hlist_entry); } static void perf_swevent_start(struct perf_event *event, int flags) { event->hw.state = 0; } static void perf_swevent_stop(struct perf_event *event, int flags) { event->hw.state = PERF_HES_STOPPED; } /* Deref the hlist from the update side */ static inline struct swevent_hlist * swevent_hlist_deref(struct swevent_htable *swhash) { return rcu_dereference_protected(swhash->swevent_hlist, lockdep_is_held(&swhash->hlist_mutex)); } static void swevent_hlist_release(struct swevent_htable *swhash) { struct swevent_hlist *hlist = swevent_hlist_deref(swhash); if (!hlist) return; RCU_INIT_POINTER(swhash->swevent_hlist, NULL); kfree_rcu(hlist, rcu_head); } static void swevent_hlist_put_cpu(int cpu) { struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu); mutex_lock(&swhash->hlist_mutex); if (!--swhash->hlist_refcount) swevent_hlist_release(swhash); mutex_unlock(&swhash->hlist_mutex); } static void swevent_hlist_put(void) { int cpu; for_each_possible_cpu(cpu) swevent_hlist_put_cpu(cpu); } static int swevent_hlist_get_cpu(int cpu) { struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu); int err = 0; mutex_lock(&swhash->hlist_mutex); if (!swevent_hlist_deref(swhash) && 
cpu_online(cpu)) { struct swevent_hlist *hlist; hlist = kzalloc(sizeof(*hlist), GFP_KERNEL); if (!hlist) { err = -ENOMEM; goto exit; } rcu_assign_pointer(swhash->swevent_hlist, hlist); } swhash->hlist_refcount++; exit: mutex_unlock(&swhash->hlist_mutex); return err; } static int swevent_hlist_get(void) { int err, cpu, failed_cpu; get_online_cpus(); for_each_possible_cpu(cpu) { err = swevent_hlist_get_cpu(cpu); if (err) { failed_cpu = cpu; goto fail; } } put_online_cpus(); return 0; fail: for_each_possible_cpu(cpu) { if (cpu == failed_cpu) break; swevent_hlist_put_cpu(cpu); } put_online_cpus(); return err; } struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX]; static void sw_perf_event_destroy(struct perf_event *event) { u64 event_id = event->attr.config; WARN_ON(event->parent); static_key_slow_dec(&perf_swevent_enabled[event_id]); swevent_hlist_put(); } static int perf_swevent_init(struct perf_event *event) { u64 event_id = event->attr.config; if (event->attr.type != PERF_TYPE_SOFTWARE) return -ENOENT; /* * no branch sampling for software events */ if (has_branch_stack(event)) return -EOPNOTSUPP; switch (event_id) { case PERF_COUNT_SW_CPU_CLOCK: case PERF_COUNT_SW_TASK_CLOCK: return -ENOENT; default: break; } if (event_id >= PERF_COUNT_SW_MAX) return -ENOENT; if (!event->parent) { int err; err = swevent_hlist_get(); if (err) return err; static_key_slow_inc(&perf_swevent_enabled[event_id]); event->destroy = sw_perf_event_destroy; } return 0; } static struct pmu perf_swevent = { .task_ctx_nr = perf_sw_context, .capabilities = PERF_PMU_CAP_NO_NMI, .event_init = perf_swevent_init, .add = perf_swevent_add, .del = perf_swevent_del, .start = perf_swevent_start, .stop = perf_swevent_stop, .read = perf_swevent_read, }; #ifdef CONFIG_EVENT_TRACING static int perf_tp_filter_match(struct perf_event *event, struct perf_sample_data *data) { void *record = data->raw->frag.data; /* only top level events have filters set */ if (event->parent) event = event->parent; if (likely(!event->filter) || filter_match_preds(event->filter, record)) return 1; return 0; } static int perf_tp_event_match(struct perf_event *event, struct perf_sample_data *data, struct pt_regs *regs) { if (event->hw.state & PERF_HES_STOPPED) return 0; /* * All tracepoints are from kernel-space. */ if (event->attr.exclude_kernel) return 0; if (!perf_tp_filter_match(event, data)) return 0; return 1; } void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx, struct trace_event_call *call, u64 count, struct pt_regs *regs, struct hlist_head *head, struct task_struct *task) { struct bpf_prog *prog = call->prog; if (prog) { *(struct pt_regs **)raw_data = regs; if (!trace_call_bpf(prog, raw_data) || hlist_empty(head)) { perf_swevent_put_recursion_context(rctx); return; } } perf_tp_event(call->event.type, count, raw_data, size, regs, head, rctx, task); } EXPORT_SYMBOL_GPL(perf_trace_run_bpf_submit); void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size, struct pt_regs *regs, struct hlist_head *head, int rctx, struct task_struct *task) { struct perf_sample_data data; struct perf_event *event; struct perf_raw_record raw = { .frag = { .size = entry_size, .data = record, }, }; perf_sample_data_init(&data, 0, 0); data.raw = &raw; perf_trace_buf_update(record, event_type); hlist_for_each_entry_rcu(event, head, hlist_entry) { if (perf_tp_event_match(event, &data, regs)) perf_swevent_event(event, count, &data, regs); } /* * If we got specified a target task, also iterate its context and * deliver this event there too. 
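 * Only tracepoint events in the task's software context whose config
 * matches this trace entry's type are considered below.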
*/ if (task && task != current) { struct perf_event_context *ctx; struct trace_entry *entry = record; rcu_read_lock(); ctx = rcu_dereference(task->perf_event_ctxp[perf_sw_context]); if (!ctx) goto unlock; list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { if (event->attr.type != PERF_TYPE_TRACEPOINT) continue; if (event->attr.config != entry->type) continue; if (perf_tp_event_match(event, &data, regs)) perf_swevent_event(event, count, &data, regs); } unlock: rcu_read_unlock(); } perf_swevent_put_recursion_context(rctx); } EXPORT_SYMBOL_GPL(perf_tp_event); static void tp_perf_event_destroy(struct perf_event *event) { perf_trace_destroy(event); } static int perf_tp_event_init(struct perf_event *event) { int err; if (event->attr.type != PERF_TYPE_TRACEPOINT) return -ENOENT; /* * no branch sampling for tracepoint events */ if (has_branch_stack(event)) return -EOPNOTSUPP; err = perf_trace_init(event); if (err) return err; event->destroy = tp_perf_event_destroy; return 0; } static struct pmu perf_tracepoint = { .task_ctx_nr = perf_sw_context, .event_init = perf_tp_event_init, .add = perf_trace_add, .del = perf_trace_del, .start = perf_swevent_start, .stop = perf_swevent_stop, .read = perf_swevent_read, }; static inline void perf_tp_register(void) { perf_pmu_register(&perf_tracepoint, "tracepoint", PERF_TYPE_TRACEPOINT); } static void perf_event_free_filter(struct perf_event *event) { ftrace_profile_free_filter(event); } #ifdef CONFIG_BPF_SYSCALL static void bpf_overflow_handler(struct perf_event *event, struct perf_sample_data *data, struct pt_regs *regs) { struct bpf_perf_event_data_kern ctx = { .data = data, .regs = regs, }; int ret = 0; preempt_disable(); if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) goto out; rcu_read_lock(); ret = BPF_PROG_RUN(event->prog, &ctx); rcu_read_unlock(); out: __this_cpu_dec(bpf_prog_active); preempt_enable(); if (!ret) return; event->orig_overflow_handler(event, data, regs); } static int perf_event_set_bpf_handler(struct perf_event *event, u32 prog_fd) { struct bpf_prog *prog; if (event->overflow_handler_context) /* hw breakpoint or kernel counter */ return -EINVAL; if (event->prog) return -EEXIST; prog = bpf_prog_get_type(prog_fd, BPF_PROG_TYPE_PERF_EVENT); if (IS_ERR(prog)) return PTR_ERR(prog); event->prog = prog; event->orig_overflow_handler = READ_ONCE(event->overflow_handler); WRITE_ONCE(event->overflow_handler, bpf_overflow_handler); return 0; } static void perf_event_free_bpf_handler(struct perf_event *event) { struct bpf_prog *prog = event->prog; if (!prog) return; WRITE_ONCE(event->overflow_handler, event->orig_overflow_handler); event->prog = NULL; bpf_prog_put(prog); } #else static int perf_event_set_bpf_handler(struct perf_event *event, u32 prog_fd) { return -EOPNOTSUPP; } static void perf_event_free_bpf_handler(struct perf_event *event) { } #endif static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd) { bool is_kprobe, is_tracepoint; struct bpf_prog *prog; if (event->attr.type == PERF_TYPE_HARDWARE || event->attr.type == PERF_TYPE_SOFTWARE) return perf_event_set_bpf_handler(event, prog_fd); if (event->attr.type != PERF_TYPE_TRACEPOINT) return -EINVAL; if (event->tp_event->prog) return -EEXIST; is_kprobe = event->tp_event->flags & TRACE_EVENT_FL_UKPROBE; is_tracepoint = event->tp_event->flags & TRACE_EVENT_FL_TRACEPOINT; if (!is_kprobe && !is_tracepoint) /* bpf programs can only be attached to u/kprobe or tracepoint */ return -EINVAL; prog = bpf_prog_get(prog_fd); if (IS_ERR(prog)) return PTR_ERR(prog); if 
((is_kprobe && prog->type != BPF_PROG_TYPE_KPROBE) || (is_tracepoint && prog->type != BPF_PROG_TYPE_TRACEPOINT)) { /* valid fd, but invalid bpf program type */ bpf_prog_put(prog); return -EINVAL; } if (is_tracepoint) { int off = trace_event_get_offsets(event->tp_event); if (prog->aux->max_ctx_offset > off) { bpf_prog_put(prog); return -EACCES; } } event->tp_event->prog = prog; return 0; } static void perf_event_free_bpf_prog(struct perf_event *event) { struct bpf_prog *prog; perf_event_free_bpf_handler(event); if (!event->tp_event) return; prog = event->tp_event->prog; if (prog) { event->tp_event->prog = NULL; bpf_prog_put(prog); } } #else static inline void perf_tp_register(void) { } static void perf_event_free_filter(struct perf_event *event) { } static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd) { return -ENOENT; } static void perf_event_free_bpf_prog(struct perf_event *event) { } #endif /* CONFIG_EVENT_TRACING */ #ifdef CONFIG_HAVE_HW_BREAKPOINT void perf_bp_event(struct perf_event *bp, void *data) { struct perf_sample_data sample; struct pt_regs *regs = data; perf_sample_data_init(&sample, bp->attr.bp_addr, 0); if (!bp->hw.state && !perf_exclude_event(bp, regs)) perf_swevent_event(bp, 1, &sample, regs); } #endif /* * Allocate a new address filter */ static struct perf_addr_filter * perf_addr_filter_new(struct perf_event *event, struct list_head *filters) { int node = cpu_to_node(event->cpu == -1 ? 0 : event->cpu); struct perf_addr_filter *filter; filter = kzalloc_node(sizeof(*filter), GFP_KERNEL, node); if (!filter) return NULL; INIT_LIST_HEAD(&filter->entry); list_add_tail(&filter->entry, filters); return filter; } static void free_filters_list(struct list_head *filters) { struct perf_addr_filter *filter, *iter; list_for_each_entry_safe(filter, iter, filters, entry) { if (filter->inode) iput(filter->inode); list_del(&filter->entry); kfree(filter); } } /* * Free existing address filters and optionally install new ones */ static void perf_addr_filters_splice(struct perf_event *event, struct list_head *head) { unsigned long flags; LIST_HEAD(list); if (!has_addr_filter(event)) return; /* don't bother with children, they don't have their own filters */ if (event->parent) return; raw_spin_lock_irqsave(&event->addr_filters.lock, flags); list_splice_init(&event->addr_filters.list, &list); if (head) list_splice(head, &event->addr_filters.list); raw_spin_unlock_irqrestore(&event->addr_filters.lock, flags); free_filters_list(&list); } /* * Scan through mm's vmas and see if one of them matches the * @filter; if so, adjust filter's address range. * Called with mm::mmap_sem down for reading. */ static unsigned long perf_addr_filter_apply(struct perf_addr_filter *filter, struct mm_struct *mm) { struct vm_area_struct *vma; for (vma = mm->mmap; vma; vma = vma->vm_next) { struct file *file = vma->vm_file; unsigned long off = vma->vm_pgoff << PAGE_SHIFT; unsigned long vma_size = vma->vm_end - vma->vm_start; if (!file) continue; if (!perf_addr_filter_match(filter, file, off, vma_size)) continue; return vma->vm_start; } return 0; } /* * Update event's address range filters based on the * task's existing mappings, if any. 
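 * For each file-backed filter we scan the task's mm for a matching vma
 * and record that vma's start as the filter's base offset, then restart
 * the event so the new offsets take effect.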
*/ static void perf_event_addr_filters_apply(struct perf_event *event) { struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); struct task_struct *task = READ_ONCE(event->ctx->task); struct perf_addr_filter *filter; struct mm_struct *mm = NULL; unsigned int count = 0; unsigned long flags; /* * We may observe TASK_TOMBSTONE, which means that the event tear-down * will stop on the parent's child_mutex that our caller is also holding */ if (task == TASK_TOMBSTONE) return; mm = get_task_mm(event->ctx->task); if (!mm) goto restart; down_read(&mm->mmap_sem); raw_spin_lock_irqsave(&ifh->lock, flags); list_for_each_entry(filter, &ifh->list, entry) { event->addr_filters_offs[count] = 0; /* * Adjust base offset if the filter is associated to a binary * that needs to be mapped: */ if (filter->inode) event->addr_filters_offs[count] = perf_addr_filter_apply(filter, mm); count++; } event->addr_filters_gen++; raw_spin_unlock_irqrestore(&ifh->lock, flags); up_read(&mm->mmap_sem); mmput(mm); restart: perf_event_stop(event, 1); } /* * Address range filtering: limiting the data to certain * instruction address ranges. Filters are ioctl()ed to us from * userspace as ascii strings. * * Filter string format: * * ACTION RANGE_SPEC * where ACTION is one of the * * "filter": limit the trace to this region * * "start": start tracing from this address * * "stop": stop tracing at this address/region; * RANGE_SPEC is * * for kernel addresses: <start address>[/<size>] * * for object files: <start address>[/<size>]@</path/to/object/file> * * if <size> is not specified, the range is treated as a single address. */ enum { IF_ACT_NONE = -1, IF_ACT_FILTER, IF_ACT_START, IF_ACT_STOP, IF_SRC_FILE, IF_SRC_KERNEL, IF_SRC_FILEADDR, IF_SRC_KERNELADDR, }; enum { IF_STATE_ACTION = 0, IF_STATE_SOURCE, IF_STATE_END, }; static const match_table_t if_tokens = { { IF_ACT_FILTER, "filter" }, { IF_ACT_START, "start" }, { IF_ACT_STOP, "stop" }, { IF_SRC_FILE, "%u/%u@%s" }, { IF_SRC_KERNEL, "%u/%u" }, { IF_SRC_FILEADDR, "%u@%s" }, { IF_SRC_KERNELADDR, "%u" }, { IF_ACT_NONE, NULL }, }; /* * Address filter string parser */ static int perf_event_parse_addr_filter(struct perf_event *event, char *fstr, struct list_head *filters) { struct perf_addr_filter *filter = NULL; char *start, *orig, *filename = NULL; struct path path; substring_t args[MAX_OPT_ARGS]; int state = IF_STATE_ACTION, token; unsigned int kernel = 0; int ret = -EINVAL; orig = fstr = kstrdup(fstr, GFP_KERNEL); if (!fstr) return -ENOMEM; while ((start = strsep(&fstr, " ,\n")) != NULL) { ret = -EINVAL; if (!*start) continue; /* filter definition begins */ if (state == IF_STATE_ACTION) { filter = perf_addr_filter_new(event, filters); if (!filter) goto fail; } token = match_token(start, if_tokens, args); switch (token) { case IF_ACT_FILTER: case IF_ACT_START: filter->filter = 1; case IF_ACT_STOP: if (state != IF_STATE_ACTION) goto fail; state = IF_STATE_SOURCE; break; case IF_SRC_KERNELADDR: case IF_SRC_KERNEL: kernel = 1; case IF_SRC_FILEADDR: case IF_SRC_FILE: if (state != IF_STATE_SOURCE) goto fail; if (token == IF_SRC_FILE || token == IF_SRC_KERNEL) filter->range = 1; *args[0].to = 0; ret = kstrtoul(args[0].from, 0, &filter->offset); if (ret) goto fail; if (filter->range) { *args[1].to = 0; ret = kstrtoul(args[1].from, 0, &filter->size); if (ret) goto fail; } if (token == IF_SRC_FILE || token == IF_SRC_FILEADDR) { int fpos = filter->range ? 
2 : 1; filename = match_strdup(&args[fpos]); if (!filename) { ret = -ENOMEM; goto fail; } } state = IF_STATE_END; break; default: goto fail; } /* * Filter definition is fully parsed, validate and install it. * Make sure that it doesn't contradict itself or the event's * attribute. */ if (state == IF_STATE_END) { if (kernel && event->attr.exclude_kernel) goto fail; if (!kernel) { if (!filename) goto fail; /* look up the path and grab its inode */ ret = kern_path(filename, LOOKUP_FOLLOW, &path); if (ret) goto fail_free_name; filter->inode = igrab(d_inode(path.dentry)); path_put(&path); kfree(filename); filename = NULL; ret = -EINVAL; if (!filter->inode || !S_ISREG(filter->inode->i_mode)) /* free_filters_list() will iput() */ goto fail; } /* ready to consume more filters */ state = IF_STATE_ACTION; filter = NULL; } } if (state != IF_STATE_ACTION) goto fail; kfree(orig); return 0; fail_free_name: kfree(filename); fail: free_filters_list(filters); kfree(orig); return ret; } static int perf_event_set_addr_filter(struct perf_event *event, char *filter_str) { LIST_HEAD(filters); int ret; /* * Since this is called in perf_ioctl() path, we're already holding * ctx::mutex. */ lockdep_assert_held(&event->ctx->mutex); if (WARN_ON_ONCE(event->parent)) return -EINVAL; /* * For now, we only support filtering in per-task events; doing so * for CPU-wide events requires additional context switching trickery, * since same object code will be mapped at different virtual * addresses in different processes. */ if (!event->ctx->task) return -EOPNOTSUPP; ret = perf_event_parse_addr_filter(event, filter_str, &filters); if (ret) return ret; ret = event->pmu->addr_filters_validate(&filters); if (ret) { free_filters_list(&filters); return ret; } /* remove existing filters, if any */ perf_addr_filters_splice(event, &filters); /* install new filters */ perf_event_for_each_child(event, perf_event_addr_filters_apply); return ret; } static int perf_event_set_filter(struct perf_event *event, void __user *arg) { char *filter_str; int ret = -EINVAL; if ((event->attr.type != PERF_TYPE_TRACEPOINT || !IS_ENABLED(CONFIG_EVENT_TRACING)) && !has_addr_filter(event)) return -EINVAL; filter_str = strndup_user(arg, PAGE_SIZE); if (IS_ERR(filter_str)) return PTR_ERR(filter_str); if (IS_ENABLED(CONFIG_EVENT_TRACING) && event->attr.type == PERF_TYPE_TRACEPOINT) ret = ftrace_profile_set_filter(event, event->attr.config, filter_str); else if (has_addr_filter(event)) ret = perf_event_set_addr_filter(event, filter_str); kfree(filter_str); return ret; } /* * hrtimer based swevent callback */ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer) { enum hrtimer_restart ret = HRTIMER_RESTART; struct perf_sample_data data; struct pt_regs *regs; struct perf_event *event; u64 period; event = container_of(hrtimer, struct perf_event, hw.hrtimer); if (event->state != PERF_EVENT_STATE_ACTIVE) return HRTIMER_NORESTART; event->pmu->read(event); perf_sample_data_init(&data, 0, event->hw.last_period); regs = get_irq_regs(); if (regs && !perf_exclude_event(event, regs)) { if (!(event->attr.exclude_idle && is_idle_task(current))) if (__perf_event_overflow(event, 1, &data, regs)) ret = HRTIMER_NORESTART; } period = max_t(u64, 10000, event->hw.sample_period); hrtimer_forward_now(hrtimer, ns_to_ktime(period)); return ret; } static void perf_swevent_start_hrtimer(struct perf_event *event) { struct hw_perf_event *hwc = &event->hw; s64 period; if (!is_sampling_event(event)) return; period = local64_read(&hwc->period_left); if (period) { if 
(period < 0) period = 10000; local64_set(&hwc->period_left, 0); } else { period = max_t(u64, 10000, hwc->sample_period); } hrtimer_start(&hwc->hrtimer, ns_to_ktime(period), HRTIMER_MODE_REL_PINNED); } static void perf_swevent_cancel_hrtimer(struct perf_event *event) { struct hw_perf_event *hwc = &event->hw; if (is_sampling_event(event)) { ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer); local64_set(&hwc->period_left, ktime_to_ns(remaining)); hrtimer_cancel(&hwc->hrtimer); } } static void perf_swevent_init_hrtimer(struct perf_event *event) { struct hw_perf_event *hwc = &event->hw; if (!is_sampling_event(event)) return; hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); hwc->hrtimer.function = perf_swevent_hrtimer; /* * Since hrtimers have a fixed rate, we can do a static freq->period * mapping and avoid the whole period adjust feedback stuff. */ if (event->attr.freq) { long freq = event->attr.sample_freq; event->attr.sample_period = NSEC_PER_SEC / freq; hwc->sample_period = event->attr.sample_period; local64_set(&hwc->period_left, hwc->sample_period); hwc->last_period = hwc->sample_period; event->attr.freq = 0; } } /* * Software event: cpu wall time clock */ static void cpu_clock_event_update(struct perf_event *event) { s64 prev; u64 now; now = local_clock(); prev = local64_xchg(&event->hw.prev_count, now); local64_add(now - prev, &event->count); } static void cpu_clock_event_start(struct perf_event *event, int flags) { local64_set(&event->hw.prev_count, local_clock()); perf_swevent_start_hrtimer(event); } static void cpu_clock_event_stop(struct perf_event *event, int flags) { perf_swevent_cancel_hrtimer(event); cpu_clock_event_update(event); } static int cpu_clock_event_add(struct perf_event *event, int flags) { if (flags & PERF_EF_START) cpu_clock_event_start(event, flags); perf_event_update_userpage(event); return 0; } static void cpu_clock_event_del(struct perf_event *event, int flags) { cpu_clock_event_stop(event, flags); } static void cpu_clock_event_read(struct perf_event *event) { cpu_clock_event_update(event); } static int cpu_clock_event_init(struct perf_event *event) { if (event->attr.type != PERF_TYPE_SOFTWARE) return -ENOENT; if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK) return -ENOENT; /* * no branch sampling for software events */ if (has_branch_stack(event)) return -EOPNOTSUPP; perf_swevent_init_hrtimer(event); return 0; } static struct pmu perf_cpu_clock = { .task_ctx_nr = perf_sw_context, .capabilities = PERF_PMU_CAP_NO_NMI, .event_init = cpu_clock_event_init, .add = cpu_clock_event_add, .del = cpu_clock_event_del, .start = cpu_clock_event_start, .stop = cpu_clock_event_stop, .read = cpu_clock_event_read, }; /* * Software event: task time clock */ static void task_clock_event_update(struct perf_event *event, u64 now) { u64 prev; s64 delta; prev = local64_xchg(&event->hw.prev_count, now); delta = now - prev; local64_add(delta, &event->count); } static void task_clock_event_start(struct perf_event *event, int flags) { local64_set(&event->hw.prev_count, event->ctx->time); perf_swevent_start_hrtimer(event); } static void task_clock_event_stop(struct perf_event *event, int flags) { perf_swevent_cancel_hrtimer(event); task_clock_event_update(event, event->ctx->time); } static int task_clock_event_add(struct perf_event *event, int flags) { if (flags & PERF_EF_START) task_clock_event_start(event, flags); perf_event_update_userpage(event); return 0; } static void task_clock_event_del(struct perf_event *event, int flags) { task_clock_event_stop(event, 
PERF_EF_UPDATE); } static void task_clock_event_read(struct perf_event *event) { u64 now = perf_clock(); u64 delta = now - event->ctx->timestamp; u64 time = event->ctx->time + delta; task_clock_event_update(event, time); } static int task_clock_event_init(struct perf_event *event) { if (event->attr.type != PERF_TYPE_SOFTWARE) return -ENOENT; if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK) return -ENOENT; /* * no branch sampling for software events */ if (has_branch_stack(event)) return -EOPNOTSUPP; perf_swevent_init_hrtimer(event); return 0; } static struct pmu perf_task_clock = { .task_ctx_nr = perf_sw_context, .capabilities = PERF_PMU_CAP_NO_NMI, .event_init = task_clock_event_init, .add = task_clock_event_add, .del = task_clock_event_del, .start = task_clock_event_start, .stop = task_clock_event_stop, .read = task_clock_event_read, }; static void perf_pmu_nop_void(struct pmu *pmu) { } static void perf_pmu_nop_txn(struct pmu *pmu, unsigned int flags) { } static int perf_pmu_nop_int(struct pmu *pmu) { return 0; } static DEFINE_PER_CPU(unsigned int, nop_txn_flags); static void perf_pmu_start_txn(struct pmu *pmu, unsigned int flags) { __this_cpu_write(nop_txn_flags, flags); if (flags & ~PERF_PMU_TXN_ADD) return; perf_pmu_disable(pmu); } static int perf_pmu_commit_txn(struct pmu *pmu) { unsigned int flags = __this_cpu_read(nop_txn_flags); __this_cpu_write(nop_txn_flags, 0); if (flags & ~PERF_PMU_TXN_ADD) return 0; perf_pmu_enable(pmu); return 0; } static void perf_pmu_cancel_txn(struct pmu *pmu) { unsigned int flags = __this_cpu_read(nop_txn_flags); __this_cpu_write(nop_txn_flags, 0); if (flags & ~PERF_PMU_TXN_ADD) return; perf_pmu_enable(pmu); } static int perf_event_idx_default(struct perf_event *event) { return 0; } /* * Ensures all contexts with the same task_ctx_nr have the same * pmu_cpu_context too. */ static struct perf_cpu_context __percpu *find_pmu_context(int ctxn) { struct pmu *pmu; if (ctxn < 0) return NULL; list_for_each_entry(pmu, &pmus, entry) { if (pmu->task_ctx_nr == ctxn) return pmu->pmu_cpu_context; } return NULL; } static void update_pmu_context(struct pmu *pmu, struct pmu *old_pmu) { int cpu; for_each_possible_cpu(cpu) { struct perf_cpu_context *cpuctx; cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); if (cpuctx->unique_pmu == old_pmu) cpuctx->unique_pmu = pmu; } } static void free_pmu_context(struct pmu *pmu) { struct pmu *i; mutex_lock(&pmus_lock); /* * Like a real lame refcount. 
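 * The per-cpu context stays around as long as some other pmu on the
 * list still points at it; only the last user actually frees it.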
*/ list_for_each_entry(i, &pmus, entry) { if (i->pmu_cpu_context == pmu->pmu_cpu_context) { update_pmu_context(i, pmu); goto out; } } free_percpu(pmu->pmu_cpu_context); out: mutex_unlock(&pmus_lock); } /* * Let userspace know that this PMU supports address range filtering: */ static ssize_t nr_addr_filters_show(struct device *dev, struct device_attribute *attr, char *page) { struct pmu *pmu = dev_get_drvdata(dev); return snprintf(page, PAGE_SIZE - 1, "%d\n", pmu->nr_addr_filters); } DEVICE_ATTR_RO(nr_addr_filters); static struct idr pmu_idr; static ssize_t type_show(struct device *dev, struct device_attribute *attr, char *page) { struct pmu *pmu = dev_get_drvdata(dev); return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->type); } static DEVICE_ATTR_RO(type); static ssize_t perf_event_mux_interval_ms_show(struct device *dev, struct device_attribute *attr, char *page) { struct pmu *pmu = dev_get_drvdata(dev); return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->hrtimer_interval_ms); } static DEFINE_MUTEX(mux_interval_mutex); static ssize_t perf_event_mux_interval_ms_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct pmu *pmu = dev_get_drvdata(dev); int timer, cpu, ret; ret = kstrtoint(buf, 0, &timer); if (ret) return ret; if (timer < 1) return -EINVAL; /* same value, noting to do */ if (timer == pmu->hrtimer_interval_ms) return count; mutex_lock(&mux_interval_mutex); pmu->hrtimer_interval_ms = timer; /* update all cpuctx for this PMU */ get_online_cpus(); for_each_online_cpu(cpu) { struct perf_cpu_context *cpuctx; cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer); cpu_function_call(cpu, (remote_function_f)perf_mux_hrtimer_restart, cpuctx); } put_online_cpus(); mutex_unlock(&mux_interval_mutex); return count; } static DEVICE_ATTR_RW(perf_event_mux_interval_ms); static struct attribute *pmu_dev_attrs[] = { &dev_attr_type.attr, &dev_attr_perf_event_mux_interval_ms.attr, NULL, }; ATTRIBUTE_GROUPS(pmu_dev); static int pmu_bus_running; static struct bus_type pmu_bus = { .name = "event_source", .dev_groups = pmu_dev_groups, }; static void pmu_dev_release(struct device *dev) { kfree(dev); } static int pmu_dev_alloc(struct pmu *pmu) { int ret = -ENOMEM; pmu->dev = kzalloc(sizeof(struct device), GFP_KERNEL); if (!pmu->dev) goto out; pmu->dev->groups = pmu->attr_groups; device_initialize(pmu->dev); ret = dev_set_name(pmu->dev, "%s", pmu->name); if (ret) goto free_dev; dev_set_drvdata(pmu->dev, pmu); pmu->dev->bus = &pmu_bus; pmu->dev->release = pmu_dev_release; ret = device_add(pmu->dev); if (ret) goto free_dev; /* For PMUs with address filters, throw in an extra attribute: */ if (pmu->nr_addr_filters) ret = device_create_file(pmu->dev, &dev_attr_nr_addr_filters); if (ret) goto del_dev; out: return ret; del_dev: device_del(pmu->dev); free_dev: put_device(pmu->dev); goto out; } static struct lock_class_key cpuctx_mutex; static struct lock_class_key cpuctx_lock; int perf_pmu_register(struct pmu *pmu, const char *name, int type) { int cpu, ret; mutex_lock(&pmus_lock); ret = -ENOMEM; pmu->pmu_disable_count = alloc_percpu(int); if (!pmu->pmu_disable_count) goto unlock; pmu->type = -1; if (!name) goto skip_type; pmu->name = name; if (type < 0) { type = idr_alloc(&pmu_idr, pmu, PERF_TYPE_MAX, 0, GFP_KERNEL); if (type < 0) { ret = type; goto free_pdc; } } pmu->type = type; if (pmu_bus_running) { ret = pmu_dev_alloc(pmu); if (ret) goto free_idr; } skip_type: if (pmu->task_ctx_nr == perf_hw_context) { static int 
hw_context_taken = 0; /* * Other than systems with heterogeneous CPUs, it never makes * sense for two PMUs to share perf_hw_context. PMUs which are * uncore must use perf_invalid_context. */ if (WARN_ON_ONCE(hw_context_taken && !(pmu->capabilities & PERF_PMU_CAP_HETEROGENEOUS_CPUS))) pmu->task_ctx_nr = perf_invalid_context; hw_context_taken = 1; } pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr); if (pmu->pmu_cpu_context) goto got_cpu_context; ret = -ENOMEM; pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context); if (!pmu->pmu_cpu_context) goto free_dev; for_each_possible_cpu(cpu) { struct perf_cpu_context *cpuctx; cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); __perf_event_init_context(&cpuctx->ctx); lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex); lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock); cpuctx->ctx.pmu = pmu; __perf_mux_hrtimer_init(cpuctx, cpu); cpuctx->unique_pmu = pmu; } got_cpu_context: if (!pmu->start_txn) { if (pmu->pmu_enable) { /* * If we have pmu_enable/pmu_disable calls, install * transaction stubs that use that to try and batch * hardware accesses. */ pmu->start_txn = perf_pmu_start_txn; pmu->commit_txn = perf_pmu_commit_txn; pmu->cancel_txn = perf_pmu_cancel_txn; } else { pmu->start_txn = perf_pmu_nop_txn; pmu->commit_txn = perf_pmu_nop_int; pmu->cancel_txn = perf_pmu_nop_void; } } if (!pmu->pmu_enable) { pmu->pmu_enable = perf_pmu_nop_void; pmu->pmu_disable = perf_pmu_nop_void; } if (!pmu->event_idx) pmu->event_idx = perf_event_idx_default; list_add_rcu(&pmu->entry, &pmus); atomic_set(&pmu->exclusive_cnt, 0); ret = 0; unlock: mutex_unlock(&pmus_lock); return ret; free_dev: device_del(pmu->dev); put_device(pmu->dev); free_idr: if (pmu->type >= PERF_TYPE_MAX) idr_remove(&pmu_idr, pmu->type); free_pdc: free_percpu(pmu->pmu_disable_count); goto unlock; } EXPORT_SYMBOL_GPL(perf_pmu_register); void perf_pmu_unregister(struct pmu *pmu) { int remove_device; mutex_lock(&pmus_lock); remove_device = pmu_bus_running; list_del_rcu(&pmu->entry); mutex_unlock(&pmus_lock); /* * We dereference the pmu list under both SRCU and regular RCU, so * synchronize against both of those. */ synchronize_srcu(&pmus_srcu); synchronize_rcu(); free_percpu(pmu->pmu_disable_count); if (pmu->type >= PERF_TYPE_MAX) idr_remove(&pmu_idr, pmu->type); if (remove_device) { if (pmu->nr_addr_filters) device_remove_file(pmu->dev, &dev_attr_nr_addr_filters); device_del(pmu->dev); put_device(pmu->dev); } free_pmu_context(pmu); } EXPORT_SYMBOL_GPL(perf_pmu_unregister); static int perf_try_init_event(struct pmu *pmu, struct perf_event *event) { struct perf_event_context *ctx = NULL; int ret; if (!try_module_get(pmu->module)) return -ENODEV; if (event->group_leader != event) { /* * This ctx->mutex can nest when we're called through * inheritance. See the perf_event_ctx_lock_nested() comment. 
*/ ctx = perf_event_ctx_lock_nested(event->group_leader, SINGLE_DEPTH_NESTING); BUG_ON(!ctx); } event->pmu = pmu; ret = pmu->event_init(event); if (ctx) perf_event_ctx_unlock(event->group_leader, ctx); if (ret) module_put(pmu->module); return ret; } static struct pmu *perf_init_event(struct perf_event *event) { struct pmu *pmu = NULL; int idx; int ret; idx = srcu_read_lock(&pmus_srcu); rcu_read_lock(); pmu = idr_find(&pmu_idr, event->attr.type); rcu_read_unlock(); if (pmu) { ret = perf_try_init_event(pmu, event); if (ret) pmu = ERR_PTR(ret); goto unlock; } list_for_each_entry_rcu(pmu, &pmus, entry) { ret = perf_try_init_event(pmu, event); if (!ret) goto unlock; if (ret != -ENOENT) { pmu = ERR_PTR(ret); goto unlock; } } pmu = ERR_PTR(-ENOENT); unlock: srcu_read_unlock(&pmus_srcu, idx); return pmu; } static void attach_sb_event(struct perf_event *event) { struct pmu_event_list *pel = per_cpu_ptr(&pmu_sb_events, event->cpu); raw_spin_lock(&pel->lock); list_add_rcu(&event->sb_list, &pel->list); raw_spin_unlock(&pel->lock); } /* * We keep a list of all !task (and therefore per-cpu) events * that need to receive side-band records. * * This avoids having to scan all the various PMU per-cpu contexts * looking for them. */ static void account_pmu_sb_event(struct perf_event *event) { if (is_sb_event(event)) attach_sb_event(event); } static void account_event_cpu(struct perf_event *event, int cpu) { if (event->parent) return; if (is_cgroup_event(event)) atomic_inc(&per_cpu(perf_cgroup_events, cpu)); } /* Freq events need the tick to stay alive (see perf_event_task_tick). */ static void account_freq_event_nohz(void) { #ifdef CONFIG_NO_HZ_FULL /* Lock so we don't race with concurrent unaccount */ spin_lock(&nr_freq_lock); if (atomic_inc_return(&nr_freq_events) == 1) tick_nohz_dep_set(TICK_DEP_BIT_PERF_EVENTS); spin_unlock(&nr_freq_lock); #endif } static void account_freq_event(void) { if (tick_nohz_full_enabled()) account_freq_event_nohz(); else atomic_inc(&nr_freq_events); } static void account_event(struct perf_event *event) { bool inc = false; if (event->parent) return; if (event->attach_state & PERF_ATTACH_TASK) inc = true; if (event->attr.mmap || event->attr.mmap_data) atomic_inc(&nr_mmap_events); if (event->attr.comm) atomic_inc(&nr_comm_events); if (event->attr.task) atomic_inc(&nr_task_events); if (event->attr.freq) account_freq_event(); if (event->attr.context_switch) { atomic_inc(&nr_switch_events); inc = true; } if (has_branch_stack(event)) inc = true; if (is_cgroup_event(event)) inc = true; if (inc) { if (atomic_inc_not_zero(&perf_sched_count)) goto enabled; mutex_lock(&perf_sched_mutex); if (!atomic_read(&perf_sched_count)) { static_branch_enable(&perf_sched_events); /* * Guarantee that all CPUs observe they key change and * call the perf scheduling hooks before proceeding to * install events that need them. */ synchronize_sched(); } /* * Now that we have waited for the sync_sched(), allow further * increments to by-pass the mutex. 
*/ atomic_inc(&perf_sched_count); mutex_unlock(&perf_sched_mutex); } enabled: account_event_cpu(event, event->cpu); account_pmu_sb_event(event); } /* * Allocate and initialize a event structure */ static struct perf_event * perf_event_alloc(struct perf_event_attr *attr, int cpu, struct task_struct *task, struct perf_event *group_leader, struct perf_event *parent_event, perf_overflow_handler_t overflow_handler, void *context, int cgroup_fd) { struct pmu *pmu; struct perf_event *event; struct hw_perf_event *hwc; long err = -EINVAL; if ((unsigned)cpu >= nr_cpu_ids) { if (!task || cpu != -1) return ERR_PTR(-EINVAL); } event = kzalloc(sizeof(*event), GFP_KERNEL); if (!event) return ERR_PTR(-ENOMEM); /* * Single events are their own group leaders, with an * empty sibling list: */ if (!group_leader) group_leader = event; mutex_init(&event->child_mutex); INIT_LIST_HEAD(&event->child_list); INIT_LIST_HEAD(&event->group_entry); INIT_LIST_HEAD(&event->event_entry); INIT_LIST_HEAD(&event->sibling_list); INIT_LIST_HEAD(&event->rb_entry); INIT_LIST_HEAD(&event->active_entry); INIT_LIST_HEAD(&event->addr_filters.list); INIT_HLIST_NODE(&event->hlist_entry); init_waitqueue_head(&event->waitq); init_irq_work(&event->pending, perf_pending_event); mutex_init(&event->mmap_mutex); raw_spin_lock_init(&event->addr_filters.lock); atomic_long_set(&event->refcount, 1); event->cpu = cpu; event->attr = *attr; event->group_leader = group_leader; event->pmu = NULL; event->oncpu = -1; event->parent = parent_event; event->ns = get_pid_ns(task_active_pid_ns(current)); event->id = atomic64_inc_return(&perf_event_id); event->state = PERF_EVENT_STATE_INACTIVE; if (task) { event->attach_state = PERF_ATTACH_TASK; /* * XXX pmu::event_init needs to know what task to account to * and we cannot use the ctx information because we need the * pmu before we get a ctx. 
*/ event->hw.target = task; } event->clock = &local_clock; if (parent_event) event->clock = parent_event->clock; if (!overflow_handler && parent_event) { overflow_handler = parent_event->overflow_handler; context = parent_event->overflow_handler_context; #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_EVENT_TRACING) if (overflow_handler == bpf_overflow_handler) { struct bpf_prog *prog = bpf_prog_inc(parent_event->prog); if (IS_ERR(prog)) { err = PTR_ERR(prog); goto err_ns; } event->prog = prog; event->orig_overflow_handler = parent_event->orig_overflow_handler; } #endif } if (overflow_handler) { event->overflow_handler = overflow_handler; event->overflow_handler_context = context; } else if (is_write_backward(event)){ event->overflow_handler = perf_event_output_backward; event->overflow_handler_context = NULL; } else { event->overflow_handler = perf_event_output_forward; event->overflow_handler_context = NULL; } perf_event__state_init(event); pmu = NULL; hwc = &event->hw; hwc->sample_period = attr->sample_period; if (attr->freq && attr->sample_freq) hwc->sample_period = 1; hwc->last_period = hwc->sample_period; local64_set(&hwc->period_left, hwc->sample_period); /* * we currently do not support PERF_FORMAT_GROUP on inherited events */ if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP)) goto err_ns; if (!has_branch_stack(event)) event->attr.branch_sample_type = 0; if (cgroup_fd != -1) { err = perf_cgroup_connect(cgroup_fd, event, attr, group_leader); if (err) goto err_ns; } pmu = perf_init_event(event); if (!pmu) goto err_ns; else if (IS_ERR(pmu)) { err = PTR_ERR(pmu); goto err_ns; } err = exclusive_event_init(event); if (err) goto err_pmu; if (has_addr_filter(event)) { event->addr_filters_offs = kcalloc(pmu->nr_addr_filters, sizeof(unsigned long), GFP_KERNEL); if (!event->addr_filters_offs) goto err_per_task; /* force hw sync on the address filters */ event->addr_filters_gen = 1; } if (!event->parent) { if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) { err = get_callchain_buffers(attr->sample_max_stack); if (err) goto err_addr_filters; } } /* symmetric to unaccount_event() in _free_event() */ account_event(event); return event; err_addr_filters: kfree(event->addr_filters_offs); err_per_task: exclusive_event_destroy(event); err_pmu: if (event->destroy) event->destroy(event); module_put(pmu->module); err_ns: if (is_cgroup_event(event)) perf_detach_cgroup(event); if (event->ns) put_pid_ns(event->ns); kfree(event); return ERR_PTR(err); } static int perf_copy_attr(struct perf_event_attr __user *uattr, struct perf_event_attr *attr) { u32 size; int ret; if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0)) return -EFAULT; /* * zero the full structure, so that a short copy will be nice. */ memset(attr, 0, sizeof(*attr)); ret = get_user(size, &uattr->size); if (ret) return ret; if (size > PAGE_SIZE) /* silly large */ goto err_size; if (!size) /* abi compat */ size = PERF_ATTR_SIZE_VER0; if (size < PERF_ATTR_SIZE_VER0) goto err_size; /* * If we're handed a bigger struct than we know of, * ensure all the unknown bits are 0 - i.e. new * user-space does not rely on any kernel feature * extensions we dont know about yet. 
*/ if (size > sizeof(*attr)) { unsigned char __user *addr; unsigned char __user *end; unsigned char val; addr = (void __user *)uattr + sizeof(*attr); end = (void __user *)uattr + size; for (; addr < end; addr++) { ret = get_user(val, addr); if (ret) return ret; if (val) goto err_size; } size = sizeof(*attr); } ret = copy_from_user(attr, uattr, size); if (ret) return -EFAULT; if (attr->__reserved_1) return -EINVAL; if (attr->sample_type & ~(PERF_SAMPLE_MAX-1)) return -EINVAL; if (attr->read_format & ~(PERF_FORMAT_MAX-1)) return -EINVAL; if (attr->sample_type & PERF_SAMPLE_BRANCH_STACK) { u64 mask = attr->branch_sample_type; /* only using defined bits */ if (mask & ~(PERF_SAMPLE_BRANCH_MAX-1)) return -EINVAL; /* at least one branch bit must be set */ if (!(mask & ~PERF_SAMPLE_BRANCH_PLM_ALL)) return -EINVAL; /* propagate priv level, when not set for branch */ if (!(mask & PERF_SAMPLE_BRANCH_PLM_ALL)) { /* exclude_kernel checked on syscall entry */ if (!attr->exclude_kernel) mask |= PERF_SAMPLE_BRANCH_KERNEL; if (!attr->exclude_user) mask |= PERF_SAMPLE_BRANCH_USER; if (!attr->exclude_hv) mask |= PERF_SAMPLE_BRANCH_HV; /* * adjust user setting (for HW filter setup) */ attr->branch_sample_type = mask; } /* privileged levels capture (kernel, hv): check permissions */ if ((mask & PERF_SAMPLE_BRANCH_PERM_PLM) && perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN)) return -EACCES; } if (attr->sample_type & PERF_SAMPLE_REGS_USER) { ret = perf_reg_validate(attr->sample_regs_user); if (ret) return ret; } if (attr->sample_type & PERF_SAMPLE_STACK_USER) { if (!arch_perf_have_user_stack_dump()) return -ENOSYS; /* * We have __u32 type for the size, but so far * we can only use __u16 as maximum due to the * __u16 sample size limit. */ if (attr->sample_stack_user >= USHRT_MAX) ret = -EINVAL; else if (!IS_ALIGNED(attr->sample_stack_user, sizeof(u64))) ret = -EINVAL; } if (attr->sample_type & PERF_SAMPLE_REGS_INTR) ret = perf_reg_validate(attr->sample_regs_intr); out: return ret; err_size: put_user(sizeof(*attr), &uattr->size); ret = -E2BIG; goto out; } static int perf_event_set_output(struct perf_event *event, struct perf_event *output_event) { struct ring_buffer *rb = NULL; int ret = -EINVAL; if (!output_event) goto set; /* don't allow circular references */ if (event == output_event) goto out; /* * Don't allow cross-cpu buffers */ if (output_event->cpu != event->cpu) goto out; /* * If its not a per-cpu rb, it must be the same task. */ if (output_event->cpu == -1 && output_event->ctx != event->ctx) goto out; /* * Mixing clocks in the same buffer is trouble you don't need. */ if (output_event->clock != event->clock) goto out; /* * Either writing ring buffer from beginning or from end. * Mixing is not allowed. 
*/ if (is_write_backward(output_event) != is_write_backward(event)) goto out; /* * If both events generate aux data, they must be on the same PMU */ if (has_aux(event) && has_aux(output_event) && event->pmu != output_event->pmu) goto out; set: mutex_lock(&event->mmap_mutex); /* Can't redirect output if we've got an active mmap() */ if (atomic_read(&event->mmap_count)) goto unlock; if (output_event) { /* get the rb we want to redirect to */ rb = ring_buffer_get(output_event); if (!rb) goto unlock; } ring_buffer_attach(event, rb); ret = 0; unlock: mutex_unlock(&event->mmap_mutex); out: return ret; } static void mutex_lock_double(struct mutex *a, struct mutex *b) { if (b < a) swap(a, b); mutex_lock(a); mutex_lock_nested(b, SINGLE_DEPTH_NESTING); } static int perf_event_set_clock(struct perf_event *event, clockid_t clk_id) { bool nmi_safe = false; switch (clk_id) { case CLOCK_MONOTONIC: event->clock = &ktime_get_mono_fast_ns; nmi_safe = true; break; case CLOCK_MONOTONIC_RAW: event->clock = &ktime_get_raw_fast_ns; nmi_safe = true; break; case CLOCK_REALTIME: event->clock = &ktime_get_real_ns; break; case CLOCK_BOOTTIME: event->clock = &ktime_get_boot_ns; break; case CLOCK_TAI: event->clock = &ktime_get_tai_ns; break; default: return -EINVAL; } if (!nmi_safe && !(event->pmu->capabilities & PERF_PMU_CAP_NO_NMI)) return -EINVAL; return 0; } /** * sys_perf_event_open - open a performance event, associate it to a task/cpu * * @attr_uptr: event_id type attributes for monitoring/sampling * @pid: target pid * @cpu: target cpu * @group_fd: group leader event fd */ SYSCALL_DEFINE5(perf_event_open, struct perf_event_attr __user *, attr_uptr, pid_t, pid, int, cpu, int, group_fd, unsigned long, flags) { struct perf_event *group_leader = NULL, *output_event = NULL; struct perf_event *event, *sibling; struct perf_event_attr attr; struct perf_event_context *ctx, *uninitialized_var(gctx); struct file *event_file = NULL; struct fd group = {NULL, 0}; struct task_struct *task = NULL; struct pmu *pmu; int event_fd; int move_group = 0; int err; int f_flags = O_RDWR; int cgroup_fd = -1; /* for future expandability... */ if (flags & ~PERF_FLAG_ALL) return -EINVAL; err = perf_copy_attr(attr_uptr, &attr); if (err) return err; if (!attr.exclude_kernel) { if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN)) return -EACCES; } if (attr.freq) { if (attr.sample_freq > sysctl_perf_event_sample_rate) return -EINVAL; } else { if (attr.sample_period & (1ULL << 63)) return -EINVAL; } if (!attr.sample_max_stack) attr.sample_max_stack = sysctl_perf_event_max_stack; /* * In cgroup mode, the pid argument is used to pass the fd * opened to the cgroup directory in cgroupfs. The cpu argument * designates the cpu on which to monitor threads from that * cgroup. 
*/ if ((flags & PERF_FLAG_PID_CGROUP) && (pid == -1 || cpu == -1)) return -EINVAL; if (flags & PERF_FLAG_FD_CLOEXEC) f_flags |= O_CLOEXEC; event_fd = get_unused_fd_flags(f_flags); if (event_fd < 0) return event_fd; if (group_fd != -1) { err = perf_fget_light(group_fd, &group); if (err) goto err_fd; group_leader = group.file->private_data; if (flags & PERF_FLAG_FD_OUTPUT) output_event = group_leader; if (flags & PERF_FLAG_FD_NO_GROUP) group_leader = NULL; } if (pid != -1 && !(flags & PERF_FLAG_PID_CGROUP)) { task = find_lively_task_by_vpid(pid); if (IS_ERR(task)) { err = PTR_ERR(task); goto err_group_fd; } } if (task && group_leader && group_leader->attr.inherit != attr.inherit) { err = -EINVAL; goto err_task; } get_online_cpus(); if (task) { err = mutex_lock_interruptible(&task->signal->cred_guard_mutex); if (err) goto err_cpus; /* * Reuse ptrace permission checks for now. * * We must hold cred_guard_mutex across this and any potential * perf_install_in_context() call for this new event to * serialize against exec() altering our credentials (and the * perf_event_exit_task() that could imply). */ err = -EACCES; if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) goto err_cred; } if (flags & PERF_FLAG_PID_CGROUP) cgroup_fd = pid; event = perf_event_alloc(&attr, cpu, task, group_leader, NULL, NULL, NULL, cgroup_fd); if (IS_ERR(event)) { err = PTR_ERR(event); goto err_cred; } if (is_sampling_event(event)) { if (event->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT) { err = -EOPNOTSUPP; goto err_alloc; } } /* * Special case software events and allow them to be part of * any hardware group. */ pmu = event->pmu; if (attr.use_clockid) { err = perf_event_set_clock(event, attr.clockid); if (err) goto err_alloc; } if (pmu->task_ctx_nr == perf_sw_context) event->event_caps |= PERF_EV_CAP_SOFTWARE; if (group_leader && (is_software_event(event) != is_software_event(group_leader))) { if (is_software_event(event)) { /* * If event and group_leader are not both a software * event, and event is, then group leader is not. * * Allow the addition of software events to !software * groups, this is safe because software events never * fail to schedule. */ pmu = group_leader->pmu; } else if (is_software_event(group_leader) && (group_leader->group_caps & PERF_EV_CAP_SOFTWARE)) { /* * In case the group is a pure software group, and we * try to add a hardware event, move the whole group to * the hardware context. */ move_group = 1; } } /* * Get the target context (task or percpu): */ ctx = find_get_context(pmu, task, event); if (IS_ERR(ctx)) { err = PTR_ERR(ctx); goto err_alloc; } if ((pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE) && group_leader) { err = -EBUSY; goto err_context; } /* * Look up the group leader (we will attach this event to it): */ if (group_leader) { err = -EINVAL; /* * Do not allow a recursive hierarchy (this new sibling * becoming part of another group-sibling): */ if (group_leader->group_leader != group_leader) goto err_context; /* All events in a group should have the same clock */ if (group_leader->clock != event->clock) goto err_context; /* * Do not allow to attach to a group in a different * task or CPU context: */ if (move_group) { /* * Make sure we're both on the same task, or both * per-cpu events. */ if (group_leader->ctx->task != ctx->task) goto err_context; /* * Make sure we're both events for the same CPU; * grouping events for different CPUs is broken; since * you can never concurrently schedule them anyhow. 
*/ if (group_leader->cpu != event->cpu) goto err_context; } else { if (group_leader->ctx != ctx) goto err_context; } /* * Only a group leader can be exclusive or pinned */ if (attr.exclusive || attr.pinned) goto err_context; } if (output_event) { err = perf_event_set_output(event, output_event); if (err) goto err_context; } event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, f_flags); if (IS_ERR(event_file)) { err = PTR_ERR(event_file); event_file = NULL; goto err_context; } if (move_group) { gctx = group_leader->ctx; mutex_lock_double(&gctx->mutex, &ctx->mutex); if (gctx->task == TASK_TOMBSTONE) { err = -ESRCH; goto err_locked; } } else { mutex_lock(&ctx->mutex); } if (ctx->task == TASK_TOMBSTONE) { err = -ESRCH; goto err_locked; } if (!perf_event_validate_size(event)) { err = -E2BIG; goto err_locked; } /* * Must be under the same ctx::mutex as perf_install_in_context(), * because we need to serialize with concurrent event creation. */ if (!exclusive_event_installable(event, ctx)) { /* exclusive and group stuff are assumed mutually exclusive */ WARN_ON_ONCE(move_group); err = -EBUSY; goto err_locked; } WARN_ON_ONCE(ctx->parent_ctx); /* * This is the point on no return; we cannot fail hereafter. This is * where we start modifying current state. */ if (move_group) { /* * See perf_event_ctx_lock() for comments on the details * of swizzling perf_event::ctx. */ perf_remove_from_context(group_leader, 0); list_for_each_entry(sibling, &group_leader->sibling_list, group_entry) { perf_remove_from_context(sibling, 0); put_ctx(gctx); } /* * Wait for everybody to stop referencing the events through * the old lists, before installing it on new lists. */ synchronize_rcu(); /* * Install the group siblings before the group leader. * * Because a group leader will try and install the entire group * (through the sibling list, which is still in-tact), we can * end up with siblings installed in the wrong context. * * By installing siblings first we NO-OP because they're not * reachable through the group lists. */ list_for_each_entry(sibling, &group_leader->sibling_list, group_entry) { perf_event__state_init(sibling); perf_install_in_context(ctx, sibling, sibling->cpu); get_ctx(ctx); } /* * Removing from the context ends up with disabled * event. What we want here is event in the initial * startup state, ready to be add into new context. */ perf_event__state_init(group_leader); perf_install_in_context(ctx, group_leader, group_leader->cpu); get_ctx(ctx); /* * Now that all events are installed in @ctx, nothing * references @gctx anymore, so drop the last reference we have * on it. */ put_ctx(gctx); } /* * Precalculate sample_data sizes; do while holding ctx::mutex such * that we're serialized against further additions and before * perf_install_in_context() which is the point the event is active and * can use these values. */ perf_event__header_size(event); perf_event__id_header_size(event); event->owner = current; perf_install_in_context(ctx, event, event->cpu); perf_unpin_context(ctx); if (move_group) mutex_unlock(&gctx->mutex); mutex_unlock(&ctx->mutex); if (task) { mutex_unlock(&task->signal->cred_guard_mutex); put_task_struct(task); } put_online_cpus(); mutex_lock(&current->perf_event_mutex); list_add_tail(&event->owner_entry, &current->perf_event_list); mutex_unlock(&current->perf_event_mutex); /* * Drop the reference on the group_event after placing the * new event on the sibling_list. This ensures destruction * of the group leader will find the pointer to itself in * perf_group_detach(). 
*/ fdput(group); fd_install(event_fd, event_file); return event_fd; err_locked: if (move_group) mutex_unlock(&gctx->mutex); mutex_unlock(&ctx->mutex); /* err_file: */ fput(event_file); err_context: perf_unpin_context(ctx); put_ctx(ctx); err_alloc: /* * If event_file is set, the fput() above will have called ->release() * and that will take care of freeing the event. */ if (!event_file) free_event(event); err_cred: if (task) mutex_unlock(&task->signal->cred_guard_mutex); err_cpus: put_online_cpus(); err_task: if (task) put_task_struct(task); err_group_fd: fdput(group); err_fd: put_unused_fd(event_fd); return err; } /** * perf_event_create_kernel_counter * * @attr: attributes of the counter to create * @cpu: cpu in which the counter is bound * @task: task to profile (NULL for percpu) */ struct perf_event * perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu, struct task_struct *task, perf_overflow_handler_t overflow_handler, void *context) { struct perf_event_context *ctx; struct perf_event *event; int err; /* * Get the target context (task or percpu): */ event = perf_event_alloc(attr, cpu, task, NULL, NULL, overflow_handler, context, -1); if (IS_ERR(event)) { err = PTR_ERR(event); goto err; } /* Mark owner so we could distinguish it from user events. */ event->owner = TASK_TOMBSTONE; ctx = find_get_context(event->pmu, task, event); if (IS_ERR(ctx)) { err = PTR_ERR(ctx); goto err_free; } WARN_ON_ONCE(ctx->parent_ctx); mutex_lock(&ctx->mutex); if (ctx->task == TASK_TOMBSTONE) { err = -ESRCH; goto err_unlock; } if (!exclusive_event_installable(event, ctx)) { err = -EBUSY; goto err_unlock; } perf_install_in_context(ctx, event, cpu); perf_unpin_context(ctx); mutex_unlock(&ctx->mutex); return event; err_unlock: mutex_unlock(&ctx->mutex); perf_unpin_context(ctx); put_ctx(ctx); err_free: free_event(event); err: return ERR_PTR(err); } EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter); void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu) { struct perf_event_context *src_ctx; struct perf_event_context *dst_ctx; struct perf_event *event, *tmp; LIST_HEAD(events); src_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, src_cpu)->ctx; dst_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, dst_cpu)->ctx; /* * See perf_event_ctx_lock() for comments on the details * of swizzling perf_event::ctx. */ mutex_lock_double(&src_ctx->mutex, &dst_ctx->mutex); list_for_each_entry_safe(event, tmp, &src_ctx->event_list, event_entry) { perf_remove_from_context(event, 0); unaccount_event_cpu(event, src_cpu); put_ctx(src_ctx); list_add(&event->migrate_entry, &events); } /* * Wait for the events to quiesce before re-instating them. */ synchronize_rcu(); /* * Re-instate events in 2 passes. * * Skip over group leaders and only install siblings on this first * pass, siblings will not get enabled without a leader, however a * leader will enable its siblings, even if those are still on the old * context. */ list_for_each_entry_safe(event, tmp, &events, migrate_entry) { if (event->group_leader == event) continue; list_del(&event->migrate_entry); if (event->state >= PERF_EVENT_STATE_OFF) event->state = PERF_EVENT_STATE_INACTIVE; account_event_cpu(event, dst_cpu); perf_install_in_context(dst_ctx, event, dst_cpu); get_ctx(dst_ctx); } /* * Once all the siblings are setup properly, install the group leaders * to make it go. 
*/ list_for_each_entry_safe(event, tmp, &events, migrate_entry) { list_del(&event->migrate_entry); if (event->state >= PERF_EVENT_STATE_OFF) event->state = PERF_EVENT_STATE_INACTIVE; account_event_cpu(event, dst_cpu); perf_install_in_context(dst_ctx, event, dst_cpu); get_ctx(dst_ctx); } mutex_unlock(&dst_ctx->mutex); mutex_unlock(&src_ctx->mutex); } EXPORT_SYMBOL_GPL(perf_pmu_migrate_context); static void sync_child_event(struct perf_event *child_event, struct task_struct *child) { struct perf_event *parent_event = child_event->parent; u64 child_val; if (child_event->attr.inherit_stat) perf_event_read_event(child_event, child); child_val = perf_event_count(child_event); /* * Add back the child's count to the parent's count: */ atomic64_add(child_val, &parent_event->child_count); atomic64_add(child_event->total_time_enabled, &parent_event->child_total_time_enabled); atomic64_add(child_event->total_time_running, &parent_event->child_total_time_running); } static void perf_event_exit_event(struct perf_event *child_event, struct perf_event_context *child_ctx, struct task_struct *child) { struct perf_event *parent_event = child_event->parent; /* * Do not destroy the 'original' grouping; because of the context * switch optimization the original events could've ended up in a * random child task. * * If we were to destroy the original group, all group related * operations would cease to function properly after this random * child dies. * * Do destroy all inherited groups, we don't care about those * and being thorough is better. */ raw_spin_lock_irq(&child_ctx->lock); WARN_ON_ONCE(child_ctx->is_active); if (parent_event) perf_group_detach(child_event); list_del_event(child_event, child_ctx); child_event->state = PERF_EVENT_STATE_EXIT; /* is_event_hup() */ raw_spin_unlock_irq(&child_ctx->lock); /* * Parent events are governed by their filedesc, retain them. */ if (!parent_event) { perf_event_wakeup(child_event); return; } /* * Child events can be cleaned up. */ sync_child_event(child_event, child); /* * Remove this event from the parent's list */ WARN_ON_ONCE(parent_event->ctx->parent_ctx); mutex_lock(&parent_event->child_mutex); list_del_init(&child_event->child_list); mutex_unlock(&parent_event->child_mutex); /* * Kick perf_poll() for is_event_hup(). */ perf_event_wakeup(parent_event); free_event(child_event); put_event(parent_event); } static void perf_event_exit_task_context(struct task_struct *child, int ctxn) { struct perf_event_context *child_ctx, *clone_ctx = NULL; struct perf_event *child_event, *next; WARN_ON_ONCE(child != current); child_ctx = perf_pin_task_context(child, ctxn); if (!child_ctx) return; /* * In order to reduce the amount of tricky in ctx tear-down, we hold * ctx::mutex over the entire thing. This serializes against almost * everything that wants to access the ctx. * * The exception is sys_perf_event_open() / * perf_event_create_kernel_count() which does find_get_context() * without ctx::mutex (it cannot because of the move_group double mutex * lock thing). See the comments in perf_install_in_context(). */ mutex_lock(&child_ctx->mutex); /* * In a single ctx::lock section, de-schedule the events and detach the * context from the task such that we cannot ever get it scheduled back * in. */ raw_spin_lock_irq(&child_ctx->lock); task_ctx_sched_out(__get_cpu_context(child_ctx), child_ctx); /* * Now that the context is inactive, destroy the task <-> ctx relation * and mark the context dead. 
*/ RCU_INIT_POINTER(child->perf_event_ctxp[ctxn], NULL); put_ctx(child_ctx); /* cannot be last */ WRITE_ONCE(child_ctx->task, TASK_TOMBSTONE); put_task_struct(current); /* cannot be last */ clone_ctx = unclone_ctx(child_ctx); raw_spin_unlock_irq(&child_ctx->lock); if (clone_ctx) put_ctx(clone_ctx); /* * Report the task dead after unscheduling the events so that we * won't get any samples after PERF_RECORD_EXIT. We can however still * get a few PERF_RECORD_READ events. */ perf_event_task(child, child_ctx, 0); list_for_each_entry_safe(child_event, next, &child_ctx->event_list, event_entry) perf_event_exit_event(child_event, child_ctx, child); mutex_unlock(&child_ctx->mutex); put_ctx(child_ctx); } /* * When a child task exits, feed back event values to parent events. * * Can be called with cred_guard_mutex held when called from * install_exec_creds(). */ void perf_event_exit_task(struct task_struct *child) { struct perf_event *event, *tmp; int ctxn; mutex_lock(&child->perf_event_mutex); list_for_each_entry_safe(event, tmp, &child->perf_event_list, owner_entry) { list_del_init(&event->owner_entry); /* * Ensure the list deletion is visible before we clear * the owner, closes a race against perf_release() where * we need to serialize on the owner->perf_event_mutex. */ smp_store_release(&event->owner, NULL); } mutex_unlock(&child->perf_event_mutex); for_each_task_context_nr(ctxn) perf_event_exit_task_context(child, ctxn); /* * The perf_event_exit_task_context calls perf_event_task * with child's task_ctx, which generates EXIT events for * child contexts and sets child->perf_event_ctxp[] to NULL. * At this point we need to send EXIT events to cpu contexts. */ perf_event_task(child, NULL, 0); } static void perf_free_event(struct perf_event *event, struct perf_event_context *ctx) { struct perf_event *parent = event->parent; if (WARN_ON_ONCE(!parent)) return; mutex_lock(&parent->child_mutex); list_del_init(&event->child_list); mutex_unlock(&parent->child_mutex); put_event(parent); raw_spin_lock_irq(&ctx->lock); perf_group_detach(event); list_del_event(event, ctx); raw_spin_unlock_irq(&ctx->lock); free_event(event); } /* * Free an unexposed, unused context as created by inheritance by * perf_event_init_task below, used by fork() in case of fail. * * Not all locks are strictly required, but take them anyway to be nice and * help out with the lockdep assertions. 
*/ void perf_event_free_task(struct task_struct *task) { struct perf_event_context *ctx; struct perf_event *event, *tmp; int ctxn; for_each_task_context_nr(ctxn) { ctx = task->perf_event_ctxp[ctxn]; if (!ctx) continue; mutex_lock(&ctx->mutex); again: list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry) perf_free_event(event, ctx); list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, group_entry) perf_free_event(event, ctx); if (!list_empty(&ctx->pinned_groups) || !list_empty(&ctx->flexible_groups)) goto again; mutex_unlock(&ctx->mutex); put_ctx(ctx); } } void perf_event_delayed_put(struct task_struct *task) { int ctxn; for_each_task_context_nr(ctxn) WARN_ON_ONCE(task->perf_event_ctxp[ctxn]); } struct file *perf_event_get(unsigned int fd) { struct file *file; file = fget_raw(fd); if (!file) return ERR_PTR(-EBADF); if (file->f_op != &perf_fops) { fput(file); return ERR_PTR(-EBADF); } return file; } const struct perf_event_attr *perf_event_attrs(struct perf_event *event) { if (!event) return ERR_PTR(-EINVAL); return &event->attr; } /* * inherit a event from parent task to child task: */ static struct perf_event * inherit_event(struct perf_event *parent_event, struct task_struct *parent, struct perf_event_context *parent_ctx, struct task_struct *child, struct perf_event *group_leader, struct perf_event_context *child_ctx) { enum perf_event_active_state parent_state = parent_event->state; struct perf_event *child_event; unsigned long flags; /* * Instead of creating recursive hierarchies of events, * we link inherited events back to the original parent, * which has a filp for sure, which we use as the reference * count: */ if (parent_event->parent) parent_event = parent_event->parent; child_event = perf_event_alloc(&parent_event->attr, parent_event->cpu, child, group_leader, parent_event, NULL, NULL, -1); if (IS_ERR(child_event)) return child_event; /* * is_orphaned_event() and list_add_tail(&parent_event->child_list) * must be under the same lock in order to serialize against * perf_event_release_kernel(), such that either we must observe * is_orphaned_event() or they will observe us on the child_list. */ mutex_lock(&parent_event->child_mutex); if (is_orphaned_event(parent_event) || !atomic_long_inc_not_zero(&parent_event->refcount)) { mutex_unlock(&parent_event->child_mutex); free_event(child_event); return NULL; } get_ctx(child_ctx); /* * Make the child state follow the state of the parent event, * not its attr.disabled bit. We hold the parent's mutex, * so we won't race with perf_event_{en, dis}able_family. 
*/ if (parent_state >= PERF_EVENT_STATE_INACTIVE) child_event->state = PERF_EVENT_STATE_INACTIVE; else child_event->state = PERF_EVENT_STATE_OFF; if (parent_event->attr.freq) { u64 sample_period = parent_event->hw.sample_period; struct hw_perf_event *hwc = &child_event->hw; hwc->sample_period = sample_period; hwc->last_period = sample_period; local64_set(&hwc->period_left, sample_period); } child_event->ctx = child_ctx; child_event->overflow_handler = parent_event->overflow_handler; child_event->overflow_handler_context = parent_event->overflow_handler_context; /* * Precalculate sample_data sizes */ perf_event__header_size(child_event); perf_event__id_header_size(child_event); /* * Link it up in the child's context: */ raw_spin_lock_irqsave(&child_ctx->lock, flags); add_event_to_ctx(child_event, child_ctx); raw_spin_unlock_irqrestore(&child_ctx->lock, flags); /* * Link this into the parent event's child list */ list_add_tail(&child_event->child_list, &parent_event->child_list); mutex_unlock(&parent_event->child_mutex); return child_event; } static int inherit_group(struct perf_event *parent_event, struct task_struct *parent, struct perf_event_context *parent_ctx, struct task_struct *child, struct perf_event_context *child_ctx) { struct perf_event *leader; struct perf_event *sub; struct perf_event *child_ctr; leader = inherit_event(parent_event, parent, parent_ctx, child, NULL, child_ctx); if (IS_ERR(leader)) return PTR_ERR(leader); list_for_each_entry(sub, &parent_event->sibling_list, group_entry) { child_ctr = inherit_event(sub, parent, parent_ctx, child, leader, child_ctx); if (IS_ERR(child_ctr)) return PTR_ERR(child_ctr); } return 0; } static int inherit_task_group(struct perf_event *event, struct task_struct *parent, struct perf_event_context *parent_ctx, struct task_struct *child, int ctxn, int *inherited_all) { int ret; struct perf_event_context *child_ctx; if (!event->attr.inherit) { *inherited_all = 0; return 0; } child_ctx = child->perf_event_ctxp[ctxn]; if (!child_ctx) { /* * This is executed from the parent task context, so * inherit events that have been marked for cloning. * First allocate and initialize a context for the * child. */ child_ctx = alloc_perf_context(parent_ctx->pmu, child); if (!child_ctx) return -ENOMEM; child->perf_event_ctxp[ctxn] = child_ctx; } ret = inherit_group(event, parent, parent_ctx, child, child_ctx); if (ret) *inherited_all = 0; return ret; } /* * Initialize the perf_event context in task_struct */ static int perf_event_init_context(struct task_struct *child, int ctxn) { struct perf_event_context *child_ctx, *parent_ctx; struct perf_event_context *cloned_ctx; struct perf_event *event; struct task_struct *parent = current; int inherited_all = 1; unsigned long flags; int ret = 0; if (likely(!parent->perf_event_ctxp[ctxn])) return 0; /* * If the parent's context is a clone, pin it so it won't get * swapped under us. */ parent_ctx = perf_pin_task_context(parent, ctxn); if (!parent_ctx) return 0; /* * No need to check if parent_ctx != NULL here; since we saw * it non-NULL earlier, the only reason for it to become NULL * is if we exit, and since we're currently in the middle of * a fork we can't be exiting at the same time. */ /* * Lock the parent list. No need to lock the child - not PID * hashed yet and not running, so nobody can access it. 
*/ mutex_lock(&parent_ctx->mutex); /* * We dont have to disable NMIs - we are only looking at * the list, not manipulating it: */ list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) { ret = inherit_task_group(event, parent, parent_ctx, child, ctxn, &inherited_all); if (ret) break; } /* * We can't hold ctx->lock when iterating the ->flexible_group list due * to allocations, but we need to prevent rotation because * rotate_ctx() will change the list from interrupt context. */ raw_spin_lock_irqsave(&parent_ctx->lock, flags); parent_ctx->rotate_disable = 1; raw_spin_unlock_irqrestore(&parent_ctx->lock, flags); list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) { ret = inherit_task_group(event, parent, parent_ctx, child, ctxn, &inherited_all); if (ret) break; } raw_spin_lock_irqsave(&parent_ctx->lock, flags); parent_ctx->rotate_disable = 0; child_ctx = child->perf_event_ctxp[ctxn]; if (child_ctx && inherited_all) { /* * Mark the child context as a clone of the parent * context, or of whatever the parent is a clone of. * * Note that if the parent is a clone, the holding of * parent_ctx->lock avoids it from being uncloned. */ cloned_ctx = parent_ctx->parent_ctx; if (cloned_ctx) { child_ctx->parent_ctx = cloned_ctx; child_ctx->parent_gen = parent_ctx->parent_gen; } else { child_ctx->parent_ctx = parent_ctx; child_ctx->parent_gen = parent_ctx->generation; } get_ctx(child_ctx->parent_ctx); } raw_spin_unlock_irqrestore(&parent_ctx->lock, flags); mutex_unlock(&parent_ctx->mutex); perf_unpin_context(parent_ctx); put_ctx(parent_ctx); return ret; } /* * Initialize the perf_event context in task_struct */ int perf_event_init_task(struct task_struct *child) { int ctxn, ret; memset(child->perf_event_ctxp, 0, sizeof(child->perf_event_ctxp)); mutex_init(&child->perf_event_mutex); INIT_LIST_HEAD(&child->perf_event_list); for_each_task_context_nr(ctxn) { ret = perf_event_init_context(child, ctxn); if (ret) { perf_event_free_task(child); return ret; } } return 0; } static void __init perf_event_init_all_cpus(void) { struct swevent_htable *swhash; int cpu; for_each_possible_cpu(cpu) { swhash = &per_cpu(swevent_htable, cpu); mutex_init(&swhash->hlist_mutex); INIT_LIST_HEAD(&per_cpu(active_ctx_list, cpu)); INIT_LIST_HEAD(&per_cpu(pmu_sb_events.list, cpu)); raw_spin_lock_init(&per_cpu(pmu_sb_events.lock, cpu)); INIT_LIST_HEAD(&per_cpu(sched_cb_list, cpu)); } } int perf_event_init_cpu(unsigned int cpu) { struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu); mutex_lock(&swhash->hlist_mutex); if (swhash->hlist_refcount > 0 && !swevent_hlist_deref(swhash)) { struct swevent_hlist *hlist; hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu)); WARN_ON(!hlist); rcu_assign_pointer(swhash->swevent_hlist, hlist); } mutex_unlock(&swhash->hlist_mutex); return 0; } #if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC_CORE static void __perf_event_exit_context(void *__info) { struct perf_event_context *ctx = __info; struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); struct perf_event *event; raw_spin_lock(&ctx->lock); list_for_each_entry(event, &ctx->event_list, event_entry) __perf_remove_from_context(event, cpuctx, ctx, (void *)DETACH_GROUP); raw_spin_unlock(&ctx->lock); } static void perf_event_exit_cpu_context(int cpu) { struct perf_event_context *ctx; struct pmu *pmu; int idx; idx = srcu_read_lock(&pmus_srcu); list_for_each_entry_rcu(pmu, &pmus, entry) { ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx; mutex_lock(&ctx->mutex); smp_call_function_single(cpu, 
__perf_event_exit_context, ctx, 1); mutex_unlock(&ctx->mutex); } srcu_read_unlock(&pmus_srcu, idx); } #else static void perf_event_exit_cpu_context(int cpu) { } #endif int perf_event_exit_cpu(unsigned int cpu) { perf_event_exit_cpu_context(cpu); return 0; } static int perf_reboot(struct notifier_block *notifier, unsigned long val, void *v) { int cpu; for_each_online_cpu(cpu) perf_event_exit_cpu(cpu); return NOTIFY_OK; } /* * Run the perf reboot notifier at the very last possible moment so that * the generic watchdog code runs as long as possible. */ static struct notifier_block perf_reboot_notifier = { .notifier_call = perf_reboot, .priority = INT_MIN, }; void __init perf_event_init(void) { int ret; idr_init(&pmu_idr); perf_event_init_all_cpus(); init_srcu_struct(&pmus_srcu); perf_pmu_register(&perf_swevent, "software", PERF_TYPE_SOFTWARE); perf_pmu_register(&perf_cpu_clock, NULL, -1); perf_pmu_register(&perf_task_clock, NULL, -1); perf_tp_register(); perf_event_init_cpu(smp_processor_id()); register_reboot_notifier(&perf_reboot_notifier); ret = init_hw_breakpoint(); WARN(ret, "hw_breakpoint initialization failed with: %d", ret); /* * Build time assertion that we keep the data_head at the intended * location. IOW, validation we got the __reserved[] size right. */ BUILD_BUG_ON((offsetof(struct perf_event_mmap_page, data_head)) != 1024); } ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr, char *page) { struct perf_pmu_events_attr *pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr); if (pmu_attr->event_str) return sprintf(page, "%s\n", pmu_attr->event_str); return 0; } EXPORT_SYMBOL_GPL(perf_event_sysfs_show); static int __init perf_event_sysfs_init(void) { struct pmu *pmu; int ret; mutex_lock(&pmus_lock); ret = bus_register(&pmu_bus); if (ret) goto unlock; list_for_each_entry(pmu, &pmus, entry) { if (!pmu->name || pmu->type < 0) continue; ret = pmu_dev_alloc(pmu); WARN(ret, "Failed to register pmu: %s, reason %d\n", pmu->name, ret); } pmu_bus_running = 1; ret = 0; unlock: mutex_unlock(&pmus_lock); return ret; } device_initcall(perf_event_sysfs_init); #ifdef CONFIG_CGROUP_PERF static struct cgroup_subsys_state * perf_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) { struct perf_cgroup *jc; jc = kzalloc(sizeof(*jc), GFP_KERNEL); if (!jc) return ERR_PTR(-ENOMEM); jc->info = alloc_percpu(struct perf_cgroup_info); if (!jc->info) { kfree(jc); return ERR_PTR(-ENOMEM); } return &jc->css; } static void perf_cgroup_css_free(struct cgroup_subsys_state *css) { struct perf_cgroup *jc = container_of(css, struct perf_cgroup, css); free_percpu(jc->info); kfree(jc); } static int __perf_cgroup_move(void *info) { struct task_struct *task = info; rcu_read_lock(); perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN); rcu_read_unlock(); return 0; } static void perf_cgroup_attach(struct cgroup_taskset *tset) { struct task_struct *task; struct cgroup_subsys_state *css; cgroup_taskset_for_each(task, css, tset) task_function_call(task, __perf_cgroup_move, task); } struct cgroup_subsys perf_event_cgrp_subsys = { .css_alloc = perf_cgroup_css_alloc, .css_free = perf_cgroup_css_free, .attach = perf_cgroup_attach, }; #endif /* CONFIG_CGROUP_PERF */
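/*
 * A minimal sketch of how kernel code might use the in-kernel counter API
 * documented above: create a per-CPU software counter with
 * perf_event_create_kernel_counter() and later drop it with
 * perf_event_release_kernel(). The function name below is hypothetical;
 * the attr fields and the NULL @task convention (per-CPU counter) follow
 * the definitions and kernel-doc in this file.
 */
static struct perf_event * __maybe_unused
perf_sketch_cpu_clock_counter(int cpu)
{
	struct perf_event_attr attr = {
		.type   = PERF_TYPE_SOFTWARE,
		.config = PERF_COUNT_SW_CPU_CLOCK,
		.size   = sizeof(attr),
	};
	struct perf_event *event;

	/* task == NULL requests a per-CPU counter bound to @cpu */
	event = perf_event_create_kernel_counter(&attr, cpu, NULL,
						 NULL /* overflow_handler */,
						 NULL /* context */);
	if (IS_ERR(event))
		return event;

	/* use the counter, then tear it down with perf_event_release_kernel() */
	return event;
}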
./CrossVul/dataset_final_sorted/CWE-362/c/bad_3159_0
crossvul-cpp_data_good_2599_0
/* * platform.c - platform 'pseudo' bus for legacy devices * * Copyright (c) 2002-3 Patrick Mochel * Copyright (c) 2002-3 Open Source Development Labs * * This file is released under the GPLv2 * * Please see Documentation/driver-model/platform.txt for more * information. */ #include <linux/string.h> #include <linux/platform_device.h> #include <linux/of_device.h> #include <linux/of_irq.h> #include <linux/module.h> #include <linux/init.h> #include <linux/dma-mapping.h> #include <linux/bootmem.h> #include <linux/err.h> #include <linux/slab.h> #include <linux/pm_runtime.h> #include <linux/pm_domain.h> #include <linux/idr.h> #include <linux/acpi.h> #include <linux/clk/clk-conf.h> #include <linux/limits.h> #include <linux/property.h> #include "base.h" #include "power/power.h" /* For automatically allocated device IDs */ static DEFINE_IDA(platform_devid_ida); struct device platform_bus = { .init_name = "platform", }; EXPORT_SYMBOL_GPL(platform_bus); /** * arch_setup_pdev_archdata - Allow manipulation of archdata before its used * @pdev: platform device * * This is called before platform_device_add() such that any pdev_archdata may * be setup before the platform_notifier is called. So if a user needs to * manipulate any relevant information in the pdev_archdata they can do: * * platform_device_alloc() * ... manipulate ... * platform_device_add() * * And if they don't care they can just call platform_device_register() and * everything will just work out. */ void __weak arch_setup_pdev_archdata(struct platform_device *pdev) { } /** * platform_get_resource - get a resource for a device * @dev: platform device * @type: resource type * @num: resource index */ struct resource *platform_get_resource(struct platform_device *dev, unsigned int type, unsigned int num) { int i; for (i = 0; i < dev->num_resources; i++) { struct resource *r = &dev->resource[i]; if (type == resource_type(r) && num-- == 0) return r; } return NULL; } EXPORT_SYMBOL_GPL(platform_get_resource); /** * platform_get_irq - get an IRQ for a device * @dev: platform device * @num: IRQ number index */ int platform_get_irq(struct platform_device *dev, unsigned int num) { #ifdef CONFIG_SPARC /* sparc does not have irqs represented as IORESOURCE_IRQ resources */ if (!dev || num >= dev->archdata.num_irqs) return -ENXIO; return dev->archdata.irqs[num]; #else struct resource *r; if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) { int ret; ret = of_irq_get(dev->dev.of_node, num); if (ret > 0 || ret == -EPROBE_DEFER) return ret; } r = platform_get_resource(dev, IORESOURCE_IRQ, num); if (has_acpi_companion(&dev->dev)) { if (r && r->flags & IORESOURCE_DISABLED) { int ret; ret = acpi_irq_get(ACPI_HANDLE(&dev->dev), num, r); if (ret) return ret; } } /* * The resources may pass trigger flags to the irqs that need * to be set up. It so happens that the trigger flags for * IORESOURCE_BITS correspond 1-to-1 to the IRQF_TRIGGER* * settings. */ if (r && r->flags & IORESOURCE_BITS) { struct irq_data *irqd; irqd = irq_get_irq_data(r->start); if (!irqd) return -ENXIO; irqd_set_trigger_type(irqd, r->flags & IORESOURCE_BITS); } return r ? 
r->start : -ENXIO; #endif } EXPORT_SYMBOL_GPL(platform_get_irq); /** * platform_irq_count - Count the number of IRQs a platform device uses * @dev: platform device * * Return: Number of IRQs a platform device uses or EPROBE_DEFER */ int platform_irq_count(struct platform_device *dev) { int ret, nr = 0; while ((ret = platform_get_irq(dev, nr)) >= 0) nr++; if (ret == -EPROBE_DEFER) return ret; return nr; } EXPORT_SYMBOL_GPL(platform_irq_count); /** * platform_get_resource_byname - get a resource for a device by name * @dev: platform device * @type: resource type * @name: resource name */ struct resource *platform_get_resource_byname(struct platform_device *dev, unsigned int type, const char *name) { int i; for (i = 0; i < dev->num_resources; i++) { struct resource *r = &dev->resource[i]; if (unlikely(!r->name)) continue; if (type == resource_type(r) && !strcmp(r->name, name)) return r; } return NULL; } EXPORT_SYMBOL_GPL(platform_get_resource_byname); /** * platform_get_irq_byname - get an IRQ for a device by name * @dev: platform device * @name: IRQ name */ int platform_get_irq_byname(struct platform_device *dev, const char *name) { struct resource *r; if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) { int ret; ret = of_irq_get_byname(dev->dev.of_node, name); if (ret > 0 || ret == -EPROBE_DEFER) return ret; } r = platform_get_resource_byname(dev, IORESOURCE_IRQ, name); return r ? r->start : -ENXIO; } EXPORT_SYMBOL_GPL(platform_get_irq_byname); /** * platform_add_devices - add a numbers of platform devices * @devs: array of platform devices to add * @num: number of platform devices in array */ int platform_add_devices(struct platform_device **devs, int num) { int i, ret = 0; for (i = 0; i < num; i++) { ret = platform_device_register(devs[i]); if (ret) { while (--i >= 0) platform_device_unregister(devs[i]); break; } } return ret; } EXPORT_SYMBOL_GPL(platform_add_devices); struct platform_object { struct platform_device pdev; char name[]; }; /** * platform_device_put - destroy a platform device * @pdev: platform device to free * * Free all memory associated with a platform device. This function must * _only_ be externally called in error cases. All other usage is a bug. */ void platform_device_put(struct platform_device *pdev) { if (pdev) put_device(&pdev->dev); } EXPORT_SYMBOL_GPL(platform_device_put); static void platform_device_release(struct device *dev) { struct platform_object *pa = container_of(dev, struct platform_object, pdev.dev); of_device_node_put(&pa->pdev.dev); kfree(pa->pdev.dev.platform_data); kfree(pa->pdev.mfd_cell); kfree(pa->pdev.resource); kfree(pa->pdev.driver_override); kfree(pa); } /** * platform_device_alloc - create a platform device * @name: base name of the device we're adding * @id: instance id * * Create a platform device object which can have other objects attached * to it, and which will have attached objects freed when it is released. */ struct platform_device *platform_device_alloc(const char *name, int id) { struct platform_object *pa; pa = kzalloc(sizeof(*pa) + strlen(name) + 1, GFP_KERNEL); if (pa) { strcpy(pa->name, name); pa->pdev.name = pa->name; pa->pdev.id = id; device_initialize(&pa->pdev.dev); pa->pdev.dev.release = platform_device_release; arch_setup_pdev_archdata(&pa->pdev); } return pa ? 
&pa->pdev : NULL; } EXPORT_SYMBOL_GPL(platform_device_alloc); /** * platform_device_add_resources - add resources to a platform device * @pdev: platform device allocated by platform_device_alloc to add resources to * @res: set of resources that needs to be allocated for the device * @num: number of resources * * Add a copy of the resources to the platform device. The memory * associated with the resources will be freed when the platform device is * released. */ int platform_device_add_resources(struct platform_device *pdev, const struct resource *res, unsigned int num) { struct resource *r = NULL; if (res) { r = kmemdup(res, sizeof(struct resource) * num, GFP_KERNEL); if (!r) return -ENOMEM; } kfree(pdev->resource); pdev->resource = r; pdev->num_resources = num; return 0; } EXPORT_SYMBOL_GPL(platform_device_add_resources); /** * platform_device_add_data - add platform-specific data to a platform device * @pdev: platform device allocated by platform_device_alloc to add resources to * @data: platform specific data for this platform device * @size: size of platform specific data * * Add a copy of platform specific data to the platform device's * platform_data pointer. The memory associated with the platform data * will be freed when the platform device is released. */ int platform_device_add_data(struct platform_device *pdev, const void *data, size_t size) { void *d = NULL; if (data) { d = kmemdup(data, size, GFP_KERNEL); if (!d) return -ENOMEM; } kfree(pdev->dev.platform_data); pdev->dev.platform_data = d; return 0; } EXPORT_SYMBOL_GPL(platform_device_add_data); /** * platform_device_add_properties - add built-in properties to a platform device * @pdev: platform device to add properties to * @properties: null terminated array of properties to add * * The function will take deep copy of @properties and attach the copy to the * platform device. The memory associated with properties will be freed when the * platform device is released. */ int platform_device_add_properties(struct platform_device *pdev, struct property_entry *properties) { return device_add_properties(&pdev->dev, properties); } EXPORT_SYMBOL_GPL(platform_device_add_properties); /** * platform_device_add - add a platform device to device hierarchy * @pdev: platform device we're adding * * This is part 2 of platform_device_register(), though may be called * separately _iff_ pdev was allocated by platform_device_alloc(). */ int platform_device_add(struct platform_device *pdev) { int i, ret; if (!pdev) return -EINVAL; if (!pdev->dev.parent) pdev->dev.parent = &platform_bus; pdev->dev.bus = &platform_bus_type; switch (pdev->id) { default: dev_set_name(&pdev->dev, "%s.%d", pdev->name, pdev->id); break; case PLATFORM_DEVID_NONE: dev_set_name(&pdev->dev, "%s", pdev->name); break; case PLATFORM_DEVID_AUTO: /* * Automatically allocated device ID. We mark it as such so * that we remember it must be freed, and we append a suffix * to avoid namespace collision with explicit IDs. 
*/ ret = ida_simple_get(&platform_devid_ida, 0, 0, GFP_KERNEL); if (ret < 0) goto err_out; pdev->id = ret; pdev->id_auto = true; dev_set_name(&pdev->dev, "%s.%d.auto", pdev->name, pdev->id); break; } for (i = 0; i < pdev->num_resources; i++) { struct resource *p, *r = &pdev->resource[i]; if (r->name == NULL) r->name = dev_name(&pdev->dev); p = r->parent; if (!p) { if (resource_type(r) == IORESOURCE_MEM) p = &iomem_resource; else if (resource_type(r) == IORESOURCE_IO) p = &ioport_resource; } if (p && insert_resource(p, r)) { dev_err(&pdev->dev, "failed to claim resource %d: %pR\n", i, r); ret = -EBUSY; goto failed; } } pr_debug("Registering platform device '%s'. Parent at %s\n", dev_name(&pdev->dev), dev_name(pdev->dev.parent)); ret = device_add(&pdev->dev); if (ret == 0) return ret; failed: if (pdev->id_auto) { ida_simple_remove(&platform_devid_ida, pdev->id); pdev->id = PLATFORM_DEVID_AUTO; } while (--i >= 0) { struct resource *r = &pdev->resource[i]; if (r->parent) release_resource(r); } err_out: return ret; } EXPORT_SYMBOL_GPL(platform_device_add); /** * platform_device_del - remove a platform-level device * @pdev: platform device we're removing * * Note that this function will also release all memory- and port-based * resources owned by the device (@dev->resource). This function must * _only_ be externally called in error cases. All other usage is a bug. */ void platform_device_del(struct platform_device *pdev) { int i; if (pdev) { device_remove_properties(&pdev->dev); device_del(&pdev->dev); if (pdev->id_auto) { ida_simple_remove(&platform_devid_ida, pdev->id); pdev->id = PLATFORM_DEVID_AUTO; } for (i = 0; i < pdev->num_resources; i++) { struct resource *r = &pdev->resource[i]; if (r->parent) release_resource(r); } } } EXPORT_SYMBOL_GPL(platform_device_del); /** * platform_device_register - add a platform-level device * @pdev: platform device we're adding */ int platform_device_register(struct platform_device *pdev) { device_initialize(&pdev->dev); arch_setup_pdev_archdata(pdev); return platform_device_add(pdev); } EXPORT_SYMBOL_GPL(platform_device_register); /** * platform_device_unregister - unregister a platform-level device * @pdev: platform device we're unregistering * * Unregistration is done in 2 steps. First we release all resources * and remove it from the subsystem, then we drop reference count by * calling platform_device_put(). */ void platform_device_unregister(struct platform_device *pdev) { platform_device_del(pdev); platform_device_put(pdev); } EXPORT_SYMBOL_GPL(platform_device_unregister); /** * platform_device_register_full - add a platform-level device with * resources and platform-specific data * * @pdevinfo: data used to create device * * Returns &struct platform_device pointer on success, or ERR_PTR() on error. */ struct platform_device *platform_device_register_full( const struct platform_device_info *pdevinfo) { int ret = -ENOMEM; struct platform_device *pdev; pdev = platform_device_alloc(pdevinfo->name, pdevinfo->id); if (!pdev) goto err_alloc; pdev->dev.parent = pdevinfo->parent; pdev->dev.fwnode = pdevinfo->fwnode; if (pdevinfo->dma_mask) { /* * This memory isn't freed when the device is put, * I don't have a nice idea for that though. Conceptually * dma_mask in struct device should not be a pointer. 
* See http://thread.gmane.org/gmane.linux.kernel.pci/9081 */ pdev->dev.dma_mask = kmalloc(sizeof(*pdev->dev.dma_mask), GFP_KERNEL); if (!pdev->dev.dma_mask) goto err; *pdev->dev.dma_mask = pdevinfo->dma_mask; pdev->dev.coherent_dma_mask = pdevinfo->dma_mask; } ret = platform_device_add_resources(pdev, pdevinfo->res, pdevinfo->num_res); if (ret) goto err; ret = platform_device_add_data(pdev, pdevinfo->data, pdevinfo->size_data); if (ret) goto err; if (pdevinfo->properties) { ret = platform_device_add_properties(pdev, pdevinfo->properties); if (ret) goto err; } ret = platform_device_add(pdev); if (ret) { err: ACPI_COMPANION_SET(&pdev->dev, NULL); kfree(pdev->dev.dma_mask); err_alloc: platform_device_put(pdev); return ERR_PTR(ret); } return pdev; } EXPORT_SYMBOL_GPL(platform_device_register_full); static int platform_drv_probe(struct device *_dev) { struct platform_driver *drv = to_platform_driver(_dev->driver); struct platform_device *dev = to_platform_device(_dev); int ret; ret = of_clk_set_defaults(_dev->of_node, false); if (ret < 0) return ret; ret = dev_pm_domain_attach(_dev, true); if (ret != -EPROBE_DEFER) { if (drv->probe) { ret = drv->probe(dev); if (ret) dev_pm_domain_detach(_dev, true); } else { /* don't fail if just dev_pm_domain_attach failed */ ret = 0; } } if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) { dev_warn(_dev, "probe deferral not supported\n"); ret = -ENXIO; } return ret; } static int platform_drv_probe_fail(struct device *_dev) { return -ENXIO; } static int platform_drv_remove(struct device *_dev) { struct platform_driver *drv = to_platform_driver(_dev->driver); struct platform_device *dev = to_platform_device(_dev); int ret = 0; if (drv->remove) ret = drv->remove(dev); dev_pm_domain_detach(_dev, true); return ret; } static void platform_drv_shutdown(struct device *_dev) { struct platform_driver *drv = to_platform_driver(_dev->driver); struct platform_device *dev = to_platform_device(_dev); if (drv->shutdown) drv->shutdown(dev); } /** * __platform_driver_register - register a driver for platform-level devices * @drv: platform driver structure * @owner: owning module/driver */ int __platform_driver_register(struct platform_driver *drv, struct module *owner) { drv->driver.owner = owner; drv->driver.bus = &platform_bus_type; drv->driver.probe = platform_drv_probe; drv->driver.remove = platform_drv_remove; drv->driver.shutdown = platform_drv_shutdown; return driver_register(&drv->driver); } EXPORT_SYMBOL_GPL(__platform_driver_register); /** * platform_driver_unregister - unregister a driver for platform-level devices * @drv: platform driver structure */ void platform_driver_unregister(struct platform_driver *drv) { driver_unregister(&drv->driver); } EXPORT_SYMBOL_GPL(platform_driver_unregister); /** * __platform_driver_probe - register driver for non-hotpluggable device * @drv: platform driver structure * @probe: the driver probe routine, probably from an __init section * @module: module which will be the owner of the driver * * Use this instead of platform_driver_register() when you know the device * is not hotpluggable and has already been registered, and you want to * remove its run-once probe() infrastructure from memory after the driver * has bound to the device. * * One typical use for this would be with drivers for controllers integrated * into system-on-chip processors, where the controller devices have been * configured as part of board setup. * * Note that this is incompatible with deferred probing. 
* * Returns zero if the driver registered and bound to a device, else returns * a negative error code and with the driver not registered. */ int __init_or_module __platform_driver_probe(struct platform_driver *drv, int (*probe)(struct platform_device *), struct module *module) { int retval, code; if (drv->driver.probe_type == PROBE_PREFER_ASYNCHRONOUS) { pr_err("%s: drivers registered with %s can not be probed asynchronously\n", drv->driver.name, __func__); return -EINVAL; } /* * We have to run our probes synchronously because we check if * we find any devices to bind to and exit with error if there * are any. */ drv->driver.probe_type = PROBE_FORCE_SYNCHRONOUS; /* * Prevent driver from requesting probe deferral to avoid further * futile probe attempts. */ drv->prevent_deferred_probe = true; /* make sure driver won't have bind/unbind attributes */ drv->driver.suppress_bind_attrs = true; /* temporary section violation during probe() */ drv->probe = probe; retval = code = __platform_driver_register(drv, module); /* * Fixup that section violation, being paranoid about code scanning * the list of drivers in order to probe new devices. Check to see * if the probe was successful, and make sure any forced probes of * new devices fail. */ spin_lock(&drv->driver.bus->p->klist_drivers.k_lock); drv->probe = NULL; if (code == 0 && list_empty(&drv->driver.p->klist_devices.k_list)) retval = -ENODEV; drv->driver.probe = platform_drv_probe_fail; spin_unlock(&drv->driver.bus->p->klist_drivers.k_lock); if (code != retval) platform_driver_unregister(drv); return retval; } EXPORT_SYMBOL_GPL(__platform_driver_probe); /** * __platform_create_bundle - register driver and create corresponding device * @driver: platform driver structure * @probe: the driver probe routine, probably from an __init section * @res: set of resources that needs to be allocated for the device * @n_res: number of resources * @data: platform specific data for this platform device * @size: size of platform specific data * @module: module which will be the owner of the driver * * Use this in legacy-style modules that probe hardware directly and * register a single platform device and corresponding platform driver. * * Returns &struct platform_device pointer on success, or ERR_PTR() on error. */ struct platform_device * __init_or_module __platform_create_bundle( struct platform_driver *driver, int (*probe)(struct platform_device *), struct resource *res, unsigned int n_res, const void *data, size_t size, struct module *module) { struct platform_device *pdev; int error; pdev = platform_device_alloc(driver->driver.name, -1); if (!pdev) { error = -ENOMEM; goto err_out; } error = platform_device_add_resources(pdev, res, n_res); if (error) goto err_pdev_put; error = platform_device_add_data(pdev, data, size); if (error) goto err_pdev_put; error = platform_device_add(pdev); if (error) goto err_pdev_put; error = __platform_driver_probe(driver, probe, module); if (error) goto err_pdev_del; return pdev; err_pdev_del: platform_device_del(pdev); err_pdev_put: platform_device_put(pdev); err_out: return ERR_PTR(error); } EXPORT_SYMBOL_GPL(__platform_create_bundle); /** * __platform_register_drivers - register an array of platform drivers * @drivers: an array of drivers to register * @count: the number of drivers to register * @owner: module owning the drivers * * Registers platform drivers specified by an array. On failure to register a * driver, all previously registered drivers will be unregistered. 
Callers of * this API should use platform_unregister_drivers() to unregister drivers in * the reverse order. * * Returns: 0 on success or a negative error code on failure. */ int __platform_register_drivers(struct platform_driver * const *drivers, unsigned int count, struct module *owner) { unsigned int i; int err; for (i = 0; i < count; i++) { pr_debug("registering platform driver %ps\n", drivers[i]); err = __platform_driver_register(drivers[i], owner); if (err < 0) { pr_err("failed to register platform driver %ps: %d\n", drivers[i], err); goto error; } } return 0; error: while (i--) { pr_debug("unregistering platform driver %ps\n", drivers[i]); platform_driver_unregister(drivers[i]); } return err; } EXPORT_SYMBOL_GPL(__platform_register_drivers); /** * platform_unregister_drivers - unregister an array of platform drivers * @drivers: an array of drivers to unregister * @count: the number of drivers to unregister * * Unegisters platform drivers specified by an array. This is typically used * to complement an earlier call to platform_register_drivers(). Drivers are * unregistered in the reverse order in which they were registered. */ void platform_unregister_drivers(struct platform_driver * const *drivers, unsigned int count) { while (count--) { pr_debug("unregistering platform driver %ps\n", drivers[count]); platform_driver_unregister(drivers[count]); } } EXPORT_SYMBOL_GPL(platform_unregister_drivers); /* modalias support enables more hands-off userspace setup: * (a) environment variable lets new-style hotplug events work once system is * fully running: "modprobe $MODALIAS" * (b) sysfs attribute lets new-style coldplug recover from hotplug events * mishandled before system is fully running: "modprobe $(cat modalias)" */ static ssize_t modalias_show(struct device *dev, struct device_attribute *a, char *buf) { struct platform_device *pdev = to_platform_device(dev); int len; len = of_device_modalias(dev, buf, PAGE_SIZE); if (len != -ENODEV) return len; len = acpi_device_modalias(dev, buf, PAGE_SIZE -1); if (len != -ENODEV) return len; len = snprintf(buf, PAGE_SIZE, "platform:%s\n", pdev->name); return (len >= PAGE_SIZE) ? 
(PAGE_SIZE - 1) : len; } static DEVICE_ATTR_RO(modalias); static ssize_t driver_override_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct platform_device *pdev = to_platform_device(dev); char *driver_override, *old, *cp; if (count > PATH_MAX) return -EINVAL; driver_override = kstrndup(buf, count, GFP_KERNEL); if (!driver_override) return -ENOMEM; cp = strchr(driver_override, '\n'); if (cp) *cp = '\0'; device_lock(dev); old = pdev->driver_override; if (strlen(driver_override)) { pdev->driver_override = driver_override; } else { kfree(driver_override); pdev->driver_override = NULL; } device_unlock(dev); kfree(old); return count; } static ssize_t driver_override_show(struct device *dev, struct device_attribute *attr, char *buf) { struct platform_device *pdev = to_platform_device(dev); ssize_t len; device_lock(dev); len = sprintf(buf, "%s\n", pdev->driver_override); device_unlock(dev); return len; } static DEVICE_ATTR_RW(driver_override); static struct attribute *platform_dev_attrs[] = { &dev_attr_modalias.attr, &dev_attr_driver_override.attr, NULL, }; ATTRIBUTE_GROUPS(platform_dev); static int platform_uevent(struct device *dev, struct kobj_uevent_env *env) { struct platform_device *pdev = to_platform_device(dev); int rc; /* Some devices have extra OF data and an OF-style MODALIAS */ rc = of_device_uevent_modalias(dev, env); if (rc != -ENODEV) return rc; rc = acpi_device_uevent_modalias(dev, env); if (rc != -ENODEV) return rc; add_uevent_var(env, "MODALIAS=%s%s", PLATFORM_MODULE_PREFIX, pdev->name); return 0; } static const struct platform_device_id *platform_match_id( const struct platform_device_id *id, struct platform_device *pdev) { while (id->name[0]) { if (strcmp(pdev->name, id->name) == 0) { pdev->id_entry = id; return id; } id++; } return NULL; } /** * platform_match - bind platform device to platform driver. * @dev: device. * @drv: driver. * * Platform device IDs are assumed to be encoded like this: * "<name><instance>", where <name> is a short description of the type of * device, like "pci" or "floppy", and <instance> is the enumerated * instance of the device, like '0' or '42'. Driver IDs are simply * "<name>". So, extract the <name> from the platform_device structure, * and compare it against the name of the driver. Return whether they match * or not. 
*/ static int platform_match(struct device *dev, struct device_driver *drv) { struct platform_device *pdev = to_platform_device(dev); struct platform_driver *pdrv = to_platform_driver(drv); /* When driver_override is set, only bind to the matching driver */ if (pdev->driver_override) return !strcmp(pdev->driver_override, drv->name); /* Attempt an OF style match first */ if (of_driver_match_device(dev, drv)) return 1; /* Then try ACPI style match */ if (acpi_driver_match_device(dev, drv)) return 1; /* Then try to match against the id table */ if (pdrv->id_table) return platform_match_id(pdrv->id_table, pdev) != NULL; /* fall-back to driver name match */ return (strcmp(pdev->name, drv->name) == 0); } #ifdef CONFIG_PM_SLEEP static int platform_legacy_suspend(struct device *dev, pm_message_t mesg) { struct platform_driver *pdrv = to_platform_driver(dev->driver); struct platform_device *pdev = to_platform_device(dev); int ret = 0; if (dev->driver && pdrv->suspend) ret = pdrv->suspend(pdev, mesg); return ret; } static int platform_legacy_resume(struct device *dev) { struct platform_driver *pdrv = to_platform_driver(dev->driver); struct platform_device *pdev = to_platform_device(dev); int ret = 0; if (dev->driver && pdrv->resume) ret = pdrv->resume(pdev); return ret; } #endif /* CONFIG_PM_SLEEP */ #ifdef CONFIG_SUSPEND int platform_pm_suspend(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; if (!drv) return 0; if (drv->pm) { if (drv->pm->suspend) ret = drv->pm->suspend(dev); } else { ret = platform_legacy_suspend(dev, PMSG_SUSPEND); } return ret; } int platform_pm_resume(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; if (!drv) return 0; if (drv->pm) { if (drv->pm->resume) ret = drv->pm->resume(dev); } else { ret = platform_legacy_resume(dev); } return ret; } #endif /* CONFIG_SUSPEND */ #ifdef CONFIG_HIBERNATE_CALLBACKS int platform_pm_freeze(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; if (!drv) return 0; if (drv->pm) { if (drv->pm->freeze) ret = drv->pm->freeze(dev); } else { ret = platform_legacy_suspend(dev, PMSG_FREEZE); } return ret; } int platform_pm_thaw(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; if (!drv) return 0; if (drv->pm) { if (drv->pm->thaw) ret = drv->pm->thaw(dev); } else { ret = platform_legacy_resume(dev); } return ret; } int platform_pm_poweroff(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; if (!drv) return 0; if (drv->pm) { if (drv->pm->poweroff) ret = drv->pm->poweroff(dev); } else { ret = platform_legacy_suspend(dev, PMSG_HIBERNATE); } return ret; } int platform_pm_restore(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; if (!drv) return 0; if (drv->pm) { if (drv->pm->restore) ret = drv->pm->restore(dev); } else { ret = platform_legacy_resume(dev); } return ret; } #endif /* CONFIG_HIBERNATE_CALLBACKS */ static const struct dev_pm_ops platform_dev_pm_ops = { .runtime_suspend = pm_generic_runtime_suspend, .runtime_resume = pm_generic_runtime_resume, USE_PLATFORM_PM_SLEEP_OPS }; struct bus_type platform_bus_type = { .name = "platform", .dev_groups = platform_dev_groups, .match = platform_match, .uevent = platform_uevent, .pm = &platform_dev_pm_ops, }; EXPORT_SYMBOL_GPL(platform_bus_type); int __init platform_bus_init(void) { int error; early_platform_cleanup(); error = device_register(&platform_bus); if (error) return error; error = bus_register(&platform_bus_type); if (error) 
device_unregister(&platform_bus); of_platform_register_reconfig_notifier(); return error; } #ifndef ARCH_HAS_DMA_GET_REQUIRED_MASK u64 dma_get_required_mask(struct device *dev) { u32 low_totalram = ((max_pfn - 1) << PAGE_SHIFT); u32 high_totalram = ((max_pfn - 1) >> (32 - PAGE_SHIFT)); u64 mask; if (!high_totalram) { /* convert to mask just covering totalram */ low_totalram = (1 << (fls(low_totalram) - 1)); low_totalram += low_totalram - 1; mask = low_totalram; } else { high_totalram = (1 << (fls(high_totalram) - 1)); high_totalram += high_totalram - 1; mask = (((u64)high_totalram) << 32) + 0xffffffff; } return mask; } EXPORT_SYMBOL_GPL(dma_get_required_mask); #endif static __initdata LIST_HEAD(early_platform_driver_list); static __initdata LIST_HEAD(early_platform_device_list); /** * early_platform_driver_register - register early platform driver * @epdrv: early_platform driver structure * @buf: string passed from early_param() * * Helper function for early_platform_init() / early_platform_init_buffer() */ int __init early_platform_driver_register(struct early_platform_driver *epdrv, char *buf) { char *tmp; int n; /* Simply add the driver to the end of the global list. * Drivers will by default be put on the list in compiled-in order. */ if (!epdrv->list.next) { INIT_LIST_HEAD(&epdrv->list); list_add_tail(&epdrv->list, &early_platform_driver_list); } /* If the user has specified device then make sure the driver * gets prioritized. The driver of the last device specified on * command line will be put first on the list. */ n = strlen(epdrv->pdrv->driver.name); if (buf && !strncmp(buf, epdrv->pdrv->driver.name, n)) { list_move(&epdrv->list, &early_platform_driver_list); /* Allow passing parameters after device name */ if (buf[n] == '\0' || buf[n] == ',') epdrv->requested_id = -1; else { epdrv->requested_id = simple_strtoul(&buf[n + 1], &tmp, 10); if (buf[n] != '.' || (tmp == &buf[n + 1])) { epdrv->requested_id = EARLY_PLATFORM_ID_ERROR; n = 0; } else n += strcspn(&buf[n + 1], ",") + 1; } if (buf[n] == ',') n++; if (epdrv->bufsize) { memcpy(epdrv->buffer, &buf[n], min_t(int, epdrv->bufsize, strlen(&buf[n]) + 1)); epdrv->buffer[epdrv->bufsize - 1] = '\0'; } } return 0; } /** * early_platform_add_devices - adds a number of early platform devices * @devs: array of early platform devices to add * @num: number of early platform devices in array * * Used by early architecture code to register early platform devices and * their platform data. */ void __init early_platform_add_devices(struct platform_device **devs, int num) { struct device *dev; int i; /* simply add the devices to list */ for (i = 0; i < num; i++) { dev = &devs[i]->dev; if (!dev->devres_head.next) { pm_runtime_early_init(dev); INIT_LIST_HEAD(&dev->devres_head); list_add_tail(&dev->devres_head, &early_platform_device_list); } } } /** * early_platform_driver_register_all - register early platform drivers * @class_str: string to identify early platform driver class * * Used by architecture code to register all early platform drivers * for a certain class. If omitted then only early platform drivers * with matching kernel command line class parameters will be registered. */ void __init early_platform_driver_register_all(char *class_str) { /* The "class_str" parameter may or may not be present on the kernel * command line. If it is present then there may be more than one * matching parameter. 
* * Since we register our early platform drivers using early_param() * we need to make sure that they also get registered in the case * when the parameter is missing from the kernel command line. * * We use parse_early_options() to make sure the early_param() gets * called at least once. The early_param() may be called more than * once since the name of the preferred device may be specified on * the kernel command line. early_platform_driver_register() handles * this case for us. */ parse_early_options(class_str); } /** * early_platform_match - find early platform device matching driver * @epdrv: early platform driver structure * @id: id to match against */ static struct platform_device * __init early_platform_match(struct early_platform_driver *epdrv, int id) { struct platform_device *pd; list_for_each_entry(pd, &early_platform_device_list, dev.devres_head) if (platform_match(&pd->dev, &epdrv->pdrv->driver)) if (pd->id == id) return pd; return NULL; } /** * early_platform_left - check if early platform driver has matching devices * @epdrv: early platform driver structure * @id: return true if id or above exists */ static int __init early_platform_left(struct early_platform_driver *epdrv, int id) { struct platform_device *pd; list_for_each_entry(pd, &early_platform_device_list, dev.devres_head) if (platform_match(&pd->dev, &epdrv->pdrv->driver)) if (pd->id >= id) return 1; return 0; } /** * early_platform_driver_probe_id - probe drivers matching class_str and id * @class_str: string to identify early platform driver class * @id: id to match against * @nr_probe: number of platform devices to successfully probe before exiting */ static int __init early_platform_driver_probe_id(char *class_str, int id, int nr_probe) { struct early_platform_driver *epdrv; struct platform_device *match; int match_id; int n = 0; int left = 0; list_for_each_entry(epdrv, &early_platform_driver_list, list) { /* only use drivers matching our class_str */ if (strcmp(class_str, epdrv->class_str)) continue; if (id == -2) { match_id = epdrv->requested_id; left = 1; } else { match_id = id; left += early_platform_left(epdrv, id); /* skip requested id */ switch (epdrv->requested_id) { case EARLY_PLATFORM_ID_ERROR: case EARLY_PLATFORM_ID_UNSET: break; default: if (epdrv->requested_id == id) match_id = EARLY_PLATFORM_ID_UNSET; } } switch (match_id) { case EARLY_PLATFORM_ID_ERROR: pr_warn("%s: unable to parse %s parameter\n", class_str, epdrv->pdrv->driver.name); /* fall-through */ case EARLY_PLATFORM_ID_UNSET: match = NULL; break; default: match = early_platform_match(epdrv, match_id); } if (match) { /* * Set up a sensible init_name to enable * dev_name() and others to be used before the * rest of the driver core is initialized. 
*/ if (!match->dev.init_name && slab_is_available()) { if (match->id != -1) match->dev.init_name = kasprintf(GFP_KERNEL, "%s.%d", match->name, match->id); else match->dev.init_name = kasprintf(GFP_KERNEL, "%s", match->name); if (!match->dev.init_name) return -ENOMEM; } if (epdrv->pdrv->probe(match)) pr_warn("%s: unable to probe %s early.\n", class_str, match->name); else n++; } if (n >= nr_probe) break; } if (left) return n; else return -ENODEV; } /** * early_platform_driver_probe - probe a class of registered drivers * @class_str: string to identify early platform driver class * @nr_probe: number of platform devices to successfully probe before exiting * @user_only: only probe user specified early platform devices * * Used by architecture code to probe registered early platform drivers * within a certain class. For probe to happen a registered early platform * device matching a registered early platform driver is needed. */ int __init early_platform_driver_probe(char *class_str, int nr_probe, int user_only) { int k, n, i; n = 0; for (i = -2; n < nr_probe; i++) { k = early_platform_driver_probe_id(class_str, i, nr_probe - n); if (k < 0) break; n += k; if (user_only) break; } return n; } /** * early_platform_cleanup - clean up early platform code */ void __init early_platform_cleanup(void) { struct platform_device *pd, *pd2; /* clean up the devres list used to chain devices */ list_for_each_entry_safe(pd, pd2, &early_platform_device_list, dev.devres_head) { list_del(&pd->dev.devres_head); memset(&pd->dev.devres_head, 0, sizeof(pd->dev.devres_head)); } }
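/*
 * Illustrative sketch, not part of the original drivers/base/platform.c
 * above: one way board-setup code could use the two-step allocation and
 * registration API (platform_device_alloc() + platform_device_add()) that
 * this file implements. The "demo-uart" name, the resource values and the
 * pdata struct are hypothetical. platform_device_put() is only used on the
 * error path of a device that has not been added yet, matching the rule
 * documented above.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

struct demo_uart_pdata {
	unsigned int baud;
};

static struct resource demo_uart_resources[] = {
	{
		.start	= 0x10000000,
		.end	= 0x10000fff,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= 42,
		.end	= 42,
		.flags	= IORESOURCE_IRQ,
	},
};

static int __init demo_uart_device_init(void)
{
	static const struct demo_uart_pdata pdata = { .baud = 115200 };
	struct platform_device *pdev;
	int ret;

	pdev = platform_device_alloc("demo-uart", PLATFORM_DEVID_NONE);
	if (!pdev)
		return -ENOMEM;

	/* Both helpers copy their arguments; the copies are freed when the
	 * device is released, see platform_device_release() above. */
	ret = platform_device_add_resources(pdev, demo_uart_resources,
					    ARRAY_SIZE(demo_uart_resources));
	if (ret)
		goto err_put;

	ret = platform_device_add_data(pdev, &pdata, sizeof(pdata));
	if (ret)
		goto err_put;

	ret = platform_device_add(pdev);
	if (ret)
		goto err_put;

	return 0;

err_put:
	platform_device_put(pdev);
	return ret;
}
device_initcall(demo_uart_device_init);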
./CrossVul/dataset_final_sorted/CWE-362/c/good_2599_0
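/*
 * Companion sketch, also illustrative rather than part of the original
 * file: the driver-side consumer of the resources registered in the
 * previous sketch, using platform_get_resource()/platform_get_irq() and
 * driver registration via the module_platform_driver() convenience macro
 * (which ends up in __platform_driver_register() above). The "demo-uart"
 * name and the devm_* calls are assumptions for the example.
 */
#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int demo_uart_probe(struct platform_device *pdev)
{
	struct resource *mem;
	void __iomem *base;
	int irq;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, mem);
	if (IS_ERR(base))
		return PTR_ERR(base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;	/* may be -EPROBE_DEFER; propagate it */

	dev_info(&pdev->dev, "registers mapped, irq %d\n", irq);
	return 0;
}

static struct platform_driver demo_uart_driver = {
	.probe	= demo_uart_probe,
	.driver	= {
		/* Matched against pdev->name by platform_match() above. */
		.name	= "demo-uart",
	},
};
module_platform_driver(demo_uart_driver);

MODULE_LICENSE("GPL");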
crossvul-cpp_data_good_5221_2
/* Copyright (c) 2000-2008 MySQL AB, 2009 Sun Microsystems, Inc. Use is subject to license terms. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ /* Static variables for mysys library. All definied here for easy making of a shared library */ #include "mysys_priv.h" #include "my_static.h" #include "my_alarm.h" my_bool timed_mutexes= 0; /* from my_init */ char * home_dir=0; char *mysql_data_home= (char*) "."; const char *my_progname= NULL, *my_progname_short= NULL; char curr_dir[FN_REFLEN]= {0}, home_dir_buff[FN_REFLEN]= {0}; ulong my_stream_opened=0,my_file_opened=0, my_tmp_file_created=0; ulong my_file_total_opened= 0; int my_umask=0664, my_umask_dir=0777; myf my_global_flags= 0; my_bool my_assert_on_error= 0; struct st_my_file_info my_file_info_default[MY_NFILE]; uint my_file_limit= MY_NFILE; struct st_my_file_info *my_file_info= my_file_info_default; /* From mf_brkhant */ int my_dont_interrupt=0; volatile int _my_signals=0; struct st_remember _my_sig_remember[MAX_SIGNALS]={{0,0}}; /* from mf_reccache.c */ ulong my_default_record_cache_size=RECORD_CACHE_SIZE; /* from soundex.c */ /* ABCDEFGHIJKLMNOPQRSTUVWXYZ */ /* :::::::::::::::::::::::::: */ const char *soundex_map= "01230120022455012623010202"; /* from my_malloc */ USED_MEM* my_once_root_block=0; /* pointer to first block */ uint my_once_extra=ONCE_ALLOC_INIT; /* Memory to alloc / block */ /* from my_largepage.c */ #ifdef HAVE_LARGE_PAGES my_bool my_use_large_pages= 0; uint my_large_page_size= 0; #endif /* from my_alarm */ int volatile my_have_got_alarm=0; /* declare variable to reset */ ulong my_time_to_wait_for_lock=2; /* In seconds */ /* from errors.c */ #ifdef SHARED_LIBRARY const char *globerrs[GLOBERRS]; /* my_error_messages is here */ #endif void (*my_abort_hook)(int) = (void(*)(int)) exit; void (*error_handler_hook)(uint error, const char *str, myf MyFlags)= my_message_stderr; void (*fatal_error_handler_hook)(uint error, const char *str, myf MyFlags)= my_message_stderr; static const char *proc_info_dummy(void *a __attribute__((unused)), const char *b __attribute__((unused)), const char *c __attribute__((unused)), const char *d __attribute__((unused)), const unsigned int e __attribute__((unused))) { return 0; } /* this is to be able to call set_thd_proc_info from the C code */ const char *(*proc_info_hook)(void *, const char *, const char *, const char *, const unsigned int)= proc_info_dummy; void (*debug_sync_C_callback_ptr)(MYSQL_THD, const char *, size_t)= 0; /* How to disable options */ my_bool my_disable_locking=0; my_bool my_disable_sync=0; my_bool my_disable_async_io=0; my_bool my_disable_flush_key_blocks=0; my_bool my_disable_symlinks=0; my_bool my_disable_copystat_in_redel=0; /* Note that PSI_hook and PSI_server are unconditionally (no ifdef HAVE_PSI_INTERFACE) defined. 
This is to ensure binary compatibility between the server and plugins, in the case when: - the server is not compiled with HAVE_PSI_INTERFACE - a plugin is compiled with HAVE_PSI_INTERFACE See the doxygen documentation for the performance schema. */ /** Hook for the instrumentation interface. Code implementing the instrumentation interface should register here. */ struct PSI_bootstrap *PSI_hook= NULL; /** Instance of the instrumentation interface for the MySQL server. @todo This is currently a global variable, which is handy when compiling instrumented code that is bundled with the server. When dynamic plugins are truly supported, this variable will need to be replaced by a macro, so that each XYZ plugin can have its own xyz_psi_server variable, obtained from PSI_bootstrap::get_interface() with the version used at compile time for plugin XYZ. */ PSI *PSI_server= NULL;
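/*
 * Illustrative sketch, not part of the original my_static.c: the hooks
 * defined above (error_handler_hook, fatal_error_handler_hook) are plain
 * function pointers, so an embedding application can redirect mysys error
 * reporting by assigning its own handler. The header names and the handler
 * body are assumptions for the example, not MySQL code.
 */
#include <my_global.h>
#include <my_sys.h>
#include <stdio.h>

static void app_error_handler(uint error, const char *str, myf MyFlags)
{
  fprintf(stderr, "mysys error %u: %s (flags=%lu)\n",
          error, str, (unsigned long) MyFlags);
}

static void install_app_error_hooks(void)
{
  /* Both hooks default to my_message_stderr, see above. */
  error_handler_hook= app_error_handler;
  fatal_error_handler_hook= app_error_handler;
}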
./CrossVul/dataset_final_sorted/CWE-362/c/good_5221_2
crossvul-cpp_data_good_5572_1
/* * linux/kernel/ptrace.c * * (C) Copyright 1999 Linus Torvalds * * Common interfaces for "ptrace()" which we do not want * to continually duplicate across every architecture. */ #include <linux/capability.h> #include <linux/export.h> #include <linux/sched.h> #include <linux/errno.h> #include <linux/mm.h> #include <linux/highmem.h> #include <linux/pagemap.h> #include <linux/ptrace.h> #include <linux/security.h> #include <linux/signal.h> #include <linux/audit.h> #include <linux/pid_namespace.h> #include <linux/syscalls.h> #include <linux/uaccess.h> #include <linux/regset.h> #include <linux/hw_breakpoint.h> #include <linux/cn_proc.h> static int ptrace_trapping_sleep_fn(void *flags) { schedule(); return 0; } /* * ptrace a task: make the debugger its new parent and * move it to the ptrace list. * * Must be called with the tasklist lock write-held. */ void __ptrace_link(struct task_struct *child, struct task_struct *new_parent) { BUG_ON(!list_empty(&child->ptrace_entry)); list_add(&child->ptrace_entry, &new_parent->ptraced); child->parent = new_parent; } /** * __ptrace_unlink - unlink ptracee and restore its execution state * @child: ptracee to be unlinked * * Remove @child from the ptrace list, move it back to the original parent, * and restore the execution state so that it conforms to the group stop * state. * * Unlinking can happen via two paths - explicit PTRACE_DETACH or ptracer * exiting. For PTRACE_DETACH, unless the ptracee has been killed between * ptrace_check_attach() and here, it's guaranteed to be in TASK_TRACED. * If the ptracer is exiting, the ptracee can be in any state. * * After detach, the ptracee should be in a state which conforms to the * group stop. If the group is stopped or in the process of stopping, the * ptracee should be put into TASK_STOPPED; otherwise, it should be woken * up from TASK_TRACED. * * If the ptracee is in TASK_TRACED and needs to be moved to TASK_STOPPED, * it goes through TRACED -> RUNNING -> STOPPED transition which is similar * to but in the opposite direction of what happens while attaching to a * stopped task. However, in this direction, the intermediate RUNNING * state is not hidden even from the current ptracer and if it immediately * re-attaches and performs a WNOHANG wait(2), it may fail. * * CONTEXT: * write_lock_irq(tasklist_lock) */ void __ptrace_unlink(struct task_struct *child) { BUG_ON(!child->ptrace); child->ptrace = 0; child->parent = child->real_parent; list_del_init(&child->ptrace_entry); spin_lock(&child->sighand->siglock); /* * Clear all pending traps and TRAPPING. TRAPPING should be * cleared regardless of JOBCTL_STOP_PENDING. Do it explicitly. */ task_clear_jobctl_pending(child, JOBCTL_TRAP_MASK); task_clear_jobctl_trapping(child); /* * Reinstate JOBCTL_STOP_PENDING if group stop is in effect and * @child isn't dead. */ if (!(child->flags & PF_EXITING) && (child->signal->flags & SIGNAL_STOP_STOPPED || child->signal->group_stop_count)) { child->jobctl |= JOBCTL_STOP_PENDING; /* * This is only possible if this thread was cloned by the * traced task running in the stopped group, set the signal * for the future reports. * FIXME: we should change ptrace_init_task() to handle this * case. */ if (!(child->jobctl & JOBCTL_STOP_SIGMASK)) child->jobctl |= SIGSTOP; } /* * If transition to TASK_STOPPED is pending or in TASK_TRACED, kick * @child in the butt. Note that @resume should be used iff @child * is in TASK_TRACED; otherwise, we might unduly disrupt * TASK_KILLABLE sleeps. 
*/ if (child->jobctl & JOBCTL_STOP_PENDING || task_is_traced(child)) ptrace_signal_wake_up(child, true); spin_unlock(&child->sighand->siglock); } /* Ensure that nothing can wake it up, even SIGKILL */ static bool ptrace_freeze_traced(struct task_struct *task) { bool ret = false; /* Lockless, nobody but us can set this flag */ if (task->jobctl & JOBCTL_LISTENING) return ret; spin_lock_irq(&task->sighand->siglock); if (task_is_traced(task) && !__fatal_signal_pending(task)) { task->state = __TASK_TRACED; ret = true; } spin_unlock_irq(&task->sighand->siglock); return ret; } static void ptrace_unfreeze_traced(struct task_struct *task) { if (task->state != __TASK_TRACED) return; WARN_ON(!task->ptrace || task->parent != current); spin_lock_irq(&task->sighand->siglock); if (__fatal_signal_pending(task)) wake_up_state(task, __TASK_TRACED); else task->state = TASK_TRACED; spin_unlock_irq(&task->sighand->siglock); } /** * ptrace_check_attach - check whether ptracee is ready for ptrace operation * @child: ptracee to check for * @ignore_state: don't check whether @child is currently %TASK_TRACED * * Check whether @child is being ptraced by %current and ready for further * ptrace operations. If @ignore_state is %false, @child also should be in * %TASK_TRACED state and on return the child is guaranteed to be traced * and not executing. If @ignore_state is %true, @child can be in any * state. * * CONTEXT: * Grabs and releases tasklist_lock and @child->sighand->siglock. * * RETURNS: * 0 on success, -ESRCH if %child is not ready. */ static int ptrace_check_attach(struct task_struct *child, bool ignore_state) { int ret = -ESRCH; /* * We take the read lock around doing both checks to close a * possible race where someone else was tracing our child and * detached between these two checks. After this locked check, * we are sure that this is our traced child and that can only * be changed by us so it's not changing right after this. */ read_lock(&tasklist_lock); if (child->ptrace && child->parent == current) { WARN_ON(child->state == __TASK_TRACED); /* * child->sighand can't be NULL, release_task() * does ptrace_unlink() before __exit_signal(). */ if (ignore_state || ptrace_freeze_traced(child)) ret = 0; } read_unlock(&tasklist_lock); if (!ret && !ignore_state) { if (!wait_task_inactive(child, __TASK_TRACED)) { /* * This can only happen if may_ptrace_stop() fails and * ptrace_stop() changes ->state back to TASK_RUNNING, * so we should not worry about leaking __TASK_TRACED. */ WARN_ON(child->state == __TASK_TRACED); ret = -ESRCH; } } return ret; } static int ptrace_has_cap(struct user_namespace *ns, unsigned int mode) { if (mode & PTRACE_MODE_NOAUDIT) return has_ns_capability_noaudit(current, ns, CAP_SYS_PTRACE); else return has_ns_capability(current, ns, CAP_SYS_PTRACE); } /* Returns 0 on success, -errno on denial. */ static int __ptrace_may_access(struct task_struct *task, unsigned int mode) { const struct cred *cred = current_cred(), *tcred; /* May we inspect the given task? * This check is used both for attaching with ptrace * and for allowing access to sensitive information in /proc. * * ptrace_attach denies several cases that /proc allows * because setting up the necessary parent/child relationship * or halting the specified task is impossible. 
*/ int dumpable = 0; /* Don't let security modules deny introspection */ if (task == current) return 0; rcu_read_lock(); tcred = __task_cred(task); if (uid_eq(cred->uid, tcred->euid) && uid_eq(cred->uid, tcred->suid) && uid_eq(cred->uid, tcred->uid) && gid_eq(cred->gid, tcred->egid) && gid_eq(cred->gid, tcred->sgid) && gid_eq(cred->gid, tcred->gid)) goto ok; if (ptrace_has_cap(tcred->user_ns, mode)) goto ok; rcu_read_unlock(); return -EPERM; ok: rcu_read_unlock(); smp_rmb(); if (task->mm) dumpable = get_dumpable(task->mm); rcu_read_lock(); if (!dumpable && !ptrace_has_cap(__task_cred(task)->user_ns, mode)) { rcu_read_unlock(); return -EPERM; } rcu_read_unlock(); return security_ptrace_access_check(task, mode); } bool ptrace_may_access(struct task_struct *task, unsigned int mode) { int err; task_lock(task); err = __ptrace_may_access(task, mode); task_unlock(task); return !err; } static int ptrace_attach(struct task_struct *task, long request, unsigned long addr, unsigned long flags) { bool seize = (request == PTRACE_SEIZE); int retval; retval = -EIO; if (seize) { if (addr != 0) goto out; if (flags & ~(unsigned long)PTRACE_O_MASK) goto out; flags = PT_PTRACED | PT_SEIZED | (flags << PT_OPT_FLAG_SHIFT); } else { flags = PT_PTRACED; } audit_ptrace(task); retval = -EPERM; if (unlikely(task->flags & PF_KTHREAD)) goto out; if (same_thread_group(task, current)) goto out; /* * Protect exec's credential calculations against our interference; * SUID, SGID and LSM creds get determined differently * under ptrace. */ retval = -ERESTARTNOINTR; if (mutex_lock_interruptible(&task->signal->cred_guard_mutex)) goto out; task_lock(task); retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH); task_unlock(task); if (retval) goto unlock_creds; write_lock_irq(&tasklist_lock); retval = -EPERM; if (unlikely(task->exit_state)) goto unlock_tasklist; if (task->ptrace) goto unlock_tasklist; if (seize) flags |= PT_SEIZED; rcu_read_lock(); if (ns_capable(__task_cred(task)->user_ns, CAP_SYS_PTRACE)) flags |= PT_PTRACE_CAP; rcu_read_unlock(); task->ptrace = flags; __ptrace_link(task, current); /* SEIZE doesn't trap tracee on attach */ if (!seize) send_sig_info(SIGSTOP, SEND_SIG_FORCED, task); spin_lock(&task->sighand->siglock); /* * If the task is already STOPPED, set JOBCTL_TRAP_STOP and * TRAPPING, and kick it so that it transits to TRACED. TRAPPING * will be cleared if the child completes the transition or any * event which clears the group stop states happens. We'll wait * for the transition to complete before returning from this * function. * * This hides STOPPED -> RUNNING -> TRACED transition from the * attaching thread but a different thread in the same group can * still observe the transient RUNNING state. IOW, if another * thread's WNOHANG wait(2) on the stopped tracee races against * ATTACH, the wait(2) may fail due to the transient RUNNING. * * The following task_is_stopped() test is safe as both transitions * in and out of STOPPED are protected by siglock. 
*/ if (task_is_stopped(task) && task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING)) signal_wake_up_state(task, __TASK_STOPPED); spin_unlock(&task->sighand->siglock); retval = 0; unlock_tasklist: write_unlock_irq(&tasklist_lock); unlock_creds: mutex_unlock(&task->signal->cred_guard_mutex); out: if (!retval) { wait_on_bit(&task->jobctl, JOBCTL_TRAPPING_BIT, ptrace_trapping_sleep_fn, TASK_UNINTERRUPTIBLE); proc_ptrace_connector(task, PTRACE_ATTACH); } return retval; } /** * ptrace_traceme -- helper for PTRACE_TRACEME * * Performs checks and sets PT_PTRACED. * Should be used by all ptrace implementations for PTRACE_TRACEME. */ static int ptrace_traceme(void) { int ret = -EPERM; write_lock_irq(&tasklist_lock); /* Are we already being traced? */ if (!current->ptrace) { ret = security_ptrace_traceme(current->parent); /* * Check PF_EXITING to ensure ->real_parent has not passed * exit_ptrace(). Otherwise we don't report the error but * pretend ->real_parent untraces us right after return. */ if (!ret && !(current->real_parent->flags & PF_EXITING)) { current->ptrace = PT_PTRACED; __ptrace_link(current, current->real_parent); } } write_unlock_irq(&tasklist_lock); return ret; } /* * Called with irqs disabled, returns true if childs should reap themselves. */ static int ignoring_children(struct sighand_struct *sigh) { int ret; spin_lock(&sigh->siglock); ret = (sigh->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) || (sigh->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT); spin_unlock(&sigh->siglock); return ret; } /* * Called with tasklist_lock held for writing. * Unlink a traced task, and clean it up if it was a traced zombie. * Return true if it needs to be reaped with release_task(). * (We can't call release_task() here because we already hold tasklist_lock.) * * If it's a zombie, our attachedness prevented normal parent notification * or self-reaping. Do notification now if it would have happened earlier. * If it should reap itself, return true. * * If it's our own child, there is no notification to do. But if our normal * children self-reap, then this child was prevented by ptrace and we must * reap it now, in that case we must also wake up sub-threads sleeping in * do_wait(). */ static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p) { bool dead; __ptrace_unlink(p); if (p->exit_state != EXIT_ZOMBIE) return false; dead = !thread_group_leader(p); if (!dead && thread_group_empty(p)) { if (!same_thread_group(p->real_parent, tracer)) dead = do_notify_parent(p, p->exit_signal); else if (ignoring_children(tracer->sighand)) { __wake_up_parent(p, tracer); dead = true; } } /* Mark it as in the process of being reaped. */ if (dead) p->exit_state = EXIT_DEAD; return dead; } static int ptrace_detach(struct task_struct *child, unsigned int data) { bool dead = false; if (!valid_signal(data)) return -EIO; /* Architecture-specific hardware disable .. */ ptrace_disable(child); clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); write_lock_irq(&tasklist_lock); /* * This child can be already killed. Make sure de_thread() or * our sub-thread doing do_wait() didn't do release_task() yet. */ if (child->ptrace) { child->exit_code = data; dead = __ptrace_detach(current, child); } write_unlock_irq(&tasklist_lock); proc_ptrace_connector(child, PTRACE_DETACH); if (unlikely(dead)) release_task(child); return 0; } /* * Detach all tasks we were using ptrace on. Called with tasklist held * for writing, and returns with it held too. But note it can release * and reacquire the lock. 
*/ void exit_ptrace(struct task_struct *tracer) __releases(&tasklist_lock) __acquires(&tasklist_lock) { struct task_struct *p, *n; LIST_HEAD(ptrace_dead); if (likely(list_empty(&tracer->ptraced))) return; list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) { if (unlikely(p->ptrace & PT_EXITKILL)) send_sig_info(SIGKILL, SEND_SIG_FORCED, p); if (__ptrace_detach(tracer, p)) list_add(&p->ptrace_entry, &ptrace_dead); } write_unlock_irq(&tasklist_lock); BUG_ON(!list_empty(&tracer->ptraced)); list_for_each_entry_safe(p, n, &ptrace_dead, ptrace_entry) { list_del_init(&p->ptrace_entry); release_task(p); } write_lock_irq(&tasklist_lock); } int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len) { int copied = 0; while (len > 0) { char buf[128]; int this_len, retval; this_len = (len > sizeof(buf)) ? sizeof(buf) : len; retval = access_process_vm(tsk, src, buf, this_len, 0); if (!retval) { if (copied) break; return -EIO; } if (copy_to_user(dst, buf, retval)) return -EFAULT; copied += retval; src += retval; dst += retval; len -= retval; } return copied; } int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len) { int copied = 0; while (len > 0) { char buf[128]; int this_len, retval; this_len = (len > sizeof(buf)) ? sizeof(buf) : len; if (copy_from_user(buf, src, this_len)) return -EFAULT; retval = access_process_vm(tsk, dst, buf, this_len, 1); if (!retval) { if (copied) break; return -EIO; } copied += retval; src += retval; dst += retval; len -= retval; } return copied; } static int ptrace_setoptions(struct task_struct *child, unsigned long data) { unsigned flags; if (data & ~(unsigned long)PTRACE_O_MASK) return -EINVAL; /* Avoid intermediate state when all opts are cleared */ flags = child->ptrace; flags &= ~(PTRACE_O_MASK << PT_OPT_FLAG_SHIFT); flags |= (data << PT_OPT_FLAG_SHIFT); child->ptrace = flags; return 0; } static int ptrace_getsiginfo(struct task_struct *child, siginfo_t *info) { unsigned long flags; int error = -ESRCH; if (lock_task_sighand(child, &flags)) { error = -EINVAL; if (likely(child->last_siginfo != NULL)) { *info = *child->last_siginfo; error = 0; } unlock_task_sighand(child, &flags); } return error; } static int ptrace_setsiginfo(struct task_struct *child, const siginfo_t *info) { unsigned long flags; int error = -ESRCH; if (lock_task_sighand(child, &flags)) { error = -EINVAL; if (likely(child->last_siginfo != NULL)) { *child->last_siginfo = *info; error = 0; } unlock_task_sighand(child, &flags); } return error; } #ifdef PTRACE_SINGLESTEP #define is_singlestep(request) ((request) == PTRACE_SINGLESTEP) #else #define is_singlestep(request) 0 #endif #ifdef PTRACE_SINGLEBLOCK #define is_singleblock(request) ((request) == PTRACE_SINGLEBLOCK) #else #define is_singleblock(request) 0 #endif #ifdef PTRACE_SYSEMU #define is_sysemu_singlestep(request) ((request) == PTRACE_SYSEMU_SINGLESTEP) #else #define is_sysemu_singlestep(request) 0 #endif static int ptrace_resume(struct task_struct *child, long request, unsigned long data) { if (!valid_signal(data)) return -EIO; if (request == PTRACE_SYSCALL) set_tsk_thread_flag(child, TIF_SYSCALL_TRACE); else clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); #ifdef TIF_SYSCALL_EMU if (request == PTRACE_SYSEMU || request == PTRACE_SYSEMU_SINGLESTEP) set_tsk_thread_flag(child, TIF_SYSCALL_EMU); else clear_tsk_thread_flag(child, TIF_SYSCALL_EMU); #endif if (is_singleblock(request)) { if (unlikely(!arch_has_block_step())) return -EIO; user_enable_block_step(child); } else if 
(is_singlestep(request) || is_sysemu_singlestep(request)) { if (unlikely(!arch_has_single_step())) return -EIO; user_enable_single_step(child); } else { user_disable_single_step(child); } child->exit_code = data; wake_up_state(child, __TASK_TRACED); return 0; } #ifdef CONFIG_HAVE_ARCH_TRACEHOOK static const struct user_regset * find_regset(const struct user_regset_view *view, unsigned int type) { const struct user_regset *regset; int n; for (n = 0; n < view->n; ++n) { regset = view->regsets + n; if (regset->core_note_type == type) return regset; } return NULL; } static int ptrace_regset(struct task_struct *task, int req, unsigned int type, struct iovec *kiov) { const struct user_regset_view *view = task_user_regset_view(task); const struct user_regset *regset = find_regset(view, type); int regset_no; if (!regset || (kiov->iov_len % regset->size) != 0) return -EINVAL; regset_no = regset - view->regsets; kiov->iov_len = min(kiov->iov_len, (__kernel_size_t) (regset->n * regset->size)); if (req == PTRACE_GETREGSET) return copy_regset_to_user(task, view, regset_no, 0, kiov->iov_len, kiov->iov_base); else return copy_regset_from_user(task, view, regset_no, 0, kiov->iov_len, kiov->iov_base); } #endif int ptrace_request(struct task_struct *child, long request, unsigned long addr, unsigned long data) { bool seized = child->ptrace & PT_SEIZED; int ret = -EIO; siginfo_t siginfo, *si; void __user *datavp = (void __user *) data; unsigned long __user *datalp = datavp; unsigned long flags; switch (request) { case PTRACE_PEEKTEXT: case PTRACE_PEEKDATA: return generic_ptrace_peekdata(child, addr, data); case PTRACE_POKETEXT: case PTRACE_POKEDATA: return generic_ptrace_pokedata(child, addr, data); #ifdef PTRACE_OLDSETOPTIONS case PTRACE_OLDSETOPTIONS: #endif case PTRACE_SETOPTIONS: ret = ptrace_setoptions(child, data); break; case PTRACE_GETEVENTMSG: ret = put_user(child->ptrace_message, datalp); break; case PTRACE_GETSIGINFO: ret = ptrace_getsiginfo(child, &siginfo); if (!ret) ret = copy_siginfo_to_user(datavp, &siginfo); break; case PTRACE_SETSIGINFO: if (copy_from_user(&siginfo, datavp, sizeof siginfo)) ret = -EFAULT; else ret = ptrace_setsiginfo(child, &siginfo); break; case PTRACE_INTERRUPT: /* * Stop tracee without any side-effect on signal or job * control. At least one trap is guaranteed to happen * after this request. If @child is already trapped, the * current trap is not disturbed and another trap will * happen after the current trap is ended with PTRACE_CONT. * * The actual trap might not be PTRACE_EVENT_STOP trap but * the pending condition is cleared regardless. */ if (unlikely(!seized || !lock_task_sighand(child, &flags))) break; /* * INTERRUPT doesn't disturb existing trap sans one * exception. If ptracer issued LISTEN for the current * STOP, this INTERRUPT should clear LISTEN and re-trap * tracee into STOP. */ if (likely(task_set_jobctl_pending(child, JOBCTL_TRAP_STOP))) ptrace_signal_wake_up(child, child->jobctl & JOBCTL_LISTENING); unlock_task_sighand(child, &flags); ret = 0; break; case PTRACE_LISTEN: /* * Listen for events. Tracee must be in STOP. It's not * resumed per-se but is not considered to be in TRACED by * wait(2) or ptrace(2). If an async event (e.g. group * stop state change) happens, tracee will enter STOP trap * again. Alternatively, ptracer can issue INTERRUPT to * finish listening and re-trap tracee into STOP. 
*/ if (unlikely(!seized || !lock_task_sighand(child, &flags))) break; si = child->last_siginfo; if (likely(si && (si->si_code >> 8) == PTRACE_EVENT_STOP)) { child->jobctl |= JOBCTL_LISTENING; /* * If NOTIFY is set, it means event happened between * start of this trap and now. Trigger re-trap. */ if (child->jobctl & JOBCTL_TRAP_NOTIFY) ptrace_signal_wake_up(child, true); ret = 0; } unlock_task_sighand(child, &flags); break; case PTRACE_DETACH: /* detach a process that was attached. */ ret = ptrace_detach(child, data); break; #ifdef CONFIG_BINFMT_ELF_FDPIC case PTRACE_GETFDPIC: { struct mm_struct *mm = get_task_mm(child); unsigned long tmp = 0; ret = -ESRCH; if (!mm) break; switch (addr) { case PTRACE_GETFDPIC_EXEC: tmp = mm->context.exec_fdpic_loadmap; break; case PTRACE_GETFDPIC_INTERP: tmp = mm->context.interp_fdpic_loadmap; break; default: break; } mmput(mm); ret = put_user(tmp, datalp); break; } #endif #ifdef PTRACE_SINGLESTEP case PTRACE_SINGLESTEP: #endif #ifdef PTRACE_SINGLEBLOCK case PTRACE_SINGLEBLOCK: #endif #ifdef PTRACE_SYSEMU case PTRACE_SYSEMU: case PTRACE_SYSEMU_SINGLESTEP: #endif case PTRACE_SYSCALL: case PTRACE_CONT: return ptrace_resume(child, request, data); case PTRACE_KILL: if (child->exit_state) /* already dead */ return 0; return ptrace_resume(child, request, SIGKILL); #ifdef CONFIG_HAVE_ARCH_TRACEHOOK case PTRACE_GETREGSET: case PTRACE_SETREGSET: { struct iovec kiov; struct iovec __user *uiov = datavp; if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov))) return -EFAULT; if (__get_user(kiov.iov_base, &uiov->iov_base) || __get_user(kiov.iov_len, &uiov->iov_len)) return -EFAULT; ret = ptrace_regset(child, request, addr, &kiov); if (!ret) ret = __put_user(kiov.iov_len, &uiov->iov_len); break; } #endif default: break; } return ret; } static struct task_struct *ptrace_get_task_struct(pid_t pid) { struct task_struct *child; rcu_read_lock(); child = find_task_by_vpid(pid); if (child) get_task_struct(child); rcu_read_unlock(); if (!child) return ERR_PTR(-ESRCH); return child; } #ifndef arch_ptrace_attach #define arch_ptrace_attach(child) do { } while (0) #endif SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr, unsigned long, data) { struct task_struct *child; long ret; if (request == PTRACE_TRACEME) { ret = ptrace_traceme(); if (!ret) arch_ptrace_attach(current); goto out; } child = ptrace_get_task_struct(pid); if (IS_ERR(child)) { ret = PTR_ERR(child); goto out; } if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) { ret = ptrace_attach(child, request, addr, data); /* * Some architectures need to do book-keeping after * a ptrace attach. */ if (!ret) arch_ptrace_attach(child); goto out_put_task_struct; } ret = ptrace_check_attach(child, request == PTRACE_KILL || request == PTRACE_INTERRUPT); if (ret < 0) goto out_put_task_struct; ret = arch_ptrace(child, request, addr, data); if (ret || request != PTRACE_DETACH) ptrace_unfreeze_traced(child); out_put_task_struct: put_task_struct(child); out: return ret; } int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr, unsigned long data) { unsigned long tmp; int copied; copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0); if (copied != sizeof(tmp)) return -EIO; return put_user(tmp, (unsigned long __user *)data); } int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr, unsigned long data) { int copied; copied = access_process_vm(tsk, addr, &data, sizeof(data), 1); return (copied == sizeof(data)) ? 
0 : -EIO; } #if defined CONFIG_COMPAT #include <linux/compat.h> int compat_ptrace_request(struct task_struct *child, compat_long_t request, compat_ulong_t addr, compat_ulong_t data) { compat_ulong_t __user *datap = compat_ptr(data); compat_ulong_t word; siginfo_t siginfo; int ret; switch (request) { case PTRACE_PEEKTEXT: case PTRACE_PEEKDATA: ret = access_process_vm(child, addr, &word, sizeof(word), 0); if (ret != sizeof(word)) ret = -EIO; else ret = put_user(word, datap); break; case PTRACE_POKETEXT: case PTRACE_POKEDATA: ret = access_process_vm(child, addr, &data, sizeof(data), 1); ret = (ret != sizeof(data) ? -EIO : 0); break; case PTRACE_GETEVENTMSG: ret = put_user((compat_ulong_t) child->ptrace_message, datap); break; case PTRACE_GETSIGINFO: ret = ptrace_getsiginfo(child, &siginfo); if (!ret) ret = copy_siginfo_to_user32( (struct compat_siginfo __user *) datap, &siginfo); break; case PTRACE_SETSIGINFO: memset(&siginfo, 0, sizeof siginfo); if (copy_siginfo_from_user32( &siginfo, (struct compat_siginfo __user *) datap)) ret = -EFAULT; else ret = ptrace_setsiginfo(child, &siginfo); break; #ifdef CONFIG_HAVE_ARCH_TRACEHOOK case PTRACE_GETREGSET: case PTRACE_SETREGSET: { struct iovec kiov; struct compat_iovec __user *uiov = (struct compat_iovec __user *) datap; compat_uptr_t ptr; compat_size_t len; if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov))) return -EFAULT; if (__get_user(ptr, &uiov->iov_base) || __get_user(len, &uiov->iov_len)) return -EFAULT; kiov.iov_base = compat_ptr(ptr); kiov.iov_len = len; ret = ptrace_regset(child, request, addr, &kiov); if (!ret) ret = __put_user(kiov.iov_len, &uiov->iov_len); break; } #endif default: ret = ptrace_request(child, request, addr, data); } return ret; } asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid, compat_long_t addr, compat_long_t data) { struct task_struct *child; long ret; if (request == PTRACE_TRACEME) { ret = ptrace_traceme(); goto out; } child = ptrace_get_task_struct(pid); if (IS_ERR(child)) { ret = PTR_ERR(child); goto out; } if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) { ret = ptrace_attach(child, request, addr, data); /* * Some architectures need to do book-keeping after * a ptrace attach. */ if (!ret) arch_ptrace_attach(child); goto out_put_task_struct; } ret = ptrace_check_attach(child, request == PTRACE_KILL || request == PTRACE_INTERRUPT); if (!ret) { ret = compat_arch_ptrace(child, request, addr, data); if (ret || request != PTRACE_DETACH) ptrace_unfreeze_traced(child); } out_put_task_struct: put_task_struct(child); out: return ret; } #endif /* CONFIG_COMPAT */ #ifdef CONFIG_HAVE_HW_BREAKPOINT int ptrace_get_breakpoints(struct task_struct *tsk) { if (atomic_inc_not_zero(&tsk->ptrace_bp_refcnt)) return 0; return -1; } void ptrace_put_breakpoints(struct task_struct *tsk) { if (atomic_dec_and_test(&tsk->ptrace_bp_refcnt)) flush_ptrace_hw_breakpoint(tsk); } #endif /* CONFIG_HAVE_HW_BREAKPOINT */
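/*
 * Userspace-side sketch, not part of the original kernel/ptrace.c: the
 * classic attach / wait / peek / detach sequence that exercises
 * ptrace_attach(), ptrace_check_attach() and generic_ptrace_peekdata()
 * above. The target pid and address are placeholders supplied by the
 * caller.
 */
#include <errno.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

static int peek_one_word(pid_t pid, void *addr)
{
	long word;

	if (ptrace(PTRACE_ATTACH, pid, NULL, NULL) == -1)
		return -1;

	/* Wait for the SIGSTOP sent by PTRACE_ATTACH; afterwards the tracee
	 * is in the TASK_TRACED state that ptrace_check_attach() expects. */
	if (waitpid(pid, NULL, 0) == -1)
		return -1;

	errno = 0;
	word = ptrace(PTRACE_PEEKDATA, pid, addr, NULL);
	if (word == -1 && errno)
		perror("PTRACE_PEEKDATA");
	else
		printf("word at %p: %#lx\n", addr, word);

	/* Resume the tracee; a data argument of 0 means "no signal". */
	return (int) ptrace(PTRACE_DETACH, pid, NULL, NULL);
}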
/* ./CrossVul/dataset_final_sorted/CWE-362/c/good_5572_1 */
/* crossvul-cpp_data_bad_2872_0 */
/* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * PACKET - implements raw packet sockets. * * Authors: Ross Biro * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> * Alan Cox, <gw4pts@gw4pts.ampr.org> * * Fixes: * Alan Cox : verify_area() now used correctly * Alan Cox : new skbuff lists, look ma no backlogs! * Alan Cox : tidied skbuff lists. * Alan Cox : Now uses generic datagram routines I * added. Also fixed the peek/read crash * from all old Linux datagram code. * Alan Cox : Uses the improved datagram code. * Alan Cox : Added NULL's for socket options. * Alan Cox : Re-commented the code. * Alan Cox : Use new kernel side addressing * Rob Janssen : Correct MTU usage. * Dave Platt : Counter leaks caused by incorrect * interrupt locking and some slightly * dubious gcc output. Can you read * compiler: it said _VOLATILE_ * Richard Kooijman : Timestamp fixes. * Alan Cox : New buffers. Use sk->mac.raw. * Alan Cox : sendmsg/recvmsg support. * Alan Cox : Protocol setting support * Alexey Kuznetsov : Untied from IPv4 stack. * Cyrus Durgin : Fixed kerneld for kmod. * Michal Ostrowski : Module initialization cleanup. * Ulises Alonso : Frame number limit removal and * packet_set_ring memory leak. * Eric Biederman : Allow for > 8 byte hardware addresses. * The convention is that longer addresses * will simply extend the hardware address * byte arrays at the end of sockaddr_ll * and packet_mreq. * Johann Baudy : Added TX RING. * Chetan Loke : Implemented TPACKET_V3 block abstraction * layer. * Copyright (C) 2011, <lokec@ccs.neu.edu> * * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * */ #include <linux/types.h> #include <linux/mm.h> #include <linux/capability.h> #include <linux/fcntl.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/inet.h> #include <linux/netdevice.h> #include <linux/if_packet.h> #include <linux/wireless.h> #include <linux/kernel.h> #include <linux/kmod.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <net/net_namespace.h> #include <net/ip.h> #include <net/protocol.h> #include <linux/skbuff.h> #include <net/sock.h> #include <linux/errno.h> #include <linux/timer.h> #include <linux/uaccess.h> #include <asm/ioctls.h> #include <asm/page.h> #include <asm/cacheflush.h> #include <asm/io.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/poll.h> #include <linux/module.h> #include <linux/init.h> #include <linux/mutex.h> #include <linux/if_vlan.h> #include <linux/virtio_net.h> #include <linux/errqueue.h> #include <linux/net_tstamp.h> #include <linux/percpu.h> #ifdef CONFIG_INET #include <net/inet_common.h> #endif #include <linux/bpf.h> #include <net/compat.h> #include "internal.h" /* Assumptions: - if device has no dev->hard_header routine, it adds and removes ll header inside itself. In this case ll header is invisible outside of device, but higher levels still should reserve dev->hard_header_len. Some devices are enough clever to reallocate skb, when header will not fit to reserved space (tunnel), another ones are silly (PPP). - packet socket receives packets with pulled ll header, so that SOCK_RAW should push it back. 
On receive: ----------- Incoming, dev->hard_header!=NULL mac_header -> ll header data -> data Outgoing, dev->hard_header!=NULL mac_header -> ll header data -> ll header Incoming, dev->hard_header==NULL mac_header -> UNKNOWN position. It is very likely, that it points to ll header. PPP makes it, that is wrong, because introduce assymetry between rx and tx paths. data -> data Outgoing, dev->hard_header==NULL mac_header -> data. ll header is still not built! data -> data Resume If dev->hard_header==NULL we are unlikely to restore sensible ll header. On transmit: ------------ dev->hard_header != NULL mac_header -> ll header data -> ll header dev->hard_header == NULL (ll header is added by device, we cannot control it) mac_header -> data data -> data We should set nh.raw on output to correct posistion, packet classifier depends on it. */ /* Private packet socket structures. */ /* identical to struct packet_mreq except it has * a longer address field. */ struct packet_mreq_max { int mr_ifindex; unsigned short mr_type; unsigned short mr_alen; unsigned char mr_address[MAX_ADDR_LEN]; }; union tpacket_uhdr { struct tpacket_hdr *h1; struct tpacket2_hdr *h2; struct tpacket3_hdr *h3; void *raw; }; static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, int closing, int tx_ring); #define V3_ALIGNMENT (8) #define BLK_HDR_LEN (ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT)) #define BLK_PLUS_PRIV(sz_of_priv) \ (BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT)) #define BLOCK_STATUS(x) ((x)->hdr.bh1.block_status) #define BLOCK_NUM_PKTS(x) ((x)->hdr.bh1.num_pkts) #define BLOCK_O2FP(x) ((x)->hdr.bh1.offset_to_first_pkt) #define BLOCK_LEN(x) ((x)->hdr.bh1.blk_len) #define BLOCK_SNUM(x) ((x)->hdr.bh1.seq_num) #define BLOCK_O2PRIV(x) ((x)->offset_to_priv) #define BLOCK_PRIV(x) ((void *)((char *)(x) + BLOCK_O2PRIV(x))) struct packet_sock; static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev); static void *packet_previous_frame(struct packet_sock *po, struct packet_ring_buffer *rb, int status); static void packet_increment_head(struct packet_ring_buffer *buff); static int prb_curr_blk_in_use(struct tpacket_block_desc *); static void *prb_dispatch_next_block(struct tpacket_kbdq_core *, struct packet_sock *); static void prb_retire_current_block(struct tpacket_kbdq_core *, struct packet_sock *, unsigned int status); static int prb_queue_frozen(struct tpacket_kbdq_core *); static void prb_open_block(struct tpacket_kbdq_core *, struct tpacket_block_desc *); static void prb_retire_rx_blk_timer_expired(unsigned long); static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *); static void prb_init_blk_timer(struct packet_sock *, struct tpacket_kbdq_core *, void (*func) (unsigned long)); static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *); static void prb_clear_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *); static void prb_fill_vlan_info(struct tpacket_kbdq_core *, struct tpacket3_hdr *); static void packet_flush_mclist(struct sock *sk); static void packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb); struct packet_skb_cb { union { struct sockaddr_pkt pkt; union { /* Trick: alias skb original length with * ll.sll_family and ll.protocol in order * to save room. 
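* (Concretely: the unsigned int origlen below overlays the bytes that sll_family and sll_protocol will eventually occupy; packet_rcv() stores the original skb length there, and packet_recvmsg() reads it back before filling in the real sockaddr_ll fields.)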
*/ unsigned int origlen; struct sockaddr_ll ll; }; } sa; }; #define vio_le() virtio_legacy_is_little_endian() #define PACKET_SKB_CB(__skb) ((struct packet_skb_cb *)((__skb)->cb)) #define GET_PBDQC_FROM_RB(x) ((struct tpacket_kbdq_core *)(&(x)->prb_bdqc)) #define GET_PBLOCK_DESC(x, bid) \ ((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer)) #define GET_CURR_PBLOCK_DESC_FROM_CORE(x) \ ((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer)) #define GET_NEXT_PRB_BLK_NUM(x) \ (((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \ ((x)->kactive_blk_num+1) : 0) static void __fanout_unlink(struct sock *sk, struct packet_sock *po); static void __fanout_link(struct sock *sk, struct packet_sock *po); static int packet_direct_xmit(struct sk_buff *skb) { struct net_device *dev = skb->dev; struct sk_buff *orig_skb = skb; struct netdev_queue *txq; int ret = NETDEV_TX_BUSY; if (unlikely(!netif_running(dev) || !netif_carrier_ok(dev))) goto drop; skb = validate_xmit_skb_list(skb, dev); if (skb != orig_skb) goto drop; packet_pick_tx_queue(dev, skb); txq = skb_get_tx_queue(dev, skb); local_bh_disable(); HARD_TX_LOCK(dev, txq, smp_processor_id()); if (!netif_xmit_frozen_or_drv_stopped(txq)) ret = netdev_start_xmit(skb, dev, txq, false); HARD_TX_UNLOCK(dev, txq); local_bh_enable(); if (!dev_xmit_complete(ret)) kfree_skb(skb); return ret; drop: atomic_long_inc(&dev->tx_dropped); kfree_skb_list(skb); return NET_XMIT_DROP; } static struct net_device *packet_cached_dev_get(struct packet_sock *po) { struct net_device *dev; rcu_read_lock(); dev = rcu_dereference(po->cached_dev); if (likely(dev)) dev_hold(dev); rcu_read_unlock(); return dev; } static void packet_cached_dev_assign(struct packet_sock *po, struct net_device *dev) { rcu_assign_pointer(po->cached_dev, dev); } static void packet_cached_dev_reset(struct packet_sock *po) { RCU_INIT_POINTER(po->cached_dev, NULL); } static bool packet_use_direct_xmit(const struct packet_sock *po) { return po->xmit == packet_direct_xmit; } static u16 __packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb) { return (u16) raw_smp_processor_id() % dev->real_num_tx_queues; } static void packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb) { const struct net_device_ops *ops = dev->netdev_ops; u16 queue_index; if (ops->ndo_select_queue) { queue_index = ops->ndo_select_queue(dev, skb, NULL, __packet_pick_tx_queue); queue_index = netdev_cap_txqueue(dev, queue_index); } else { queue_index = __packet_pick_tx_queue(dev, skb); } skb_set_queue_mapping(skb, queue_index); } /* register_prot_hook must be invoked with the po->bind_lock held, * or from a context in which asynchronous accesses to the packet * socket is not possible (packet_create()). */ static void register_prot_hook(struct sock *sk) { struct packet_sock *po = pkt_sk(sk); if (!po->running) { if (po->fanout) __fanout_link(sk, po); else dev_add_pack(&po->prot_hook); sock_hold(sk); po->running = 1; } } /* {,__}unregister_prot_hook() must be invoked with the po->bind_lock * held. If the sync parameter is true, we will temporarily drop * the po->bind_lock and do a synchronize_net to make sure no * asynchronous packet processing paths still refer to the elements * of po->prot_hook. If the sync parameter is false, it is the * callers responsibility to take care of this. 
*/ static void __unregister_prot_hook(struct sock *sk, bool sync) { struct packet_sock *po = pkt_sk(sk); po->running = 0; if (po->fanout) __fanout_unlink(sk, po); else __dev_remove_pack(&po->prot_hook); __sock_put(sk); if (sync) { spin_unlock(&po->bind_lock); synchronize_net(); spin_lock(&po->bind_lock); } } static void unregister_prot_hook(struct sock *sk, bool sync) { struct packet_sock *po = pkt_sk(sk); if (po->running) __unregister_prot_hook(sk, sync); } static inline struct page * __pure pgv_to_page(void *addr) { if (is_vmalloc_addr(addr)) return vmalloc_to_page(addr); return virt_to_page(addr); } static void __packet_set_status(struct packet_sock *po, void *frame, int status) { union tpacket_uhdr h; h.raw = frame; switch (po->tp_version) { case TPACKET_V1: h.h1->tp_status = status; flush_dcache_page(pgv_to_page(&h.h1->tp_status)); break; case TPACKET_V2: h.h2->tp_status = status; flush_dcache_page(pgv_to_page(&h.h2->tp_status)); break; case TPACKET_V3: h.h3->tp_status = status; flush_dcache_page(pgv_to_page(&h.h3->tp_status)); break; default: WARN(1, "TPACKET version not supported.\n"); BUG(); } smp_wmb(); } static int __packet_get_status(struct packet_sock *po, void *frame) { union tpacket_uhdr h; smp_rmb(); h.raw = frame; switch (po->tp_version) { case TPACKET_V1: flush_dcache_page(pgv_to_page(&h.h1->tp_status)); return h.h1->tp_status; case TPACKET_V2: flush_dcache_page(pgv_to_page(&h.h2->tp_status)); return h.h2->tp_status; case TPACKET_V3: flush_dcache_page(pgv_to_page(&h.h3->tp_status)); return h.h3->tp_status; default: WARN(1, "TPACKET version not supported.\n"); BUG(); return 0; } } static __u32 tpacket_get_timestamp(struct sk_buff *skb, struct timespec *ts, unsigned int flags) { struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); if (shhwtstamps && (flags & SOF_TIMESTAMPING_RAW_HARDWARE) && ktime_to_timespec_cond(shhwtstamps->hwtstamp, ts)) return TP_STATUS_TS_RAW_HARDWARE; if (ktime_to_timespec_cond(skb->tstamp, ts)) return TP_STATUS_TS_SOFTWARE; return 0; } static __u32 __packet_set_timestamp(struct packet_sock *po, void *frame, struct sk_buff *skb) { union tpacket_uhdr h; struct timespec ts; __u32 ts_status; if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp))) return 0; h.raw = frame; switch (po->tp_version) { case TPACKET_V1: h.h1->tp_sec = ts.tv_sec; h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC; break; case TPACKET_V2: h.h2->tp_sec = ts.tv_sec; h.h2->tp_nsec = ts.tv_nsec; break; case TPACKET_V3: h.h3->tp_sec = ts.tv_sec; h.h3->tp_nsec = ts.tv_nsec; break; default: WARN(1, "TPACKET version not supported.\n"); BUG(); } /* one flush is safe, as both fields always lie on the same cacheline */ flush_dcache_page(pgv_to_page(&h.h1->tp_sec)); smp_wmb(); return ts_status; } static void *packet_lookup_frame(struct packet_sock *po, struct packet_ring_buffer *rb, unsigned int position, int status) { unsigned int pg_vec_pos, frame_offset; union tpacket_uhdr h; pg_vec_pos = position / rb->frames_per_block; frame_offset = position % rb->frames_per_block; h.raw = rb->pg_vec[pg_vec_pos].buffer + (frame_offset * rb->frame_size); if (status != __packet_get_status(po, h.raw)) return NULL; return h.raw; } static void *packet_current_frame(struct packet_sock *po, struct packet_ring_buffer *rb, int status) { return packet_lookup_frame(po, rb, rb->head, status); } static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc) { del_timer_sync(&pkc->retire_blk_timer); } static void prb_shutdown_retire_blk_timer(struct packet_sock *po, struct sk_buff_head *rb_queue) { 
struct tpacket_kbdq_core *pkc; pkc = GET_PBDQC_FROM_RB(&po->rx_ring); spin_lock_bh(&rb_queue->lock); pkc->delete_blk_timer = 1; spin_unlock_bh(&rb_queue->lock); prb_del_retire_blk_timer(pkc); } static void prb_init_blk_timer(struct packet_sock *po, struct tpacket_kbdq_core *pkc, void (*func) (unsigned long)) { init_timer(&pkc->retire_blk_timer); pkc->retire_blk_timer.data = (long)po; pkc->retire_blk_timer.function = func; pkc->retire_blk_timer.expires = jiffies; } static void prb_setup_retire_blk_timer(struct packet_sock *po) { struct tpacket_kbdq_core *pkc; pkc = GET_PBDQC_FROM_RB(&po->rx_ring); prb_init_blk_timer(po, pkc, prb_retire_rx_blk_timer_expired); } static int prb_calc_retire_blk_tmo(struct packet_sock *po, int blk_size_in_bytes) { struct net_device *dev; unsigned int mbits = 0, msec = 0, div = 0, tmo = 0; struct ethtool_link_ksettings ecmd; int err; rtnl_lock(); dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex); if (unlikely(!dev)) { rtnl_unlock(); return DEFAULT_PRB_RETIRE_TOV; } err = __ethtool_get_link_ksettings(dev, &ecmd); rtnl_unlock(); if (!err) { /* * If the link speed is so slow you don't really * need to worry about perf anyways */ if (ecmd.base.speed < SPEED_1000 || ecmd.base.speed == SPEED_UNKNOWN) { return DEFAULT_PRB_RETIRE_TOV; } else { msec = 1; div = ecmd.base.speed / 1000; } } mbits = (blk_size_in_bytes * 8) / (1024 * 1024); if (div) mbits /= div; tmo = mbits * msec; if (div) return tmo+1; return tmo; } static void prb_init_ft_ops(struct tpacket_kbdq_core *p1, union tpacket_req_u *req_u) { p1->feature_req_word = req_u->req3.tp_feature_req_word; } static void init_prb_bdqc(struct packet_sock *po, struct packet_ring_buffer *rb, struct pgv *pg_vec, union tpacket_req_u *req_u) { struct tpacket_kbdq_core *p1 = GET_PBDQC_FROM_RB(rb); struct tpacket_block_desc *pbd; memset(p1, 0x0, sizeof(*p1)); p1->knxt_seq_num = 1; p1->pkbdq = pg_vec; pbd = (struct tpacket_block_desc *)pg_vec[0].buffer; p1->pkblk_start = pg_vec[0].buffer; p1->kblk_size = req_u->req3.tp_block_size; p1->knum_blocks = req_u->req3.tp_block_nr; p1->hdrlen = po->tp_hdrlen; p1->version = po->tp_version; p1->last_kactive_blk_num = 0; po->stats.stats3.tp_freeze_q_cnt = 0; if (req_u->req3.tp_retire_blk_tov) p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov; else p1->retire_blk_tov = prb_calc_retire_blk_tmo(po, req_u->req3.tp_block_size); p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov); p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv; p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv); prb_init_ft_ops(p1, req_u); prb_setup_retire_blk_timer(po); prb_open_block(p1, pbd); } /* Do NOT update the last_blk_num first. * Assumes sk_buff_head lock is held. */ static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc) { mod_timer(&pkc->retire_blk_timer, jiffies + pkc->tov_in_jiffies); pkc->last_kactive_blk_num = pkc->kactive_blk_num; } /* * Timer logic: * 1) We refresh the timer only when we open a block. * By doing this we don't waste cycles refreshing the timer * on packet-by-packet basis. * * With a 1MB block-size, on a 1Gbps line, it will take * i) ~8 ms to fill a block + ii) memcpy etc. * In this cut we are not accounting for the memcpy time. * * So, if the user sets the 'tmo' to 10ms then the timer * will never fire while the block is still getting filled * (which is what we want). However, the user could choose * to close a block early and that's fine. * * But when the timer does fire, we check whether or not to refresh it. 
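* (Worked example of prb_calc_retire_blk_tmo() above: a 1MB block on a 1Gbps link gives mbits = (1MB * 8) / (1024 * 1024) = 8 and div = 1000 / 1000 = 1, so the derived tmo is 8 * 1 + 1 = 9 msecs; slower or unknown link speeds simply fall back to DEFAULT_PRB_RETIRE_TOV.)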
* Since the tmo granularity is in msecs, it is not too expensive * to refresh the timer, lets say every '8' msecs. * Either the user can set the 'tmo' or we can derive it based on * a) line-speed and b) block-size. * prb_calc_retire_blk_tmo() calculates the tmo. * */ static void prb_retire_rx_blk_timer_expired(unsigned long data) { struct packet_sock *po = (struct packet_sock *)data; struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(&po->rx_ring); unsigned int frozen; struct tpacket_block_desc *pbd; spin_lock(&po->sk.sk_receive_queue.lock); frozen = prb_queue_frozen(pkc); pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc); if (unlikely(pkc->delete_blk_timer)) goto out; /* We only need to plug the race when the block is partially filled. * tpacket_rcv: * lock(); increment BLOCK_NUM_PKTS; unlock() * copy_bits() is in progress ... * timer fires on other cpu: * we can't retire the current block because copy_bits * is in progress. * */ if (BLOCK_NUM_PKTS(pbd)) { while (atomic_read(&pkc->blk_fill_in_prog)) { /* Waiting for skb_copy_bits to finish... */ cpu_relax(); } } if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) { if (!frozen) { if (!BLOCK_NUM_PKTS(pbd)) { /* An empty block. Just refresh the timer. */ goto refresh_timer; } prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO); if (!prb_dispatch_next_block(pkc, po)) goto refresh_timer; else goto out; } else { /* Case 1. Queue was frozen because user-space was * lagging behind. */ if (prb_curr_blk_in_use(pbd)) { /* * Ok, user-space is still behind. * So just refresh the timer. */ goto refresh_timer; } else { /* Case 2. queue was frozen,user-space caught up, * now the link went idle && the timer fired. * We don't have a block to close.So we open this * block and restart the timer. * opening a block thaws the queue,restarts timer * Thawing/timer-refresh is a side effect. */ prb_open_block(pkc, pbd); goto out; } } } refresh_timer: _prb_refresh_rx_retire_blk_timer(pkc); out: spin_unlock(&po->sk.sk_receive_queue.lock); } static void prb_flush_block(struct tpacket_kbdq_core *pkc1, struct tpacket_block_desc *pbd1, __u32 status) { /* Flush everything minus the block header */ #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1 u8 *start, *end; start = (u8 *)pbd1; /* Skip the block header(we know header WILL fit in 4K) */ start += PAGE_SIZE; end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end); for (; start < end; start += PAGE_SIZE) flush_dcache_page(pgv_to_page(start)); smp_wmb(); #endif /* Now update the block status. */ BLOCK_STATUS(pbd1) = status; /* Flush the block header */ #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1 start = (u8 *)pbd1; flush_dcache_page(pgv_to_page(start)); smp_wmb(); #endif } /* * Side effect: * * 1) flush the block * 2) Increment active_blk_num * * Note:We DONT refresh the timer on purpose. * Because almost always the next block will be opened. */ static void prb_close_block(struct tpacket_kbdq_core *pkc1, struct tpacket_block_desc *pbd1, struct packet_sock *po, unsigned int stat) { __u32 status = TP_STATUS_USER | stat; struct tpacket3_hdr *last_pkt; struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1; struct sock *sk = &po->sk; if (po->stats.stats3.tp_drops) status |= TP_STATUS_LOSING; last_pkt = (struct tpacket3_hdr *)pkc1->prev; last_pkt->tp_next_offset = 0; /* Get the ts of the last pkt */ if (BLOCK_NUM_PKTS(pbd1)) { h1->ts_last_pkt.ts_sec = last_pkt->tp_sec; h1->ts_last_pkt.ts_nsec = last_pkt->tp_nsec; } else { /* Ok, we tmo'd - so get the current time. * * It shouldn't really happen as we don't close empty * blocks. 
See prb_retire_rx_blk_timer_expired(). */ struct timespec ts; getnstimeofday(&ts); h1->ts_last_pkt.ts_sec = ts.tv_sec; h1->ts_last_pkt.ts_nsec = ts.tv_nsec; } smp_wmb(); /* Flush the block */ prb_flush_block(pkc1, pbd1, status); sk->sk_data_ready(sk); pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1); } static void prb_thaw_queue(struct tpacket_kbdq_core *pkc) { pkc->reset_pending_on_curr_blk = 0; } /* * Side effect of opening a block: * * 1) prb_queue is thawed. * 2) retire_blk_timer is refreshed. * */ static void prb_open_block(struct tpacket_kbdq_core *pkc1, struct tpacket_block_desc *pbd1) { struct timespec ts; struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1; smp_rmb(); /* We could have just memset this but we will lose the * flexibility of making the priv area sticky */ BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++; BLOCK_NUM_PKTS(pbd1) = 0; BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv); getnstimeofday(&ts); h1->ts_first_pkt.ts_sec = ts.tv_sec; h1->ts_first_pkt.ts_nsec = ts.tv_nsec; pkc1->pkblk_start = (char *)pbd1; pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv); BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv); BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN; pbd1->version = pkc1->version; pkc1->prev = pkc1->nxt_offset; pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size; prb_thaw_queue(pkc1); _prb_refresh_rx_retire_blk_timer(pkc1); smp_wmb(); } /* * Queue freeze logic: * 1) Assume tp_block_nr = 8 blocks. * 2) At time 't0', user opens Rx ring. * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7 * 4) user-space is either sleeping or processing block '0'. * 5) tpacket_rcv is currently filling block '7', since there is no space left, * it will close block-7,loop around and try to fill block '0'. * call-flow: * __packet_lookup_frame_in_block * prb_retire_current_block() * prb_dispatch_next_block() * |->(BLOCK_STATUS == USER) evaluates to true * 5.1) Since block-0 is currently in-use, we just freeze the queue. * 6) Now there are two cases: * 6.1) Link goes idle right after the queue is frozen. * But remember, the last open_block() refreshed the timer. * When this timer expires,it will refresh itself so that we can * re-open block-0 in near future. * 6.2) Link is busy and keeps on receiving packets. This is a simple * case and __packet_lookup_frame_in_block will check if block-0 * is free and can now be re-used. */ static void prb_freeze_queue(struct tpacket_kbdq_core *pkc, struct packet_sock *po) { pkc->reset_pending_on_curr_blk = 1; po->stats.stats3.tp_freeze_q_cnt++; } #define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT)) /* * If the next block is free then we will dispatch it * and return a good offset. * Else, we will freeze the queue. * So, caller must check the return value. */ static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc, struct packet_sock *po) { struct tpacket_block_desc *pbd; smp_rmb(); /* 1. Get current block num */ pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc); /* 2. If this block is currently in_use then freeze the queue */ if (TP_STATUS_USER & BLOCK_STATUS(pbd)) { prb_freeze_queue(pkc, po); return NULL; } /* * 3. * open this block and return the offset where the first packet * needs to get stored. 
*/ prb_open_block(pkc, pbd); return (void *)pkc->nxt_offset; } static void prb_retire_current_block(struct tpacket_kbdq_core *pkc, struct packet_sock *po, unsigned int status) { struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc); /* retire/close the current block */ if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) { /* * Plug the case where copy_bits() is in progress on * cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't * have space to copy the pkt in the current block and * called prb_retire_current_block() * * We don't need to worry about the TMO case because * the timer-handler already handled this case. */ if (!(status & TP_STATUS_BLK_TMO)) { while (atomic_read(&pkc->blk_fill_in_prog)) { /* Waiting for skb_copy_bits to finish... */ cpu_relax(); } } prb_close_block(pkc, pbd, po, status); return; } } static int prb_curr_blk_in_use(struct tpacket_block_desc *pbd) { return TP_STATUS_USER & BLOCK_STATUS(pbd); } static int prb_queue_frozen(struct tpacket_kbdq_core *pkc) { return pkc->reset_pending_on_curr_blk; } static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb) { struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb); atomic_dec(&pkc->blk_fill_in_prog); } static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc, struct tpacket3_hdr *ppd) { ppd->hv1.tp_rxhash = skb_get_hash(pkc->skb); } static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc, struct tpacket3_hdr *ppd) { ppd->hv1.tp_rxhash = 0; } static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc, struct tpacket3_hdr *ppd) { if (skb_vlan_tag_present(pkc->skb)) { ppd->hv1.tp_vlan_tci = skb_vlan_tag_get(pkc->skb); ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->vlan_proto); ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID; } else { ppd->hv1.tp_vlan_tci = 0; ppd->hv1.tp_vlan_tpid = 0; ppd->tp_status = TP_STATUS_AVAILABLE; } } static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc, struct tpacket3_hdr *ppd) { ppd->hv1.tp_padding = 0; prb_fill_vlan_info(pkc, ppd); if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH) prb_fill_rxhash(pkc, ppd); else prb_clear_rxhash(pkc, ppd); } static void prb_fill_curr_block(char *curr, struct tpacket_kbdq_core *pkc, struct tpacket_block_desc *pbd, unsigned int len) { struct tpacket3_hdr *ppd; ppd = (struct tpacket3_hdr *)curr; ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len); pkc->prev = curr; pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len); BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len); BLOCK_NUM_PKTS(pbd) += 1; atomic_inc(&pkc->blk_fill_in_prog); prb_run_all_ft_ops(pkc, ppd); } /* Assumes caller has the sk->rx_queue.lock */ static void *__packet_lookup_frame_in_block(struct packet_sock *po, struct sk_buff *skb, int status, unsigned int len ) { struct tpacket_kbdq_core *pkc; struct tpacket_block_desc *pbd; char *curr, *end; pkc = GET_PBDQC_FROM_RB(&po->rx_ring); pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc); /* Queue is frozen when user space is lagging behind */ if (prb_queue_frozen(pkc)) { /* * Check if that last block which caused the queue to freeze, * is still in_use by user-space. */ if (prb_curr_blk_in_use(pbd)) { /* Can't record this packet */ return NULL; } else { /* * Ok, the block was released by user-space. * Now let's open that block. * opening a block also thaws the queue. * Thawing is a side effect. 
*/ prb_open_block(pkc, pbd); } } smp_mb(); curr = pkc->nxt_offset; pkc->skb = skb; end = (char *)pbd + pkc->kblk_size; /* first try the current block */ if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) { prb_fill_curr_block(curr, pkc, pbd, len); return (void *)curr; } /* Ok, close the current block */ prb_retire_current_block(pkc, po, 0); /* Now, try to dispatch the next block */ curr = (char *)prb_dispatch_next_block(pkc, po); if (curr) { pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc); prb_fill_curr_block(curr, pkc, pbd, len); return (void *)curr; } /* * No free blocks are available.user_space hasn't caught up yet. * Queue was just frozen and now this packet will get dropped. */ return NULL; } static void *packet_current_rx_frame(struct packet_sock *po, struct sk_buff *skb, int status, unsigned int len) { char *curr = NULL; switch (po->tp_version) { case TPACKET_V1: case TPACKET_V2: curr = packet_lookup_frame(po, &po->rx_ring, po->rx_ring.head, status); return curr; case TPACKET_V3: return __packet_lookup_frame_in_block(po, skb, status, len); default: WARN(1, "TPACKET version not supported\n"); BUG(); return NULL; } } static void *prb_lookup_block(struct packet_sock *po, struct packet_ring_buffer *rb, unsigned int idx, int status) { struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb); struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, idx); if (status != BLOCK_STATUS(pbd)) return NULL; return pbd; } static int prb_previous_blk_num(struct packet_ring_buffer *rb) { unsigned int prev; if (rb->prb_bdqc.kactive_blk_num) prev = rb->prb_bdqc.kactive_blk_num-1; else prev = rb->prb_bdqc.knum_blocks-1; return prev; } /* Assumes caller has held the rx_queue.lock */ static void *__prb_previous_block(struct packet_sock *po, struct packet_ring_buffer *rb, int status) { unsigned int previous = prb_previous_blk_num(rb); return prb_lookup_block(po, rb, previous, status); } static void *packet_previous_rx_frame(struct packet_sock *po, struct packet_ring_buffer *rb, int status) { if (po->tp_version <= TPACKET_V2) return packet_previous_frame(po, rb, status); return __prb_previous_block(po, rb, status); } static void packet_increment_rx_head(struct packet_sock *po, struct packet_ring_buffer *rb) { switch (po->tp_version) { case TPACKET_V1: case TPACKET_V2: return packet_increment_head(rb); case TPACKET_V3: default: WARN(1, "TPACKET version not supported.\n"); BUG(); return; } } static void *packet_previous_frame(struct packet_sock *po, struct packet_ring_buffer *rb, int status) { unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max; return packet_lookup_frame(po, rb, previous, status); } static void packet_increment_head(struct packet_ring_buffer *buff) { buff->head = buff->head != buff->frame_max ? buff->head+1 : 0; } static void packet_inc_pending(struct packet_ring_buffer *rb) { this_cpu_inc(*rb->pending_refcnt); } static void packet_dec_pending(struct packet_ring_buffer *rb) { this_cpu_dec(*rb->pending_refcnt); } static unsigned int packet_read_pending(const struct packet_ring_buffer *rb) { unsigned int refcnt = 0; int cpu; /* We don't use pending refcount in rx_ring. 
*/ if (rb->pending_refcnt == NULL) return 0; for_each_possible_cpu(cpu) refcnt += *per_cpu_ptr(rb->pending_refcnt, cpu); return refcnt; } static int packet_alloc_pending(struct packet_sock *po) { po->rx_ring.pending_refcnt = NULL; po->tx_ring.pending_refcnt = alloc_percpu(unsigned int); if (unlikely(po->tx_ring.pending_refcnt == NULL)) return -ENOBUFS; return 0; } static void packet_free_pending(struct packet_sock *po) { free_percpu(po->tx_ring.pending_refcnt); } #define ROOM_POW_OFF 2 #define ROOM_NONE 0x0 #define ROOM_LOW 0x1 #define ROOM_NORMAL 0x2 static bool __tpacket_has_room(struct packet_sock *po, int pow_off) { int idx, len; len = po->rx_ring.frame_max + 1; idx = po->rx_ring.head; if (pow_off) idx += len >> pow_off; if (idx >= len) idx -= len; return packet_lookup_frame(po, &po->rx_ring, idx, TP_STATUS_KERNEL); } static bool __tpacket_v3_has_room(struct packet_sock *po, int pow_off) { int idx, len; len = po->rx_ring.prb_bdqc.knum_blocks; idx = po->rx_ring.prb_bdqc.kactive_blk_num; if (pow_off) idx += len >> pow_off; if (idx >= len) idx -= len; return prb_lookup_block(po, &po->rx_ring, idx, TP_STATUS_KERNEL); } static int __packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb) { struct sock *sk = &po->sk; int ret = ROOM_NONE; if (po->prot_hook.func != tpacket_rcv) { int avail = sk->sk_rcvbuf - atomic_read(&sk->sk_rmem_alloc) - (skb ? skb->truesize : 0); if (avail > (sk->sk_rcvbuf >> ROOM_POW_OFF)) return ROOM_NORMAL; else if (avail > 0) return ROOM_LOW; else return ROOM_NONE; } if (po->tp_version == TPACKET_V3) { if (__tpacket_v3_has_room(po, ROOM_POW_OFF)) ret = ROOM_NORMAL; else if (__tpacket_v3_has_room(po, 0)) ret = ROOM_LOW; } else { if (__tpacket_has_room(po, ROOM_POW_OFF)) ret = ROOM_NORMAL; else if (__tpacket_has_room(po, 0)) ret = ROOM_LOW; } return ret; } static int packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb) { int ret; bool has_room; spin_lock_bh(&po->sk.sk_receive_queue.lock); ret = __packet_rcv_has_room(po, skb); has_room = ret == ROOM_NORMAL; if (po->pressure == has_room) po->pressure = !has_room; spin_unlock_bh(&po->sk.sk_receive_queue.lock); return ret; } static void packet_sock_destruct(struct sock *sk) { skb_queue_purge(&sk->sk_error_queue); WARN_ON(atomic_read(&sk->sk_rmem_alloc)); WARN_ON(refcount_read(&sk->sk_wmem_alloc)); if (!sock_flag(sk, SOCK_DEAD)) { pr_err("Attempt to release alive packet socket: %p\n", sk); return; } sk_refcnt_debug_dec(sk); } static bool fanout_flow_is_huge(struct packet_sock *po, struct sk_buff *skb) { u32 rxhash; int i, count = 0; rxhash = skb_get_hash(skb); for (i = 0; i < ROLLOVER_HLEN; i++) if (po->rollover->history[i] == rxhash) count++; po->rollover->history[prandom_u32() % ROLLOVER_HLEN] = rxhash; return count > (ROLLOVER_HLEN >> 1); } static unsigned int fanout_demux_hash(struct packet_fanout *f, struct sk_buff *skb, unsigned int num) { return reciprocal_scale(__skb_get_hash_symmetric(skb), num); } static unsigned int fanout_demux_lb(struct packet_fanout *f, struct sk_buff *skb, unsigned int num) { unsigned int val = atomic_inc_return(&f->rr_cur); return val % num; } static unsigned int fanout_demux_cpu(struct packet_fanout *f, struct sk_buff *skb, unsigned int num) { return smp_processor_id() % num; } static unsigned int fanout_demux_rnd(struct packet_fanout *f, struct sk_buff *skb, unsigned int num) { return prandom_u32_max(num); } static unsigned int fanout_demux_rollover(struct packet_fanout *f, struct sk_buff *skb, unsigned int idx, bool try_self, unsigned int num) { struct packet_sock *po, 
*po_next, *po_skip = NULL; unsigned int i, j, room = ROOM_NONE; po = pkt_sk(f->arr[idx]); if (try_self) { room = packet_rcv_has_room(po, skb); if (room == ROOM_NORMAL || (room == ROOM_LOW && !fanout_flow_is_huge(po, skb))) return idx; po_skip = po; } i = j = min_t(int, po->rollover->sock, num - 1); do { po_next = pkt_sk(f->arr[i]); if (po_next != po_skip && !po_next->pressure && packet_rcv_has_room(po_next, skb) == ROOM_NORMAL) { if (i != j) po->rollover->sock = i; atomic_long_inc(&po->rollover->num); if (room == ROOM_LOW) atomic_long_inc(&po->rollover->num_huge); return i; } if (++i == num) i = 0; } while (i != j); atomic_long_inc(&po->rollover->num_failed); return idx; } static unsigned int fanout_demux_qm(struct packet_fanout *f, struct sk_buff *skb, unsigned int num) { return skb_get_queue_mapping(skb) % num; } static unsigned int fanout_demux_bpf(struct packet_fanout *f, struct sk_buff *skb, unsigned int num) { struct bpf_prog *prog; unsigned int ret = 0; rcu_read_lock(); prog = rcu_dereference(f->bpf_prog); if (prog) ret = bpf_prog_run_clear_cb(prog, skb) % num; rcu_read_unlock(); return ret; } static bool fanout_has_flag(struct packet_fanout *f, u16 flag) { return f->flags & (flag >> 8); } static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) { struct packet_fanout *f = pt->af_packet_priv; unsigned int num = READ_ONCE(f->num_members); struct net *net = read_pnet(&f->net); struct packet_sock *po; unsigned int idx; if (!net_eq(dev_net(dev), net) || !num) { kfree_skb(skb); return 0; } if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) { skb = ip_check_defrag(net, skb, IP_DEFRAG_AF_PACKET); if (!skb) return 0; } switch (f->type) { case PACKET_FANOUT_HASH: default: idx = fanout_demux_hash(f, skb, num); break; case PACKET_FANOUT_LB: idx = fanout_demux_lb(f, skb, num); break; case PACKET_FANOUT_CPU: idx = fanout_demux_cpu(f, skb, num); break; case PACKET_FANOUT_RND: idx = fanout_demux_rnd(f, skb, num); break; case PACKET_FANOUT_QM: idx = fanout_demux_qm(f, skb, num); break; case PACKET_FANOUT_ROLLOVER: idx = fanout_demux_rollover(f, skb, 0, false, num); break; case PACKET_FANOUT_CBPF: case PACKET_FANOUT_EBPF: idx = fanout_demux_bpf(f, skb, num); break; } if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER)) idx = fanout_demux_rollover(f, skb, idx, true, num); po = pkt_sk(f->arr[idx]); return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev); } DEFINE_MUTEX(fanout_mutex); EXPORT_SYMBOL_GPL(fanout_mutex); static LIST_HEAD(fanout_list); static u16 fanout_next_id; static void __fanout_link(struct sock *sk, struct packet_sock *po) { struct packet_fanout *f = po->fanout; spin_lock(&f->lock); f->arr[f->num_members] = sk; smp_wmb(); f->num_members++; if (f->num_members == 1) dev_add_pack(&f->prot_hook); spin_unlock(&f->lock); } static void __fanout_unlink(struct sock *sk, struct packet_sock *po) { struct packet_fanout *f = po->fanout; int i; spin_lock(&f->lock); for (i = 0; i < f->num_members; i++) { if (f->arr[i] == sk) break; } BUG_ON(i >= f->num_members); f->arr[i] = f->arr[f->num_members - 1]; f->num_members--; if (f->num_members == 0) __dev_remove_pack(&f->prot_hook); spin_unlock(&f->lock); } static bool match_fanout_group(struct packet_type *ptype, struct sock *sk) { if (sk->sk_family != PF_PACKET) return false; return ptype->af_packet_priv == pkt_sk(sk)->fanout; } static void fanout_init_data(struct packet_fanout *f) { switch (f->type) { case PACKET_FANOUT_LB: atomic_set(&f->rr_cur, 0); break; case 
PACKET_FANOUT_CBPF: case PACKET_FANOUT_EBPF: RCU_INIT_POINTER(f->bpf_prog, NULL); break; } } static void __fanout_set_data_bpf(struct packet_fanout *f, struct bpf_prog *new) { struct bpf_prog *old; spin_lock(&f->lock); old = rcu_dereference_protected(f->bpf_prog, lockdep_is_held(&f->lock)); rcu_assign_pointer(f->bpf_prog, new); spin_unlock(&f->lock); if (old) { synchronize_net(); bpf_prog_destroy(old); } } static int fanout_set_data_cbpf(struct packet_sock *po, char __user *data, unsigned int len) { struct bpf_prog *new; struct sock_fprog fprog; int ret; if (sock_flag(&po->sk, SOCK_FILTER_LOCKED)) return -EPERM; if (len != sizeof(fprog)) return -EINVAL; if (copy_from_user(&fprog, data, len)) return -EFAULT; ret = bpf_prog_create_from_user(&new, &fprog, NULL, false); if (ret) return ret; __fanout_set_data_bpf(po->fanout, new); return 0; } static int fanout_set_data_ebpf(struct packet_sock *po, char __user *data, unsigned int len) { struct bpf_prog *new; u32 fd; if (sock_flag(&po->sk, SOCK_FILTER_LOCKED)) return -EPERM; if (len != sizeof(fd)) return -EINVAL; if (copy_from_user(&fd, data, len)) return -EFAULT; new = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER); if (IS_ERR(new)) return PTR_ERR(new); __fanout_set_data_bpf(po->fanout, new); return 0; } static int fanout_set_data(struct packet_sock *po, char __user *data, unsigned int len) { switch (po->fanout->type) { case PACKET_FANOUT_CBPF: return fanout_set_data_cbpf(po, data, len); case PACKET_FANOUT_EBPF: return fanout_set_data_ebpf(po, data, len); default: return -EINVAL; }; } static void fanout_release_data(struct packet_fanout *f) { switch (f->type) { case PACKET_FANOUT_CBPF: case PACKET_FANOUT_EBPF: __fanout_set_data_bpf(f, NULL); }; } static bool __fanout_id_is_free(struct sock *sk, u16 candidate_id) { struct packet_fanout *f; list_for_each_entry(f, &fanout_list, list) { if (f->id == candidate_id && read_pnet(&f->net) == sock_net(sk)) { return false; } } return true; } static bool fanout_find_new_id(struct sock *sk, u16 *new_id) { u16 id = fanout_next_id; do { if (__fanout_id_is_free(sk, id)) { *new_id = id; fanout_next_id = id + 1; return true; } id++; } while (id != fanout_next_id); return false; } static int fanout_add(struct sock *sk, u16 id, u16 type_flags) { struct packet_rollover *rollover = NULL; struct packet_sock *po = pkt_sk(sk); struct packet_fanout *f, *match; u8 type = type_flags & 0xff; u8 flags = type_flags >> 8; int err; switch (type) { case PACKET_FANOUT_ROLLOVER: if (type_flags & PACKET_FANOUT_FLAG_ROLLOVER) return -EINVAL; case PACKET_FANOUT_HASH: case PACKET_FANOUT_LB: case PACKET_FANOUT_CPU: case PACKET_FANOUT_RND: case PACKET_FANOUT_QM: case PACKET_FANOUT_CBPF: case PACKET_FANOUT_EBPF: break; default: return -EINVAL; } mutex_lock(&fanout_mutex); err = -EALREADY; if (po->fanout) goto out; if (type == PACKET_FANOUT_ROLLOVER || (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)) { err = -ENOMEM; rollover = kzalloc(sizeof(*rollover), GFP_KERNEL); if (!rollover) goto out; atomic_long_set(&rollover->num, 0); atomic_long_set(&rollover->num_huge, 0); atomic_long_set(&rollover->num_failed, 0); po->rollover = rollover; } if (type_flags & PACKET_FANOUT_FLAG_UNIQUEID) { if (id != 0) { err = -EINVAL; goto out; } if (!fanout_find_new_id(sk, &id)) { err = -ENOMEM; goto out; } /* ephemeral flag for the first socket in the group: drop it */ flags &= ~(PACKET_FANOUT_FLAG_UNIQUEID >> 8); } match = NULL; list_for_each_entry(f, &fanout_list, list) { if (f->id == id && read_pnet(&f->net) == sock_net(sk)) { match = f; break; } } err = 
-EINVAL; if (match && match->flags != flags) goto out; if (!match) { err = -ENOMEM; match = kzalloc(sizeof(*match), GFP_KERNEL); if (!match) goto out; write_pnet(&match->net, sock_net(sk)); match->id = id; match->type = type; match->flags = flags; INIT_LIST_HEAD(&match->list); spin_lock_init(&match->lock); refcount_set(&match->sk_ref, 0); fanout_init_data(match); match->prot_hook.type = po->prot_hook.type; match->prot_hook.dev = po->prot_hook.dev; match->prot_hook.func = packet_rcv_fanout; match->prot_hook.af_packet_priv = match; match->prot_hook.id_match = match_fanout_group; list_add(&match->list, &fanout_list); } err = -EINVAL; spin_lock(&po->bind_lock); if (po->running && match->type == type && match->prot_hook.type == po->prot_hook.type && match->prot_hook.dev == po->prot_hook.dev) { err = -ENOSPC; if (refcount_read(&match->sk_ref) < PACKET_FANOUT_MAX) { __dev_remove_pack(&po->prot_hook); po->fanout = match; refcount_set(&match->sk_ref, refcount_read(&match->sk_ref) + 1); __fanout_link(sk, po); err = 0; } } spin_unlock(&po->bind_lock); if (err && !refcount_read(&match->sk_ref)) { list_del(&match->list); kfree(match); } out: if (err && rollover) { kfree(rollover); po->rollover = NULL; } mutex_unlock(&fanout_mutex); return err; } /* If pkt_sk(sk)->fanout->sk_ref is zero, this function removes * pkt_sk(sk)->fanout from fanout_list and returns pkt_sk(sk)->fanout. * It is the responsibility of the caller to call fanout_release_data() and * free the returned packet_fanout (after synchronize_net()) */ static struct packet_fanout *fanout_release(struct sock *sk) { struct packet_sock *po = pkt_sk(sk); struct packet_fanout *f; mutex_lock(&fanout_mutex); f = po->fanout; if (f) { po->fanout = NULL; if (refcount_dec_and_test(&f->sk_ref)) list_del(&f->list); else f = NULL; if (po->rollover) kfree_rcu(po->rollover, rcu); } mutex_unlock(&fanout_mutex); return f; } static bool packet_extra_vlan_len_allowed(const struct net_device *dev, struct sk_buff *skb) { /* Earlier code assumed this would be a VLAN pkt, double-check * this now that we have the actual packet in hand. We can only * do this check on Ethernet devices. */ if (unlikely(dev->type != ARPHRD_ETHER)) return false; skb_reset_mac_header(skb); return likely(eth_hdr(skb)->h_proto == htons(ETH_P_8021Q)); } static const struct proto_ops packet_ops; static const struct proto_ops packet_ops_spkt; static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) { struct sock *sk; struct sockaddr_pkt *spkt; /* * When we registered the protocol we saved the socket in the data * field for just this event. */ sk = pt->af_packet_priv; /* * Yank back the headers [hope the device set this * right or kerboom...] * * Incoming packets have ll header pulled, * push it back. * * For outgoing ones skb->data == skb_mac_header(skb) * so that this procedure is noop. */ if (skb->pkt_type == PACKET_LOOPBACK) goto out; if (!net_eq(dev_net(dev), sock_net(sk))) goto out; skb = skb_share_check(skb, GFP_ATOMIC); if (skb == NULL) goto oom; /* drop any routing info */ skb_dst_drop(skb); /* drop conntrack reference */ nf_reset(skb); spkt = &PACKET_SKB_CB(skb)->sa.pkt; skb_push(skb, skb->data - skb_mac_header(skb)); /* * The SOCK_PACKET socket receives _all_ frames. */ spkt->spkt_family = dev->type; strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device)); spkt->spkt_protocol = skb->protocol; /* * Charge the memory to the socket. This is done specifically * to prevent sockets using all the memory up. 
*/ if (sock_queue_rcv_skb(sk, skb) == 0) return 0; out: kfree_skb(skb); oom: return 0; } /* * Output a raw packet to a device layer. This bypasses all the other * protocol layers and you must therefore supply it with a complete frame */ static int packet_sendmsg_spkt(struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; DECLARE_SOCKADDR(struct sockaddr_pkt *, saddr, msg->msg_name); struct sk_buff *skb = NULL; struct net_device *dev; struct sockcm_cookie sockc; __be16 proto = 0; int err; int extra_len = 0; /* * Get and verify the address. */ if (saddr) { if (msg->msg_namelen < sizeof(struct sockaddr)) return -EINVAL; if (msg->msg_namelen == sizeof(struct sockaddr_pkt)) proto = saddr->spkt_protocol; } else return -ENOTCONN; /* SOCK_PACKET must be sent giving an address */ /* * Find the device first to size check it */ saddr->spkt_device[sizeof(saddr->spkt_device) - 1] = 0; retry: rcu_read_lock(); dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device); err = -ENODEV; if (dev == NULL) goto out_unlock; err = -ENETDOWN; if (!(dev->flags & IFF_UP)) goto out_unlock; /* * You may not queue a frame bigger than the mtu. This is the lowest level * raw protocol and you must do your own fragmentation at this level. */ if (unlikely(sock_flag(sk, SOCK_NOFCS))) { if (!netif_supports_nofcs(dev)) { err = -EPROTONOSUPPORT; goto out_unlock; } extra_len = 4; /* We're doing our own CRC */ } err = -EMSGSIZE; if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len) goto out_unlock; if (!skb) { size_t reserved = LL_RESERVED_SPACE(dev); int tlen = dev->needed_tailroom; unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0; rcu_read_unlock(); skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL); if (skb == NULL) return -ENOBUFS; /* FIXME: Save some space for broken drivers that write a hard * header at transmission time by themselves. PPP is the notable * one here. This should really be fixed at the driver level. 
*/ skb_reserve(skb, reserved); skb_reset_network_header(skb); /* Try to align data part correctly */ if (hhlen) { skb->data -= hhlen; skb->tail -= hhlen; if (len < hhlen) skb_reset_network_header(skb); } err = memcpy_from_msg(skb_put(skb, len), msg, len); if (err) goto out_free; goto retry; } if (!dev_validate_header(dev, skb->data, len)) { err = -EINVAL; goto out_unlock; } if (len > (dev->mtu + dev->hard_header_len + extra_len) && !packet_extra_vlan_len_allowed(dev, skb)) { err = -EMSGSIZE; goto out_unlock; } sockc.tsflags = sk->sk_tsflags; if (msg->msg_controllen) { err = sock_cmsg_send(sk, msg, &sockc); if (unlikely(err)) goto out_unlock; } skb->protocol = proto; skb->dev = dev; skb->priority = sk->sk_priority; skb->mark = sk->sk_mark; sock_tx_timestamp(sk, sockc.tsflags, &skb_shinfo(skb)->tx_flags); if (unlikely(extra_len == 4)) skb->no_fcs = 1; skb_probe_transport_header(skb, 0); dev_queue_xmit(skb); rcu_read_unlock(); return len; out_unlock: rcu_read_unlock(); out_free: kfree_skb(skb); return err; } static unsigned int run_filter(struct sk_buff *skb, const struct sock *sk, unsigned int res) { struct sk_filter *filter; rcu_read_lock(); filter = rcu_dereference(sk->sk_filter); if (filter != NULL) res = bpf_prog_run_clear_cb(filter->prog, skb); rcu_read_unlock(); return res; } static int packet_rcv_vnet(struct msghdr *msg, const struct sk_buff *skb, size_t *len) { struct virtio_net_hdr vnet_hdr; if (*len < sizeof(vnet_hdr)) return -EINVAL; *len -= sizeof(vnet_hdr); if (virtio_net_hdr_from_skb(skb, &vnet_hdr, vio_le(), true)) return -EINVAL; return memcpy_to_msg(msg, (void *)&vnet_hdr, sizeof(vnet_hdr)); } /* * This function makes lazy skb cloning in hope that most of packets * are discarded by BPF. * * Note tricky part: we DO mangle shared skb! skb->data, skb->len * and skb->cb are mangled. It works because (and until) packets * falling here are owned by current CPU. Output packets are cloned * by dev_queue_xmit_nit(), input packets are processed by net_bh * sequencially, so that if we return skb to original state on exit, * we will not harm anyone. */ static int packet_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) { struct sock *sk; struct sockaddr_ll *sll; struct packet_sock *po; u8 *skb_head = skb->data; int skb_len = skb->len; unsigned int snaplen, res; bool is_drop_n_account = false; if (skb->pkt_type == PACKET_LOOPBACK) goto drop; sk = pt->af_packet_priv; po = pkt_sk(sk); if (!net_eq(dev_net(dev), sock_net(sk))) goto drop; skb->dev = dev; if (dev->header_ops) { /* The device has an explicit notion of ll header, * exported to higher levels. * * Otherwise, the device hides details of its frame * structure, so that corresponding packet head is * never delivered to user. 
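* (In practice this means a SOCK_RAW packet socket sees the link-layer header while a SOCK_DGRAM one starts at the network header, which is what the skb_push()/skb_pull() below arrange.)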
*/ if (sk->sk_type != SOCK_DGRAM) skb_push(skb, skb->data - skb_mac_header(skb)); else if (skb->pkt_type == PACKET_OUTGOING) { /* Special case: outgoing packets have ll header at head */ skb_pull(skb, skb_network_offset(skb)); } } snaplen = skb->len; res = run_filter(skb, sk, snaplen); if (!res) goto drop_n_restore; if (snaplen > res) snaplen = res; if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) goto drop_n_acct; if (skb_shared(skb)) { struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); if (nskb == NULL) goto drop_n_acct; if (skb_head != skb->data) { skb->data = skb_head; skb->len = skb_len; } consume_skb(skb); skb = nskb; } sock_skb_cb_check_size(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8); sll = &PACKET_SKB_CB(skb)->sa.ll; sll->sll_hatype = dev->type; sll->sll_pkttype = skb->pkt_type; if (unlikely(po->origdev)) sll->sll_ifindex = orig_dev->ifindex; else sll->sll_ifindex = dev->ifindex; sll->sll_halen = dev_parse_header(skb, sll->sll_addr); /* sll->sll_family and sll->sll_protocol are set in packet_recvmsg(). * Use their space for storing the original skb length. */ PACKET_SKB_CB(skb)->sa.origlen = skb->len; if (pskb_trim(skb, snaplen)) goto drop_n_acct; skb_set_owner_r(skb, sk); skb->dev = NULL; skb_dst_drop(skb); /* drop conntrack reference */ nf_reset(skb); spin_lock(&sk->sk_receive_queue.lock); po->stats.stats1.tp_packets++; sock_skb_set_dropcount(sk, skb); __skb_queue_tail(&sk->sk_receive_queue, skb); spin_unlock(&sk->sk_receive_queue.lock); sk->sk_data_ready(sk); return 0; drop_n_acct: is_drop_n_account = true; spin_lock(&sk->sk_receive_queue.lock); po->stats.stats1.tp_drops++; atomic_inc(&sk->sk_drops); spin_unlock(&sk->sk_receive_queue.lock); drop_n_restore: if (skb_head != skb->data && skb_shared(skb)) { skb->data = skb_head; skb->len = skb_len; } drop: if (!is_drop_n_account) consume_skb(skb); else kfree_skb(skb); return 0; } static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) { struct sock *sk; struct packet_sock *po; struct sockaddr_ll *sll; union tpacket_uhdr h; u8 *skb_head = skb->data; int skb_len = skb->len; unsigned int snaplen, res; unsigned long status = TP_STATUS_USER; unsigned short macoff, netoff, hdrlen; struct sk_buff *copy_skb = NULL; struct timespec ts; __u32 ts_status; bool is_drop_n_account = false; bool do_vnet = false; /* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT. * We may add members to them until current aligned size without forcing * userspace to call getsockopt(..., PACKET_HDRLEN, ...). 
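* (From user space that call looks roughly like: int val = TPACKET_V2; socklen_t len = sizeof(val); getsockopt(fd, SOL_PACKET, PACKET_HDRLEN, &val, &len); - after which val holds the header length for that TPACKET version. Illustrative sketch only.)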
*/ BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h2)) != 32); BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h3)) != 48); if (skb->pkt_type == PACKET_LOOPBACK) goto drop; sk = pt->af_packet_priv; po = pkt_sk(sk); if (!net_eq(dev_net(dev), sock_net(sk))) goto drop; if (dev->header_ops) { if (sk->sk_type != SOCK_DGRAM) skb_push(skb, skb->data - skb_mac_header(skb)); else if (skb->pkt_type == PACKET_OUTGOING) { /* Special case: outgoing packets have ll header at head */ skb_pull(skb, skb_network_offset(skb)); } } snaplen = skb->len; res = run_filter(skb, sk, snaplen); if (!res) goto drop_n_restore; if (skb->ip_summed == CHECKSUM_PARTIAL) status |= TP_STATUS_CSUMNOTREADY; else if (skb->pkt_type != PACKET_OUTGOING && (skb->ip_summed == CHECKSUM_COMPLETE || skb_csum_unnecessary(skb))) status |= TP_STATUS_CSUM_VALID; if (snaplen > res) snaplen = res; if (sk->sk_type == SOCK_DGRAM) { macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 + po->tp_reserve; } else { unsigned int maclen = skb_network_offset(skb); netoff = TPACKET_ALIGN(po->tp_hdrlen + (maclen < 16 ? 16 : maclen)) + po->tp_reserve; if (po->has_vnet_hdr) { netoff += sizeof(struct virtio_net_hdr); do_vnet = true; } macoff = netoff - maclen; } if (po->tp_version <= TPACKET_V2) { if (macoff + snaplen > po->rx_ring.frame_size) { if (po->copy_thresh && atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) { if (skb_shared(skb)) { copy_skb = skb_clone(skb, GFP_ATOMIC); } else { copy_skb = skb_get(skb); skb_head = skb->data; } if (copy_skb) skb_set_owner_r(copy_skb, sk); } snaplen = po->rx_ring.frame_size - macoff; if ((int)snaplen < 0) { snaplen = 0; do_vnet = false; } } } else if (unlikely(macoff + snaplen > GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) { u32 nval; nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff; pr_err_once("tpacket_rcv: packet too big, clamped from %u to %u. macoff=%u\n", snaplen, nval, macoff); snaplen = nval; if (unlikely((int)snaplen < 0)) { snaplen = 0; macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len; do_vnet = false; } } spin_lock(&sk->sk_receive_queue.lock); h.raw = packet_current_rx_frame(po, skb, TP_STATUS_KERNEL, (macoff+snaplen)); if (!h.raw) goto drop_n_account; if (po->tp_version <= TPACKET_V2) { packet_increment_rx_head(po, &po->rx_ring); /* * LOSING will be reported till you read the stats, * because it's COR - Clear On Read. * Anyways, moving it for V1/V2 only as V3 doesn't need this * at packet level. 
*/ if (po->stats.stats1.tp_drops) status |= TP_STATUS_LOSING; } po->stats.stats1.tp_packets++; if (copy_skb) { status |= TP_STATUS_COPY; __skb_queue_tail(&sk->sk_receive_queue, copy_skb); } spin_unlock(&sk->sk_receive_queue.lock); if (do_vnet) { if (virtio_net_hdr_from_skb(skb, h.raw + macoff - sizeof(struct virtio_net_hdr), vio_le(), true)) { spin_lock(&sk->sk_receive_queue.lock); goto drop_n_account; } } skb_copy_bits(skb, 0, h.raw + macoff, snaplen); if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp))) getnstimeofday(&ts); status |= ts_status; switch (po->tp_version) { case TPACKET_V1: h.h1->tp_len = skb->len; h.h1->tp_snaplen = snaplen; h.h1->tp_mac = macoff; h.h1->tp_net = netoff; h.h1->tp_sec = ts.tv_sec; h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC; hdrlen = sizeof(*h.h1); break; case TPACKET_V2: h.h2->tp_len = skb->len; h.h2->tp_snaplen = snaplen; h.h2->tp_mac = macoff; h.h2->tp_net = netoff; h.h2->tp_sec = ts.tv_sec; h.h2->tp_nsec = ts.tv_nsec; if (skb_vlan_tag_present(skb)) { h.h2->tp_vlan_tci = skb_vlan_tag_get(skb); h.h2->tp_vlan_tpid = ntohs(skb->vlan_proto); status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID; } else { h.h2->tp_vlan_tci = 0; h.h2->tp_vlan_tpid = 0; } memset(h.h2->tp_padding, 0, sizeof(h.h2->tp_padding)); hdrlen = sizeof(*h.h2); break; case TPACKET_V3: /* tp_nxt_offset,vlan are already populated above. * So DONT clear those fields here */ h.h3->tp_status |= status; h.h3->tp_len = skb->len; h.h3->tp_snaplen = snaplen; h.h3->tp_mac = macoff; h.h3->tp_net = netoff; h.h3->tp_sec = ts.tv_sec; h.h3->tp_nsec = ts.tv_nsec; memset(h.h3->tp_padding, 0, sizeof(h.h3->tp_padding)); hdrlen = sizeof(*h.h3); break; default: BUG(); } sll = h.raw + TPACKET_ALIGN(hdrlen); sll->sll_halen = dev_parse_header(skb, sll->sll_addr); sll->sll_family = AF_PACKET; sll->sll_hatype = dev->type; sll->sll_protocol = skb->protocol; sll->sll_pkttype = skb->pkt_type; if (unlikely(po->origdev)) sll->sll_ifindex = orig_dev->ifindex; else sll->sll_ifindex = dev->ifindex; smp_mb(); #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1 if (po->tp_version <= TPACKET_V2) { u8 *start, *end; end = (u8 *) PAGE_ALIGN((unsigned long) h.raw + macoff + snaplen); for (start = h.raw; start < end; start += PAGE_SIZE) flush_dcache_page(pgv_to_page(start)); } smp_wmb(); #endif if (po->tp_version <= TPACKET_V2) { __packet_set_status(po, h.raw, status); sk->sk_data_ready(sk); } else { prb_clear_blk_fill_status(&po->rx_ring); } drop_n_restore: if (skb_head != skb->data && skb_shared(skb)) { skb->data = skb_head; skb->len = skb_len; } drop: if (!is_drop_n_account) consume_skb(skb); else kfree_skb(skb); return 0; drop_n_account: is_drop_n_account = true; po->stats.stats1.tp_drops++; spin_unlock(&sk->sk_receive_queue.lock); sk->sk_data_ready(sk); kfree_skb(copy_skb); goto drop_n_restore; } static void tpacket_destruct_skb(struct sk_buff *skb) { struct packet_sock *po = pkt_sk(skb->sk); if (likely(po->tx_ring.pg_vec)) { void *ph; __u32 ts; ph = skb_shinfo(skb)->destructor_arg; packet_dec_pending(&po->tx_ring); ts = __packet_set_timestamp(po, ph, skb); __packet_set_status(po, ph, TP_STATUS_AVAILABLE | ts); } sock_wfree(skb); } static void tpacket_set_protocol(const struct net_device *dev, struct sk_buff *skb) { if (dev->type == ARPHRD_ETHER) { skb_reset_mac_header(skb); skb->protocol = eth_hdr(skb)->h_proto; } } static int __packet_snd_vnet_parse(struct virtio_net_hdr *vnet_hdr, size_t len) { if ((vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && (__virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) + 
__virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2 > __virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len))) vnet_hdr->hdr_len = __cpu_to_virtio16(vio_le(), __virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) + __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2); if (__virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len) > len) return -EINVAL; return 0; } static int packet_snd_vnet_parse(struct msghdr *msg, size_t *len, struct virtio_net_hdr *vnet_hdr) { if (*len < sizeof(*vnet_hdr)) return -EINVAL; *len -= sizeof(*vnet_hdr); if (!copy_from_iter_full(vnet_hdr, sizeof(*vnet_hdr), &msg->msg_iter)) return -EFAULT; return __packet_snd_vnet_parse(vnet_hdr, *len); } static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb, void *frame, struct net_device *dev, void *data, int tp_len, __be16 proto, unsigned char *addr, int hlen, int copylen, const struct sockcm_cookie *sockc) { union tpacket_uhdr ph; int to_write, offset, len, nr_frags, len_max; struct socket *sock = po->sk.sk_socket; struct page *page; int err; ph.raw = frame; skb->protocol = proto; skb->dev = dev; skb->priority = po->sk.sk_priority; skb->mark = po->sk.sk_mark; sock_tx_timestamp(&po->sk, sockc->tsflags, &skb_shinfo(skb)->tx_flags); skb_shinfo(skb)->destructor_arg = ph.raw; skb_reserve(skb, hlen); skb_reset_network_header(skb); to_write = tp_len; if (sock->type == SOCK_DGRAM) { err = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, tp_len); if (unlikely(err < 0)) return -EINVAL; } else if (copylen) { int hdrlen = min_t(int, copylen, tp_len); skb_push(skb, dev->hard_header_len); skb_put(skb, copylen - dev->hard_header_len); err = skb_store_bits(skb, 0, data, hdrlen); if (unlikely(err)) return err; if (!dev_validate_header(dev, skb->data, hdrlen)) return -EINVAL; if (!skb->protocol) tpacket_set_protocol(dev, skb); data += hdrlen; to_write -= hdrlen; } offset = offset_in_page(data); len_max = PAGE_SIZE - offset; len = ((to_write > len_max) ? len_max : to_write); skb->data_len = to_write; skb->len += to_write; skb->truesize += to_write; refcount_add(to_write, &po->sk.sk_wmem_alloc); while (likely(to_write)) { nr_frags = skb_shinfo(skb)->nr_frags; if (unlikely(nr_frags >= MAX_SKB_FRAGS)) { pr_err("Packet exceed the number of skb frags(%lu)\n", MAX_SKB_FRAGS); return -EFAULT; } page = pgv_to_page(data); data += len; flush_dcache_page(page); get_page(page); skb_fill_page_desc(skb, nr_frags, page, offset, len); to_write -= len; offset = 0; len_max = PAGE_SIZE; len = ((to_write > len_max) ? 
len_max : to_write); } skb_probe_transport_header(skb, 0); return tp_len; } static int tpacket_parse_header(struct packet_sock *po, void *frame, int size_max, void **data) { union tpacket_uhdr ph; int tp_len, off; ph.raw = frame; switch (po->tp_version) { case TPACKET_V3: if (ph.h3->tp_next_offset != 0) { pr_warn_once("variable sized slot not supported"); return -EINVAL; } tp_len = ph.h3->tp_len; break; case TPACKET_V2: tp_len = ph.h2->tp_len; break; default: tp_len = ph.h1->tp_len; break; } if (unlikely(tp_len > size_max)) { pr_err("packet size is too long (%d > %d)\n", tp_len, size_max); return -EMSGSIZE; } if (unlikely(po->tp_tx_has_off)) { int off_min, off_max; off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll); off_max = po->tx_ring.frame_size - tp_len; if (po->sk.sk_type == SOCK_DGRAM) { switch (po->tp_version) { case TPACKET_V3: off = ph.h3->tp_net; break; case TPACKET_V2: off = ph.h2->tp_net; break; default: off = ph.h1->tp_net; break; } } else { switch (po->tp_version) { case TPACKET_V3: off = ph.h3->tp_mac; break; case TPACKET_V2: off = ph.h2->tp_mac; break; default: off = ph.h1->tp_mac; break; } } if (unlikely((off < off_min) || (off_max < off))) return -EINVAL; } else { off = po->tp_hdrlen - sizeof(struct sockaddr_ll); } *data = frame + off; return tp_len; } static int tpacket_snd(struct packet_sock *po, struct msghdr *msg) { struct sk_buff *skb; struct net_device *dev; struct virtio_net_hdr *vnet_hdr = NULL; struct sockcm_cookie sockc; __be16 proto; int err, reserve = 0; void *ph; DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name); bool need_wait = !(msg->msg_flags & MSG_DONTWAIT); int tp_len, size_max; unsigned char *addr; void *data; int len_sum = 0; int status = TP_STATUS_AVAILABLE; int hlen, tlen, copylen = 0; mutex_lock(&po->pg_vec_lock); if (likely(saddr == NULL)) { dev = packet_cached_dev_get(po); proto = po->num; addr = NULL; } else { err = -EINVAL; if (msg->msg_namelen < sizeof(struct sockaddr_ll)) goto out; if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr))) goto out; proto = saddr->sll_protocol; addr = saddr->sll_addr; dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex); } err = -ENXIO; if (unlikely(dev == NULL)) goto out; err = -ENETDOWN; if (unlikely(!(dev->flags & IFF_UP))) goto out_put; sockc.tsflags = po->sk.sk_tsflags; if (msg->msg_controllen) { err = sock_cmsg_send(&po->sk, msg, &sockc); if (unlikely(err)) goto out_put; } if (po->sk.sk_socket->type == SOCK_RAW) reserve = dev->hard_header_len; size_max = po->tx_ring.frame_size - (po->tp_hdrlen - sizeof(struct sockaddr_ll)); if ((size_max > dev->mtu + reserve + VLAN_HLEN) && !po->has_vnet_hdr) size_max = dev->mtu + reserve + VLAN_HLEN; do { ph = packet_current_frame(po, &po->tx_ring, TP_STATUS_SEND_REQUEST); if (unlikely(ph == NULL)) { if (need_wait && need_resched()) schedule(); continue; } skb = NULL; tp_len = tpacket_parse_header(po, ph, size_max, &data); if (tp_len < 0) goto tpacket_error; status = TP_STATUS_SEND_REQUEST; hlen = LL_RESERVED_SPACE(dev); tlen = dev->needed_tailroom; if (po->has_vnet_hdr) { vnet_hdr = data; data += sizeof(*vnet_hdr); tp_len -= sizeof(*vnet_hdr); if (tp_len < 0 || __packet_snd_vnet_parse(vnet_hdr, tp_len)) { tp_len = -EINVAL; goto tpacket_error; } copylen = __virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len); } copylen = max_t(int, copylen, dev->hard_header_len); skb = sock_alloc_send_skb(&po->sk, hlen + tlen + sizeof(struct sockaddr_ll) + (copylen - dev->hard_header_len), !need_wait, &err); if (unlikely(skb == NULL)) { /* we 
assume the socket was initially writeable ... */ if (likely(len_sum > 0)) err = len_sum; goto out_status; } tp_len = tpacket_fill_skb(po, skb, ph, dev, data, tp_len, proto, addr, hlen, copylen, &sockc); if (likely(tp_len >= 0) && tp_len > dev->mtu + reserve && !po->has_vnet_hdr && !packet_extra_vlan_len_allowed(dev, skb)) tp_len = -EMSGSIZE; if (unlikely(tp_len < 0)) { tpacket_error: if (po->tp_loss) { __packet_set_status(po, ph, TP_STATUS_AVAILABLE); packet_increment_head(&po->tx_ring); kfree_skb(skb); continue; } else { status = TP_STATUS_WRONG_FORMAT; err = tp_len; goto out_status; } } if (po->has_vnet_hdr && virtio_net_hdr_to_skb(skb, vnet_hdr, vio_le())) { tp_len = -EINVAL; goto tpacket_error; } skb->destructor = tpacket_destruct_skb; __packet_set_status(po, ph, TP_STATUS_SENDING); packet_inc_pending(&po->tx_ring); status = TP_STATUS_SEND_REQUEST; err = po->xmit(skb); if (unlikely(err > 0)) { err = net_xmit_errno(err); if (err && __packet_get_status(po, ph) == TP_STATUS_AVAILABLE) { /* skb was destructed already */ skb = NULL; goto out_status; } /* * skb was dropped but not destructed yet; * let's treat it like congestion or err < 0 */ err = 0; } packet_increment_head(&po->tx_ring); len_sum += tp_len; } while (likely((ph != NULL) || /* Note: packet_read_pending() might be slow if we have * to call it as it's per_cpu variable, but in fast-path * we already short-circuit the loop with the first * condition, and luckily don't have to go that path * anyway. */ (need_wait && packet_read_pending(&po->tx_ring)))); err = len_sum; goto out_put; out_status: __packet_set_status(po, ph, status); kfree_skb(skb); out_put: dev_put(dev); out: mutex_unlock(&po->pg_vec_lock); return err; } static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad, size_t reserve, size_t len, size_t linear, int noblock, int *err) { struct sk_buff *skb; /* Under a page? Don't bother with paged skb. */ if (prepad + len < PAGE_SIZE || !linear) linear = len; skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock, err, 0); if (!skb) return NULL; skb_reserve(skb, reserve); skb_put(skb, linear); skb->data_len = len - linear; skb->len += len - linear; return skb; } static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name); struct sk_buff *skb; struct net_device *dev; __be16 proto; unsigned char *addr; int err, reserve = 0; struct sockcm_cookie sockc; struct virtio_net_hdr vnet_hdr = { 0 }; int offset = 0; struct packet_sock *po = pkt_sk(sk); int hlen, tlen, linear; int extra_len = 0; /* * Get and verify the address. 
*/ if (likely(saddr == NULL)) { dev = packet_cached_dev_get(po); proto = po->num; addr = NULL; } else { err = -EINVAL; if (msg->msg_namelen < sizeof(struct sockaddr_ll)) goto out; if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr))) goto out; proto = saddr->sll_protocol; addr = saddr->sll_addr; dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex); } err = -ENXIO; if (unlikely(dev == NULL)) goto out_unlock; err = -ENETDOWN; if (unlikely(!(dev->flags & IFF_UP))) goto out_unlock; sockc.tsflags = sk->sk_tsflags; sockc.mark = sk->sk_mark; if (msg->msg_controllen) { err = sock_cmsg_send(sk, msg, &sockc); if (unlikely(err)) goto out_unlock; } if (sock->type == SOCK_RAW) reserve = dev->hard_header_len; if (po->has_vnet_hdr) { err = packet_snd_vnet_parse(msg, &len, &vnet_hdr); if (err) goto out_unlock; } if (unlikely(sock_flag(sk, SOCK_NOFCS))) { if (!netif_supports_nofcs(dev)) { err = -EPROTONOSUPPORT; goto out_unlock; } extra_len = 4; /* We're doing our own CRC */ } err = -EMSGSIZE; if (!vnet_hdr.gso_type && (len > dev->mtu + reserve + VLAN_HLEN + extra_len)) goto out_unlock; err = -ENOBUFS; hlen = LL_RESERVED_SPACE(dev); tlen = dev->needed_tailroom; linear = __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len); linear = max(linear, min_t(int, len, dev->hard_header_len)); skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, linear, msg->msg_flags & MSG_DONTWAIT, &err); if (skb == NULL) goto out_unlock; skb_set_network_header(skb, reserve); err = -EINVAL; if (sock->type == SOCK_DGRAM) { offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len); if (unlikely(offset < 0)) goto out_free; } /* Returns -EFAULT on error */ err = skb_copy_datagram_from_iter(skb, offset, &msg->msg_iter, len); if (err) goto out_free; if (sock->type == SOCK_RAW && !dev_validate_header(dev, skb->data, len)) { err = -EINVAL; goto out_free; } sock_tx_timestamp(sk, sockc.tsflags, &skb_shinfo(skb)->tx_flags); if (!vnet_hdr.gso_type && (len > dev->mtu + reserve + extra_len) && !packet_extra_vlan_len_allowed(dev, skb)) { err = -EMSGSIZE; goto out_free; } skb->protocol = proto; skb->dev = dev; skb->priority = sk->sk_priority; skb->mark = sockc.mark; if (po->has_vnet_hdr) { err = virtio_net_hdr_to_skb(skb, &vnet_hdr, vio_le()); if (err) goto out_free; len += sizeof(vnet_hdr); } skb_probe_transport_header(skb, reserve); if (unlikely(extra_len == 4)) skb->no_fcs = 1; err = po->xmit(skb); if (err > 0 && (err = net_xmit_errno(err)) != 0) goto out_unlock; dev_put(dev); return len; out_free: kfree_skb(skb); out_unlock: if (dev) dev_put(dev); out: return err; } static int packet_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; struct packet_sock *po = pkt_sk(sk); if (po->tx_ring.pg_vec) return tpacket_snd(po, msg); else return packet_snd(sock, msg, len); } /* * Close a PACKET socket. This is fairly simple. We immediately go * to 'closed' state and remove our protocol entry in the device list. 
*/ static int packet_release(struct socket *sock) { struct sock *sk = sock->sk; struct packet_sock *po; struct packet_fanout *f; struct net *net; union tpacket_req_u req_u; if (!sk) return 0; net = sock_net(sk); po = pkt_sk(sk); mutex_lock(&net->packet.sklist_lock); sk_del_node_init_rcu(sk); mutex_unlock(&net->packet.sklist_lock); preempt_disable(); sock_prot_inuse_add(net, sk->sk_prot, -1); preempt_enable(); spin_lock(&po->bind_lock); unregister_prot_hook(sk, false); packet_cached_dev_reset(po); if (po->prot_hook.dev) { dev_put(po->prot_hook.dev); po->prot_hook.dev = NULL; } spin_unlock(&po->bind_lock); packet_flush_mclist(sk); if (po->rx_ring.pg_vec) { memset(&req_u, 0, sizeof(req_u)); packet_set_ring(sk, &req_u, 1, 0); } if (po->tx_ring.pg_vec) { memset(&req_u, 0, sizeof(req_u)); packet_set_ring(sk, &req_u, 1, 1); } f = fanout_release(sk); synchronize_net(); if (f) { fanout_release_data(f); kfree(f); } /* * Now the socket is dead. No more input will appear. */ sock_orphan(sk); sock->sk = NULL; /* Purge queues */ skb_queue_purge(&sk->sk_receive_queue); packet_free_pending(po); sk_refcnt_debug_release(sk); sock_put(sk); return 0; } /* * Attach a packet hook. */ static int packet_do_bind(struct sock *sk, const char *name, int ifindex, __be16 proto) { struct packet_sock *po = pkt_sk(sk); struct net_device *dev_curr; __be16 proto_curr; bool need_rehook; struct net_device *dev = NULL; int ret = 0; bool unlisted = false; if (po->fanout) return -EINVAL; lock_sock(sk); spin_lock(&po->bind_lock); rcu_read_lock(); if (name) { dev = dev_get_by_name_rcu(sock_net(sk), name); if (!dev) { ret = -ENODEV; goto out_unlock; } } else if (ifindex) { dev = dev_get_by_index_rcu(sock_net(sk), ifindex); if (!dev) { ret = -ENODEV; goto out_unlock; } } if (dev) dev_hold(dev); proto_curr = po->prot_hook.type; dev_curr = po->prot_hook.dev; need_rehook = proto_curr != proto || dev_curr != dev; if (need_rehook) { if (po->running) { rcu_read_unlock(); __unregister_prot_hook(sk, true); rcu_read_lock(); dev_curr = po->prot_hook.dev; if (dev) unlisted = !dev_get_by_index_rcu(sock_net(sk), dev->ifindex); } po->num = proto; po->prot_hook.type = proto; if (unlikely(unlisted)) { dev_put(dev); po->prot_hook.dev = NULL; po->ifindex = -1; packet_cached_dev_reset(po); } else { po->prot_hook.dev = dev; po->ifindex = dev ? dev->ifindex : 0; packet_cached_dev_assign(po, dev); } } if (dev_curr) dev_put(dev_curr); if (proto == 0 || !need_rehook) goto out_unlock; if (!unlisted && (!dev || (dev->flags & IFF_UP))) { register_prot_hook(sk); } else { sk->sk_err = ENETDOWN; if (!sock_flag(sk, SOCK_DEAD)) sk->sk_error_report(sk); } out_unlock: rcu_read_unlock(); spin_unlock(&po->bind_lock); release_sock(sk); return ret; } /* * Bind a packet socket to a device */ static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr, int addr_len) { struct sock *sk = sock->sk; char name[sizeof(uaddr->sa_data) + 1]; /* * Check legality */ if (addr_len != sizeof(struct sockaddr)) return -EINVAL; /* uaddr->sa_data comes from the userspace, it's not guaranteed to be * zero-terminated. 
*/ memcpy(name, uaddr->sa_data, sizeof(uaddr->sa_data)); name[sizeof(uaddr->sa_data)] = 0; return packet_do_bind(sk, name, 0, pkt_sk(sk)->num); } static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) { struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr; struct sock *sk = sock->sk; /* * Check legality */ if (addr_len < sizeof(struct sockaddr_ll)) return -EINVAL; if (sll->sll_family != AF_PACKET) return -EINVAL; return packet_do_bind(sk, NULL, sll->sll_ifindex, sll->sll_protocol ? : pkt_sk(sk)->num); } static struct proto packet_proto = { .name = "PACKET", .owner = THIS_MODULE, .obj_size = sizeof(struct packet_sock), }; /* * Create a packet of type SOCK_PACKET. */ static int packet_create(struct net *net, struct socket *sock, int protocol, int kern) { struct sock *sk; struct packet_sock *po; __be16 proto = (__force __be16)protocol; /* weird, but documented */ int err; if (!ns_capable(net->user_ns, CAP_NET_RAW)) return -EPERM; if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW && sock->type != SOCK_PACKET) return -ESOCKTNOSUPPORT; sock->state = SS_UNCONNECTED; err = -ENOBUFS; sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto, kern); if (sk == NULL) goto out; sock->ops = &packet_ops; if (sock->type == SOCK_PACKET) sock->ops = &packet_ops_spkt; sock_init_data(sock, sk); po = pkt_sk(sk); sk->sk_family = PF_PACKET; po->num = proto; po->xmit = dev_queue_xmit; err = packet_alloc_pending(po); if (err) goto out2; packet_cached_dev_reset(po); sk->sk_destruct = packet_sock_destruct; sk_refcnt_debug_inc(sk); /* * Attach a protocol block */ spin_lock_init(&po->bind_lock); mutex_init(&po->pg_vec_lock); po->rollover = NULL; po->prot_hook.func = packet_rcv; if (sock->type == SOCK_PACKET) po->prot_hook.func = packet_rcv_spkt; po->prot_hook.af_packet_priv = sk; if (proto) { po->prot_hook.type = proto; register_prot_hook(sk); } mutex_lock(&net->packet.sklist_lock); sk_add_node_rcu(sk, &net->packet.sklist); mutex_unlock(&net->packet.sklist_lock); preempt_disable(); sock_prot_inuse_add(net, &packet_proto, 1); preempt_enable(); return 0; out2: sk_free(sk); out: return err; } /* * Pull a packet from our receive queue and hand it to the user. * If necessary we block. */ static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, int flags) { struct sock *sk = sock->sk; struct sk_buff *skb; int copied, err; int vnet_hdr_len = 0; unsigned int origlen = 0; err = -EINVAL; if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE)) goto out; #if 0 /* What error should we return now? EUNATTACH? */ if (pkt_sk(sk)->ifindex < 0) return -ENODEV; #endif if (flags & MSG_ERRQUEUE) { err = sock_recv_errqueue(sk, msg, len, SOL_PACKET, PACKET_TX_TIMESTAMP); goto out; } /* * Call the generic datagram receiver. This handles all sorts * of horrible races and re-entrancy so we can forget about it * in the protocol layers. * * Now it will return ENETDOWN, if device have just gone down, * but then it will block. */ skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err); /* * An error occurred so return it. Because skb_recv_datagram() * handles the blocking we don't see and worry about blocking * retries. */ if (skb == NULL) goto out; if (pkt_sk(sk)->pressure) packet_rcv_has_room(pkt_sk(sk), NULL); if (pkt_sk(sk)->has_vnet_hdr) { err = packet_rcv_vnet(msg, skb, &len); if (err) goto out_free; vnet_hdr_len = sizeof(struct virtio_net_hdr); } /* You lose any data beyond the buffer you gave. 
If it worries * a user program they can ask the device for its MTU * anyway. */ copied = skb->len; if (copied > len) { copied = len; msg->msg_flags |= MSG_TRUNC; } err = skb_copy_datagram_msg(skb, 0, msg, copied); if (err) goto out_free; if (sock->type != SOCK_PACKET) { struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll; /* Original length was stored in sockaddr_ll fields */ origlen = PACKET_SKB_CB(skb)->sa.origlen; sll->sll_family = AF_PACKET; sll->sll_protocol = skb->protocol; } sock_recv_ts_and_drops(msg, sk, skb); if (msg->msg_name) { /* If the address length field is there to be filled * in, we fill it in now. */ if (sock->type == SOCK_PACKET) { __sockaddr_check_size(sizeof(struct sockaddr_pkt)); msg->msg_namelen = sizeof(struct sockaddr_pkt); } else { struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll; msg->msg_namelen = sll->sll_halen + offsetof(struct sockaddr_ll, sll_addr); } memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa, msg->msg_namelen); } if (pkt_sk(sk)->auxdata) { struct tpacket_auxdata aux; aux.tp_status = TP_STATUS_USER; if (skb->ip_summed == CHECKSUM_PARTIAL) aux.tp_status |= TP_STATUS_CSUMNOTREADY; else if (skb->pkt_type != PACKET_OUTGOING && (skb->ip_summed == CHECKSUM_COMPLETE || skb_csum_unnecessary(skb))) aux.tp_status |= TP_STATUS_CSUM_VALID; aux.tp_len = origlen; aux.tp_snaplen = skb->len; aux.tp_mac = 0; aux.tp_net = skb_network_offset(skb); if (skb_vlan_tag_present(skb)) { aux.tp_vlan_tci = skb_vlan_tag_get(skb); aux.tp_vlan_tpid = ntohs(skb->vlan_proto); aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID; } else { aux.tp_vlan_tci = 0; aux.tp_vlan_tpid = 0; } put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux); } /* * Free or return the buffer as appropriate. Again this * hides all the races and re-entrancy issues from us. */ err = vnet_hdr_len + ((flags&MSG_TRUNC) ? 
skb->len : copied); out_free: skb_free_datagram(sk, skb); out: return err; } static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer) { struct net_device *dev; struct sock *sk = sock->sk; if (peer) return -EOPNOTSUPP; uaddr->sa_family = AF_PACKET; memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data)); rcu_read_lock(); dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex); if (dev) strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data)); rcu_read_unlock(); *uaddr_len = sizeof(*uaddr); return 0; } static int packet_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer) { struct net_device *dev; struct sock *sk = sock->sk; struct packet_sock *po = pkt_sk(sk); DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr); if (peer) return -EOPNOTSUPP; sll->sll_family = AF_PACKET; sll->sll_ifindex = po->ifindex; sll->sll_protocol = po->num; sll->sll_pkttype = 0; rcu_read_lock(); dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex); if (dev) { sll->sll_hatype = dev->type; sll->sll_halen = dev->addr_len; memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len); } else { sll->sll_hatype = 0; /* Bad: we have no ARPHRD_UNSPEC */ sll->sll_halen = 0; } rcu_read_unlock(); *uaddr_len = offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen; return 0; } static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i, int what) { switch (i->type) { case PACKET_MR_MULTICAST: if (i->alen != dev->addr_len) return -EINVAL; if (what > 0) return dev_mc_add(dev, i->addr); else return dev_mc_del(dev, i->addr); break; case PACKET_MR_PROMISC: return dev_set_promiscuity(dev, what); case PACKET_MR_ALLMULTI: return dev_set_allmulti(dev, what); case PACKET_MR_UNICAST: if (i->alen != dev->addr_len) return -EINVAL; if (what > 0) return dev_uc_add(dev, i->addr); else return dev_uc_del(dev, i->addr); break; default: break; } return 0; } static void packet_dev_mclist_delete(struct net_device *dev, struct packet_mclist **mlp) { struct packet_mclist *ml; while ((ml = *mlp) != NULL) { if (ml->ifindex == dev->ifindex) { packet_dev_mc(dev, ml, -1); *mlp = ml->next; kfree(ml); } else mlp = &ml->next; } } static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq) { struct packet_sock *po = pkt_sk(sk); struct packet_mclist *ml, *i; struct net_device *dev; int err; rtnl_lock(); err = -ENODEV; dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex); if (!dev) goto done; err = -EINVAL; if (mreq->mr_alen > dev->addr_len) goto done; err = -ENOBUFS; i = kmalloc(sizeof(*i), GFP_KERNEL); if (i == NULL) goto done; err = 0; for (ml = po->mclist; ml; ml = ml->next) { if (ml->ifindex == mreq->mr_ifindex && ml->type == mreq->mr_type && ml->alen == mreq->mr_alen && memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) { ml->count++; /* Free the new element ... 
*/ kfree(i); goto done; } } i->type = mreq->mr_type; i->ifindex = mreq->mr_ifindex; i->alen = mreq->mr_alen; memcpy(i->addr, mreq->mr_address, i->alen); memset(i->addr + i->alen, 0, sizeof(i->addr) - i->alen); i->count = 1; i->next = po->mclist; po->mclist = i; err = packet_dev_mc(dev, i, 1); if (err) { po->mclist = i->next; kfree(i); } done: rtnl_unlock(); return err; } static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq) { struct packet_mclist *ml, **mlp; rtnl_lock(); for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) { if (ml->ifindex == mreq->mr_ifindex && ml->type == mreq->mr_type && ml->alen == mreq->mr_alen && memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) { if (--ml->count == 0) { struct net_device *dev; *mlp = ml->next; dev = __dev_get_by_index(sock_net(sk), ml->ifindex); if (dev) packet_dev_mc(dev, ml, -1); kfree(ml); } break; } } rtnl_unlock(); return 0; } static void packet_flush_mclist(struct sock *sk) { struct packet_sock *po = pkt_sk(sk); struct packet_mclist *ml; if (!po->mclist) return; rtnl_lock(); while ((ml = po->mclist) != NULL) { struct net_device *dev; po->mclist = ml->next; dev = __dev_get_by_index(sock_net(sk), ml->ifindex); if (dev != NULL) packet_dev_mc(dev, ml, -1); kfree(ml); } rtnl_unlock(); } static int packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) { struct sock *sk = sock->sk; struct packet_sock *po = pkt_sk(sk); int ret; if (level != SOL_PACKET) return -ENOPROTOOPT; switch (optname) { case PACKET_ADD_MEMBERSHIP: case PACKET_DROP_MEMBERSHIP: { struct packet_mreq_max mreq; int len = optlen; memset(&mreq, 0, sizeof(mreq)); if (len < sizeof(struct packet_mreq)) return -EINVAL; if (len > sizeof(mreq)) len = sizeof(mreq); if (copy_from_user(&mreq, optval, len)) return -EFAULT; if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address))) return -EINVAL; if (optname == PACKET_ADD_MEMBERSHIP) ret = packet_mc_add(sk, &mreq); else ret = packet_mc_drop(sk, &mreq); return ret; } case PACKET_RX_RING: case PACKET_TX_RING: { union tpacket_req_u req_u; int len; switch (po->tp_version) { case TPACKET_V1: case TPACKET_V2: len = sizeof(req_u.req); break; case TPACKET_V3: default: len = sizeof(req_u.req3); break; } if (optlen < len) return -EINVAL; if (copy_from_user(&req_u.req, optval, len)) return -EFAULT; return packet_set_ring(sk, &req_u, 0, optname == PACKET_TX_RING); } case PACKET_COPY_THRESH: { int val; if (optlen != sizeof(val)) return -EINVAL; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; pkt_sk(sk)->copy_thresh = val; return 0; } case PACKET_VERSION: { int val; if (optlen != sizeof(val)) return -EINVAL; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; switch (val) { case TPACKET_V1: case TPACKET_V2: case TPACKET_V3: break; default: return -EINVAL; } lock_sock(sk); if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { ret = -EBUSY; } else { po->tp_version = val; ret = 0; } release_sock(sk); return ret; } case PACKET_RESERVE: { unsigned int val; if (optlen != sizeof(val)) return -EINVAL; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; if (val > INT_MAX) return -EINVAL; lock_sock(sk); if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { ret = -EBUSY; } else { po->tp_reserve = val; ret = 0; } release_sock(sk); return ret; } case PACKET_LOSS: { unsigned int val; if (optlen != sizeof(val)) return -EINVAL; if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) return -EBUSY; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; 
po->tp_loss = !!val; return 0; } case PACKET_AUXDATA: { int val; if (optlen < sizeof(val)) return -EINVAL; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; po->auxdata = !!val; return 0; } case PACKET_ORIGDEV: { int val; if (optlen < sizeof(val)) return -EINVAL; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; po->origdev = !!val; return 0; } case PACKET_VNET_HDR: { int val; if (sock->type != SOCK_RAW) return -EINVAL; if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) return -EBUSY; if (optlen < sizeof(val)) return -EINVAL; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; po->has_vnet_hdr = !!val; return 0; } case PACKET_TIMESTAMP: { int val; if (optlen != sizeof(val)) return -EINVAL; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; po->tp_tstamp = val; return 0; } case PACKET_FANOUT: { int val; if (optlen != sizeof(val)) return -EINVAL; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; return fanout_add(sk, val & 0xffff, val >> 16); } case PACKET_FANOUT_DATA: { if (!po->fanout) return -EINVAL; return fanout_set_data(po, optval, optlen); } case PACKET_TX_HAS_OFF: { unsigned int val; if (optlen != sizeof(val)) return -EINVAL; if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) return -EBUSY; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; po->tp_tx_has_off = !!val; return 0; } case PACKET_QDISC_BYPASS: { int val; if (optlen != sizeof(val)) return -EINVAL; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; po->xmit = val ? packet_direct_xmit : dev_queue_xmit; return 0; } default: return -ENOPROTOOPT; } } static int packet_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { int len; int val, lv = sizeof(val); struct sock *sk = sock->sk; struct packet_sock *po = pkt_sk(sk); void *data = &val; union tpacket_stats_u st; struct tpacket_rollover_stats rstats; if (level != SOL_PACKET) return -ENOPROTOOPT; if (get_user(len, optlen)) return -EFAULT; if (len < 0) return -EINVAL; switch (optname) { case PACKET_STATISTICS: spin_lock_bh(&sk->sk_receive_queue.lock); memcpy(&st, &po->stats, sizeof(st)); memset(&po->stats, 0, sizeof(po->stats)); spin_unlock_bh(&sk->sk_receive_queue.lock); if (po->tp_version == TPACKET_V3) { lv = sizeof(struct tpacket_stats_v3); st.stats3.tp_packets += st.stats3.tp_drops; data = &st.stats3; } else { lv = sizeof(struct tpacket_stats); st.stats1.tp_packets += st.stats1.tp_drops; data = &st.stats1; } break; case PACKET_AUXDATA: val = po->auxdata; break; case PACKET_ORIGDEV: val = po->origdev; break; case PACKET_VNET_HDR: val = po->has_vnet_hdr; break; case PACKET_VERSION: val = po->tp_version; break; case PACKET_HDRLEN: if (len > sizeof(int)) len = sizeof(int); if (len < sizeof(int)) return -EINVAL; if (copy_from_user(&val, optval, len)) return -EFAULT; switch (val) { case TPACKET_V1: val = sizeof(struct tpacket_hdr); break; case TPACKET_V2: val = sizeof(struct tpacket2_hdr); break; case TPACKET_V3: val = sizeof(struct tpacket3_hdr); break; default: return -EINVAL; } break; case PACKET_RESERVE: val = po->tp_reserve; break; case PACKET_LOSS: val = po->tp_loss; break; case PACKET_TIMESTAMP: val = po->tp_tstamp; break; case PACKET_FANOUT: val = (po->fanout ? 
((u32)po->fanout->id | ((u32)po->fanout->type << 16) | ((u32)po->fanout->flags << 24)) : 0); break; case PACKET_ROLLOVER_STATS: if (!po->rollover) return -EINVAL; rstats.tp_all = atomic_long_read(&po->rollover->num); rstats.tp_huge = atomic_long_read(&po->rollover->num_huge); rstats.tp_failed = atomic_long_read(&po->rollover->num_failed); data = &rstats; lv = sizeof(rstats); break; case PACKET_TX_HAS_OFF: val = po->tp_tx_has_off; break; case PACKET_QDISC_BYPASS: val = packet_use_direct_xmit(po); break; default: return -ENOPROTOOPT; } if (len > lv) len = lv; if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, data, len)) return -EFAULT; return 0; } #ifdef CONFIG_COMPAT static int compat_packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) { struct packet_sock *po = pkt_sk(sock->sk); if (level != SOL_PACKET) return -ENOPROTOOPT; if (optname == PACKET_FANOUT_DATA && po->fanout && po->fanout->type == PACKET_FANOUT_CBPF) { optval = (char __user *)get_compat_bpf_fprog(optval); if (!optval) return -EFAULT; optlen = sizeof(struct sock_fprog); } return packet_setsockopt(sock, level, optname, optval, optlen); } #endif static int packet_notifier(struct notifier_block *this, unsigned long msg, void *ptr) { struct sock *sk; struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct net *net = dev_net(dev); rcu_read_lock(); sk_for_each_rcu(sk, &net->packet.sklist) { struct packet_sock *po = pkt_sk(sk); switch (msg) { case NETDEV_UNREGISTER: if (po->mclist) packet_dev_mclist_delete(dev, &po->mclist); /* fallthrough */ case NETDEV_DOWN: if (dev->ifindex == po->ifindex) { spin_lock(&po->bind_lock); if (po->running) { __unregister_prot_hook(sk, false); sk->sk_err = ENETDOWN; if (!sock_flag(sk, SOCK_DEAD)) sk->sk_error_report(sk); } if (msg == NETDEV_UNREGISTER) { packet_cached_dev_reset(po); po->ifindex = -1; if (po->prot_hook.dev) dev_put(po->prot_hook.dev); po->prot_hook.dev = NULL; } spin_unlock(&po->bind_lock); } break; case NETDEV_UP: if (dev->ifindex == po->ifindex) { spin_lock(&po->bind_lock); if (po->num) register_prot_hook(sk); spin_unlock(&po->bind_lock); } break; } } rcu_read_unlock(); return NOTIFY_DONE; } static int packet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { struct sock *sk = sock->sk; switch (cmd) { case SIOCOUTQ: { int amount = sk_wmem_alloc_get(sk); return put_user(amount, (int __user *)arg); } case SIOCINQ: { struct sk_buff *skb; int amount = 0; spin_lock_bh(&sk->sk_receive_queue.lock); skb = skb_peek(&sk->sk_receive_queue); if (skb) amount = skb->len; spin_unlock_bh(&sk->sk_receive_queue.lock); return put_user(amount, (int __user *)arg); } case SIOCGSTAMP: return sock_get_timestamp(sk, (struct timeval __user *)arg); case SIOCGSTAMPNS: return sock_get_timestampns(sk, (struct timespec __user *)arg); #ifdef CONFIG_INET case SIOCADDRT: case SIOCDELRT: case SIOCDARP: case SIOCGARP: case SIOCSARP: case SIOCGIFADDR: case SIOCSIFADDR: case SIOCGIFBRDADDR: case SIOCSIFBRDADDR: case SIOCGIFNETMASK: case SIOCSIFNETMASK: case SIOCGIFDSTADDR: case SIOCSIFDSTADDR: case SIOCSIFFLAGS: return inet_dgram_ops.ioctl(sock, cmd, arg); #endif default: return -ENOIOCTLCMD; } return 0; } static unsigned int packet_poll(struct file *file, struct socket *sock, poll_table *wait) { struct sock *sk = sock->sk; struct packet_sock *po = pkt_sk(sk); unsigned int mask = datagram_poll(file, sock, wait); spin_lock_bh(&sk->sk_receive_queue.lock); if (po->rx_ring.pg_vec) { if (!packet_previous_rx_frame(po, &po->rx_ring, 
TP_STATUS_KERNEL)) mask |= POLLIN | POLLRDNORM; } if (po->pressure && __packet_rcv_has_room(po, NULL) == ROOM_NORMAL) po->pressure = 0; spin_unlock_bh(&sk->sk_receive_queue.lock); spin_lock_bh(&sk->sk_write_queue.lock); if (po->tx_ring.pg_vec) { if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE)) mask |= POLLOUT | POLLWRNORM; } spin_unlock_bh(&sk->sk_write_queue.lock); return mask; } /* Dirty? Well, I still did not learn better way to account * for user mmaps. */ static void packet_mm_open(struct vm_area_struct *vma) { struct file *file = vma->vm_file; struct socket *sock = file->private_data; struct sock *sk = sock->sk; if (sk) atomic_inc(&pkt_sk(sk)->mapped); } static void packet_mm_close(struct vm_area_struct *vma) { struct file *file = vma->vm_file; struct socket *sock = file->private_data; struct sock *sk = sock->sk; if (sk) atomic_dec(&pkt_sk(sk)->mapped); } static const struct vm_operations_struct packet_mmap_ops = { .open = packet_mm_open, .close = packet_mm_close, }; static void free_pg_vec(struct pgv *pg_vec, unsigned int order, unsigned int len) { int i; for (i = 0; i < len; i++) { if (likely(pg_vec[i].buffer)) { if (is_vmalloc_addr(pg_vec[i].buffer)) vfree(pg_vec[i].buffer); else free_pages((unsigned long)pg_vec[i].buffer, order); pg_vec[i].buffer = NULL; } } kfree(pg_vec); } static char *alloc_one_pg_vec_page(unsigned long order) { char *buffer; gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP | __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY; buffer = (char *) __get_free_pages(gfp_flags, order); if (buffer) return buffer; /* __get_free_pages failed, fall back to vmalloc */ buffer = vzalloc((1 << order) * PAGE_SIZE); if (buffer) return buffer; /* vmalloc failed, lets dig into swap here */ gfp_flags &= ~__GFP_NORETRY; buffer = (char *) __get_free_pages(gfp_flags, order); if (buffer) return buffer; /* complete and utter failure */ return NULL; } static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order) { unsigned int block_nr = req->tp_block_nr; struct pgv *pg_vec; int i; pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL); if (unlikely(!pg_vec)) goto out; for (i = 0; i < block_nr; i++) { pg_vec[i].buffer = alloc_one_pg_vec_page(order); if (unlikely(!pg_vec[i].buffer)) goto out_free_pgvec; } out: return pg_vec; out_free_pgvec: free_pg_vec(pg_vec, order, block_nr); pg_vec = NULL; goto out; } static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, int closing, int tx_ring) { struct pgv *pg_vec = NULL; struct packet_sock *po = pkt_sk(sk); int was_running, order = 0; struct packet_ring_buffer *rb; struct sk_buff_head *rb_queue; __be16 num; int err = -EINVAL; /* Added to avoid minimal code churn */ struct tpacket_req *req = &req_u->req; lock_sock(sk); rb = tx_ring ? &po->tx_ring : &po->rx_ring; rb_queue = tx_ring ? 
&sk->sk_write_queue : &sk->sk_receive_queue; err = -EBUSY; if (!closing) { if (atomic_read(&po->mapped)) goto out; if (packet_read_pending(rb)) goto out; } if (req->tp_block_nr) { /* Sanity tests and some calculations */ err = -EBUSY; if (unlikely(rb->pg_vec)) goto out; switch (po->tp_version) { case TPACKET_V1: po->tp_hdrlen = TPACKET_HDRLEN; break; case TPACKET_V2: po->tp_hdrlen = TPACKET2_HDRLEN; break; case TPACKET_V3: po->tp_hdrlen = TPACKET3_HDRLEN; break; } err = -EINVAL; if (unlikely((int)req->tp_block_size <= 0)) goto out; if (unlikely(!PAGE_ALIGNED(req->tp_block_size))) goto out; if (po->tp_version >= TPACKET_V3 && req->tp_block_size <= BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv)) goto out; if (unlikely(req->tp_frame_size < po->tp_hdrlen + po->tp_reserve)) goto out; if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1))) goto out; rb->frames_per_block = req->tp_block_size / req->tp_frame_size; if (unlikely(rb->frames_per_block == 0)) goto out; if (unlikely(req->tp_block_size > UINT_MAX / req->tp_block_nr)) goto out; if (unlikely((rb->frames_per_block * req->tp_block_nr) != req->tp_frame_nr)) goto out; err = -ENOMEM; order = get_order(req->tp_block_size); pg_vec = alloc_pg_vec(req, order); if (unlikely(!pg_vec)) goto out; switch (po->tp_version) { case TPACKET_V3: /* Block transmit is not supported yet */ if (!tx_ring) { init_prb_bdqc(po, rb, pg_vec, req_u); } else { struct tpacket_req3 *req3 = &req_u->req3; if (req3->tp_retire_blk_tov || req3->tp_sizeof_priv || req3->tp_feature_req_word) { err = -EINVAL; goto out; } } break; default: break; } } /* Done */ else { err = -EINVAL; if (unlikely(req->tp_frame_nr)) goto out; } /* Detach socket from network */ spin_lock(&po->bind_lock); was_running = po->running; num = po->num; if (was_running) { po->num = 0; __unregister_prot_hook(sk, false); } spin_unlock(&po->bind_lock); synchronize_net(); err = -EBUSY; mutex_lock(&po->pg_vec_lock); if (closing || atomic_read(&po->mapped) == 0) { err = 0; spin_lock_bh(&rb_queue->lock); swap(rb->pg_vec, pg_vec); rb->frame_max = (req->tp_frame_nr - 1); rb->head = 0; rb->frame_size = req->tp_frame_size; spin_unlock_bh(&rb_queue->lock); swap(rb->pg_vec_order, order); swap(rb->pg_vec_len, req->tp_block_nr); rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE; po->prot_hook.func = (po->rx_ring.pg_vec) ? 
tpacket_rcv : packet_rcv; skb_queue_purge(rb_queue); if (atomic_read(&po->mapped)) pr_err("packet_mmap: vma is busy: %d\n", atomic_read(&po->mapped)); } mutex_unlock(&po->pg_vec_lock); spin_lock(&po->bind_lock); if (was_running) { po->num = num; register_prot_hook(sk); } spin_unlock(&po->bind_lock); if (pg_vec && (po->tp_version > TPACKET_V2)) { /* Because we don't support block-based V3 on tx-ring */ if (!tx_ring) prb_shutdown_retire_blk_timer(po, rb_queue); } if (pg_vec) free_pg_vec(pg_vec, order, req->tp_block_nr); out: release_sock(sk); return err; } static int packet_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma) { struct sock *sk = sock->sk; struct packet_sock *po = pkt_sk(sk); unsigned long size, expected_size; struct packet_ring_buffer *rb; unsigned long start; int err = -EINVAL; int i; if (vma->vm_pgoff) return -EINVAL; mutex_lock(&po->pg_vec_lock); expected_size = 0; for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) { if (rb->pg_vec) { expected_size += rb->pg_vec_len * rb->pg_vec_pages * PAGE_SIZE; } } if (expected_size == 0) goto out; size = vma->vm_end - vma->vm_start; if (size != expected_size) goto out; start = vma->vm_start; for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) { if (rb->pg_vec == NULL) continue; for (i = 0; i < rb->pg_vec_len; i++) { struct page *page; void *kaddr = rb->pg_vec[i].buffer; int pg_num; for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) { page = pgv_to_page(kaddr); err = vm_insert_page(vma, start, page); if (unlikely(err)) goto out; start += PAGE_SIZE; kaddr += PAGE_SIZE; } } } atomic_inc(&po->mapped); vma->vm_ops = &packet_mmap_ops; err = 0; out: mutex_unlock(&po->pg_vec_lock); return err; } static const struct proto_ops packet_ops_spkt = { .family = PF_PACKET, .owner = THIS_MODULE, .release = packet_release, .bind = packet_bind_spkt, .connect = sock_no_connect, .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = packet_getname_spkt, .poll = datagram_poll, .ioctl = packet_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, .setsockopt = sock_no_setsockopt, .getsockopt = sock_no_getsockopt, .sendmsg = packet_sendmsg_spkt, .recvmsg = packet_recvmsg, .mmap = sock_no_mmap, .sendpage = sock_no_sendpage, }; static const struct proto_ops packet_ops = { .family = PF_PACKET, .owner = THIS_MODULE, .release = packet_release, .bind = packet_bind, .connect = sock_no_connect, .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = packet_getname, .poll = packet_poll, .ioctl = packet_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, .setsockopt = packet_setsockopt, .getsockopt = packet_getsockopt, #ifdef CONFIG_COMPAT .compat_setsockopt = compat_packet_setsockopt, #endif .sendmsg = packet_sendmsg, .recvmsg = packet_recvmsg, .mmap = packet_mmap, .sendpage = sock_no_sendpage, }; static const struct net_proto_family packet_family_ops = { .family = PF_PACKET, .create = packet_create, .owner = THIS_MODULE, }; static struct notifier_block packet_netdev_notifier = { .notifier_call = packet_notifier, }; #ifdef CONFIG_PROC_FS static void *packet_seq_start(struct seq_file *seq, loff_t *pos) __acquires(RCU) { struct net *net = seq_file_net(seq); rcu_read_lock(); return seq_hlist_start_head_rcu(&net->packet.sklist, *pos); } static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct net *net = seq_file_net(seq); return seq_hlist_next_rcu(v, &net->packet.sklist, pos); } static void packet_seq_stop(struct seq_file *seq, void *v) __releases(RCU) { rcu_read_unlock(); } 
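/*
 * /proc/net/packet: one line per PF_PACKET socket.  The columns match the
 * header emitted below: socket address, refcount, socket type, bound
 * protocol (hex), ifindex, running flag, receive-queue bytes, owning uid
 * and inode.
 */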
static int packet_seq_show(struct seq_file *seq, void *v) { if (v == SEQ_START_TOKEN) seq_puts(seq, "sk RefCnt Type Proto Iface R Rmem User Inode\n"); else { struct sock *s = sk_entry(v); const struct packet_sock *po = pkt_sk(s); seq_printf(seq, "%pK %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n", s, refcount_read(&s->sk_refcnt), s->sk_type, ntohs(po->num), po->ifindex, po->running, atomic_read(&s->sk_rmem_alloc), from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)), sock_i_ino(s)); } return 0; } static const struct seq_operations packet_seq_ops = { .start = packet_seq_start, .next = packet_seq_next, .stop = packet_seq_stop, .show = packet_seq_show, }; static int packet_seq_open(struct inode *inode, struct file *file) { return seq_open_net(inode, file, &packet_seq_ops, sizeof(struct seq_net_private)); } static const struct file_operations packet_seq_fops = { .owner = THIS_MODULE, .open = packet_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_net, }; #endif static int __net_init packet_net_init(struct net *net) { mutex_init(&net->packet.sklist_lock); INIT_HLIST_HEAD(&net->packet.sklist); if (!proc_create("packet", 0, net->proc_net, &packet_seq_fops)) return -ENOMEM; return 0; } static void __net_exit packet_net_exit(struct net *net) { remove_proc_entry("packet", net->proc_net); } static struct pernet_operations packet_net_ops = { .init = packet_net_init, .exit = packet_net_exit, }; static void __exit packet_exit(void) { unregister_netdevice_notifier(&packet_netdev_notifier); unregister_pernet_subsys(&packet_net_ops); sock_unregister(PF_PACKET); proto_unregister(&packet_proto); } static int __init packet_init(void) { int rc = proto_register(&packet_proto, 0); if (rc != 0) goto out; sock_register(&packet_family_ops); register_pernet_subsys(&packet_net_ops); register_netdevice_notifier(&packet_netdev_notifier); out: return rc; } module_init(packet_init); module_exit(packet_exit); MODULE_LICENSE("GPL"); MODULE_ALIAS_NETPROTO(PF_PACKET);
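/*
 * Illustrative userspace sketch (not part of this file): roughly how the
 * PACKET_VERSION / PACKET_RX_RING / mmap() path implemented above is
 * typically driven.  The ring geometry below is an arbitrary example and
 * error handling is omitted; constants come from <linux/if_packet.h>.
 * Guarded by #if 0 so it is never built together with the kernel code.
 */
#if 0
#include <sys/socket.h>
#include <sys/mman.h>
#include <arpa/inet.h>
#include <linux/if_packet.h>
#include <linux/if_ether.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
	int version = TPACKET_V2;	/* must precede ring setup, see PACKET_VERSION above */
	struct tpacket_req req = {
		.tp_block_size = 4096,	/* must be page aligned (packet_set_ring) */
		.tp_block_nr   = 64,
		.tp_frame_size = 2048,	/* multiple of TPACKET_ALIGNMENT */
		.tp_frame_nr   = 64 * (4096 / 2048), /* frames_per_block * block_nr */
	};
	void *ring;

	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &version, sizeof(version));
	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));

	/* length must match the full ring size checked in packet_mmap() */
	ring = mmap(NULL, (size_t)req.tp_block_size * req.tp_block_nr,
		    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

	/* ... poll(fd) and walk tpacket2_hdr frames, flipping tp_status ... */

	munmap(ring, (size_t)req.tp_block_size * req.tp_block_nr);
	close(fd);
	return 0;
}
#endif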
/* SCTP kernel implementation * (C) Copyright IBM Corp. 2001, 2004 * Copyright (c) 1999-2000 Cisco, Inc. * Copyright (c) 1999-2001 Motorola, Inc. * Copyright (c) 2001-2003 Intel Corp. * Copyright (c) 2001-2002 Nokia, Inc. * Copyright (c) 2001 La Monte H.P. Yarroll * * This file is part of the SCTP kernel implementation * * These functions interface with the sockets layer to implement the * SCTP Extensions for the Sockets API. * * Note that the descriptions from the specification are USER level * functions--this file is the functions which populate the struct proto * for SCTP which is the BOTTOM of the sockets interface. * * This SCTP implementation is free software; * you can redistribute it and/or modify it under the terms of * the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This SCTP implementation is distributed in the hope that it * will be useful, but WITHOUT ANY WARRANTY; without even the implied * ************************ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * See the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with GNU CC; see the file COPYING. If not, see * <http://www.gnu.org/licenses/>. * * Please send any bug reports or fixes you make to the * email address(es): * lksctp developers <linux-sctp@vger.kernel.org> * * Written or modified by: * La Monte H.P. Yarroll <piggy@acm.org> * Narasimha Budihal <narsi@refcode.org> * Karl Knutson <karl@athena.chicago.il.us> * Jon Grimm <jgrimm@us.ibm.com> * Xingang Guo <xingang.guo@intel.com> * Daisy Chang <daisyc@us.ibm.com> * Sridhar Samudrala <samudrala@us.ibm.com> * Inaky Perez-Gonzalez <inaky.gonzalez@intel.com> * Ardelle Fan <ardelle.fan@intel.com> * Ryan Layer <rmlayer@us.ibm.com> * Anup Pemmaiah <pemmaiah@cc.usu.edu> * Kevin Gao <kevin.gao@intel.com> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/types.h> #include <linux/kernel.h> #include <linux/wait.h> #include <linux/time.h> #include <linux/ip.h> #include <linux/capability.h> #include <linux/fcntl.h> #include <linux/poll.h> #include <linux/init.h> #include <linux/crypto.h> #include <linux/slab.h> #include <linux/file.h> #include <linux/compat.h> #include <net/ip.h> #include <net/icmp.h> #include <net/route.h> #include <net/ipv6.h> #include <net/inet_common.h> #include <net/busy_poll.h> #include <linux/socket.h> /* for sa_family_t */ #include <linux/export.h> #include <net/sock.h> #include <net/sctp/sctp.h> #include <net/sctp/sm.h> /* Forward declarations for internal helper functions. 
*/ static int sctp_writeable(struct sock *sk); static void sctp_wfree(struct sk_buff *skb); static int sctp_wait_for_sndbuf(struct sctp_association *, long *timeo_p, size_t msg_len); static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p); static int sctp_wait_for_connect(struct sctp_association *, long *timeo_p); static int sctp_wait_for_accept(struct sock *sk, long timeo); static void sctp_wait_for_close(struct sock *sk, long timeo); static void sctp_destruct_sock(struct sock *sk); static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt, union sctp_addr *addr, int len); static int sctp_bindx_add(struct sock *, struct sockaddr *, int); static int sctp_bindx_rem(struct sock *, struct sockaddr *, int); static int sctp_send_asconf_add_ip(struct sock *, struct sockaddr *, int); static int sctp_send_asconf_del_ip(struct sock *, struct sockaddr *, int); static int sctp_send_asconf(struct sctp_association *asoc, struct sctp_chunk *chunk); static int sctp_do_bind(struct sock *, union sctp_addr *, int); static int sctp_autobind(struct sock *sk); static void sctp_sock_migrate(struct sock *, struct sock *, struct sctp_association *, sctp_socket_type_t); static int sctp_memory_pressure; static atomic_long_t sctp_memory_allocated; struct percpu_counter sctp_sockets_allocated; static void sctp_enter_memory_pressure(struct sock *sk) { sctp_memory_pressure = 1; } /* Get the sndbuf space available at the time on the association. */ static inline int sctp_wspace(struct sctp_association *asoc) { int amt; if (asoc->ep->sndbuf_policy) amt = asoc->sndbuf_used; else amt = sk_wmem_alloc_get(asoc->base.sk); if (amt >= asoc->base.sk->sk_sndbuf) { if (asoc->base.sk->sk_userlocks & SOCK_SNDBUF_LOCK) amt = 0; else { amt = sk_stream_wspace(asoc->base.sk); if (amt < 0) amt = 0; } } else { amt = asoc->base.sk->sk_sndbuf - amt; } return amt; } /* Increment the used sndbuf space count of the corresponding association by * the size of the outgoing data chunk. * Also, set the skb destructor for sndbuf accounting later. * * Since it is always 1-1 between chunk and skb, and also a new skb is always * allocated for chunk bundling in sctp_packet_transmit(), we can use the * destructor in the data chunk skb for the purpose of the sndbuf space * tracking. */ static inline void sctp_set_owner_w(struct sctp_chunk *chunk) { struct sctp_association *asoc = chunk->asoc; struct sock *sk = asoc->base.sk; /* The sndbuf space is tracked per association. */ sctp_association_hold(asoc); skb_set_owner_w(chunk->skb, sk); chunk->skb->destructor = sctp_wfree; /* Save the chunk pointer in skb for sctp_wfree to use later. */ skb_shinfo(chunk->skb)->destructor_arg = chunk; asoc->sndbuf_used += SCTP_DATA_SNDSIZE(chunk) + sizeof(struct sk_buff) + sizeof(struct sctp_chunk); atomic_add(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc); sk->sk_wmem_queued += chunk->skb->truesize; sk_mem_charge(sk, chunk->skb->truesize); } /* Verify that this is a valid address. */ static inline int sctp_verify_addr(struct sock *sk, union sctp_addr *addr, int len) { struct sctp_af *af; /* Verify basic sockaddr. */ af = sctp_sockaddr_af(sctp_sk(sk), addr, len); if (!af) return -EINVAL; /* Is this a valid SCTP address? */ if (!af->addr_valid(addr, sctp_sk(sk), NULL)) return -EINVAL; if (!sctp_sk(sk)->pf->send_verify(sctp_sk(sk), (addr))) return -EINVAL; return 0; } /* Look up the association by its id. If this is not a UDP-style * socket, the ID field is always ignored. 
*/ struct sctp_association *sctp_id2assoc(struct sock *sk, sctp_assoc_t id) { struct sctp_association *asoc = NULL; /* If this is not a UDP-style socket, assoc id should be ignored. */ if (!sctp_style(sk, UDP)) { /* Return NULL if the socket state is not ESTABLISHED. It * could be a TCP-style listening socket or a socket which * hasn't yet called connect() to establish an association. */ if (!sctp_sstate(sk, ESTABLISHED)) return NULL; /* Get the first and the only association from the list. */ if (!list_empty(&sctp_sk(sk)->ep->asocs)) asoc = list_entry(sctp_sk(sk)->ep->asocs.next, struct sctp_association, asocs); return asoc; } /* Otherwise this is a UDP-style socket. */ if (!id || (id == (sctp_assoc_t)-1)) return NULL; spin_lock_bh(&sctp_assocs_id_lock); asoc = (struct sctp_association *)idr_find(&sctp_assocs_id, (int)id); spin_unlock_bh(&sctp_assocs_id_lock); if (!asoc || (asoc->base.sk != sk) || asoc->base.dead) return NULL; return asoc; } /* Look up the transport from an address and an assoc id. If both address and * id are specified, the associations matching the address and the id should be * the same. */ static struct sctp_transport *sctp_addr_id2transport(struct sock *sk, struct sockaddr_storage *addr, sctp_assoc_t id) { struct sctp_association *addr_asoc = NULL, *id_asoc = NULL; struct sctp_transport *transport; union sctp_addr *laddr = (union sctp_addr *)addr; addr_asoc = sctp_endpoint_lookup_assoc(sctp_sk(sk)->ep, laddr, &transport); if (!addr_asoc) return NULL; id_asoc = sctp_id2assoc(sk, id); if (id_asoc && (id_asoc != addr_asoc)) return NULL; sctp_get_pf_specific(sk->sk_family)->addr_to_user(sctp_sk(sk), (union sctp_addr *)addr); return transport; } /* API 3.1.2 bind() - UDP Style Syntax * The syntax of bind() is, * * ret = bind(int sd, struct sockaddr *addr, int addrlen); * * sd - the socket descriptor returned by socket(). * addr - the address structure (struct sockaddr_in or struct * sockaddr_in6 [RFC 2553]), * addr_len - the size of the address structure. */ static int sctp_bind(struct sock *sk, struct sockaddr *addr, int addr_len) { int retval = 0; lock_sock(sk); pr_debug("%s: sk:%p, addr:%p, addr_len:%d\n", __func__, sk, addr, addr_len); /* Disallow binding twice. */ if (!sctp_sk(sk)->ep->base.bind_addr.port) retval = sctp_do_bind(sk, (union sctp_addr *)addr, addr_len); else retval = -EINVAL; release_sock(sk); return retval; } static long sctp_get_port_local(struct sock *, union sctp_addr *); /* Verify this is a valid sockaddr. */ static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt, union sctp_addr *addr, int len) { struct sctp_af *af; /* Check minimum size. */ if (len < sizeof (struct sockaddr)) return NULL; /* V4 mapped address are really of AF_INET family */ if (addr->sa.sa_family == AF_INET6 && ipv6_addr_v4mapped(&addr->v6.sin6_addr)) { if (!opt->pf->af_supported(AF_INET, opt)) return NULL; } else { /* Does this PF support this AF? */ if (!opt->pf->af_supported(addr->sa.sa_family, opt)) return NULL; } /* If we get this far, af is valid. */ af = sctp_get_af_specific(addr->sa.sa_family); if (len < af->sockaddr_len) return NULL; return af; } /* Bind a local address either to an endpoint or to an association. */ static int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len) { struct net *net = sock_net(sk); struct sctp_sock *sp = sctp_sk(sk); struct sctp_endpoint *ep = sp->ep; struct sctp_bind_addr *bp = &ep->base.bind_addr; struct sctp_af *af; unsigned short snum; int ret = 0; /* Common sockaddr verification. 
*/ af = sctp_sockaddr_af(sp, addr, len); if (!af) { pr_debug("%s: sk:%p, newaddr:%p, len:%d EINVAL\n", __func__, sk, addr, len); return -EINVAL; } snum = ntohs(addr->v4.sin_port); pr_debug("%s: sk:%p, new addr:%pISc, port:%d, new port:%d, len:%d\n", __func__, sk, &addr->sa, bp->port, snum, len); /* PF specific bind() address verification. */ if (!sp->pf->bind_verify(sp, addr)) return -EADDRNOTAVAIL; /* We must either be unbound, or bind to the same port. * It's OK to allow 0 ports if we are already bound. * We'll just inhert an already bound port in this case */ if (bp->port) { if (!snum) snum = bp->port; else if (snum != bp->port) { pr_debug("%s: new port %d doesn't match existing port " "%d\n", __func__, snum, bp->port); return -EINVAL; } } if (snum && snum < PROT_SOCK && !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) return -EACCES; /* See if the address matches any of the addresses we may have * already bound before checking against other endpoints. */ if (sctp_bind_addr_match(bp, addr, sp)) return -EINVAL; /* Make sure we are allowed to bind here. * The function sctp_get_port_local() does duplicate address * detection. */ addr->v4.sin_port = htons(snum); if ((ret = sctp_get_port_local(sk, addr))) { return -EADDRINUSE; } /* Refresh ephemeral port. */ if (!bp->port) bp->port = inet_sk(sk)->inet_num; /* Add the address to the bind address list. * Use GFP_ATOMIC since BHs will be disabled. */ ret = sctp_add_bind_addr(bp, addr, SCTP_ADDR_SRC, GFP_ATOMIC); /* Copy back into socket for getsockname() use. */ if (!ret) { inet_sk(sk)->inet_sport = htons(inet_sk(sk)->inet_num); sp->pf->to_sk_saddr(addr, sk); } return ret; } /* ADDIP Section 4.1.1 Congestion Control of ASCONF Chunks * * R1) One and only one ASCONF Chunk MAY be in transit and unacknowledged * at any one time. If a sender, after sending an ASCONF chunk, decides * it needs to transfer another ASCONF Chunk, it MUST wait until the * ASCONF-ACK Chunk returns from the previous ASCONF Chunk before sending a * subsequent ASCONF. Note this restriction binds each side, so at any * time two ASCONF may be in-transit on any given association (one sent * from each endpoint). */ static int sctp_send_asconf(struct sctp_association *asoc, struct sctp_chunk *chunk) { struct net *net = sock_net(asoc->base.sk); int retval = 0; /* If there is an outstanding ASCONF chunk, queue it for later * transmission. */ if (asoc->addip_last_asconf) { list_add_tail(&chunk->list, &asoc->addip_chunk_list); goto out; } /* Hold the chunk until an ASCONF_ACK is received. */ sctp_chunk_hold(chunk); retval = sctp_primitive_ASCONF(net, asoc, chunk); if (retval) sctp_chunk_free(chunk); else asoc->addip_last_asconf = chunk; out: return retval; } /* Add a list of addresses as bind addresses to local endpoint or * association. * * Basically run through each address specified in the addrs/addrcnt * array/length pair, determine if it is IPv6 or IPv4 and call * sctp_do_bind() on it. * * If any of them fails, then the operation will be reversed and the * ones that were added will be removed. * * Only sctp_setsockopt_bindx() is supposed to call this function. */ static int sctp_bindx_add(struct sock *sk, struct sockaddr *addrs, int addrcnt) { int cnt; int retval = 0; void *addr_buf; struct sockaddr *sa_addr; struct sctp_af *af; pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n", __func__, sk, addrs, addrcnt); addr_buf = addrs; for (cnt = 0; cnt < addrcnt; cnt++) { /* The list may contain either IPv4 or IPv6 address; * determine the address length for walking thru the list. 
*/ sa_addr = addr_buf; af = sctp_get_af_specific(sa_addr->sa_family); if (!af) { retval = -EINVAL; goto err_bindx_add; } retval = sctp_do_bind(sk, (union sctp_addr *)sa_addr, af->sockaddr_len); addr_buf += af->sockaddr_len; err_bindx_add: if (retval < 0) { /* Failed. Cleanup the ones that have been added */ if (cnt > 0) sctp_bindx_rem(sk, addrs, cnt); return retval; } } return retval; } /* Send an ASCONF chunk with Add IP address parameters to all the peers of the * associations that are part of the endpoint indicating that a list of local * addresses are added to the endpoint. * * If any of the addresses is already in the bind address list of the * association, we do not send the chunk for that association. But it will not * affect other associations. * * Only sctp_setsockopt_bindx() is supposed to call this function. */ static int sctp_send_asconf_add_ip(struct sock *sk, struct sockaddr *addrs, int addrcnt) { struct net *net = sock_net(sk); struct sctp_sock *sp; struct sctp_endpoint *ep; struct sctp_association *asoc; struct sctp_bind_addr *bp; struct sctp_chunk *chunk; struct sctp_sockaddr_entry *laddr; union sctp_addr *addr; union sctp_addr saveaddr; void *addr_buf; struct sctp_af *af; struct list_head *p; int i; int retval = 0; if (!net->sctp.addip_enable) return retval; sp = sctp_sk(sk); ep = sp->ep; pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n", __func__, sk, addrs, addrcnt); list_for_each_entry(asoc, &ep->asocs, asocs) { if (!asoc->peer.asconf_capable) continue; if (asoc->peer.addip_disabled_mask & SCTP_PARAM_ADD_IP) continue; if (!sctp_state(asoc, ESTABLISHED)) continue; /* Check if any address in the packed array of addresses is * in the bind address list of the association. If so, * do not send the asconf chunk to its peer, but continue with * other associations. */ addr_buf = addrs; for (i = 0; i < addrcnt; i++) { addr = addr_buf; af = sctp_get_af_specific(addr->v4.sin_family); if (!af) { retval = -EINVAL; goto out; } if (sctp_assoc_lookup_laddr(asoc, addr)) break; addr_buf += af->sockaddr_len; } if (i < addrcnt) continue; /* Use the first valid address in bind addr list of * association as Address Parameter of ASCONF CHUNK. */ bp = &asoc->base.bind_addr; p = bp->address_list.next; laddr = list_entry(p, struct sctp_sockaddr_entry, list); chunk = sctp_make_asconf_update_ip(asoc, &laddr->a, addrs, addrcnt, SCTP_PARAM_ADD_IP); if (!chunk) { retval = -ENOMEM; goto out; } /* Add the new addresses to the bind address list with * use_as_src set to 0. */ addr_buf = addrs; for (i = 0; i < addrcnt; i++) { addr = addr_buf; af = sctp_get_af_specific(addr->v4.sin_family); memcpy(&saveaddr, addr, af->sockaddr_len); retval = sctp_add_bind_addr(bp, &saveaddr, SCTP_ADDR_NEW, GFP_ATOMIC); addr_buf += af->sockaddr_len; } if (asoc->src_out_of_asoc_ok) { struct sctp_transport *trans; list_for_each_entry(trans, &asoc->peer.transport_addr_list, transports) { /* Clear the source and route cache */ dst_release(trans->dst); trans->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380)); trans->ssthresh = asoc->peer.i.a_rwnd; trans->rto = asoc->rto_initial; sctp_max_rto(asoc, trans); trans->rtt = trans->srtt = trans->rttvar = 0; sctp_transport_route(trans, NULL, sctp_sk(asoc->base.sk)); } } retval = sctp_send_asconf(asoc, chunk); } out: return retval; } /* Remove a list of addresses from bind addresses list. Do not remove the * last address. * * Basically run through each address specified in the addrs/addrcnt * array/length pair, determine if it is IPv6 or IPv4 and call * sctp_del_bind() on it. 
* * If any of them fails, then the operation will be reversed and the * ones that were removed will be added back. * * At least one address has to be left; if only one address is * available, the operation will return -EBUSY. * * Only sctp_setsockopt_bindx() is supposed to call this function. */ static int sctp_bindx_rem(struct sock *sk, struct sockaddr *addrs, int addrcnt) { struct sctp_sock *sp = sctp_sk(sk); struct sctp_endpoint *ep = sp->ep; int cnt; struct sctp_bind_addr *bp = &ep->base.bind_addr; int retval = 0; void *addr_buf; union sctp_addr *sa_addr; struct sctp_af *af; pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n", __func__, sk, addrs, addrcnt); addr_buf = addrs; for (cnt = 0; cnt < addrcnt; cnt++) { /* If the bind address list is empty or if there is only one * bind address, there is nothing more to be removed (we need * at least one address here). */ if (list_empty(&bp->address_list) || (sctp_list_single_entry(&bp->address_list))) { retval = -EBUSY; goto err_bindx_rem; } sa_addr = addr_buf; af = sctp_get_af_specific(sa_addr->sa.sa_family); if (!af) { retval = -EINVAL; goto err_bindx_rem; } if (!af->addr_valid(sa_addr, sp, NULL)) { retval = -EADDRNOTAVAIL; goto err_bindx_rem; } if (sa_addr->v4.sin_port && sa_addr->v4.sin_port != htons(bp->port)) { retval = -EINVAL; goto err_bindx_rem; } if (!sa_addr->v4.sin_port) sa_addr->v4.sin_port = htons(bp->port); /* FIXME - There is probably a need to check if sk->sk_saddr and * sk->sk_rcv_addr are currently set to one of the addresses to * be removed. This is something which needs to be looked into * when we are fixing the outstanding issues with multi-homing * socket routing and failover schemes. Refer to comments in * sctp_do_bind(). -daisy */ retval = sctp_del_bind_addr(bp, sa_addr); addr_buf += af->sockaddr_len; err_bindx_rem: if (retval < 0) { /* Failed. Add the ones that has been removed back */ if (cnt > 0) sctp_bindx_add(sk, addrs, cnt); return retval; } } return retval; } /* Send an ASCONF chunk with Delete IP address parameters to all the peers of * the associations that are part of the endpoint indicating that a list of * local addresses are removed from the endpoint. * * If any of the addresses is already in the bind address list of the * association, we do not send the chunk for that association. But it will not * affect other associations. * * Only sctp_setsockopt_bindx() is supposed to call this function. */ static int sctp_send_asconf_del_ip(struct sock *sk, struct sockaddr *addrs, int addrcnt) { struct net *net = sock_net(sk); struct sctp_sock *sp; struct sctp_endpoint *ep; struct sctp_association *asoc; struct sctp_transport *transport; struct sctp_bind_addr *bp; struct sctp_chunk *chunk; union sctp_addr *laddr; void *addr_buf; struct sctp_af *af; struct sctp_sockaddr_entry *saddr; int i; int retval = 0; int stored = 0; chunk = NULL; if (!net->sctp.addip_enable) return retval; sp = sctp_sk(sk); ep = sp->ep; pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n", __func__, sk, addrs, addrcnt); list_for_each_entry(asoc, &ep->asocs, asocs) { if (!asoc->peer.asconf_capable) continue; if (asoc->peer.addip_disabled_mask & SCTP_PARAM_DEL_IP) continue; if (!sctp_state(asoc, ESTABLISHED)) continue; /* Check if any address in the packed array of addresses is * not present in the bind address list of the association. * If so, do not send the asconf chunk to its peer, but * continue with other associations. 
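		 *
		 * (Added clarification: the loop below breaks out as soon as
		 * one requested address is not bound to this association, so
		 * the "i < addrcnt" test afterwards means at least one
		 * address was missing and the association is skipped.)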
*/ addr_buf = addrs; for (i = 0; i < addrcnt; i++) { laddr = addr_buf; af = sctp_get_af_specific(laddr->v4.sin_family); if (!af) { retval = -EINVAL; goto out; } if (!sctp_assoc_lookup_laddr(asoc, laddr)) break; addr_buf += af->sockaddr_len; } if (i < addrcnt) continue; /* Find one address in the association's bind address list * that is not in the packed array of addresses. This is to * make sure that we do not delete all the addresses in the * association. */ bp = &asoc->base.bind_addr; laddr = sctp_find_unmatch_addr(bp, (union sctp_addr *)addrs, addrcnt, sp); if ((laddr == NULL) && (addrcnt == 1)) { if (asoc->asconf_addr_del_pending) continue; asoc->asconf_addr_del_pending = kzalloc(sizeof(union sctp_addr), GFP_ATOMIC); if (asoc->asconf_addr_del_pending == NULL) { retval = -ENOMEM; goto out; } asoc->asconf_addr_del_pending->sa.sa_family = addrs->sa_family; asoc->asconf_addr_del_pending->v4.sin_port = htons(bp->port); if (addrs->sa_family == AF_INET) { struct sockaddr_in *sin; sin = (struct sockaddr_in *)addrs; asoc->asconf_addr_del_pending->v4.sin_addr.s_addr = sin->sin_addr.s_addr; } else if (addrs->sa_family == AF_INET6) { struct sockaddr_in6 *sin6; sin6 = (struct sockaddr_in6 *)addrs; asoc->asconf_addr_del_pending->v6.sin6_addr = sin6->sin6_addr; } pr_debug("%s: keep the last address asoc:%p %pISc at %p\n", __func__, asoc, &asoc->asconf_addr_del_pending->sa, asoc->asconf_addr_del_pending); asoc->src_out_of_asoc_ok = 1; stored = 1; goto skip_mkasconf; } if (laddr == NULL) return -EINVAL; /* We do not need RCU protection throughout this loop * because this is done under a socket lock from the * setsockopt call. */ chunk = sctp_make_asconf_update_ip(asoc, laddr, addrs, addrcnt, SCTP_PARAM_DEL_IP); if (!chunk) { retval = -ENOMEM; goto out; } skip_mkasconf: /* Reset use_as_src flag for the addresses in the bind address * list that are to be deleted. */ addr_buf = addrs; for (i = 0; i < addrcnt; i++) { laddr = addr_buf; af = sctp_get_af_specific(laddr->v4.sin_family); list_for_each_entry(saddr, &bp->address_list, list) { if (sctp_cmp_addr_exact(&saddr->a, laddr)) saddr->state = SCTP_ADDR_DEL; } addr_buf += af->sockaddr_len; } /* Update the route and saddr entries for all the transports * as some of the addresses in the bind address list are * about to be deleted and cannot be used as source addresses. */ list_for_each_entry(transport, &asoc->peer.transport_addr_list, transports) { dst_release(transport->dst); sctp_transport_route(transport, NULL, sctp_sk(asoc->base.sk)); } if (stored) /* We don't need to transmit ASCONF */ continue; retval = sctp_send_asconf(asoc, chunk); } out: return retval; } /* set addr events to assocs in the endpoint. ep and addr_wq must be locked */ int sctp_asconf_mgmt(struct sctp_sock *sp, struct sctp_sockaddr_entry *addrw) { struct sock *sk = sctp_opt2sk(sp); union sctp_addr *addr; struct sctp_af *af; /* It is safe to write port space in caller. */ addr = &addrw->a; addr->v4.sin_port = htons(sp->ep->base.bind_addr.port); af = sctp_get_af_specific(addr->sa.sa_family); if (!af) return -EINVAL; if (sctp_verify_addr(sk, addr, af->sockaddr_len)) return -EINVAL; if (addrw->state == SCTP_ADDR_NEW) return sctp_send_asconf_add_ip(sk, (struct sockaddr *)addr, 1); else return sctp_send_asconf_del_ip(sk, (struct sockaddr *)addr, 1); } /* Helper for tunneling sctp_bindx() requests through sctp_setsockopt() * * API 8.1 * int sctp_bindx(int sd, struct sockaddr *addrs, int addrcnt, * int flags); * * If sd is an IPv4 socket, the addresses passed must be IPv4 addresses. 
* If the sd is an IPv6 socket, the addresses passed can either be IPv4 * or IPv6 addresses. * * A single address may be specified as INADDR_ANY or IN6ADDR_ANY, see * Section 3.1.2 for this usage. * * addrs is a pointer to an array of one or more socket addresses. Each * address is contained in its appropriate structure (i.e. struct * sockaddr_in or struct sockaddr_in6) the family of the address type * must be used to distinguish the address length (note that this * representation is termed a "packed array" of addresses). The caller * specifies the number of addresses in the array with addrcnt. * * On success, sctp_bindx() returns 0. On failure, sctp_bindx() returns * -1, and sets errno to the appropriate error code. * * For SCTP, the port given in each socket address must be the same, or * sctp_bindx() will fail, setting errno to EINVAL. * * The flags parameter is formed from the bitwise OR of zero or more of * the following currently defined flags: * * SCTP_BINDX_ADD_ADDR * * SCTP_BINDX_REM_ADDR * * SCTP_BINDX_ADD_ADDR directs SCTP to add the given addresses to the * association, and SCTP_BINDX_REM_ADDR directs SCTP to remove the given * addresses from the association. The two flags are mutually exclusive; * if both are given, sctp_bindx() will fail with EINVAL. A caller may * not remove all addresses from an association; sctp_bindx() will * reject such an attempt with EINVAL. * * An application can use sctp_bindx(SCTP_BINDX_ADD_ADDR) to associate * additional addresses with an endpoint after calling bind(). Or use * sctp_bindx(SCTP_BINDX_REM_ADDR) to remove some addresses a listening * socket is associated with so that no new association accepted will be * associated with those addresses. If the endpoint supports dynamic * address a SCTP_BINDX_REM_ADDR or SCTP_BINDX_ADD_ADDR may cause a * endpoint to send the appropriate message to the peer to change the * peers address lists. * * Adding and removing addresses from a connected association is * optional functionality. Implementations that do not support this * functionality should return EOPNOTSUPP. * * Basically do nothing but copying the addresses from user to kernel * land and invoking either sctp_bindx_add() or sctp_bindx_rem() on the sk. * This is used for tunneling the sctp_bindx() request through sctp_setsockopt() * from userspace. * * We don't use copy_from_user() for optimization: we first do the * sanity checks (buffer size -fast- and access check-healthy * pointer); if all of those succeed, then we can alloc the memory * (expensive operation) needed to copy the data to kernel. Then we do * the copying without checking the user space area * (__copy_from_user()). * * On exit there is no need to do sockfd_put(), sys_setsockopt() does * it. * * sk The sk of the socket * addrs The pointer to the addresses in user land * addrssize Size of the addrs buffer * op Operation to perform (add or remove, see the flags of * sctp_bindx) * * Returns 0 if ok, <0 errno code on error. */ static int sctp_setsockopt_bindx(struct sock *sk, struct sockaddr __user *addrs, int addrs_size, int op) { struct sockaddr *kaddrs; int err; int addrcnt = 0; int walk_size = 0; struct sockaddr *sa_addr; void *addr_buf; struct sctp_af *af; pr_debug("%s: sk:%p addrs:%p addrs_size:%d opt:%d\n", __func__, sk, addrs, addrs_size, op); if (unlikely(addrs_size <= 0)) return -EINVAL; /* Check the user passed a healthy pointer. */ if (unlikely(!access_ok(VERIFY_READ, addrs, addrs_size))) return -EFAULT; /* Alloc space for the address array in kernel memory. 
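	 *
	 * (For orientation, a hedged sketch of the userspace call that is
	 * tunneled into this function by libsctp's sctp_bindx(); the
	 * addresses and port are illustrative:
	 *
	 *   struct sockaddr_in a[2] = { { .sin_family = AF_INET,
	 *                                 .sin_port   = htons(5000) },
	 *                               { .sin_family = AF_INET,
	 *                                 .sin_port   = htons(5000) } };
	 *   inet_pton(AF_INET, "192.0.2.1",    &a[0].sin_addr);
	 *   inet_pton(AF_INET, "198.51.100.1", &a[1].sin_addr);
	 *   sctp_bindx(sd, (struct sockaddr *)a, 2, SCTP_BINDX_ADD_ADDR);
	 *
	 * The kernel copy of that packed buffer is made just below.)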
*/ kaddrs = kmalloc(addrs_size, GFP_KERNEL); if (unlikely(!kaddrs)) return -ENOMEM; if (__copy_from_user(kaddrs, addrs, addrs_size)) { kfree(kaddrs); return -EFAULT; } /* Walk through the addrs buffer and count the number of addresses. */ addr_buf = kaddrs; while (walk_size < addrs_size) { if (walk_size + sizeof(sa_family_t) > addrs_size) { kfree(kaddrs); return -EINVAL; } sa_addr = addr_buf; af = sctp_get_af_specific(sa_addr->sa_family); /* If the address family is not supported or if this address * causes the address buffer to overflow return EINVAL. */ if (!af || (walk_size + af->sockaddr_len) > addrs_size) { kfree(kaddrs); return -EINVAL; } addrcnt++; addr_buf += af->sockaddr_len; walk_size += af->sockaddr_len; } /* Do the work. */ switch (op) { case SCTP_BINDX_ADD_ADDR: err = sctp_bindx_add(sk, kaddrs, addrcnt); if (err) goto out; err = sctp_send_asconf_add_ip(sk, kaddrs, addrcnt); break; case SCTP_BINDX_REM_ADDR: err = sctp_bindx_rem(sk, kaddrs, addrcnt); if (err) goto out; err = sctp_send_asconf_del_ip(sk, kaddrs, addrcnt); break; default: err = -EINVAL; break; } out: kfree(kaddrs); return err; } /* __sctp_connect(struct sock* sk, struct sockaddr *kaddrs, int addrs_size) * * Common routine for handling connect() and sctp_connectx(). * Connect will come in with just a single address. */ static int __sctp_connect(struct sock *sk, struct sockaddr *kaddrs, int addrs_size, sctp_assoc_t *assoc_id) { struct net *net = sock_net(sk); struct sctp_sock *sp; struct sctp_endpoint *ep; struct sctp_association *asoc = NULL; struct sctp_association *asoc2; struct sctp_transport *transport; union sctp_addr to; sctp_scope_t scope; long timeo; int err = 0; int addrcnt = 0; int walk_size = 0; union sctp_addr *sa_addr = NULL; void *addr_buf; unsigned short port; unsigned int f_flags = 0; sp = sctp_sk(sk); ep = sp->ep; /* connect() cannot be done on a socket that is already in ESTABLISHED * state - UDP-style peeled off socket or a TCP-style socket that * is already connected. * It cannot be done even on a TCP-style listening socket. */ if (sctp_sstate(sk, ESTABLISHED) || (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))) { err = -EISCONN; goto out_free; } /* Walk through the addrs buffer and count the number of addresses. */ addr_buf = kaddrs; while (walk_size < addrs_size) { struct sctp_af *af; if (walk_size + sizeof(sa_family_t) > addrs_size) { err = -EINVAL; goto out_free; } sa_addr = addr_buf; af = sctp_get_af_specific(sa_addr->sa.sa_family); /* If the address family is not supported or if this address * causes the address buffer to overflow return EINVAL. */ if (!af || (walk_size + af->sockaddr_len) > addrs_size) { err = -EINVAL; goto out_free; } port = ntohs(sa_addr->v4.sin_port); /* Save current address so we can work with it */ memcpy(&to, sa_addr, af->sockaddr_len); err = sctp_verify_addr(sk, &to, af->sockaddr_len); if (err) goto out_free; /* Make sure the destination port is correctly set * in all addresses. */ if (asoc && asoc->peer.port && asoc->peer.port != port) { err = -EINVAL; goto out_free; } /* Check if there already is a matching association on the * endpoint (other than the one created here). */ asoc2 = sctp_endpoint_lookup_assoc(ep, &to, &transport); if (asoc2 && asoc2 != asoc) { if (asoc2->state >= SCTP_STATE_ESTABLISHED) err = -EISCONN; else err = -EALREADY; goto out_free; } /* If we could not find a matching association on the endpoint, * make sure that there is no peeled-off association matching * the peer address even on another socket. 
*/ if (sctp_endpoint_is_peeled_off(ep, &to)) { err = -EADDRNOTAVAIL; goto out_free; } if (!asoc) { /* If a bind() or sctp_bindx() is not called prior to * an sctp_connectx() call, the system picks an * ephemeral port and will choose an address set * equivalent to binding with a wildcard address. */ if (!ep->base.bind_addr.port) { if (sctp_autobind(sk)) { err = -EAGAIN; goto out_free; } } else { /* * If an unprivileged user inherits a 1-many * style socket with open associations on a * privileged port, it MAY be permitted to * accept new associations, but it SHOULD NOT * be permitted to open new associations. */ if (ep->base.bind_addr.port < PROT_SOCK && !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) { err = -EACCES; goto out_free; } } scope = sctp_scope(&to); asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL); if (!asoc) { err = -ENOMEM; goto out_free; } err = sctp_assoc_set_bind_addr_from_ep(asoc, scope, GFP_KERNEL); if (err < 0) { goto out_free; } } /* Prime the peer's transport structures. */ transport = sctp_assoc_add_peer(asoc, &to, GFP_KERNEL, SCTP_UNKNOWN); if (!transport) { err = -ENOMEM; goto out_free; } addrcnt++; addr_buf += af->sockaddr_len; walk_size += af->sockaddr_len; } /* In case the user of sctp_connectx() wants an association * id back, assign one now. */ if (assoc_id) { err = sctp_assoc_set_id(asoc, GFP_KERNEL); if (err < 0) goto out_free; } err = sctp_primitive_ASSOCIATE(net, asoc, NULL); if (err < 0) { goto out_free; } /* Initialize sk's dport and daddr for getpeername() */ inet_sk(sk)->inet_dport = htons(asoc->peer.port); sp->pf->to_sk_daddr(sa_addr, sk); sk->sk_err = 0; /* in-kernel sockets don't generally have a file allocated to them * if all they do is call sock_create_kern(). */ if (sk->sk_socket->file) f_flags = sk->sk_socket->file->f_flags; timeo = sock_sndtimeo(sk, f_flags & O_NONBLOCK); err = sctp_wait_for_connect(asoc, &timeo); if ((err == 0 || err == -EINPROGRESS) && assoc_id) *assoc_id = asoc->assoc_id; /* Don't free association on exit. */ asoc = NULL; out_free: pr_debug("%s: took out_free path with asoc:%p kaddrs:%p err:%d\n", __func__, asoc, kaddrs, err); if (asoc) { /* sctp_primitive_ASSOCIATE may have added this association * To the hash table, try to unhash it, just in case, its a noop * if it wasn't hashed so we're safe */ sctp_unhash_established(asoc); sctp_association_free(asoc); } return err; } /* Helper for tunneling sctp_connectx() requests through sctp_setsockopt() * * API 8.9 * int sctp_connectx(int sd, struct sockaddr *addrs, int addrcnt, * sctp_assoc_t *asoc); * * If sd is an IPv4 socket, the addresses passed must be IPv4 addresses. * If the sd is an IPv6 socket, the addresses passed can either be IPv4 * or IPv6 addresses. * * A single address may be specified as INADDR_ANY or IN6ADDR_ANY, see * Section 3.1.2 for this usage. * * addrs is a pointer to an array of one or more socket addresses. Each * address is contained in its appropriate structure (i.e. struct * sockaddr_in or struct sockaddr_in6) the family of the address type * must be used to distengish the address length (note that this * representation is termed a "packed array" of addresses). The caller * specifies the number of addresses in the array with addrcnt. * * On success, sctp_connectx() returns 0. It also sets the assoc_id to * the association id of the new association. On failure, sctp_connectx() * returns -1, and sets errno to the appropriate error code. The assoc_id * is not touched by the kernel. 
 *
 * For SCTP, the port given in each socket address must be the same, or
 * sctp_connectx() will fail, setting errno to EINVAL.
 *
 * An application can use sctp_connectx() to initiate an association with
 * an endpoint that is multi-homed.  Much like sctp_bindx() this call
 * allows a caller to specify multiple addresses at which a peer can be
 * reached.  The way the SCTP stack uses the list of addresses to set up
 * the association is implementation dependent.  This function only
 * specifies that the stack will try to make use of all the addresses in
 * the list when needed.
 *
 * Note that the list of addresses passed in is only used for setting up
 * the association.  It does not necessarily equal the set of addresses
 * the peer uses for the resulting association.  If the caller wants to
 * find out the set of peer addresses, it must use sctp_getpaddrs() to
 * retrieve them after the association has been set up.
 *
 * Basically do nothing but copy the addresses from user to kernel land
 * and invoke __sctp_connect().  This is used for tunneling the
 * sctp_connectx() request through sctp_setsockopt() from userspace.
 *
 * We don't use copy_from_user() for optimization: we first do the
 * sanity checks (buffer size, which is fast, and an access check for a
 * healthy pointer); if all of those succeed, then we can alloc the
 * memory (expensive operation) needed to copy the data to kernel.  Then
 * we do the copying without checking the user space area
 * (__copy_from_user()).
 *
 * On exit there is no need to do sockfd_put(), sys_setsockopt() does
 * it.
 *
 * sk          The sk of the socket
 * addrs       The pointer to the addresses in user land
 * addrssize   Size of the addrs buffer
 *
 * Returns >=0 if ok, <0 errno code on error.
 */
static int __sctp_setsockopt_connectx(struct sock *sk,
				      struct sockaddr __user *addrs,
				      int addrs_size,
				      sctp_assoc_t *assoc_id)
{
	int err = 0;
	struct sockaddr *kaddrs;

	pr_debug("%s: sk:%p addrs:%p addrs_size:%d\n",
		 __func__, sk, addrs, addrs_size);

	if (unlikely(addrs_size <= 0))
		return -EINVAL;

	/* Check the user passed a healthy pointer. */
	if (unlikely(!access_ok(VERIFY_READ, addrs, addrs_size)))
		return -EFAULT;

	/* Alloc space for the address array in kernel memory. */
	kaddrs = kmalloc(addrs_size, GFP_KERNEL);
	if (unlikely(!kaddrs))
		return -ENOMEM;

	if (__copy_from_user(kaddrs, addrs, addrs_size)) {
		err = -EFAULT;
	} else {
		err = __sctp_connect(sk, kaddrs, addrs_size, assoc_id);
	}

	kfree(kaddrs);

	return err;
}

/*
 * This is an older interface.  It's kept for backward compatibility
 * with the option that doesn't provide an association id.
 */
static int sctp_setsockopt_connectx_old(struct sock *sk,
					struct sockaddr __user *addrs,
					int addrs_size)
{
	return __sctp_setsockopt_connectx(sk, addrs, addrs_size, NULL);
}

/*
 * New interface for the API.  Since the API is done with a socket
 * option, to make it simple we feed back the association id as a
 * return indication to the call.  Error is always negative and
 * association id is always positive.
 */
static int sctp_setsockopt_connectx(struct sock *sk,
				    struct sockaddr __user *addrs,
				    int addrs_size)
{
	sctp_assoc_t assoc_id = 0;
	int err = 0;

	err = __sctp_setsockopt_connectx(sk, addrs, addrs_size, &assoc_id);

	if (err)
		return err;
	else
		return assoc_id;
}

/*
 * New (hopefully final) interface for the API.
 * We use the sctp_getaddrs_old structure so that the user-space library
 * can avoid any unnecessary allocations.  The only difference is that
 * we store the actual length of the address buffer into the addrs_num
 * structure member.  That way we can re-use the existing code.
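 *
 * A hedged userspace sketch of the connectx call these entry points
 * serve (libsctp decides which of the old/new option codes to use and
 * packs the peer addresses the same way as sctp_bindx(); the peer list
 * is illustrative):
 *
 *   struct sockaddr_in peers[2];          // filled in as a packed array
 *   sctp_assoc_t id;
 *   if (sctp_connectx(sd, (struct sockaddr *)peers, 2, &id) == 0)
 *           printf("association %d set up\n", id);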
*/ #ifdef CONFIG_COMPAT struct compat_sctp_getaddrs_old { sctp_assoc_t assoc_id; s32 addr_num; compat_uptr_t addrs; /* struct sockaddr * */ }; #endif static int sctp_getsockopt_connectx3(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_getaddrs_old param; sctp_assoc_t assoc_id = 0; int err = 0; #ifdef CONFIG_COMPAT if (is_compat_task()) { struct compat_sctp_getaddrs_old param32; if (len < sizeof(param32)) return -EINVAL; if (copy_from_user(&param32, optval, sizeof(param32))) return -EFAULT; param.assoc_id = param32.assoc_id; param.addr_num = param32.addr_num; param.addrs = compat_ptr(param32.addrs); } else #endif { if (len < sizeof(param)) return -EINVAL; if (copy_from_user(&param, optval, sizeof(param))) return -EFAULT; } err = __sctp_setsockopt_connectx(sk, (struct sockaddr __user *) param.addrs, param.addr_num, &assoc_id); if (err == 0 || err == -EINPROGRESS) { if (copy_to_user(optval, &assoc_id, sizeof(assoc_id))) return -EFAULT; if (put_user(sizeof(assoc_id), optlen)) return -EFAULT; } return err; } /* API 3.1.4 close() - UDP Style Syntax * Applications use close() to perform graceful shutdown (as described in * Section 10.1 of [SCTP]) on ALL the associations currently represented * by a UDP-style socket. * * The syntax is * * ret = close(int sd); * * sd - the socket descriptor of the associations to be closed. * * To gracefully shutdown a specific association represented by the * UDP-style socket, an application should use the sendmsg() call, * passing no user data, but including the appropriate flag in the * ancillary data (see Section xxxx). * * If sd in the close() call is a branched-off socket representing only * one association, the shutdown is performed on that association only. * * 4.1.6 close() - TCP Style Syntax * * Applications use close() to gracefully close down an association. * * The syntax is: * * int close(int sd); * * sd - the socket descriptor of the association to be closed. * * After an application calls close() on a socket descriptor, no further * socket operations will succeed on that descriptor. * * API 7.1.4 SO_LINGER * * An application using the TCP-style socket can use this option to * perform the SCTP ABORT primitive. The linger option structure is: * * struct linger { * int l_onoff; // option on/off * int l_linger; // linger time * }; * * To enable the option, set l_onoff to 1. If the l_linger value is set * to 0, calling close() is the same as the ABORT primitive. If the * value is set to a negative value, the setsockopt() call will return * an error. If the value is set to a positive value linger_time, the * close() can be blocked for at most linger_time ms. If the graceful * shutdown phase does not finish during this period, close() will * return but the graceful shutdown phase continues in the system. */ static void sctp_close(struct sock *sk, long timeout) { struct net *net = sock_net(sk); struct sctp_endpoint *ep; struct sctp_association *asoc; struct list_head *pos, *temp; unsigned int data_was_unread; pr_debug("%s: sk:%p, timeout:%ld\n", __func__, sk, timeout); lock_sock(sk); sk->sk_shutdown = SHUTDOWN_MASK; sk->sk_state = SCTP_SS_CLOSING; ep = sctp_sk(sk)->ep; /* Clean up any skbs sitting on the receive queue. */ data_was_unread = sctp_queue_purge_ulpevents(&sk->sk_receive_queue); data_was_unread += sctp_queue_purge_ulpevents(&sctp_sk(sk)->pd_lobby); /* Walk all associations on an endpoint. 
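	 *
	 * For each association the branch below sends either an ABORT or
	 * a SHUTDOWN.  A hedged userspace sketch of forcing the ABORT
	 * path through the SO_LINGER behaviour documented above (values
	 * illustrative):
	 *
	 *   struct linger l = { .l_onoff = 1, .l_linger = 0 };
	 *   setsockopt(sd, SOL_SOCKET, SO_LINGER, &l, sizeof(l));
	 *   close(sd);    // close() now acts like the ABORT primitive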
*/ list_for_each_safe(pos, temp, &ep->asocs) { asoc = list_entry(pos, struct sctp_association, asocs); if (sctp_style(sk, TCP)) { /* A closed association can still be in the list if * it belongs to a TCP-style listening socket that is * not yet accepted. If so, free it. If not, send an * ABORT or SHUTDOWN based on the linger options. */ if (sctp_state(asoc, CLOSED)) { sctp_unhash_established(asoc); sctp_association_free(asoc); continue; } } if (data_was_unread || !skb_queue_empty(&asoc->ulpq.lobby) || !skb_queue_empty(&asoc->ulpq.reasm) || (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime)) { struct sctp_chunk *chunk; chunk = sctp_make_abort_user(asoc, NULL, 0); if (chunk) sctp_primitive_ABORT(net, asoc, chunk); } else sctp_primitive_SHUTDOWN(net, asoc, NULL); } /* On a TCP-style socket, block for at most linger_time if set. */ if (sctp_style(sk, TCP) && timeout) sctp_wait_for_close(sk, timeout); /* This will run the backlog queue. */ release_sock(sk); /* Supposedly, no process has access to the socket, but * the net layers still may. */ local_bh_disable(); bh_lock_sock(sk); /* Hold the sock, since sk_common_release() will put sock_put() * and we have just a little more cleanup. */ sock_hold(sk); sk_common_release(sk); bh_unlock_sock(sk); local_bh_enable(); sock_put(sk); SCTP_DBG_OBJCNT_DEC(sock); } /* Handle EPIPE error. */ static int sctp_error(struct sock *sk, int flags, int err) { if (err == -EPIPE) err = sock_error(sk) ? : -EPIPE; if (err == -EPIPE && !(flags & MSG_NOSIGNAL)) send_sig(SIGPIPE, current, 0); return err; } /* API 3.1.3 sendmsg() - UDP Style Syntax * * An application uses sendmsg() and recvmsg() calls to transmit data to * and receive data from its peer. * * ssize_t sendmsg(int socket, const struct msghdr *message, * int flags); * * socket - the socket descriptor of the endpoint. * message - pointer to the msghdr structure which contains a single * user message and possibly some ancillary data. * * See Section 5 for complete description of the data * structures. * * flags - flags sent or received with the user message, see Section * 5 for complete description of the flags. * * Note: This function could use a rewrite especially when explicit * connect support comes in. */ /* BUG: We do not implement the equivalent of sk_stream_wait_memory(). */ static int sctp_msghdr_parse(const struct msghdr *, sctp_cmsgs_t *); static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len) { struct net *net = sock_net(sk); struct sctp_sock *sp; struct sctp_endpoint *ep; struct sctp_association *new_asoc = NULL, *asoc = NULL; struct sctp_transport *transport, *chunk_tp; struct sctp_chunk *chunk; union sctp_addr to; struct sockaddr *msg_name = NULL; struct sctp_sndrcvinfo default_sinfo; struct sctp_sndrcvinfo *sinfo; struct sctp_initmsg *sinit; sctp_assoc_t associd = 0; sctp_cmsgs_t cmsgs = { NULL }; sctp_scope_t scope; bool fill_sinfo_ttl = false, wait_connect = false; struct sctp_datamsg *datamsg; int msg_flags = msg->msg_flags; __u16 sinfo_flags = 0; long timeo; int err; err = 0; sp = sctp_sk(sk); ep = sp->ep; pr_debug("%s: sk:%p, msg:%p, msg_len:%zu ep:%p\n", __func__, sk, msg, msg_len, ep); /* We cannot send a message over a TCP-style listening socket. */ if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)) { err = -EPIPE; goto out_nounlock; } /* Parse out the SCTP CMSGs. */ err = sctp_msghdr_parse(msg, &cmsgs); if (err) { pr_debug("%s: msghdr parse err:%x\n", __func__, err); goto out_nounlock; } /* Fetch the destination address for this packet. 
This * address only selects the association--it is not necessarily * the address we will send to. * For a peeled-off socket, msg_name is ignored. */ if (!sctp_style(sk, UDP_HIGH_BANDWIDTH) && msg->msg_name) { int msg_namelen = msg->msg_namelen; err = sctp_verify_addr(sk, (union sctp_addr *)msg->msg_name, msg_namelen); if (err) return err; if (msg_namelen > sizeof(to)) msg_namelen = sizeof(to); memcpy(&to, msg->msg_name, msg_namelen); msg_name = msg->msg_name; } sinit = cmsgs.init; if (cmsgs.sinfo != NULL) { memset(&default_sinfo, 0, sizeof(default_sinfo)); default_sinfo.sinfo_stream = cmsgs.sinfo->snd_sid; default_sinfo.sinfo_flags = cmsgs.sinfo->snd_flags; default_sinfo.sinfo_ppid = cmsgs.sinfo->snd_ppid; default_sinfo.sinfo_context = cmsgs.sinfo->snd_context; default_sinfo.sinfo_assoc_id = cmsgs.sinfo->snd_assoc_id; sinfo = &default_sinfo; fill_sinfo_ttl = true; } else { sinfo = cmsgs.srinfo; } /* Did the user specify SNDINFO/SNDRCVINFO? */ if (sinfo) { sinfo_flags = sinfo->sinfo_flags; associd = sinfo->sinfo_assoc_id; } pr_debug("%s: msg_len:%zu, sinfo_flags:0x%x\n", __func__, msg_len, sinfo_flags); /* SCTP_EOF or SCTP_ABORT cannot be set on a TCP-style socket. */ if (sctp_style(sk, TCP) && (sinfo_flags & (SCTP_EOF | SCTP_ABORT))) { err = -EINVAL; goto out_nounlock; } /* If SCTP_EOF is set, no data can be sent. Disallow sending zero * length messages when SCTP_EOF|SCTP_ABORT is not set. * If SCTP_ABORT is set, the message length could be non zero with * the msg_iov set to the user abort reason. */ if (((sinfo_flags & SCTP_EOF) && (msg_len > 0)) || (!(sinfo_flags & (SCTP_EOF|SCTP_ABORT)) && (msg_len == 0))) { err = -EINVAL; goto out_nounlock; } /* If SCTP_ADDR_OVER is set, there must be an address * specified in msg_name. */ if ((sinfo_flags & SCTP_ADDR_OVER) && (!msg->msg_name)) { err = -EINVAL; goto out_nounlock; } transport = NULL; pr_debug("%s: about to look up association\n", __func__); lock_sock(sk); /* If a msg_name has been specified, assume this is to be used. */ if (msg_name) { /* Look for a matching association on the endpoint. */ asoc = sctp_endpoint_lookup_assoc(ep, &to, &transport); if (!asoc) { /* If we could not find a matching association on the * endpoint, make sure that it is not a TCP-style * socket that already has an association or there is * no peeled-off association on another socket. */ if ((sctp_style(sk, TCP) && sctp_sstate(sk, ESTABLISHED)) || sctp_endpoint_is_peeled_off(ep, &to)) { err = -EADDRNOTAVAIL; goto out_unlock; } } } else { asoc = sctp_id2assoc(sk, associd); if (!asoc) { err = -EPIPE; goto out_unlock; } } if (asoc) { pr_debug("%s: just looked up association:%p\n", __func__, asoc); /* We cannot send a message on a TCP-style SCTP_SS_ESTABLISHED * socket that has an association in CLOSED state. This can * happen when an accepted socket has an association that is * already CLOSED. */ if (sctp_state(asoc, CLOSED) && sctp_style(sk, TCP)) { err = -EPIPE; goto out_unlock; } if (sinfo_flags & SCTP_EOF) { pr_debug("%s: shutting down association:%p\n", __func__, asoc); sctp_primitive_SHUTDOWN(net, asoc, NULL); err = 0; goto out_unlock; } if (sinfo_flags & SCTP_ABORT) { chunk = sctp_make_abort_user(asoc, msg, msg_len); if (!chunk) { err = -ENOMEM; goto out_unlock; } pr_debug("%s: aborting association:%p\n", __func__, asoc); sctp_primitive_ABORT(net, asoc, chunk); err = 0; goto out_unlock; } } /* Do we need to create the association? 
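	 *
	 * On a UDP-style socket this is the implicit-setup path: the first
	 * send towards a new peer creates the association, with stream
	 * counts taken from the SCTP_INITMSG socket option or from an
	 * SCTP_INIT ancillary message.  A hedged userspace sketch using
	 * the lksctp sctp_sendmsg() helper (values illustrative):
	 *
	 *   struct sockaddr_in to = { .sin_family = AF_INET,
	 *                             .sin_port   = htons(5000) };
	 *   inet_pton(AF_INET, "192.0.2.1", &to.sin_addr);
	 *   sctp_sendmsg(sd, buf, len, (struct sockaddr *)&to, sizeof(to),
	 *                0, 0, 1, 0, 0);  // ppid, flags, stream 1, ttl, ctx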
*/ if (!asoc) { pr_debug("%s: there is no association yet\n", __func__); if (sinfo_flags & (SCTP_EOF | SCTP_ABORT)) { err = -EINVAL; goto out_unlock; } /* Check for invalid stream against the stream counts, * either the default or the user specified stream counts. */ if (sinfo) { if (!sinit || !sinit->sinit_num_ostreams) { /* Check against the defaults. */ if (sinfo->sinfo_stream >= sp->initmsg.sinit_num_ostreams) { err = -EINVAL; goto out_unlock; } } else { /* Check against the requested. */ if (sinfo->sinfo_stream >= sinit->sinit_num_ostreams) { err = -EINVAL; goto out_unlock; } } } /* * API 3.1.2 bind() - UDP Style Syntax * If a bind() or sctp_bindx() is not called prior to a * sendmsg() call that initiates a new association, the * system picks an ephemeral port and will choose an address * set equivalent to binding with a wildcard address. */ if (!ep->base.bind_addr.port) { if (sctp_autobind(sk)) { err = -EAGAIN; goto out_unlock; } } else { /* * If an unprivileged user inherits a one-to-many * style socket with open associations on a privileged * port, it MAY be permitted to accept new associations, * but it SHOULD NOT be permitted to open new * associations. */ if (ep->base.bind_addr.port < PROT_SOCK && !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) { err = -EACCES; goto out_unlock; } } scope = sctp_scope(&to); new_asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL); if (!new_asoc) { err = -ENOMEM; goto out_unlock; } asoc = new_asoc; err = sctp_assoc_set_bind_addr_from_ep(asoc, scope, GFP_KERNEL); if (err < 0) { err = -ENOMEM; goto out_free; } /* If the SCTP_INIT ancillary data is specified, set all * the association init values accordingly. */ if (sinit) { if (sinit->sinit_num_ostreams) { asoc->c.sinit_num_ostreams = sinit->sinit_num_ostreams; } if (sinit->sinit_max_instreams) { asoc->c.sinit_max_instreams = sinit->sinit_max_instreams; } if (sinit->sinit_max_attempts) { asoc->max_init_attempts = sinit->sinit_max_attempts; } if (sinit->sinit_max_init_timeo) { asoc->max_init_timeo = msecs_to_jiffies(sinit->sinit_max_init_timeo); } } /* Prime the peer's transport structures. */ transport = sctp_assoc_add_peer(asoc, &to, GFP_KERNEL, SCTP_UNKNOWN); if (!transport) { err = -ENOMEM; goto out_free; } } /* ASSERT: we have a valid association at this point. */ pr_debug("%s: we have a valid association\n", __func__); if (!sinfo) { /* If the user didn't specify SNDINFO/SNDRCVINFO, make up * one with some defaults. */ memset(&default_sinfo, 0, sizeof(default_sinfo)); default_sinfo.sinfo_stream = asoc->default_stream; default_sinfo.sinfo_flags = asoc->default_flags; default_sinfo.sinfo_ppid = asoc->default_ppid; default_sinfo.sinfo_context = asoc->default_context; default_sinfo.sinfo_timetolive = asoc->default_timetolive; default_sinfo.sinfo_assoc_id = sctp_assoc2id(asoc); sinfo = &default_sinfo; } else if (fill_sinfo_ttl) { /* In case SNDINFO was specified, we still need to fill * it with a default ttl from the assoc here. */ sinfo->sinfo_timetolive = asoc->default_timetolive; } /* API 7.1.7, the sndbuf size per association bounds the * maximum size of data that can be sent in a single send call. */ if (msg_len > sk->sk_sndbuf) { err = -EMSGSIZE; goto out_free; } if (asoc->pmtu_pending) sctp_assoc_pending_pmtu(sk, asoc); /* If fragmentation is disabled and the message length exceeds the * association fragmentation point, return EMSGSIZE. The I-D * does not specify what this error is, but this looks like * a great fit. 
*/ if (sctp_sk(sk)->disable_fragments && (msg_len > asoc->frag_point)) { err = -EMSGSIZE; goto out_free; } /* Check for invalid stream. */ if (sinfo->sinfo_stream >= asoc->c.sinit_num_ostreams) { err = -EINVAL; goto out_free; } timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); if (!sctp_wspace(asoc)) { err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len); if (err) goto out_free; } /* If an address is passed with the sendto/sendmsg call, it is used * to override the primary destination address in the TCP model, or * when SCTP_ADDR_OVER flag is set in the UDP model. */ if ((sctp_style(sk, TCP) && msg_name) || (sinfo_flags & SCTP_ADDR_OVER)) { chunk_tp = sctp_assoc_lookup_paddr(asoc, &to); if (!chunk_tp) { err = -EINVAL; goto out_free; } } else chunk_tp = NULL; /* Auto-connect, if we aren't connected already. */ if (sctp_state(asoc, CLOSED)) { err = sctp_primitive_ASSOCIATE(net, asoc, NULL); if (err < 0) goto out_free; wait_connect = true; pr_debug("%s: we associated primitively\n", __func__); } /* Break the message into multiple chunks of maximum size. */ datamsg = sctp_datamsg_from_user(asoc, sinfo, &msg->msg_iter); if (IS_ERR(datamsg)) { err = PTR_ERR(datamsg); goto out_free; } /* Now send the (possibly) fragmented message. */ list_for_each_entry(chunk, &datamsg->chunks, frag_list) { sctp_chunk_hold(chunk); /* Do accounting for the write space. */ sctp_set_owner_w(chunk); chunk->transport = chunk_tp; } /* Send it to the lower layers. Note: all chunks * must either fail or succeed. The lower layer * works that way today. Keep it that way or this * breaks. */ err = sctp_primitive_SEND(net, asoc, datamsg); /* Did the lower layer accept the chunk? */ if (err) { sctp_datamsg_free(datamsg); goto out_free; } pr_debug("%s: we sent primitively\n", __func__); sctp_datamsg_put(datamsg); err = msg_len; if (unlikely(wait_connect)) { timeo = sock_sndtimeo(sk, msg_flags & MSG_DONTWAIT); sctp_wait_for_connect(asoc, &timeo); } /* If we are already past ASSOCIATE, the lower * layers are responsible for association cleanup. */ goto out_unlock; out_free: if (new_asoc) { sctp_unhash_established(asoc); sctp_association_free(asoc); } out_unlock: release_sock(sk); out_nounlock: return sctp_error(sk, msg_flags, err); #if 0 do_sock_err: if (msg_len) err = msg_len; else err = sock_error(sk); goto out; do_interrupted: if (msg_len) err = msg_len; goto out; #endif /* 0 */ } /* This is an extended version of skb_pull() that removes the data from the * start of a skb even when data is spread across the list of skb's in the * frag_list. len specifies the total amount of data that needs to be removed. * when 'len' bytes could be removed from the skb, it returns 0. * If 'len' exceeds the total skb length, it returns the no. of bytes that * could not be removed. */ static int sctp_skb_pull(struct sk_buff *skb, int len) { struct sk_buff *list; int skb_len = skb_headlen(skb); int rlen; if (len <= skb_len) { __skb_pull(skb, len); return 0; } len -= skb_len; __skb_pull(skb, skb_len); skb_walk_frags(skb, list) { rlen = sctp_skb_pull(list, len); skb->len -= (len-rlen); skb->data_len -= (len-rlen); if (!rlen) return 0; len = rlen; } return len; } /* API 3.1.3 recvmsg() - UDP Style Syntax * * ssize_t recvmsg(int socket, struct msghdr *message, * int flags); * * socket - the socket descriptor of the endpoint. * message - pointer to the msghdr structure which contains a single * user message and possibly some ancillary data. * * See Section 5 for complete description of the data * structures. 
* * flags - flags sent or received with the user message, see Section * 5 for complete description of the flags. */ static int sctp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock, int flags, int *addr_len) { struct sctp_ulpevent *event = NULL; struct sctp_sock *sp = sctp_sk(sk); struct sk_buff *skb; int copied; int err = 0; int skb_len; pr_debug("%s: sk:%p, msghdr:%p, len:%zd, noblock:%d, flags:0x%x, " "addr_len:%p)\n", __func__, sk, msg, len, noblock, flags, addr_len); lock_sock(sk); if (sctp_style(sk, TCP) && !sctp_sstate(sk, ESTABLISHED)) { err = -ENOTCONN; goto out; } skb = sctp_skb_recv_datagram(sk, flags, noblock, &err); if (!skb) goto out; /* Get the total length of the skb including any skb's in the * frag_list. */ skb_len = skb->len; copied = skb_len; if (copied > len) copied = len; err = skb_copy_datagram_msg(skb, 0, msg, copied); event = sctp_skb2event(skb); if (err) goto out_free; sock_recv_ts_and_drops(msg, sk, skb); if (sctp_ulpevent_is_notification(event)) { msg->msg_flags |= MSG_NOTIFICATION; sp->pf->event_msgname(event, msg->msg_name, addr_len); } else { sp->pf->skb_msgname(skb, msg->msg_name, addr_len); } /* Check if we allow SCTP_NXTINFO. */ if (sp->recvnxtinfo) sctp_ulpevent_read_nxtinfo(event, msg, sk); /* Check if we allow SCTP_RCVINFO. */ if (sp->recvrcvinfo) sctp_ulpevent_read_rcvinfo(event, msg); /* Check if we allow SCTP_SNDRCVINFO. */ if (sp->subscribe.sctp_data_io_event) sctp_ulpevent_read_sndrcvinfo(event, msg); #if 0 /* FIXME: we should be calling IP/IPv6 layers. */ if (sk->sk_protinfo.af_inet.cmsg_flags) ip_cmsg_recv(msg, skb); #endif err = copied; /* If skb's length exceeds the user's buffer, update the skb and * push it back to the receive_queue so that the next call to * recvmsg() will return the remaining data. Don't set MSG_EOR. */ if (skb_len > copied) { msg->msg_flags &= ~MSG_EOR; if (flags & MSG_PEEK) goto out_free; sctp_skb_pull(skb, copied); skb_queue_head(&sk->sk_receive_queue, skb); /* When only partial message is copied to the user, increase * rwnd by that amount. If all the data in the skb is read, * rwnd is updated when the event is freed. */ if (!sctp_ulpevent_is_notification(event)) sctp_assoc_rwnd_increase(event->asoc, copied); goto out; } else if ((event->msg_flags & MSG_NOTIFICATION) || (event->msg_flags & MSG_EOR)) msg->msg_flags |= MSG_EOR; else msg->msg_flags &= ~MSG_EOR; out_free: if (flags & MSG_PEEK) { /* Release the skb reference acquired after peeking the skb in * sctp_skb_recv_datagram(). */ kfree_skb(skb); } else { /* Free the event which includes releasing the reference to * the owner of the skb, freeing the skb and updating the * rwnd. */ sctp_ulpevent_free(event); } out: release_sock(sk); return err; } /* 7.1.12 Enable/Disable message fragmentation (SCTP_DISABLE_FRAGMENTS) * * This option is a on/off flag. If enabled no SCTP message * fragmentation will be performed. Instead if a message being sent * exceeds the current PMTU size, the message will NOT be sent and * instead a error will be indicated to the user. */ static int sctp_setsockopt_disable_fragments(struct sock *sk, char __user *optval, unsigned int optlen) { int val; if (optlen < sizeof(int)) return -EINVAL; if (get_user(val, (int __user *)optval)) return -EFAULT; sctp_sk(sk)->disable_fragments = (val == 0) ? 
0 : 1; return 0; } static int sctp_setsockopt_events(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_association *asoc; struct sctp_ulpevent *event; if (optlen > sizeof(struct sctp_event_subscribe)) return -EINVAL; if (copy_from_user(&sctp_sk(sk)->subscribe, optval, optlen)) return -EFAULT; if (sctp_sk(sk)->subscribe.sctp_data_io_event) pr_warn_ratelimited(DEPRECATED "%s (pid %d) " "Requested SCTP_SNDRCVINFO event.\n" "Use SCTP_RCVINFO through SCTP_RECVRCVINFO option instead.\n", current->comm, task_pid_nr(current)); /* At the time when a user app subscribes to SCTP_SENDER_DRY_EVENT, * if there is no data to be sent or retransmit, the stack will * immediately send up this notification. */ if (sctp_ulpevent_type_enabled(SCTP_SENDER_DRY_EVENT, &sctp_sk(sk)->subscribe)) { asoc = sctp_id2assoc(sk, 0); if (asoc && sctp_outq_is_empty(&asoc->outqueue)) { event = sctp_ulpevent_make_sender_dry_event(asoc, GFP_ATOMIC); if (!event) return -ENOMEM; sctp_ulpq_tail_event(&asoc->ulpq, event); } } return 0; } /* 7.1.8 Automatic Close of associations (SCTP_AUTOCLOSE) * * This socket option is applicable to the UDP-style socket only. When * set it will cause associations that are idle for more than the * specified number of seconds to automatically close. An association * being idle is defined an association that has NOT sent or received * user data. The special value of '0' indicates that no automatic * close of any associations should be performed. The option expects an * integer defining the number of seconds of idle time before an * association is closed. */ static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_sock *sp = sctp_sk(sk); struct net *net = sock_net(sk); /* Applicable to UDP-style socket only */ if (sctp_style(sk, TCP)) return -EOPNOTSUPP; if (optlen != sizeof(int)) return -EINVAL; if (copy_from_user(&sp->autoclose, optval, optlen)) return -EFAULT; if (sp->autoclose > net->sctp.max_autoclose) sp->autoclose = net->sctp.max_autoclose; return 0; } /* 7.1.13 Peer Address Parameters (SCTP_PEER_ADDR_PARAMS) * * Applications can enable or disable heartbeats for any peer address of * an association, modify an address's heartbeat interval, force a * heartbeat to be sent immediately, and adjust the address's maximum * number of retransmissions sent before an address is considered * unreachable. The following structure is used to access and modify an * address's parameters: * * struct sctp_paddrparams { * sctp_assoc_t spp_assoc_id; * struct sockaddr_storage spp_address; * uint32_t spp_hbinterval; * uint16_t spp_pathmaxrxt; * uint32_t spp_pathmtu; * uint32_t spp_sackdelay; * uint32_t spp_flags; * }; * * spp_assoc_id - (one-to-many style socket) This is filled in the * application, and identifies the association for * this query. * spp_address - This specifies which address is of interest. * spp_hbinterval - This contains the value of the heartbeat interval, * in milliseconds. If a value of zero * is present in this field then no changes are to * be made to this parameter. * spp_pathmaxrxt - This contains the maximum number of * retransmissions before this address shall be * considered unreachable. If a value of zero * is present in this field then no changes are to * be made to this parameter. * spp_pathmtu - When Path MTU discovery is disabled the value * specified here will be the "fixed" path mtu. 
 *                     Note that if the spp_address field is empty
 *                     then all associations on this address will
 *                     have this fixed path mtu set upon them.
 *
 *   spp_sackdelay  - When delayed sack is enabled, this value specifies
 *                    the number of milliseconds that sacks will be delayed
 *                    for. This value will apply to all addresses of an
 *                    association if the spp_address field is empty. Note
 *                    also that if delayed sack is enabled and this
 *                    value is set to 0, no change is made to the last
 *                    recorded delayed sack timer value.
 *
 *   spp_flags      - These flags are used to control various features
 *                    on an association. The flag field may contain
 *                    zero or more of the following options.
 *
 *                    SPP_HB_ENABLE  - Enable heartbeats on the
 *                    specified address. Note that if the address
 *                    field is empty all addresses for the association
 *                    have heartbeats enabled upon them.
 *
 *                    SPP_HB_DISABLE - Disable heartbeats on the
 *                    specified address. Note that if the address
 *                    field is empty all addresses for the association
 *                    will have their heartbeats disabled. Note also
 *                    that SPP_HB_ENABLE and SPP_HB_DISABLE are
 *                    mutually exclusive, only one of these two should
 *                    be specified. Enabling both fields will have
 *                    undetermined results.
 *
 *                    SPP_HB_DEMAND - Request a user initiated heartbeat
 *                    to be made immediately.
 *
 *                    SPP_HB_TIME_IS_ZERO - Specifies that the time for
 *                    heartbeat delay is to be set to the value of 0
 *                    milliseconds.
 *
 *                    SPP_PMTUD_ENABLE - This field will enable PMTU
 *                    discovery upon the specified address. Note that
 *                    if the address field is empty then all addresses
 *                    on the association are affected.
 *
 *                    SPP_PMTUD_DISABLE - This field will disable PMTU
 *                    discovery upon the specified address. Note that
 *                    if the address field is empty then all addresses
 *                    on the association are affected. Note also that
 *                    SPP_PMTUD_ENABLE and SPP_PMTUD_DISABLE are mutually
 *                    exclusive. Enabling both will have undetermined
 *                    results.
 *
 *                    SPP_SACKDELAY_ENABLE - Setting this flag turns
 *                    on delayed sack. The time specified in spp_sackdelay
 *                    is used to specify the sack delay for this address. Note
 *                    that if spp_address is empty then all addresses will
 *                    enable delayed sack and take on the sack delay
 *                    value specified in spp_sackdelay.
 *                    SPP_SACKDELAY_DISABLE - Setting this flag turns
 *                    off delayed sack. If the spp_address field is blank then
 *                    delayed sack is disabled for the entire association. Note
 *                    also that this field is mutually exclusive to
 *                    SPP_SACKDELAY_ENABLE, setting both will have undefined
 *                    results.
 */
static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
				       struct sctp_transport   *trans,
				       struct sctp_association *asoc,
				       struct sctp_sock        *sp,
				       int                      hb_change,
				       int                      pmtud_change,
				       int                      sackdelay_change)
{
	int error;

	if (params->spp_flags & SPP_HB_DEMAND && trans) {
		struct net *net = sock_net(trans->asoc->base.sk);

		error = sctp_primitive_REQUESTHEARTBEAT(net, trans->asoc, trans);
		if (error)
			return error;
	}

	/* Note that unless the spp_flag is set to SPP_HB_ENABLE the value of
	 * this field is ignored.  Note also that a value of zero indicates
	 * the current setting should be left unchanged.
	 */
	if (params->spp_flags & SPP_HB_ENABLE) {

		/* Re-zero the interval if the SPP_HB_TIME_IS_ZERO is
		 * set.  This lets us use 0 value when this flag
		 * is set.
*/ if (params->spp_flags & SPP_HB_TIME_IS_ZERO) params->spp_hbinterval = 0; if (params->spp_hbinterval || (params->spp_flags & SPP_HB_TIME_IS_ZERO)) { if (trans) { trans->hbinterval = msecs_to_jiffies(params->spp_hbinterval); } else if (asoc) { asoc->hbinterval = msecs_to_jiffies(params->spp_hbinterval); } else { sp->hbinterval = params->spp_hbinterval; } } } if (hb_change) { if (trans) { trans->param_flags = (trans->param_flags & ~SPP_HB) | hb_change; } else if (asoc) { asoc->param_flags = (asoc->param_flags & ~SPP_HB) | hb_change; } else { sp->param_flags = (sp->param_flags & ~SPP_HB) | hb_change; } } /* When Path MTU discovery is disabled the value specified here will * be the "fixed" path mtu (i.e. the value of the spp_flags field must * include the flag SPP_PMTUD_DISABLE for this field to have any * effect). */ if ((params->spp_flags & SPP_PMTUD_DISABLE) && params->spp_pathmtu) { if (trans) { trans->pathmtu = params->spp_pathmtu; sctp_assoc_sync_pmtu(sctp_opt2sk(sp), asoc); } else if (asoc) { asoc->pathmtu = params->spp_pathmtu; sctp_frag_point(asoc, params->spp_pathmtu); } else { sp->pathmtu = params->spp_pathmtu; } } if (pmtud_change) { if (trans) { int update = (trans->param_flags & SPP_PMTUD_DISABLE) && (params->spp_flags & SPP_PMTUD_ENABLE); trans->param_flags = (trans->param_flags & ~SPP_PMTUD) | pmtud_change; if (update) { sctp_transport_pmtu(trans, sctp_opt2sk(sp)); sctp_assoc_sync_pmtu(sctp_opt2sk(sp), asoc); } } else if (asoc) { asoc->param_flags = (asoc->param_flags & ~SPP_PMTUD) | pmtud_change; } else { sp->param_flags = (sp->param_flags & ~SPP_PMTUD) | pmtud_change; } } /* Note that unless the spp_flag is set to SPP_SACKDELAY_ENABLE the * value of this field is ignored. Note also that a value of zero * indicates the current setting should be left unchanged. */ if ((params->spp_flags & SPP_SACKDELAY_ENABLE) && params->spp_sackdelay) { if (trans) { trans->sackdelay = msecs_to_jiffies(params->spp_sackdelay); } else if (asoc) { asoc->sackdelay = msecs_to_jiffies(params->spp_sackdelay); } else { sp->sackdelay = params->spp_sackdelay; } } if (sackdelay_change) { if (trans) { trans->param_flags = (trans->param_flags & ~SPP_SACKDELAY) | sackdelay_change; } else if (asoc) { asoc->param_flags = (asoc->param_flags & ~SPP_SACKDELAY) | sackdelay_change; } else { sp->param_flags = (sp->param_flags & ~SPP_SACKDELAY) | sackdelay_change; } } /* Note that a value of zero indicates the current setting should be left unchanged. */ if (params->spp_pathmaxrxt) { if (trans) { trans->pathmaxrxt = params->spp_pathmaxrxt; } else if (asoc) { asoc->pathmaxrxt = params->spp_pathmaxrxt; } else { sp->pathmaxrxt = params->spp_pathmaxrxt; } } return 0; } static int sctp_setsockopt_peer_addr_params(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_paddrparams params; struct sctp_transport *trans = NULL; struct sctp_association *asoc = NULL; struct sctp_sock *sp = sctp_sk(sk); int error; int hb_change, pmtud_change, sackdelay_change; if (optlen != sizeof(struct sctp_paddrparams)) return -EINVAL; if (copy_from_user(&params, optval, optlen)) return -EFAULT; /* Validate flags and value parameters. 
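	 *
	 * For reference, a hedged sketch of a typical caller: enable a
	 * five second heartbeat on every peer address of one association
	 * (the values, including the assoc_id variable, are illustrative):
	 *
	 *   struct sctp_paddrparams p = { 0 };
	 *   p.spp_assoc_id   = assoc_id;
	 *   p.spp_hbinterval = 5000;
	 *   p.spp_flags      = SPP_HB_ENABLE;
	 *   setsockopt(sd, IPPROTO_SCTP, SCTP_PEER_ADDR_PARAMS,
	 *              &p, sizeof(p));
	 *
	 * Leaving spp_address zeroed (INADDR_ANY) applies the change to
	 * the whole association, as handled further down.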
*/ hb_change = params.spp_flags & SPP_HB; pmtud_change = params.spp_flags & SPP_PMTUD; sackdelay_change = params.spp_flags & SPP_SACKDELAY; if (hb_change == SPP_HB || pmtud_change == SPP_PMTUD || sackdelay_change == SPP_SACKDELAY || params.spp_sackdelay > 500 || (params.spp_pathmtu && params.spp_pathmtu < SCTP_DEFAULT_MINSEGMENT)) return -EINVAL; /* If an address other than INADDR_ANY is specified, and * no transport is found, then the request is invalid. */ if (!sctp_is_any(sk, (union sctp_addr *)&params.spp_address)) { trans = sctp_addr_id2transport(sk, &params.spp_address, params.spp_assoc_id); if (!trans) return -EINVAL; } /* Get association, if assoc_id != 0 and the socket is a one * to many style socket, and an association was not found, then * the id was invalid. */ asoc = sctp_id2assoc(sk, params.spp_assoc_id); if (!asoc && params.spp_assoc_id && sctp_style(sk, UDP)) return -EINVAL; /* Heartbeat demand can only be sent on a transport or * association, but not a socket. */ if (params.spp_flags & SPP_HB_DEMAND && !trans && !asoc) return -EINVAL; /* Process parameters. */ error = sctp_apply_peer_addr_params(&params, trans, asoc, sp, hb_change, pmtud_change, sackdelay_change); if (error) return error; /* If changes are for association, also apply parameters to each * transport. */ if (!trans && asoc) { list_for_each_entry(trans, &asoc->peer.transport_addr_list, transports) { sctp_apply_peer_addr_params(&params, trans, asoc, sp, hb_change, pmtud_change, sackdelay_change); } } return 0; } static inline __u32 sctp_spp_sackdelay_enable(__u32 param_flags) { return (param_flags & ~SPP_SACKDELAY) | SPP_SACKDELAY_ENABLE; } static inline __u32 sctp_spp_sackdelay_disable(__u32 param_flags) { return (param_flags & ~SPP_SACKDELAY) | SPP_SACKDELAY_DISABLE; } /* * 7.1.23. Get or set delayed ack timer (SCTP_DELAYED_SACK) * * This option will effect the way delayed acks are performed. This * option allows you to get or set the delayed ack time, in * milliseconds. It also allows changing the delayed ack frequency. * Changing the frequency to 1 disables the delayed sack algorithm. If * the assoc_id is 0, then this sets or gets the endpoints default * values. If the assoc_id field is non-zero, then the set or get * effects the specified association for the one to many model (the * assoc_id field is ignored by the one to one model). Note that if * sack_delay or sack_freq are 0 when setting this option, then the * current values will remain unchanged. * * struct sctp_sack_info { * sctp_assoc_t sack_assoc_id; * uint32_t sack_delay; * uint32_t sack_freq; * }; * * sack_assoc_id - This parameter, indicates which association the user * is performing an action upon. Note that if this field's value is * zero then the endpoints default value is changed (effecting future * associations only). * * sack_delay - This parameter contains the number of milliseconds that * the user is requesting the delayed ACK timer be set to. Note that * this value is defined in the standard to be between 200 and 500 * milliseconds. * * sack_freq - This parameter contains the number of packets that must * be received before a sack is sent without waiting for the delay * timer to expire. The default value for this is 2, setting this * value to 1 will disable the delayed sack algorithm. 
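 *
 * A hedged usage sketch (endpoint-wide defaults, values illustrative):
 *
 *   struct sctp_sack_info si = { 0 };
 *   si.sack_assoc_id = 0;      // 0 selects the endpoint defaults
 *   si.sack_delay    = 200;    // milliseconds
 *   si.sack_freq     = 2;
 *   setsockopt(sd, IPPROTO_SCTP, SCTP_DELAYED_SACK, &si, sizeof(si));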
*/ static int sctp_setsockopt_delayed_ack(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_sack_info params; struct sctp_transport *trans = NULL; struct sctp_association *asoc = NULL; struct sctp_sock *sp = sctp_sk(sk); if (optlen == sizeof(struct sctp_sack_info)) { if (copy_from_user(&params, optval, optlen)) return -EFAULT; if (params.sack_delay == 0 && params.sack_freq == 0) return 0; } else if (optlen == sizeof(struct sctp_assoc_value)) { pr_warn_ratelimited(DEPRECATED "%s (pid %d) " "Use of struct sctp_assoc_value in delayed_ack socket option.\n" "Use struct sctp_sack_info instead\n", current->comm, task_pid_nr(current)); if (copy_from_user(&params, optval, optlen)) return -EFAULT; if (params.sack_delay == 0) params.sack_freq = 1; else params.sack_freq = 0; } else return -EINVAL; /* Validate value parameter. */ if (params.sack_delay > 500) return -EINVAL; /* Get association, if sack_assoc_id != 0 and the socket is a one * to many style socket, and an association was not found, then * the id was invalid. */ asoc = sctp_id2assoc(sk, params.sack_assoc_id); if (!asoc && params.sack_assoc_id && sctp_style(sk, UDP)) return -EINVAL; if (params.sack_delay) { if (asoc) { asoc->sackdelay = msecs_to_jiffies(params.sack_delay); asoc->param_flags = sctp_spp_sackdelay_enable(asoc->param_flags); } else { sp->sackdelay = params.sack_delay; sp->param_flags = sctp_spp_sackdelay_enable(sp->param_flags); } } if (params.sack_freq == 1) { if (asoc) { asoc->param_flags = sctp_spp_sackdelay_disable(asoc->param_flags); } else { sp->param_flags = sctp_spp_sackdelay_disable(sp->param_flags); } } else if (params.sack_freq > 1) { if (asoc) { asoc->sackfreq = params.sack_freq; asoc->param_flags = sctp_spp_sackdelay_enable(asoc->param_flags); } else { sp->sackfreq = params.sack_freq; sp->param_flags = sctp_spp_sackdelay_enable(sp->param_flags); } } /* If change is for association, also apply to each transport. */ if (asoc) { list_for_each_entry(trans, &asoc->peer.transport_addr_list, transports) { if (params.sack_delay) { trans->sackdelay = msecs_to_jiffies(params.sack_delay); trans->param_flags = sctp_spp_sackdelay_enable(trans->param_flags); } if (params.sack_freq == 1) { trans->param_flags = sctp_spp_sackdelay_disable(trans->param_flags); } else if (params.sack_freq > 1) { trans->sackfreq = params.sack_freq; trans->param_flags = sctp_spp_sackdelay_enable(trans->param_flags); } } } return 0; } /* 7.1.3 Initialization Parameters (SCTP_INITMSG) * * Applications can specify protocol parameters for the default association * initialization. The option name argument to setsockopt() and getsockopt() * is SCTP_INITMSG. * * Setting initialization parameters is effective only on an unconnected * socket (for UDP-style sockets only future associations are effected * by the change). With TCP-style sockets, this option is inherited by * sockets derived from a listener socket. 
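 *
 * Minimal illustrative sketch from userspace (assumed, unconnected socket
 * "sd"; not part of this file), setting the defaults used for future
 * associations:
 *
 *   struct sctp_initmsg init = { 0 };
 *   init.sinit_num_ostreams = 10;
 *   init.sinit_max_instreams = 10;
 *   init.sinit_max_attempts = 4;
 *   setsockopt(sd, IPPROTO_SCTP, SCTP_INITMSG, &init, sizeof(init));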
*/ static int sctp_setsockopt_initmsg(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_initmsg sinit; struct sctp_sock *sp = sctp_sk(sk); if (optlen != sizeof(struct sctp_initmsg)) return -EINVAL; if (copy_from_user(&sinit, optval, optlen)) return -EFAULT; if (sinit.sinit_num_ostreams) sp->initmsg.sinit_num_ostreams = sinit.sinit_num_ostreams; if (sinit.sinit_max_instreams) sp->initmsg.sinit_max_instreams = sinit.sinit_max_instreams; if (sinit.sinit_max_attempts) sp->initmsg.sinit_max_attempts = sinit.sinit_max_attempts; if (sinit.sinit_max_init_timeo) sp->initmsg.sinit_max_init_timeo = sinit.sinit_max_init_timeo; return 0; } /* * 7.1.14 Set default send parameters (SCTP_DEFAULT_SEND_PARAM) * * Applications that wish to use the sendto() system call may wish to * specify a default set of parameters that would normally be supplied * through the inclusion of ancillary data. This socket option allows * such an application to set the default sctp_sndrcvinfo structure. * The application that wishes to use this socket option simply passes * in to this call the sctp_sndrcvinfo structure defined in Section * 5.2.2) The input parameters accepted by this call include * sinfo_stream, sinfo_flags, sinfo_ppid, sinfo_context, * sinfo_timetolive. The user must provide the sinfo_assoc_id field in * to this call if the caller is using the UDP model. */ static int sctp_setsockopt_default_send_param(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_sock *sp = sctp_sk(sk); struct sctp_association *asoc; struct sctp_sndrcvinfo info; if (optlen != sizeof(info)) return -EINVAL; if (copy_from_user(&info, optval, optlen)) return -EFAULT; if (info.sinfo_flags & ~(SCTP_UNORDERED | SCTP_ADDR_OVER | SCTP_ABORT | SCTP_EOF)) return -EINVAL; asoc = sctp_id2assoc(sk, info.sinfo_assoc_id); if (!asoc && info.sinfo_assoc_id && sctp_style(sk, UDP)) return -EINVAL; if (asoc) { asoc->default_stream = info.sinfo_stream; asoc->default_flags = info.sinfo_flags; asoc->default_ppid = info.sinfo_ppid; asoc->default_context = info.sinfo_context; asoc->default_timetolive = info.sinfo_timetolive; } else { sp->default_stream = info.sinfo_stream; sp->default_flags = info.sinfo_flags; sp->default_ppid = info.sinfo_ppid; sp->default_context = info.sinfo_context; sp->default_timetolive = info.sinfo_timetolive; } return 0; } /* RFC6458, Section 8.1.31. Set/get Default Send Parameters * (SCTP_DEFAULT_SNDINFO) */ static int sctp_setsockopt_default_sndinfo(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_sock *sp = sctp_sk(sk); struct sctp_association *asoc; struct sctp_sndinfo info; if (optlen != sizeof(info)) return -EINVAL; if (copy_from_user(&info, optval, optlen)) return -EFAULT; if (info.snd_flags & ~(SCTP_UNORDERED | SCTP_ADDR_OVER | SCTP_ABORT | SCTP_EOF)) return -EINVAL; asoc = sctp_id2assoc(sk, info.snd_assoc_id); if (!asoc && info.snd_assoc_id && sctp_style(sk, UDP)) return -EINVAL; if (asoc) { asoc->default_stream = info.snd_sid; asoc->default_flags = info.snd_flags; asoc->default_ppid = info.snd_ppid; asoc->default_context = info.snd_context; } else { sp->default_stream = info.snd_sid; sp->default_flags = info.snd_flags; sp->default_ppid = info.snd_ppid; sp->default_context = info.snd_context; } return 0; } /* 7.1.10 Set Primary Address (SCTP_PRIMARY_ADDR) * * Requests that the local SCTP stack use the enclosed peer address as * the association primary. The enclosed address must be one of the * association peer's addresses. 
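 *
 * Illustrative userspace sketch (assumptions: socket "sd", an existing
 * association id "aid" and one of its peer addresses in a struct
 * sockaddr_in "peer"; none of these names come from this file):
 *
 *   struct sctp_prim prim = { 0 };
 *   prim.ssp_assoc_id = aid;
 *   memcpy(&prim.ssp_addr, &peer, sizeof(peer));
 *   setsockopt(sd, IPPROTO_SCTP, SCTP_PRIMARY_ADDR, &prim, sizeof(prim));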
*/ static int sctp_setsockopt_primary_addr(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_prim prim; struct sctp_transport *trans; if (optlen != sizeof(struct sctp_prim)) return -EINVAL; if (copy_from_user(&prim, optval, sizeof(struct sctp_prim))) return -EFAULT; trans = sctp_addr_id2transport(sk, &prim.ssp_addr, prim.ssp_assoc_id); if (!trans) return -EINVAL; sctp_assoc_set_primary(trans->asoc, trans); return 0; } /* * 7.1.5 SCTP_NODELAY * * Turn on/off any Nagle-like algorithm. This means that packets are * generally sent as soon as possible and no unnecessary delays are * introduced, at the cost of more packets in the network. Expects an * integer boolean flag. */ static int sctp_setsockopt_nodelay(struct sock *sk, char __user *optval, unsigned int optlen) { int val; if (optlen < sizeof(int)) return -EINVAL; if (get_user(val, (int __user *)optval)) return -EFAULT; sctp_sk(sk)->nodelay = (val == 0) ? 0 : 1; return 0; } /* * * 7.1.1 SCTP_RTOINFO * * The protocol parameters used to initialize and bound retransmission * timeout (RTO) are tunable. sctp_rtoinfo structure is used to access * and modify these parameters. * All parameters are time values, in milliseconds. A value of 0, when * modifying the parameters, indicates that the current value should not * be changed. * */ static int sctp_setsockopt_rtoinfo(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_rtoinfo rtoinfo; struct sctp_association *asoc; unsigned long rto_min, rto_max; struct sctp_sock *sp = sctp_sk(sk); if (optlen != sizeof (struct sctp_rtoinfo)) return -EINVAL; if (copy_from_user(&rtoinfo, optval, optlen)) return -EFAULT; asoc = sctp_id2assoc(sk, rtoinfo.srto_assoc_id); /* Set the values to the specific association */ if (!asoc && rtoinfo.srto_assoc_id && sctp_style(sk, UDP)) return -EINVAL; rto_max = rtoinfo.srto_max; rto_min = rtoinfo.srto_min; if (rto_max) rto_max = asoc ? msecs_to_jiffies(rto_max) : rto_max; else rto_max = asoc ? asoc->rto_max : sp->rtoinfo.srto_max; if (rto_min) rto_min = asoc ? msecs_to_jiffies(rto_min) : rto_min; else rto_min = asoc ? asoc->rto_min : sp->rtoinfo.srto_min; if (rto_min > rto_max) return -EINVAL; if (asoc) { if (rtoinfo.srto_initial != 0) asoc->rto_initial = msecs_to_jiffies(rtoinfo.srto_initial); asoc->rto_max = rto_max; asoc->rto_min = rto_min; } else { /* If there is no association or the association-id = 0 * set the values to the endpoint. */ if (rtoinfo.srto_initial != 0) sp->rtoinfo.srto_initial = rtoinfo.srto_initial; sp->rtoinfo.srto_max = rto_max; sp->rtoinfo.srto_min = rto_min; } return 0; } /* * * 7.1.2 SCTP_ASSOCINFO * * This option is used to tune the maximum retransmission attempts * of the association. * Returns an error if the new association retransmission value is * greater than the sum of the retransmission value of the peer. * See [SCTP] for more information. 
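 *
 * Illustrative userspace sketch (assumed socket "sd" and association id
 * "aid"); fields left at zero are not changed by the kernel:
 *
 *   struct sctp_assocparams ap = { 0 };
 *   ap.sasoc_assoc_id = aid;
 *   ap.sasoc_asocmaxrxt = 8;
 *   setsockopt(sd, IPPROTO_SCTP, SCTP_ASSOCINFO, &ap, sizeof(ap));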
* */ static int sctp_setsockopt_associnfo(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_assocparams assocparams; struct sctp_association *asoc; if (optlen != sizeof(struct sctp_assocparams)) return -EINVAL; if (copy_from_user(&assocparams, optval, optlen)) return -EFAULT; asoc = sctp_id2assoc(sk, assocparams.sasoc_assoc_id); if (!asoc && assocparams.sasoc_assoc_id && sctp_style(sk, UDP)) return -EINVAL; /* Set the values to the specific association */ if (asoc) { if (assocparams.sasoc_asocmaxrxt != 0) { __u32 path_sum = 0; int paths = 0; struct sctp_transport *peer_addr; list_for_each_entry(peer_addr, &asoc->peer.transport_addr_list, transports) { path_sum += peer_addr->pathmaxrxt; paths++; } /* Only validate asocmaxrxt if we have more than * one path/transport. We do this because path * retransmissions are only counted when we have more * then one path. */ if (paths > 1 && assocparams.sasoc_asocmaxrxt > path_sum) return -EINVAL; asoc->max_retrans = assocparams.sasoc_asocmaxrxt; } if (assocparams.sasoc_cookie_life != 0) asoc->cookie_life = ms_to_ktime(assocparams.sasoc_cookie_life); } else { /* Set the values to the endpoint */ struct sctp_sock *sp = sctp_sk(sk); if (assocparams.sasoc_asocmaxrxt != 0) sp->assocparams.sasoc_asocmaxrxt = assocparams.sasoc_asocmaxrxt; if (assocparams.sasoc_cookie_life != 0) sp->assocparams.sasoc_cookie_life = assocparams.sasoc_cookie_life; } return 0; } /* * 7.1.16 Set/clear IPv4 mapped addresses (SCTP_I_WANT_MAPPED_V4_ADDR) * * This socket option is a boolean flag which turns on or off mapped V4 * addresses. If this option is turned on and the socket is type * PF_INET6, then IPv4 addresses will be mapped to V6 representation. * If this option is turned off, then no mapping will be done of V4 * addresses and a user will receive both PF_INET6 and PF_INET type * addresses on the socket. */ static int sctp_setsockopt_mappedv4(struct sock *sk, char __user *optval, unsigned int optlen) { int val; struct sctp_sock *sp = sctp_sk(sk); if (optlen < sizeof(int)) return -EINVAL; if (get_user(val, (int __user *)optval)) return -EFAULT; if (val) sp->v4mapped = 1; else sp->v4mapped = 0; return 0; } /* * 8.1.16. Get or Set the Maximum Fragmentation Size (SCTP_MAXSEG) * This option will get or set the maximum size to put in any outgoing * SCTP DATA chunk. If a message is larger than this size it will be * fragmented by SCTP into the specified size. Note that the underlying * SCTP implementation may fragment into smaller sized chunks when the * PMTU of the underlying association is smaller than the value set by * the user. The default value for this option is '0' which indicates * the user is NOT limiting fragmentation and only the PMTU will effect * SCTP's choice of DATA chunk size. Note also that values set larger * than the maximum size of an IP datagram will effectively let SCTP * control fragmentation (i.e. the same as setting this option to 0). * * The following structure is used to access and modify this parameter: * * struct sctp_assoc_value { * sctp_assoc_t assoc_id; * uint32_t assoc_value; * }; * * assoc_id: This parameter is ignored for one-to-one style sockets. * For one-to-many style sockets this parameter indicates which * association the user is performing an action upon. Note that if * this field's value is zero then the endpoints default value is * changed (effecting future associations only). * assoc_value: This parameter specifies the maximum size in bytes. 
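 *
 * Illustrative userspace sketch (assumed socket "sd"); writing 0 restores
 * purely PMTU-driven fragmentation:
 *
 *   struct sctp_assoc_value av = { 0 };
 *   av.assoc_id = 0;
 *   av.assoc_value = 1200;
 *   setsockopt(sd, IPPROTO_SCTP, SCTP_MAXSEG, &av, sizeof(av));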
*/ static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_assoc_value params; struct sctp_association *asoc; struct sctp_sock *sp = sctp_sk(sk); int val; if (optlen == sizeof(int)) { pr_warn_ratelimited(DEPRECATED "%s (pid %d) " "Use of int in maxseg socket option.\n" "Use struct sctp_assoc_value instead\n", current->comm, task_pid_nr(current)); if (copy_from_user(&val, optval, optlen)) return -EFAULT; params.assoc_id = 0; } else if (optlen == sizeof(struct sctp_assoc_value)) { if (copy_from_user(&params, optval, optlen)) return -EFAULT; val = params.assoc_value; } else return -EINVAL; if ((val != 0) && ((val < 8) || (val > SCTP_MAX_CHUNK_LEN))) return -EINVAL; asoc = sctp_id2assoc(sk, params.assoc_id); if (!asoc && params.assoc_id && sctp_style(sk, UDP)) return -EINVAL; if (asoc) { if (val == 0) { val = asoc->pathmtu; val -= sp->pf->af->net_header_len; val -= sizeof(struct sctphdr) + sizeof(struct sctp_data_chunk); } asoc->user_frag = val; asoc->frag_point = sctp_frag_point(asoc, asoc->pathmtu); } else { sp->user_frag = val; } return 0; } /* * 7.1.9 Set Peer Primary Address (SCTP_SET_PEER_PRIMARY_ADDR) * * Requests that the peer mark the enclosed address as the association * primary. The enclosed address must be one of the association's * locally bound addresses. The following structure is used to make a * set primary request: */ static int sctp_setsockopt_peer_primary_addr(struct sock *sk, char __user *optval, unsigned int optlen) { struct net *net = sock_net(sk); struct sctp_sock *sp; struct sctp_association *asoc = NULL; struct sctp_setpeerprim prim; struct sctp_chunk *chunk; struct sctp_af *af; int err; sp = sctp_sk(sk); if (!net->sctp.addip_enable) return -EPERM; if (optlen != sizeof(struct sctp_setpeerprim)) return -EINVAL; if (copy_from_user(&prim, optval, optlen)) return -EFAULT; asoc = sctp_id2assoc(sk, prim.sspp_assoc_id); if (!asoc) return -EINVAL; if (!asoc->peer.asconf_capable) return -EPERM; if (asoc->peer.addip_disabled_mask & SCTP_PARAM_SET_PRIMARY) return -EPERM; if (!sctp_state(asoc, ESTABLISHED)) return -ENOTCONN; af = sctp_get_af_specific(prim.sspp_addr.ss_family); if (!af) return -EINVAL; if (!af->addr_valid((union sctp_addr *)&prim.sspp_addr, sp, NULL)) return -EADDRNOTAVAIL; if (!sctp_assoc_lookup_laddr(asoc, (union sctp_addr *)&prim.sspp_addr)) return -EADDRNOTAVAIL; /* Create an ASCONF chunk with SET_PRIMARY parameter */ chunk = sctp_make_asconf_set_prim(asoc, (union sctp_addr *)&prim.sspp_addr); if (!chunk) return -ENOMEM; err = sctp_send_asconf(asoc, chunk); pr_debug("%s: we set peer primary addr primitively\n", __func__); return err; } static int sctp_setsockopt_adaptation_layer(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_setadaptation adaptation; if (optlen != sizeof(struct sctp_setadaptation)) return -EINVAL; if (copy_from_user(&adaptation, optval, optlen)) return -EFAULT; sctp_sk(sk)->adaptation_ind = adaptation.ssb_adaptation_ind; return 0; } /* * 7.1.29. Set or Get the default context (SCTP_CONTEXT) * * The context field in the sctp_sndrcvinfo structure is normally only * used when a failed message is retrieved holding the value that was * sent down on the actual send call. This option allows the setting of * a default context on an association basis that will be received on * reading messages from the peer. 
This is especially helpful in the * one-2-many model for an application to keep some reference to an * internal state machine that is processing messages on the * association. Note that the setting of this value only effects * received messages from the peer and does not effect the value that is * saved with outbound messages. */ static int sctp_setsockopt_context(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_assoc_value params; struct sctp_sock *sp; struct sctp_association *asoc; if (optlen != sizeof(struct sctp_assoc_value)) return -EINVAL; if (copy_from_user(&params, optval, optlen)) return -EFAULT; sp = sctp_sk(sk); if (params.assoc_id != 0) { asoc = sctp_id2assoc(sk, params.assoc_id); if (!asoc) return -EINVAL; asoc->default_rcv_context = params.assoc_value; } else { sp->default_rcv_context = params.assoc_value; } return 0; } /* * 7.1.24. Get or set fragmented interleave (SCTP_FRAGMENT_INTERLEAVE) * * This options will at a minimum specify if the implementation is doing * fragmented interleave. Fragmented interleave, for a one to many * socket, is when subsequent calls to receive a message may return * parts of messages from different associations. Some implementations * may allow you to turn this value on or off. If so, when turned off, * no fragment interleave will occur (which will cause a head of line * blocking amongst multiple associations sharing the same one to many * socket). When this option is turned on, then each receive call may * come from a different association (thus the user must receive data * with the extended calls (e.g. sctp_recvmsg) to keep track of which * association each receive belongs to. * * This option takes a boolean value. A non-zero value indicates that * fragmented interleave is on. A value of zero indicates that * fragmented interleave is off. * * Note that it is important that an implementation that allows this * option to be turned on, have it off by default. Otherwise an unaware * application using the one to many model may become confused and act * incorrectly. */ static int sctp_setsockopt_fragment_interleave(struct sock *sk, char __user *optval, unsigned int optlen) { int val; if (optlen != sizeof(int)) return -EINVAL; if (get_user(val, (int __user *)optval)) return -EFAULT; sctp_sk(sk)->frag_interleave = (val == 0) ? 0 : 1; return 0; } /* * 8.1.21. Set or Get the SCTP Partial Delivery Point * (SCTP_PARTIAL_DELIVERY_POINT) * * This option will set or get the SCTP partial delivery point. This * point is the size of a message where the partial delivery API will be * invoked to help free up rwnd space for the peer. Setting this to a * lower value will cause partial deliveries to happen more often. The * calls argument is an integer that sets or gets the partial delivery * point. Note also that the call will fail if the user attempts to set * this value larger than the socket receive buffer size. * * Note that any single message having a length smaller than or equal to * the SCTP partial delivery point will be delivered in one single read * call as long as the user provided buffer is large enough to hold the * message. */ static int sctp_setsockopt_partial_delivery_point(struct sock *sk, char __user *optval, unsigned int optlen) { u32 val; if (optlen != sizeof(u32)) return -EINVAL; if (get_user(val, (int __user *)optval)) return -EFAULT; /* Note: We double the receive buffer from what the user sets * it to be, also initial rwnd is based on rcvbuf/2. 
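 * Since SO_RCVBUF values are doubled by the core socket code,
 * comparing against sk_rcvbuf >> 1 below bounds the partial delivery
 * point by the receive buffer size the user actually asked for.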
*/ if (val > (sk->sk_rcvbuf >> 1)) return -EINVAL; sctp_sk(sk)->pd_point = val; return 0; /* is this the right error code? */ } /* * 7.1.28. Set or Get the maximum burst (SCTP_MAX_BURST) * * This option will allow a user to change the maximum burst of packets * that can be emitted by this association. Note that the default value * is 4, and some implementations may restrict this setting so that it * can only be lowered. * * NOTE: This text doesn't seem right. Do this on a socket basis with * future associations inheriting the socket value. */ static int sctp_setsockopt_maxburst(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_assoc_value params; struct sctp_sock *sp; struct sctp_association *asoc; int val; int assoc_id = 0; if (optlen == sizeof(int)) { pr_warn_ratelimited(DEPRECATED "%s (pid %d) " "Use of int in max_burst socket option deprecated.\n" "Use struct sctp_assoc_value instead\n", current->comm, task_pid_nr(current)); if (copy_from_user(&val, optval, optlen)) return -EFAULT; } else if (optlen == sizeof(struct sctp_assoc_value)) { if (copy_from_user(&params, optval, optlen)) return -EFAULT; val = params.assoc_value; assoc_id = params.assoc_id; } else return -EINVAL; sp = sctp_sk(sk); if (assoc_id != 0) { asoc = sctp_id2assoc(sk, assoc_id); if (!asoc) return -EINVAL; asoc->max_burst = val; } else sp->max_burst = val; return 0; } /* * 7.1.18. Add a chunk that must be authenticated (SCTP_AUTH_CHUNK) * * This set option adds a chunk type that the user is requesting to be * received only in an authenticated way. Changes to the list of chunks * will only effect future associations on the socket. */ static int sctp_setsockopt_auth_chunk(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_endpoint *ep = sctp_sk(sk)->ep; struct sctp_authchunk val; if (!ep->auth_enable) return -EACCES; if (optlen != sizeof(struct sctp_authchunk)) return -EINVAL; if (copy_from_user(&val, optval, optlen)) return -EFAULT; switch (val.sauth_chunk) { case SCTP_CID_INIT: case SCTP_CID_INIT_ACK: case SCTP_CID_SHUTDOWN_COMPLETE: case SCTP_CID_AUTH: return -EINVAL; } /* add this chunk id to the endpoint */ return sctp_auth_ep_add_chunkid(ep, val.sauth_chunk); } /* * 7.1.19. Get or set the list of supported HMAC Identifiers (SCTP_HMAC_IDENT) * * This option gets or sets the list of HMAC algorithms that the local * endpoint requires the peer to use. */ static int sctp_setsockopt_hmac_ident(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_endpoint *ep = sctp_sk(sk)->ep; struct sctp_hmacalgo *hmacs; u32 idents; int err; if (!ep->auth_enable) return -EACCES; if (optlen < sizeof(struct sctp_hmacalgo)) return -EINVAL; hmacs = memdup_user(optval, optlen); if (IS_ERR(hmacs)) return PTR_ERR(hmacs); idents = hmacs->shmac_num_idents; if (idents == 0 || idents > SCTP_AUTH_NUM_HMACS || (idents * sizeof(u16)) > (optlen - sizeof(struct sctp_hmacalgo))) { err = -EINVAL; goto out; } err = sctp_auth_ep_set_hmacs(ep, hmacs); out: kfree(hmacs); return err; } /* * 7.1.20. Set a shared key (SCTP_AUTH_KEY) * * This option will set a shared secret key which is used to build an * association shared key. 
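 *
 * Illustrative userspace sketch (assumed socket "sd" and a 16-byte secret
 * in "key"; SCTP authentication must be enabled or the call fails with
 * EACCES). The structure is variable length, with the key bytes appended
 * after the fixed header:
 *
 *   size_t len = sizeof(struct sctp_authkey) + 16;
 *   struct sctp_authkey *ak = calloc(1, len);
 *   ak->sca_assoc_id = 0;
 *   ak->sca_keynumber = 1;
 *   ak->sca_keylength = 16;
 *   memcpy(ak->sca_key, key, 16);
 *   setsockopt(sd, IPPROTO_SCTP, SCTP_AUTH_KEY, ak, len);
 *   free(ak);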
*/ static int sctp_setsockopt_auth_key(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_endpoint *ep = sctp_sk(sk)->ep; struct sctp_authkey *authkey; struct sctp_association *asoc; int ret; if (!ep->auth_enable) return -EACCES; if (optlen <= sizeof(struct sctp_authkey)) return -EINVAL; authkey = memdup_user(optval, optlen); if (IS_ERR(authkey)) return PTR_ERR(authkey); if (authkey->sca_keylength > optlen - sizeof(struct sctp_authkey)) { ret = -EINVAL; goto out; } asoc = sctp_id2assoc(sk, authkey->sca_assoc_id); if (!asoc && authkey->sca_assoc_id && sctp_style(sk, UDP)) { ret = -EINVAL; goto out; } ret = sctp_auth_set_key(ep, asoc, authkey); out: kzfree(authkey); return ret; } /* * 7.1.21. Get or set the active shared key (SCTP_AUTH_ACTIVE_KEY) * * This option will get or set the active shared key to be used to build * the association shared key. */ static int sctp_setsockopt_active_key(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_endpoint *ep = sctp_sk(sk)->ep; struct sctp_authkeyid val; struct sctp_association *asoc; if (!ep->auth_enable) return -EACCES; if (optlen != sizeof(struct sctp_authkeyid)) return -EINVAL; if (copy_from_user(&val, optval, optlen)) return -EFAULT; asoc = sctp_id2assoc(sk, val.scact_assoc_id); if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP)) return -EINVAL; return sctp_auth_set_active_key(ep, asoc, val.scact_keynumber); } /* * 7.1.22. Delete a shared key (SCTP_AUTH_DELETE_KEY) * * This set option will delete a shared secret key from use. */ static int sctp_setsockopt_del_key(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_endpoint *ep = sctp_sk(sk)->ep; struct sctp_authkeyid val; struct sctp_association *asoc; if (!ep->auth_enable) return -EACCES; if (optlen != sizeof(struct sctp_authkeyid)) return -EINVAL; if (copy_from_user(&val, optval, optlen)) return -EFAULT; asoc = sctp_id2assoc(sk, val.scact_assoc_id); if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP)) return -EINVAL; return sctp_auth_del_key_id(ep, asoc, val.scact_keynumber); } /* * 8.1.23 SCTP_AUTO_ASCONF * * This option will enable or disable the use of the automatic generation of * ASCONF chunks to add and delete addresses to an existing association. Note * that this option has two caveats namely: a) it only affects sockets that * are bound to all addresses available to the SCTP stack, and b) the system * administrator may have an overriding control that turns the ASCONF feature * off no matter what setting the socket option may have. * This option expects an integer boolean flag, where a non-zero value turns on * the option, and a zero value turns off the option. * Note. 
In this implementation, socket operation overrides default parameter * being set by sysctl as well as FreeBSD implementation */ static int sctp_setsockopt_auto_asconf(struct sock *sk, char __user *optval, unsigned int optlen) { int val; struct sctp_sock *sp = sctp_sk(sk); if (optlen < sizeof(int)) return -EINVAL; if (get_user(val, (int __user *)optval)) return -EFAULT; if (!sctp_is_ep_boundall(sk) && val) return -EINVAL; if ((val && sp->do_auto_asconf) || (!val && !sp->do_auto_asconf)) return 0; if (val == 0 && sp->do_auto_asconf) { list_del(&sp->auto_asconf_list); sp->do_auto_asconf = 0; } else if (val && !sp->do_auto_asconf) { list_add_tail(&sp->auto_asconf_list, &sock_net(sk)->sctp.auto_asconf_splist); sp->do_auto_asconf = 1; } return 0; } /* * SCTP_PEER_ADDR_THLDS * * This option allows us to alter the partially failed threshold for one or all * transports in an association. See Section 6.1 of: * http://www.ietf.org/id/draft-nishida-tsvwg-sctp-failover-05.txt */ static int sctp_setsockopt_paddr_thresholds(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_paddrthlds val; struct sctp_transport *trans; struct sctp_association *asoc; if (optlen < sizeof(struct sctp_paddrthlds)) return -EINVAL; if (copy_from_user(&val, (struct sctp_paddrthlds __user *)optval, sizeof(struct sctp_paddrthlds))) return -EFAULT; if (sctp_is_any(sk, (const union sctp_addr *)&val.spt_address)) { asoc = sctp_id2assoc(sk, val.spt_assoc_id); if (!asoc) return -ENOENT; list_for_each_entry(trans, &asoc->peer.transport_addr_list, transports) { if (val.spt_pathmaxrxt) trans->pathmaxrxt = val.spt_pathmaxrxt; trans->pf_retrans = val.spt_pathpfthld; } if (val.spt_pathmaxrxt) asoc->pathmaxrxt = val.spt_pathmaxrxt; asoc->pf_retrans = val.spt_pathpfthld; } else { trans = sctp_addr_id2transport(sk, &val.spt_address, val.spt_assoc_id); if (!trans) return -ENOENT; if (val.spt_pathmaxrxt) trans->pathmaxrxt = val.spt_pathmaxrxt; trans->pf_retrans = val.spt_pathpfthld; } return 0; } static int sctp_setsockopt_recvrcvinfo(struct sock *sk, char __user *optval, unsigned int optlen) { int val; if (optlen < sizeof(int)) return -EINVAL; if (get_user(val, (int __user *) optval)) return -EFAULT; sctp_sk(sk)->recvrcvinfo = (val == 0) ? 0 : 1; return 0; } static int sctp_setsockopt_recvnxtinfo(struct sock *sk, char __user *optval, unsigned int optlen) { int val; if (optlen < sizeof(int)) return -EINVAL; if (get_user(val, (int __user *) optval)) return -EFAULT; sctp_sk(sk)->recvnxtinfo = (val == 0) ? 0 : 1; return 0; } /* API 6.2 setsockopt(), getsockopt() * * Applications use setsockopt() and getsockopt() to set or retrieve * socket options. Socket options are used to change the default * behavior of sockets calls. They are described in Section 7. * * The syntax is: * * ret = getsockopt(int sd, int level, int optname, void __user *optval, * int __user *optlen); * ret = setsockopt(int sd, int level, int optname, const void __user *optval, * int optlen); * * sd - the socket descript. * level - set to IPPROTO_SCTP for all SCTP options. * optname - the option name. * optval - the buffer to store the value of the option. * optlen - the size of the buffer. */ static int sctp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen) { int retval = 0; pr_debug("%s: sk:%p, optname:%d\n", __func__, sk, optname); /* I can hardly begin to describe how wrong this is. This is * so broken as to be worse than useless. The API draft * REALLY is NOT helpful here... 
I am not convinced that the * semantics of setsockopt() with a level OTHER THAN SOL_SCTP * are at all well-founded. */ if (level != SOL_SCTP) { struct sctp_af *af = sctp_sk(sk)->pf->af; retval = af->setsockopt(sk, level, optname, optval, optlen); goto out_nounlock; } lock_sock(sk); switch (optname) { case SCTP_SOCKOPT_BINDX_ADD: /* 'optlen' is the size of the addresses buffer. */ retval = sctp_setsockopt_bindx(sk, (struct sockaddr __user *)optval, optlen, SCTP_BINDX_ADD_ADDR); break; case SCTP_SOCKOPT_BINDX_REM: /* 'optlen' is the size of the addresses buffer. */ retval = sctp_setsockopt_bindx(sk, (struct sockaddr __user *)optval, optlen, SCTP_BINDX_REM_ADDR); break; case SCTP_SOCKOPT_CONNECTX_OLD: /* 'optlen' is the size of the addresses buffer. */ retval = sctp_setsockopt_connectx_old(sk, (struct sockaddr __user *)optval, optlen); break; case SCTP_SOCKOPT_CONNECTX: /* 'optlen' is the size of the addresses buffer. */ retval = sctp_setsockopt_connectx(sk, (struct sockaddr __user *)optval, optlen); break; case SCTP_DISABLE_FRAGMENTS: retval = sctp_setsockopt_disable_fragments(sk, optval, optlen); break; case SCTP_EVENTS: retval = sctp_setsockopt_events(sk, optval, optlen); break; case SCTP_AUTOCLOSE: retval = sctp_setsockopt_autoclose(sk, optval, optlen); break; case SCTP_PEER_ADDR_PARAMS: retval = sctp_setsockopt_peer_addr_params(sk, optval, optlen); break; case SCTP_DELAYED_SACK: retval = sctp_setsockopt_delayed_ack(sk, optval, optlen); break; case SCTP_PARTIAL_DELIVERY_POINT: retval = sctp_setsockopt_partial_delivery_point(sk, optval, optlen); break; case SCTP_INITMSG: retval = sctp_setsockopt_initmsg(sk, optval, optlen); break; case SCTP_DEFAULT_SEND_PARAM: retval = sctp_setsockopt_default_send_param(sk, optval, optlen); break; case SCTP_DEFAULT_SNDINFO: retval = sctp_setsockopt_default_sndinfo(sk, optval, optlen); break; case SCTP_PRIMARY_ADDR: retval = sctp_setsockopt_primary_addr(sk, optval, optlen); break; case SCTP_SET_PEER_PRIMARY_ADDR: retval = sctp_setsockopt_peer_primary_addr(sk, optval, optlen); break; case SCTP_NODELAY: retval = sctp_setsockopt_nodelay(sk, optval, optlen); break; case SCTP_RTOINFO: retval = sctp_setsockopt_rtoinfo(sk, optval, optlen); break; case SCTP_ASSOCINFO: retval = sctp_setsockopt_associnfo(sk, optval, optlen); break; case SCTP_I_WANT_MAPPED_V4_ADDR: retval = sctp_setsockopt_mappedv4(sk, optval, optlen); break; case SCTP_MAXSEG: retval = sctp_setsockopt_maxseg(sk, optval, optlen); break; case SCTP_ADAPTATION_LAYER: retval = sctp_setsockopt_adaptation_layer(sk, optval, optlen); break; case SCTP_CONTEXT: retval = sctp_setsockopt_context(sk, optval, optlen); break; case SCTP_FRAGMENT_INTERLEAVE: retval = sctp_setsockopt_fragment_interleave(sk, optval, optlen); break; case SCTP_MAX_BURST: retval = sctp_setsockopt_maxburst(sk, optval, optlen); break; case SCTP_AUTH_CHUNK: retval = sctp_setsockopt_auth_chunk(sk, optval, optlen); break; case SCTP_HMAC_IDENT: retval = sctp_setsockopt_hmac_ident(sk, optval, optlen); break; case SCTP_AUTH_KEY: retval = sctp_setsockopt_auth_key(sk, optval, optlen); break; case SCTP_AUTH_ACTIVE_KEY: retval = sctp_setsockopt_active_key(sk, optval, optlen); break; case SCTP_AUTH_DELETE_KEY: retval = sctp_setsockopt_del_key(sk, optval, optlen); break; case SCTP_AUTO_ASCONF: retval = sctp_setsockopt_auto_asconf(sk, optval, optlen); break; case SCTP_PEER_ADDR_THLDS: retval = sctp_setsockopt_paddr_thresholds(sk, optval, optlen); break; case SCTP_RECVRCVINFO: retval = sctp_setsockopt_recvrcvinfo(sk, optval, optlen); break; case 
SCTP_RECVNXTINFO: retval = sctp_setsockopt_recvnxtinfo(sk, optval, optlen); break; default: retval = -ENOPROTOOPT; break; } release_sock(sk); out_nounlock: return retval; } /* API 3.1.6 connect() - UDP Style Syntax * * An application may use the connect() call in the UDP model to initiate an * association without sending data. * * The syntax is: * * ret = connect(int sd, const struct sockaddr *nam, socklen_t len); * * sd: the socket descriptor to have a new association added to. * * nam: the address structure (either struct sockaddr_in or struct * sockaddr_in6 defined in RFC2553 [7]). * * len: the size of the address. */ static int sctp_connect(struct sock *sk, struct sockaddr *addr, int addr_len) { int err = 0; struct sctp_af *af; lock_sock(sk); pr_debug("%s: sk:%p, sockaddr:%p, addr_len:%d\n", __func__, sk, addr, addr_len); /* Validate addr_len before calling common connect/connectx routine. */ af = sctp_get_af_specific(addr->sa_family); if (!af || addr_len < af->sockaddr_len) { err = -EINVAL; } else { /* Pass correct addr len to common routine (so it knows there * is only one address being passed. */ err = __sctp_connect(sk, addr, af->sockaddr_len, NULL); } release_sock(sk); return err; } /* FIXME: Write comments. */ static int sctp_disconnect(struct sock *sk, int flags) { return -EOPNOTSUPP; /* STUB */ } /* 4.1.4 accept() - TCP Style Syntax * * Applications use accept() call to remove an established SCTP * association from the accept queue of the endpoint. A new socket * descriptor will be returned from accept() to represent the newly * formed association. */ static struct sock *sctp_accept(struct sock *sk, int flags, int *err) { struct sctp_sock *sp; struct sctp_endpoint *ep; struct sock *newsk = NULL; struct sctp_association *asoc; long timeo; int error = 0; lock_sock(sk); sp = sctp_sk(sk); ep = sp->ep; if (!sctp_style(sk, TCP)) { error = -EOPNOTSUPP; goto out; } if (!sctp_sstate(sk, LISTENING)) { error = -EINVAL; goto out; } timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); error = sctp_wait_for_accept(sk, timeo); if (error) goto out; /* We treat the list of associations on the endpoint as the accept * queue and pick the first association on the list. */ asoc = list_entry(ep->asocs.next, struct sctp_association, asocs); newsk = sp->pf->create_accept_sk(sk, asoc); if (!newsk) { error = -ENOMEM; goto out; } /* Populate the fields of the newsk from the oldsk and migrate the * asoc to the newsk. */ sctp_sock_migrate(sk, newsk, asoc, SCTP_SOCKET_TCP); out: release_sock(sk); *err = error; return newsk; } /* The SCTP ioctl handler. */ static int sctp_ioctl(struct sock *sk, int cmd, unsigned long arg) { int rc = -ENOTCONN; lock_sock(sk); /* * SEQPACKET-style sockets in LISTENING state are valid, for * SCTP, so only discard TCP-style sockets in LISTENING state. */ if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)) goto out; switch (cmd) { case SIOCINQ: { struct sk_buff *skb; unsigned int amount = 0; skb = skb_peek(&sk->sk_receive_queue); if (skb != NULL) { /* * We will only return the amount of this packet since * that is all that will be read. */ amount = skb->len; } rc = put_user(amount, (int __user *)arg); break; } default: rc = -ENOIOCTLCMD; break; } out: release_sock(sk); return rc; } /* This is the function which gets called during socket creation to * initialized the SCTP-specific portion of the sock. * The sock structure should already be zero-filled memory. 
*/ static int sctp_init_sock(struct sock *sk) { struct net *net = sock_net(sk); struct sctp_sock *sp; pr_debug("%s: sk:%p\n", __func__, sk); sp = sctp_sk(sk); /* Initialize the SCTP per socket area. */ switch (sk->sk_type) { case SOCK_SEQPACKET: sp->type = SCTP_SOCKET_UDP; break; case SOCK_STREAM: sp->type = SCTP_SOCKET_TCP; break; default: return -ESOCKTNOSUPPORT; } /* Initialize default send parameters. These parameters can be * modified with the SCTP_DEFAULT_SEND_PARAM socket option. */ sp->default_stream = 0; sp->default_ppid = 0; sp->default_flags = 0; sp->default_context = 0; sp->default_timetolive = 0; sp->default_rcv_context = 0; sp->max_burst = net->sctp.max_burst; sp->sctp_hmac_alg = net->sctp.sctp_hmac_alg; /* Initialize default setup parameters. These parameters * can be modified with the SCTP_INITMSG socket option or * overridden by the SCTP_INIT CMSG. */ sp->initmsg.sinit_num_ostreams = sctp_max_outstreams; sp->initmsg.sinit_max_instreams = sctp_max_instreams; sp->initmsg.sinit_max_attempts = net->sctp.max_retrans_init; sp->initmsg.sinit_max_init_timeo = net->sctp.rto_max; /* Initialize default RTO related parameters. These parameters can * be modified for with the SCTP_RTOINFO socket option. */ sp->rtoinfo.srto_initial = net->sctp.rto_initial; sp->rtoinfo.srto_max = net->sctp.rto_max; sp->rtoinfo.srto_min = net->sctp.rto_min; /* Initialize default association related parameters. These parameters * can be modified with the SCTP_ASSOCINFO socket option. */ sp->assocparams.sasoc_asocmaxrxt = net->sctp.max_retrans_association; sp->assocparams.sasoc_number_peer_destinations = 0; sp->assocparams.sasoc_peer_rwnd = 0; sp->assocparams.sasoc_local_rwnd = 0; sp->assocparams.sasoc_cookie_life = net->sctp.valid_cookie_life; /* Initialize default event subscriptions. By default, all the * options are off. */ memset(&sp->subscribe, 0, sizeof(struct sctp_event_subscribe)); /* Default Peer Address Parameters. These defaults can * be modified via SCTP_PEER_ADDR_PARAMS */ sp->hbinterval = net->sctp.hb_interval; sp->pathmaxrxt = net->sctp.max_retrans_path; sp->pathmtu = 0; /* allow default discovery */ sp->sackdelay = net->sctp.sack_timeout; sp->sackfreq = 2; sp->param_flags = SPP_HB_ENABLE | SPP_PMTUD_ENABLE | SPP_SACKDELAY_ENABLE; /* If enabled no SCTP message fragmentation will be performed. * Configure through SCTP_DISABLE_FRAGMENTS socket option. */ sp->disable_fragments = 0; /* Enable Nagle algorithm by default. */ sp->nodelay = 0; sp->recvrcvinfo = 0; sp->recvnxtinfo = 0; /* Enable by default. */ sp->v4mapped = 1; /* Auto-close idle associations after the configured * number of seconds. A value of 0 disables this * feature. Configure through the SCTP_AUTOCLOSE socket option, * for UDP-style sockets only. */ sp->autoclose = 0; /* User specified fragmentation limit. */ sp->user_frag = 0; sp->adaptation_ind = 0; sp->pf = sctp_get_pf_specific(sk->sk_family); /* Control variables for partial data delivery. */ atomic_set(&sp->pd_mode, 0); skb_queue_head_init(&sp->pd_lobby); sp->frag_interleave = 0; /* Create a per socket endpoint structure. Even if we * change the data structure relationships, this may still * be useful for storing pre-connect address information. 
*/ sp->ep = sctp_endpoint_new(sk, GFP_KERNEL); if (!sp->ep) return -ENOMEM; sp->hmac = NULL; sk->sk_destruct = sctp_destruct_sock; SCTP_DBG_OBJCNT_INC(sock); local_bh_disable(); percpu_counter_inc(&sctp_sockets_allocated); sock_prot_inuse_add(net, sk->sk_prot, 1); if (net->sctp.default_auto_asconf) { list_add_tail(&sp->auto_asconf_list, &net->sctp.auto_asconf_splist); sp->do_auto_asconf = 1; } else sp->do_auto_asconf = 0; local_bh_enable(); return 0; } /* Cleanup any SCTP per socket resources. */ static void sctp_destroy_sock(struct sock *sk) { struct sctp_sock *sp; pr_debug("%s: sk:%p\n", __func__, sk); /* Release our hold on the endpoint. */ sp = sctp_sk(sk); /* This could happen during socket init, thus we bail out * early, since the rest of the below is not setup either. */ if (sp->ep == NULL) return; if (sp->do_auto_asconf) { sp->do_auto_asconf = 0; list_del(&sp->auto_asconf_list); } sctp_endpoint_free(sp->ep); local_bh_disable(); percpu_counter_dec(&sctp_sockets_allocated); sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); local_bh_enable(); } /* Triggered when there are no references on the socket anymore */ static void sctp_destruct_sock(struct sock *sk) { struct sctp_sock *sp = sctp_sk(sk); /* Free up the HMAC transform. */ crypto_free_hash(sp->hmac); inet_sock_destruct(sk); } /* API 4.1.7 shutdown() - TCP Style Syntax * int shutdown(int socket, int how); * * sd - the socket descriptor of the association to be closed. * how - Specifies the type of shutdown. The values are * as follows: * SHUT_RD * Disables further receive operations. No SCTP * protocol action is taken. * SHUT_WR * Disables further send operations, and initiates * the SCTP shutdown sequence. * SHUT_RDWR * Disables further send and receive operations * and initiates the SCTP shutdown sequence. */ static void sctp_shutdown(struct sock *sk, int how) { struct net *net = sock_net(sk); struct sctp_endpoint *ep; struct sctp_association *asoc; if (!sctp_style(sk, TCP)) return; if (how & SEND_SHUTDOWN) { ep = sctp_sk(sk)->ep; if (!list_empty(&ep->asocs)) { asoc = list_entry(ep->asocs.next, struct sctp_association, asocs); sctp_primitive_SHUTDOWN(net, asoc, NULL); } } } /* 7.2.1 Association Status (SCTP_STATUS) * Applications can retrieve current status information about an * association, including association state, peer receiver window size, * number of unacked data chunks, and number of data chunks pending * receipt. This information is read-only. 
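 *
 * Illustrative userspace sketch (assumed socket "sd" and association id
 * "aid"); the caller passes the association id in and the kernel fills
 * in the remaining fields:
 *
 *   struct sctp_status st = { 0 };
 *   socklen_t len = sizeof(st);
 *   st.sstat_assoc_id = aid;
 *   getsockopt(sd, IPPROTO_SCTP, SCTP_STATUS, &st, &len);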
*/ static int sctp_getsockopt_sctp_status(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_status status; struct sctp_association *asoc = NULL; struct sctp_transport *transport; sctp_assoc_t associd; int retval = 0; if (len < sizeof(status)) { retval = -EINVAL; goto out; } len = sizeof(status); if (copy_from_user(&status, optval, len)) { retval = -EFAULT; goto out; } associd = status.sstat_assoc_id; asoc = sctp_id2assoc(sk, associd); if (!asoc) { retval = -EINVAL; goto out; } transport = asoc->peer.primary_path; status.sstat_assoc_id = sctp_assoc2id(asoc); status.sstat_state = sctp_assoc_to_state(asoc); status.sstat_rwnd = asoc->peer.rwnd; status.sstat_unackdata = asoc->unack_data; status.sstat_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map); status.sstat_instrms = asoc->c.sinit_max_instreams; status.sstat_outstrms = asoc->c.sinit_num_ostreams; status.sstat_fragmentation_point = asoc->frag_point; status.sstat_primary.spinfo_assoc_id = sctp_assoc2id(transport->asoc); memcpy(&status.sstat_primary.spinfo_address, &transport->ipaddr, transport->af_specific->sockaddr_len); /* Map ipv4 address into v4-mapped-on-v6 address. */ sctp_get_pf_specific(sk->sk_family)->addr_to_user(sctp_sk(sk), (union sctp_addr *)&status.sstat_primary.spinfo_address); status.sstat_primary.spinfo_state = transport->state; status.sstat_primary.spinfo_cwnd = transport->cwnd; status.sstat_primary.spinfo_srtt = transport->srtt; status.sstat_primary.spinfo_rto = jiffies_to_msecs(transport->rto); status.sstat_primary.spinfo_mtu = transport->pathmtu; if (status.sstat_primary.spinfo_state == SCTP_UNKNOWN) status.sstat_primary.spinfo_state = SCTP_ACTIVE; if (put_user(len, optlen)) { retval = -EFAULT; goto out; } pr_debug("%s: len:%d, state:%d, rwnd:%d, assoc_id:%d\n", __func__, len, status.sstat_state, status.sstat_rwnd, status.sstat_assoc_id); if (copy_to_user(optval, &status, len)) { retval = -EFAULT; goto out; } out: return retval; } /* 7.2.2 Peer Address Information (SCTP_GET_PEER_ADDR_INFO) * * Applications can retrieve information about a specific peer address * of an association, including its reachability state, congestion * window, and retransmission timer values. This information is * read-only. */ static int sctp_getsockopt_peer_addr_info(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_paddrinfo pinfo; struct sctp_transport *transport; int retval = 0; if (len < sizeof(pinfo)) { retval = -EINVAL; goto out; } len = sizeof(pinfo); if (copy_from_user(&pinfo, optval, len)) { retval = -EFAULT; goto out; } transport = sctp_addr_id2transport(sk, &pinfo.spinfo_address, pinfo.spinfo_assoc_id); if (!transport) return -EINVAL; pinfo.spinfo_assoc_id = sctp_assoc2id(transport->asoc); pinfo.spinfo_state = transport->state; pinfo.spinfo_cwnd = transport->cwnd; pinfo.spinfo_srtt = transport->srtt; pinfo.spinfo_rto = jiffies_to_msecs(transport->rto); pinfo.spinfo_mtu = transport->pathmtu; if (pinfo.spinfo_state == SCTP_UNKNOWN) pinfo.spinfo_state = SCTP_ACTIVE; if (put_user(len, optlen)) { retval = -EFAULT; goto out; } if (copy_to_user(optval, &pinfo, len)) { retval = -EFAULT; goto out; } out: return retval; } /* 7.1.12 Enable/Disable message fragmentation (SCTP_DISABLE_FRAGMENTS) * * This option is a on/off flag. If enabled no SCTP message * fragmentation will be performed. Instead if a message being sent * exceeds the current PMTU size, the message will NOT be sent and * instead a error will be indicated to the user. 
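 *
 * Illustrative userspace sketch (assumed socket "sd"); val reads back as
 * 1 when message fragmentation is disabled and 0 otherwise:
 *
 *   int val = 0;
 *   socklen_t len = sizeof(val);
 *   getsockopt(sd, IPPROTO_SCTP, SCTP_DISABLE_FRAGMENTS, &val, &len);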
*/ static int sctp_getsockopt_disable_fragments(struct sock *sk, int len, char __user *optval, int __user *optlen) { int val; if (len < sizeof(int)) return -EINVAL; len = sizeof(int); val = (sctp_sk(sk)->disable_fragments == 1); if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &val, len)) return -EFAULT; return 0; } /* 7.1.15 Set notification and ancillary events (SCTP_EVENTS) * * This socket option is used to specify various notifications and * ancillary data the user wishes to receive. */ static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval, int __user *optlen) { if (len <= 0) return -EINVAL; if (len > sizeof(struct sctp_event_subscribe)) len = sizeof(struct sctp_event_subscribe); if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &sctp_sk(sk)->subscribe, len)) return -EFAULT; return 0; } /* 7.1.8 Automatic Close of associations (SCTP_AUTOCLOSE) * * This socket option is applicable to the UDP-style socket only. When * set it will cause associations that are idle for more than the * specified number of seconds to automatically close. An association * being idle is defined an association that has NOT sent or received * user data. The special value of '0' indicates that no automatic * close of any associations should be performed. The option expects an * integer defining the number of seconds of idle time before an * association is closed. */ static int sctp_getsockopt_autoclose(struct sock *sk, int len, char __user *optval, int __user *optlen) { /* Applicable to UDP-style socket only */ if (sctp_style(sk, TCP)) return -EOPNOTSUPP; if (len < sizeof(int)) return -EINVAL; len = sizeof(int); if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &sctp_sk(sk)->autoclose, sizeof(int))) return -EFAULT; return 0; } /* Helper routine to branch off an association to a new socket. */ int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp) { struct sctp_association *asoc = sctp_id2assoc(sk, id); struct sctp_sock *sp = sctp_sk(sk); struct socket *sock; int err = 0; if (!asoc) return -EINVAL; /* An association cannot be branched off from an already peeled-off * socket, nor is this supported for tcp style sockets. */ if (!sctp_style(sk, UDP)) return -EINVAL; /* Create a new socket. */ err = sock_create(sk->sk_family, SOCK_SEQPACKET, IPPROTO_SCTP, &sock); if (err < 0) return err; sctp_copy_sock(sock->sk, sk, asoc); /* Make peeled-off sockets more like 1-1 accepted sockets. * Set the daddr and initialize id to something more random */ sp->pf->to_sk_daddr(&asoc->peer.primary_addr, sk); /* Populate the fields of the newsk from the oldsk and migrate the * asoc to the newsk. */ sctp_sock_migrate(sk, sock->sk, asoc, SCTP_SOCKET_UDP_HIGH_BANDWIDTH); *sockp = sock; return err; } EXPORT_SYMBOL(sctp_do_peeloff); static int sctp_getsockopt_peeloff(struct sock *sk, int len, char __user *optval, int __user *optlen) { sctp_peeloff_arg_t peeloff; struct socket *newsock; struct file *newfile; int retval = 0; if (len < sizeof(sctp_peeloff_arg_t)) return -EINVAL; len = sizeof(sctp_peeloff_arg_t); if (copy_from_user(&peeloff, optval, len)) return -EFAULT; retval = sctp_do_peeloff(sk, peeloff.associd, &newsock); if (retval < 0) goto out; /* Map the socket to an unused fd that can be returned to the user. 
*/ retval = get_unused_fd_flags(0); if (retval < 0) { sock_release(newsock); goto out; } newfile = sock_alloc_file(newsock, 0, NULL); if (unlikely(IS_ERR(newfile))) { put_unused_fd(retval); sock_release(newsock); return PTR_ERR(newfile); } pr_debug("%s: sk:%p, newsk:%p, sd:%d\n", __func__, sk, newsock->sk, retval); /* Return the fd mapped to the new socket. */ if (put_user(len, optlen)) { fput(newfile); put_unused_fd(retval); return -EFAULT; } peeloff.sd = retval; if (copy_to_user(optval, &peeloff, len)) { fput(newfile); put_unused_fd(retval); return -EFAULT; } fd_install(retval, newfile); out: return retval; } /* 7.1.13 Peer Address Parameters (SCTP_PEER_ADDR_PARAMS) * * Applications can enable or disable heartbeats for any peer address of * an association, modify an address's heartbeat interval, force a * heartbeat to be sent immediately, and adjust the address's maximum * number of retransmissions sent before an address is considered * unreachable. The following structure is used to access and modify an * address's parameters: * * struct sctp_paddrparams { * sctp_assoc_t spp_assoc_id; * struct sockaddr_storage spp_address; * uint32_t spp_hbinterval; * uint16_t spp_pathmaxrxt; * uint32_t spp_pathmtu; * uint32_t spp_sackdelay; * uint32_t spp_flags; * }; * * spp_assoc_id - (one-to-many style socket) This is filled in the * application, and identifies the association for * this query. * spp_address - This specifies which address is of interest. * spp_hbinterval - This contains the value of the heartbeat interval, * in milliseconds. If a value of zero * is present in this field then no changes are to * be made to this parameter. * spp_pathmaxrxt - This contains the maximum number of * retransmissions before this address shall be * considered unreachable. If a value of zero * is present in this field then no changes are to * be made to this parameter. * spp_pathmtu - When Path MTU discovery is disabled the value * specified here will be the "fixed" path mtu. * Note that if the spp_address field is empty * then all associations on this address will * have this fixed path mtu set upon them. * * spp_sackdelay - When delayed sack is enabled, this value specifies * the number of milliseconds that sacks will be delayed * for. This value will apply to all addresses of an * association if the spp_address field is empty. Note * also, that if delayed sack is enabled and this * value is set to 0, no change is made to the last * recorded delayed sack timer value. * * spp_flags - These flags are used to control various features * on an association. The flag field may contain * zero or more of the following options. * * SPP_HB_ENABLE - Enable heartbeats on the * specified address. Note that if the address * field is empty all addresses for the association * have heartbeats enabled upon them. * * SPP_HB_DISABLE - Disable heartbeats on the * speicifed address. Note that if the address * field is empty all addresses for the association * will have their heartbeats disabled. Note also * that SPP_HB_ENABLE and SPP_HB_DISABLE are * mutually exclusive, only one of these two should * be specified. Enabling both fields will have * undetermined results. * * SPP_HB_DEMAND - Request a user initiated heartbeat * to be made immediately. * * SPP_PMTUD_ENABLE - This field will enable PMTU * discovery upon the specified address. Note that * if the address feild is empty then all addresses * on the association are effected. * * SPP_PMTUD_DISABLE - This field will disable PMTU * discovery upon the specified address. 
Note that * if the address feild is empty then all addresses * on the association are effected. Not also that * SPP_PMTUD_ENABLE and SPP_PMTUD_DISABLE are mutually * exclusive. Enabling both will have undetermined * results. * * SPP_SACKDELAY_ENABLE - Setting this flag turns * on delayed sack. The time specified in spp_sackdelay * is used to specify the sack delay for this address. Note * that if spp_address is empty then all addresses will * enable delayed sack and take on the sack delay * value specified in spp_sackdelay. * SPP_SACKDELAY_DISABLE - Setting this flag turns * off delayed sack. If the spp_address field is blank then * delayed sack is disabled for the entire association. Note * also that this field is mutually exclusive to * SPP_SACKDELAY_ENABLE, setting both will have undefined * results. */ static int sctp_getsockopt_peer_addr_params(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_paddrparams params; struct sctp_transport *trans = NULL; struct sctp_association *asoc = NULL; struct sctp_sock *sp = sctp_sk(sk); if (len < sizeof(struct sctp_paddrparams)) return -EINVAL; len = sizeof(struct sctp_paddrparams); if (copy_from_user(&params, optval, len)) return -EFAULT; /* If an address other than INADDR_ANY is specified, and * no transport is found, then the request is invalid. */ if (!sctp_is_any(sk, (union sctp_addr *)&params.spp_address)) { trans = sctp_addr_id2transport(sk, &params.spp_address, params.spp_assoc_id); if (!trans) { pr_debug("%s: failed no transport\n", __func__); return -EINVAL; } } /* Get association, if assoc_id != 0 and the socket is a one * to many style socket, and an association was not found, then * the id was invalid. */ asoc = sctp_id2assoc(sk, params.spp_assoc_id); if (!asoc && params.spp_assoc_id && sctp_style(sk, UDP)) { pr_debug("%s: failed no association\n", __func__); return -EINVAL; } if (trans) { /* Fetch transport values. */ params.spp_hbinterval = jiffies_to_msecs(trans->hbinterval); params.spp_pathmtu = trans->pathmtu; params.spp_pathmaxrxt = trans->pathmaxrxt; params.spp_sackdelay = jiffies_to_msecs(trans->sackdelay); /*draft-11 doesn't say what to return in spp_flags*/ params.spp_flags = trans->param_flags; } else if (asoc) { /* Fetch association values. */ params.spp_hbinterval = jiffies_to_msecs(asoc->hbinterval); params.spp_pathmtu = asoc->pathmtu; params.spp_pathmaxrxt = asoc->pathmaxrxt; params.spp_sackdelay = jiffies_to_msecs(asoc->sackdelay); /*draft-11 doesn't say what to return in spp_flags*/ params.spp_flags = asoc->param_flags; } else { /* Fetch socket values. */ params.spp_hbinterval = sp->hbinterval; params.spp_pathmtu = sp->pathmtu; params.spp_sackdelay = sp->sackdelay; params.spp_pathmaxrxt = sp->pathmaxrxt; /*draft-11 doesn't say what to return in spp_flags*/ params.spp_flags = sp->param_flags; } if (copy_to_user(optval, &params, len)) return -EFAULT; if (put_user(len, optlen)) return -EFAULT; return 0; } /* * 7.1.23. Get or set delayed ack timer (SCTP_DELAYED_SACK) * * This option will effect the way delayed acks are performed. This * option allows you to get or set the delayed ack time, in * milliseconds. It also allows changing the delayed ack frequency. * Changing the frequency to 1 disables the delayed sack algorithm. If * the assoc_id is 0, then this sets or gets the endpoints default * values. If the assoc_id field is non-zero, then the set or get * effects the specified association for the one to many model (the * assoc_id field is ignored by the one to one model). 
Note that if * sack_delay or sack_freq are 0 when setting this option, then the * current values will remain unchanged. * * struct sctp_sack_info { * sctp_assoc_t sack_assoc_id; * uint32_t sack_delay; * uint32_t sack_freq; * }; * * sack_assoc_id - This parameter, indicates which association the user * is performing an action upon. Note that if this field's value is * zero then the endpoints default value is changed (effecting future * associations only). * * sack_delay - This parameter contains the number of milliseconds that * the user is requesting the delayed ACK timer be set to. Note that * this value is defined in the standard to be between 200 and 500 * milliseconds. * * sack_freq - This parameter contains the number of packets that must * be received before a sack is sent without waiting for the delay * timer to expire. The default value for this is 2, setting this * value to 1 will disable the delayed sack algorithm. */ static int sctp_getsockopt_delayed_ack(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_sack_info params; struct sctp_association *asoc = NULL; struct sctp_sock *sp = sctp_sk(sk); if (len >= sizeof(struct sctp_sack_info)) { len = sizeof(struct sctp_sack_info); if (copy_from_user(&params, optval, len)) return -EFAULT; } else if (len == sizeof(struct sctp_assoc_value)) { pr_warn_ratelimited(DEPRECATED "%s (pid %d) " "Use of struct sctp_assoc_value in delayed_ack socket option.\n" "Use struct sctp_sack_info instead\n", current->comm, task_pid_nr(current)); if (copy_from_user(&params, optval, len)) return -EFAULT; } else return -EINVAL; /* Get association, if sack_assoc_id != 0 and the socket is a one * to many style socket, and an association was not found, then * the id was invalid. */ asoc = sctp_id2assoc(sk, params.sack_assoc_id); if (!asoc && params.sack_assoc_id && sctp_style(sk, UDP)) return -EINVAL; if (asoc) { /* Fetch association values. */ if (asoc->param_flags & SPP_SACKDELAY_ENABLE) { params.sack_delay = jiffies_to_msecs( asoc->sackdelay); params.sack_freq = asoc->sackfreq; } else { params.sack_delay = 0; params.sack_freq = 1; } } else { /* Fetch socket values. */ if (sp->param_flags & SPP_SACKDELAY_ENABLE) { params.sack_delay = sp->sackdelay; params.sack_freq = sp->sackfreq; } else { params.sack_delay = 0; params.sack_freq = 1; } } if (copy_to_user(optval, &params, len)) return -EFAULT; if (put_user(len, optlen)) return -EFAULT; return 0; } /* 7.1.3 Initialization Parameters (SCTP_INITMSG) * * Applications can specify protocol parameters for the default association * initialization. The option name argument to setsockopt() and getsockopt() * is SCTP_INITMSG. * * Setting initialization parameters is effective only on an unconnected * socket (for UDP-style sockets only future associations are effected * by the change). With TCP-style sockets, this option is inherited by * sockets derived from a listener socket. 
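 *
 * Illustrative read-back sketch from userspace (assumed socket "sd"):
 *
 *   struct sctp_initmsg init;
 *   socklen_t len = sizeof(init);
 *   getsockopt(sd, IPPROTO_SCTP, SCTP_INITMSG, &init, &len);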
*/ static int sctp_getsockopt_initmsg(struct sock *sk, int len, char __user *optval, int __user *optlen) { if (len < sizeof(struct sctp_initmsg)) return -EINVAL; len = sizeof(struct sctp_initmsg); if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &sctp_sk(sk)->initmsg, len)) return -EFAULT; return 0; } static int sctp_getsockopt_peer_addrs(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_association *asoc; int cnt = 0; struct sctp_getaddrs getaddrs; struct sctp_transport *from; void __user *to; union sctp_addr temp; struct sctp_sock *sp = sctp_sk(sk); int addrlen; size_t space_left; int bytes_copied; if (len < sizeof(struct sctp_getaddrs)) return -EINVAL; if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs))) return -EFAULT; /* For UDP-style sockets, id specifies the association to query. */ asoc = sctp_id2assoc(sk, getaddrs.assoc_id); if (!asoc) return -EINVAL; to = optval + offsetof(struct sctp_getaddrs, addrs); space_left = len - offsetof(struct sctp_getaddrs, addrs); list_for_each_entry(from, &asoc->peer.transport_addr_list, transports) { memcpy(&temp, &from->ipaddr, sizeof(temp)); addrlen = sctp_get_pf_specific(sk->sk_family) ->addr_to_user(sp, &temp); if (space_left < addrlen) return -ENOMEM; if (copy_to_user(to, &temp, addrlen)) return -EFAULT; to += addrlen; cnt++; space_left -= addrlen; } if (put_user(cnt, &((struct sctp_getaddrs __user *)optval)->addr_num)) return -EFAULT; bytes_copied = ((char __user *)to) - optval; if (put_user(bytes_copied, optlen)) return -EFAULT; return 0; } static int sctp_copy_laddrs(struct sock *sk, __u16 port, void *to, size_t space_left, int *bytes_copied) { struct sctp_sockaddr_entry *addr; union sctp_addr temp; int cnt = 0; int addrlen; struct net *net = sock_net(sk); rcu_read_lock(); list_for_each_entry_rcu(addr, &net->sctp.local_addr_list, list) { if (!addr->valid) continue; if ((PF_INET == sk->sk_family) && (AF_INET6 == addr->a.sa.sa_family)) continue; if ((PF_INET6 == sk->sk_family) && inet_v6_ipv6only(sk) && (AF_INET == addr->a.sa.sa_family)) continue; memcpy(&temp, &addr->a, sizeof(temp)); if (!temp.v4.sin_port) temp.v4.sin_port = htons(port); addrlen = sctp_get_pf_specific(sk->sk_family) ->addr_to_user(sctp_sk(sk), &temp); if (space_left < addrlen) { cnt = -ENOMEM; break; } memcpy(to, &temp, addrlen); to += addrlen; cnt++; space_left -= addrlen; *bytes_copied += addrlen; } rcu_read_unlock(); return cnt; } static int sctp_getsockopt_local_addrs(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_bind_addr *bp; struct sctp_association *asoc; int cnt = 0; struct sctp_getaddrs getaddrs; struct sctp_sockaddr_entry *addr; void __user *to; union sctp_addr temp; struct sctp_sock *sp = sctp_sk(sk); int addrlen; int err = 0; size_t space_left; int bytes_copied = 0; void *addrs; void *buf; if (len < sizeof(struct sctp_getaddrs)) return -EINVAL; if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs))) return -EFAULT; /* * For UDP-style sockets, id specifies the association to query. * If the id field is set to the value '0' then the locally bound * addresses are returned without regard to any particular * association. 
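 *
 * Illustrative caller-side sketch (userspace, not part of this file;
 * "fd" is a placeholder and error handling is omitted): the option value
 * is a struct sctp_getaddrs header followed by room for the packed
 * sockaddrs, and the number of bytes actually written is reported back
 * through optlen:
 *
 *	char buf[sizeof(struct sctp_getaddrs) +
 *		 8 * sizeof(struct sockaddr_storage)];
 *	struct sctp_getaddrs *ga = (struct sctp_getaddrs *)buf;
 *	socklen_t optlen = sizeof(buf);
 *
 *	ga->assoc_id = 0;            (0 means an endpoint-wide query)
 *	if (getsockopt(fd, IPPROTO_SCTP, SCTP_GET_LOCAL_ADDRS,
 *		       ga, &optlen) == 0)
 *		walk ga->addr_num sockaddrs packed right after the header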
*/ if (0 == getaddrs.assoc_id) { bp = &sctp_sk(sk)->ep->base.bind_addr; } else { asoc = sctp_id2assoc(sk, getaddrs.assoc_id); if (!asoc) return -EINVAL; bp = &asoc->base.bind_addr; } to = optval + offsetof(struct sctp_getaddrs, addrs); space_left = len - offsetof(struct sctp_getaddrs, addrs); addrs = kmalloc(space_left, GFP_KERNEL); if (!addrs) return -ENOMEM; /* If the endpoint is bound to 0.0.0.0 or ::0, get the valid * addresses from the global local address list. */ if (sctp_list_single_entry(&bp->address_list)) { addr = list_entry(bp->address_list.next, struct sctp_sockaddr_entry, list); if (sctp_is_any(sk, &addr->a)) { cnt = sctp_copy_laddrs(sk, bp->port, addrs, space_left, &bytes_copied); if (cnt < 0) { err = cnt; goto out; } goto copy_getaddrs; } } buf = addrs; /* Protection on the bound address list is not needed since * in the socket option context we hold a socket lock and * thus the bound address list can't change. */ list_for_each_entry(addr, &bp->address_list, list) { memcpy(&temp, &addr->a, sizeof(temp)); addrlen = sctp_get_pf_specific(sk->sk_family) ->addr_to_user(sp, &temp); if (space_left < addrlen) { err = -ENOMEM; /*fixme: right error?*/ goto out; } memcpy(buf, &temp, addrlen); buf += addrlen; bytes_copied += addrlen; cnt++; space_left -= addrlen; } copy_getaddrs: if (copy_to_user(to, addrs, bytes_copied)) { err = -EFAULT; goto out; } if (put_user(cnt, &((struct sctp_getaddrs __user *)optval)->addr_num)) { err = -EFAULT; goto out; } if (put_user(bytes_copied, optlen)) err = -EFAULT; out: kfree(addrs); return err; } /* 7.1.10 Set Primary Address (SCTP_PRIMARY_ADDR) * * Requests that the local SCTP stack use the enclosed peer address as * the association primary. The enclosed address must be one of the * association peer's addresses. */ static int sctp_getsockopt_primary_addr(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_prim prim; struct sctp_association *asoc; struct sctp_sock *sp = sctp_sk(sk); if (len < sizeof(struct sctp_prim)) return -EINVAL; len = sizeof(struct sctp_prim); if (copy_from_user(&prim, optval, len)) return -EFAULT; asoc = sctp_id2assoc(sk, prim.ssp_assoc_id); if (!asoc) return -EINVAL; if (!asoc->peer.primary_path) return -ENOTCONN; memcpy(&prim.ssp_addr, &asoc->peer.primary_path->ipaddr, asoc->peer.primary_path->af_specific->sockaddr_len); sctp_get_pf_specific(sk->sk_family)->addr_to_user(sp, (union sctp_addr *)&prim.ssp_addr); if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &prim, len)) return -EFAULT; return 0; } /* * 7.1.11 Set Adaptation Layer Indicator (SCTP_ADAPTATION_LAYER) * * Requests that the local endpoint set the specified Adaptation Layer * Indication parameter for all future INIT and INIT-ACK exchanges. */ static int sctp_getsockopt_adaptation_layer(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_setadaptation adaptation; if (len < sizeof(struct sctp_setadaptation)) return -EINVAL; len = sizeof(struct sctp_setadaptation); adaptation.ssb_adaptation_ind = sctp_sk(sk)->adaptation_ind; if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &adaptation, len)) return -EFAULT; return 0; } /* * * 7.1.14 Set default send parameters (SCTP_DEFAULT_SEND_PARAM) * * Applications that wish to use the sendto() system call may wish to * specify a default set of parameters that would normally be supplied * through the inclusion of ancillary data. This socket option allows * such an application to set the default sctp_sndrcvinfo structure. 
* The application that wishes to use this socket option simply passes * in to this call the sctp_sndrcvinfo structure defined in Section * 5.2.2) The input parameters accepted by this call include * sinfo_stream, sinfo_flags, sinfo_ppid, sinfo_context, * sinfo_timetolive. The user must provide the sinfo_assoc_id field in * to this call if the caller is using the UDP model. * * For getsockopt, it get the default sctp_sndrcvinfo structure. */ static int sctp_getsockopt_default_send_param(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_sock *sp = sctp_sk(sk); struct sctp_association *asoc; struct sctp_sndrcvinfo info; if (len < sizeof(info)) return -EINVAL; len = sizeof(info); if (copy_from_user(&info, optval, len)) return -EFAULT; asoc = sctp_id2assoc(sk, info.sinfo_assoc_id); if (!asoc && info.sinfo_assoc_id && sctp_style(sk, UDP)) return -EINVAL; if (asoc) { info.sinfo_stream = asoc->default_stream; info.sinfo_flags = asoc->default_flags; info.sinfo_ppid = asoc->default_ppid; info.sinfo_context = asoc->default_context; info.sinfo_timetolive = asoc->default_timetolive; } else { info.sinfo_stream = sp->default_stream; info.sinfo_flags = sp->default_flags; info.sinfo_ppid = sp->default_ppid; info.sinfo_context = sp->default_context; info.sinfo_timetolive = sp->default_timetolive; } if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &info, len)) return -EFAULT; return 0; } /* RFC6458, Section 8.1.31. Set/get Default Send Parameters * (SCTP_DEFAULT_SNDINFO) */ static int sctp_getsockopt_default_sndinfo(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_sock *sp = sctp_sk(sk); struct sctp_association *asoc; struct sctp_sndinfo info; if (len < sizeof(info)) return -EINVAL; len = sizeof(info); if (copy_from_user(&info, optval, len)) return -EFAULT; asoc = sctp_id2assoc(sk, info.snd_assoc_id); if (!asoc && info.snd_assoc_id && sctp_style(sk, UDP)) return -EINVAL; if (asoc) { info.snd_sid = asoc->default_stream; info.snd_flags = asoc->default_flags; info.snd_ppid = asoc->default_ppid; info.snd_context = asoc->default_context; } else { info.snd_sid = sp->default_stream; info.snd_flags = sp->default_flags; info.snd_ppid = sp->default_ppid; info.snd_context = sp->default_context; } if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &info, len)) return -EFAULT; return 0; } /* * * 7.1.5 SCTP_NODELAY * * Turn on/off any Nagle-like algorithm. This means that packets are * generally sent as soon as possible and no unnecessary delays are * introduced, at the cost of more packets in the network. Expects an * integer boolean flag. */ static int sctp_getsockopt_nodelay(struct sock *sk, int len, char __user *optval, int __user *optlen) { int val; if (len < sizeof(int)) return -EINVAL; len = sizeof(int); val = (sctp_sk(sk)->nodelay == 1); if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &val, len)) return -EFAULT; return 0; } /* * * 7.1.1 SCTP_RTOINFO * * The protocol parameters used to initialize and bound retransmission * timeout (RTO) are tunable. sctp_rtoinfo structure is used to access * and modify these parameters. * All parameters are time values, in milliseconds. A value of 0, when * modifying the parameters, indicates that the current value should not * be changed. 
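 *
 * A minimal userspace sketch (illustrative only, not part of this file;
 * "fd" is a placeholder, error handling omitted). It reads the endpoint
 * defaults and then raises only srto_max, leaving the other values
 * untouched by passing 0 for them:
 *
 *	struct sctp_rtoinfo rto = { .srto_assoc_id = 0 };
 *	socklen_t len = sizeof(rto);
 *
 *	getsockopt(fd, IPPROTO_SCTP, SCTP_RTOINFO, &rto, &len);
 *	rto.srto_initial = 0;
 *	rto.srto_min     = 0;
 *	rto.srto_max     = 10000;    (10 seconds, in milliseconds)
 *	setsockopt(fd, IPPROTO_SCTP, SCTP_RTOINFO, &rto, sizeof(rto));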
* */ static int sctp_getsockopt_rtoinfo(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_rtoinfo rtoinfo; struct sctp_association *asoc; if (len < sizeof (struct sctp_rtoinfo)) return -EINVAL; len = sizeof(struct sctp_rtoinfo); if (copy_from_user(&rtoinfo, optval, len)) return -EFAULT; asoc = sctp_id2assoc(sk, rtoinfo.srto_assoc_id); if (!asoc && rtoinfo.srto_assoc_id && sctp_style(sk, UDP)) return -EINVAL; /* Values corresponding to the specific association. */ if (asoc) { rtoinfo.srto_initial = jiffies_to_msecs(asoc->rto_initial); rtoinfo.srto_max = jiffies_to_msecs(asoc->rto_max); rtoinfo.srto_min = jiffies_to_msecs(asoc->rto_min); } else { /* Values corresponding to the endpoint. */ struct sctp_sock *sp = sctp_sk(sk); rtoinfo.srto_initial = sp->rtoinfo.srto_initial; rtoinfo.srto_max = sp->rtoinfo.srto_max; rtoinfo.srto_min = sp->rtoinfo.srto_min; } if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &rtoinfo, len)) return -EFAULT; return 0; } /* * * 7.1.2 SCTP_ASSOCINFO * * This option is used to tune the maximum retransmission attempts * of the association. * Returns an error if the new association retransmission value is * greater than the sum of the retransmission value of the peer. * See [SCTP] for more information. * */ static int sctp_getsockopt_associnfo(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_assocparams assocparams; struct sctp_association *asoc; struct list_head *pos; int cnt = 0; if (len < sizeof (struct sctp_assocparams)) return -EINVAL; len = sizeof(struct sctp_assocparams); if (copy_from_user(&assocparams, optval, len)) return -EFAULT; asoc = sctp_id2assoc(sk, assocparams.sasoc_assoc_id); if (!asoc && assocparams.sasoc_assoc_id && sctp_style(sk, UDP)) return -EINVAL; /* Values correspoinding to the specific association */ if (asoc) { assocparams.sasoc_asocmaxrxt = asoc->max_retrans; assocparams.sasoc_peer_rwnd = asoc->peer.rwnd; assocparams.sasoc_local_rwnd = asoc->a_rwnd; assocparams.sasoc_cookie_life = ktime_to_ms(asoc->cookie_life); list_for_each(pos, &asoc->peer.transport_addr_list) { cnt++; } assocparams.sasoc_number_peer_destinations = cnt; } else { /* Values corresponding to the endpoint */ struct sctp_sock *sp = sctp_sk(sk); assocparams.sasoc_asocmaxrxt = sp->assocparams.sasoc_asocmaxrxt; assocparams.sasoc_peer_rwnd = sp->assocparams.sasoc_peer_rwnd; assocparams.sasoc_local_rwnd = sp->assocparams.sasoc_local_rwnd; assocparams.sasoc_cookie_life = sp->assocparams.sasoc_cookie_life; assocparams.sasoc_number_peer_destinations = sp->assocparams. sasoc_number_peer_destinations; } if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &assocparams, len)) return -EFAULT; return 0; } /* * 7.1.16 Set/clear IPv4 mapped addresses (SCTP_I_WANT_MAPPED_V4_ADDR) * * This socket option is a boolean flag which turns on or off mapped V4 * addresses. If this option is turned on and the socket is type * PF_INET6, then IPv4 addresses will be mapped to V6 representation. * If this option is turned off, then no mapping will be done of V4 * addresses and a user will receive both PF_INET6 and PF_INET type * addresses on the socket. */ static int sctp_getsockopt_mappedv4(struct sock *sk, int len, char __user *optval, int __user *optlen) { int val; struct sctp_sock *sp = sctp_sk(sk); if (len < sizeof(int)) return -EINVAL; len = sizeof(int); val = sp->v4mapped; if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &val, len)) return -EFAULT; return 0; } /* * 7.1.29. 
Set or Get the default context (SCTP_CONTEXT) * (chapter and verse is quoted at sctp_setsockopt_context()) */ static int sctp_getsockopt_context(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_assoc_value params; struct sctp_sock *sp; struct sctp_association *asoc; if (len < sizeof(struct sctp_assoc_value)) return -EINVAL; len = sizeof(struct sctp_assoc_value); if (copy_from_user(&params, optval, len)) return -EFAULT; sp = sctp_sk(sk); if (params.assoc_id != 0) { asoc = sctp_id2assoc(sk, params.assoc_id); if (!asoc) return -EINVAL; params.assoc_value = asoc->default_rcv_context; } else { params.assoc_value = sp->default_rcv_context; } if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &params, len)) return -EFAULT; return 0; } /* * 8.1.16. Get or Set the Maximum Fragmentation Size (SCTP_MAXSEG) * This option will get or set the maximum size to put in any outgoing * SCTP DATA chunk. If a message is larger than this size it will be * fragmented by SCTP into the specified size. Note that the underlying * SCTP implementation may fragment into smaller sized chunks when the * PMTU of the underlying association is smaller than the value set by * the user. The default value for this option is '0' which indicates * the user is NOT limiting fragmentation and only the PMTU will effect * SCTP's choice of DATA chunk size. Note also that values set larger * than the maximum size of an IP datagram will effectively let SCTP * control fragmentation (i.e. the same as setting this option to 0). * * The following structure is used to access and modify this parameter: * * struct sctp_assoc_value { * sctp_assoc_t assoc_id; * uint32_t assoc_value; * }; * * assoc_id: This parameter is ignored for one-to-one style sockets. * For one-to-many style sockets this parameter indicates which * association the user is performing an action upon. Note that if * this field's value is zero then the endpoints default value is * changed (effecting future associations only). * assoc_value: This parameter specifies the maximum size in bytes. */ static int sctp_getsockopt_maxseg(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_assoc_value params; struct sctp_association *asoc; if (len == sizeof(int)) { pr_warn_ratelimited(DEPRECATED "%s (pid %d) " "Use of int in maxseg socket option.\n" "Use struct sctp_assoc_value instead\n", current->comm, task_pid_nr(current)); params.assoc_id = 0; } else if (len >= sizeof(struct sctp_assoc_value)) { len = sizeof(struct sctp_assoc_value); if (copy_from_user(&params, optval, sizeof(params))) return -EFAULT; } else return -EINVAL; asoc = sctp_id2assoc(sk, params.assoc_id); if (!asoc && params.assoc_id && sctp_style(sk, UDP)) return -EINVAL; if (asoc) params.assoc_value = asoc->frag_point; else params.assoc_value = sctp_sk(sk)->user_frag; if (put_user(len, optlen)) return -EFAULT; if (len == sizeof(int)) { if (copy_to_user(optval, &params.assoc_value, len)) return -EFAULT; } else { if (copy_to_user(optval, &params, len)) return -EFAULT; } return 0; } /* * 7.1.24. 
Get or set fragmented interleave (SCTP_FRAGMENT_INTERLEAVE) * (chapter and verse is quoted at sctp_setsockopt_fragment_interleave()) */ static int sctp_getsockopt_fragment_interleave(struct sock *sk, int len, char __user *optval, int __user *optlen) { int val; if (len < sizeof(int)) return -EINVAL; len = sizeof(int); val = sctp_sk(sk)->frag_interleave; if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &val, len)) return -EFAULT; return 0; } /* * 7.1.25. Set or Get the sctp partial delivery point * (chapter and verse is quoted at sctp_setsockopt_partial_delivery_point()) */ static int sctp_getsockopt_partial_delivery_point(struct sock *sk, int len, char __user *optval, int __user *optlen) { u32 val; if (len < sizeof(u32)) return -EINVAL; len = sizeof(u32); val = sctp_sk(sk)->pd_point; if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &val, len)) return -EFAULT; return 0; } /* * 7.1.28. Set or Get the maximum burst (SCTP_MAX_BURST) * (chapter and verse is quoted at sctp_setsockopt_maxburst()) */ static int sctp_getsockopt_maxburst(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_assoc_value params; struct sctp_sock *sp; struct sctp_association *asoc; if (len == sizeof(int)) { pr_warn_ratelimited(DEPRECATED "%s (pid %d) " "Use of int in max_burst socket option.\n" "Use struct sctp_assoc_value instead\n", current->comm, task_pid_nr(current)); params.assoc_id = 0; } else if (len >= sizeof(struct sctp_assoc_value)) { len = sizeof(struct sctp_assoc_value); if (copy_from_user(&params, optval, len)) return -EFAULT; } else return -EINVAL; sp = sctp_sk(sk); if (params.assoc_id != 0) { asoc = sctp_id2assoc(sk, params.assoc_id); if (!asoc) return -EINVAL; params.assoc_value = asoc->max_burst; } else params.assoc_value = sp->max_burst; if (len == sizeof(int)) { if (copy_to_user(optval, &params.assoc_value, len)) return -EFAULT; } else { if (copy_to_user(optval, &params, len)) return -EFAULT; } return 0; } static int sctp_getsockopt_hmac_ident(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_endpoint *ep = sctp_sk(sk)->ep; struct sctp_hmacalgo __user *p = (void __user *)optval; struct sctp_hmac_algo_param *hmacs; __u16 data_len = 0; u32 num_idents; if (!ep->auth_enable) return -EACCES; hmacs = ep->auth_hmacs_list; data_len = ntohs(hmacs->param_hdr.length) - sizeof(sctp_paramhdr_t); if (len < sizeof(struct sctp_hmacalgo) + data_len) return -EINVAL; len = sizeof(struct sctp_hmacalgo) + data_len; num_idents = data_len / sizeof(u16); if (put_user(len, optlen)) return -EFAULT; if (put_user(num_idents, &p->shmac_num_idents)) return -EFAULT; if (copy_to_user(p->shmac_idents, hmacs->hmac_ids, data_len)) return -EFAULT; return 0; } static int sctp_getsockopt_active_key(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_endpoint *ep = sctp_sk(sk)->ep; struct sctp_authkeyid val; struct sctp_association *asoc; if (!ep->auth_enable) return -EACCES; if (len < sizeof(struct sctp_authkeyid)) return -EINVAL; if (copy_from_user(&val, optval, sizeof(struct sctp_authkeyid))) return -EFAULT; asoc = sctp_id2assoc(sk, val.scact_assoc_id); if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP)) return -EINVAL; if (asoc) val.scact_keynumber = asoc->active_key_id; else val.scact_keynumber = ep->active_key_id; len = sizeof(struct sctp_authkeyid); if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &val, len)) return -EFAULT; return 0; } static int sctp_getsockopt_peer_auth_chunks(struct 
sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_endpoint *ep = sctp_sk(sk)->ep; struct sctp_authchunks __user *p = (void __user *)optval; struct sctp_authchunks val; struct sctp_association *asoc; struct sctp_chunks_param *ch; u32 num_chunks = 0; char __user *to; if (!ep->auth_enable) return -EACCES; if (len < sizeof(struct sctp_authchunks)) return -EINVAL; if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks))) return -EFAULT; to = p->gauth_chunks; asoc = sctp_id2assoc(sk, val.gauth_assoc_id); if (!asoc) return -EINVAL; ch = asoc->peer.peer_chunks; if (!ch) goto num; /* See if the user provided enough room for all the data */ num_chunks = ntohs(ch->param_hdr.length) - sizeof(sctp_paramhdr_t); if (len < num_chunks) return -EINVAL; if (copy_to_user(to, ch->chunks, num_chunks)) return -EFAULT; num: len = sizeof(struct sctp_authchunks) + num_chunks; if (put_user(len, optlen)) return -EFAULT; if (put_user(num_chunks, &p->gauth_number_of_chunks)) return -EFAULT; return 0; } static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_endpoint *ep = sctp_sk(sk)->ep; struct sctp_authchunks __user *p = (void __user *)optval; struct sctp_authchunks val; struct sctp_association *asoc; struct sctp_chunks_param *ch; u32 num_chunks = 0; char __user *to; if (!ep->auth_enable) return -EACCES; if (len < sizeof(struct sctp_authchunks)) return -EINVAL; if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks))) return -EFAULT; to = p->gauth_chunks; asoc = sctp_id2assoc(sk, val.gauth_assoc_id); if (!asoc && val.gauth_assoc_id && sctp_style(sk, UDP)) return -EINVAL; if (asoc) ch = (struct sctp_chunks_param *)asoc->c.auth_chunks; else ch = ep->auth_chunk_list; if (!ch) goto num; num_chunks = ntohs(ch->param_hdr.length) - sizeof(sctp_paramhdr_t); if (len < sizeof(struct sctp_authchunks) + num_chunks) return -EINVAL; if (copy_to_user(to, ch->chunks, num_chunks)) return -EFAULT; num: len = sizeof(struct sctp_authchunks) + num_chunks; if (put_user(len, optlen)) return -EFAULT; if (put_user(num_chunks, &p->gauth_number_of_chunks)) return -EFAULT; return 0; } /* * 8.2.5. Get the Current Number of Associations (SCTP_GET_ASSOC_NUMBER) * This option gets the current number of associations that are attached * to a one-to-many style socket. The option value is an uint32_t. */ static int sctp_getsockopt_assoc_number(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_sock *sp = sctp_sk(sk); struct sctp_association *asoc; u32 val = 0; if (sctp_style(sk, TCP)) return -EOPNOTSUPP; if (len < sizeof(u32)) return -EINVAL; len = sizeof(u32); list_for_each_entry(asoc, &(sp->ep->asocs), asocs) { val++; } if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &val, len)) return -EFAULT; return 0; } /* * 8.1.23 SCTP_AUTO_ASCONF * See the corresponding setsockopt entry as description */ static int sctp_getsockopt_auto_asconf(struct sock *sk, int len, char __user *optval, int __user *optlen) { int val = 0; if (len < sizeof(int)) return -EINVAL; len = sizeof(int); if (sctp_sk(sk)->do_auto_asconf && sctp_is_ep_boundall(sk)) val = 1; if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &val, len)) return -EFAULT; return 0; } /* * 8.2.6. Get the Current Identifiers of Associations * (SCTP_GET_ASSOC_ID_LIST) * * This option gets the current list of SCTP association identifiers of * the SCTP associations handled by a one-to-many style socket. 
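 *
 * Illustrative userspace sketch (not part of this file; "fd" is a
 * placeholder and error handling is omitted). The buffer is typically
 * sized with SCTP_GET_ASSOC_NUMBER first; note the count may change
 * between the two calls, so a short buffer gets -EINVAL here:
 *
 *	uint32_t n = 0;
 *	socklen_t len = sizeof(n);
 *	struct sctp_assoc_ids *ids;
 *
 *	getsockopt(fd, IPPROTO_SCTP, SCTP_GET_ASSOC_NUMBER, &n, &len);
 *	len = sizeof(*ids) + n * sizeof(sctp_assoc_t);
 *	ids = malloc(len);
 *	if (getsockopt(fd, IPPROTO_SCTP, SCTP_GET_ASSOC_ID_LIST,
 *		       ids, &len) == 0)
 *		use ids->gaids_number_of_ids entries of ids->gaids_assoc_id[]
 *	free(ids);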
*/ static int sctp_getsockopt_assoc_ids(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_sock *sp = sctp_sk(sk); struct sctp_association *asoc; struct sctp_assoc_ids *ids; u32 num = 0; if (sctp_style(sk, TCP)) return -EOPNOTSUPP; if (len < sizeof(struct sctp_assoc_ids)) return -EINVAL; list_for_each_entry(asoc, &(sp->ep->asocs), asocs) { num++; } if (len < sizeof(struct sctp_assoc_ids) + sizeof(sctp_assoc_t) * num) return -EINVAL; len = sizeof(struct sctp_assoc_ids) + sizeof(sctp_assoc_t) * num; ids = kmalloc(len, GFP_KERNEL); if (unlikely(!ids)) return -ENOMEM; ids->gaids_number_of_ids = num; num = 0; list_for_each_entry(asoc, &(sp->ep->asocs), asocs) { ids->gaids_assoc_id[num++] = asoc->assoc_id; } if (put_user(len, optlen) || copy_to_user(optval, ids, len)) { kfree(ids); return -EFAULT; } kfree(ids); return 0; } /* * SCTP_PEER_ADDR_THLDS * * This option allows us to fetch the partially failed threshold for one or all * transports in an association. See Section 6.1 of: * http://www.ietf.org/id/draft-nishida-tsvwg-sctp-failover-05.txt */ static int sctp_getsockopt_paddr_thresholds(struct sock *sk, char __user *optval, int len, int __user *optlen) { struct sctp_paddrthlds val; struct sctp_transport *trans; struct sctp_association *asoc; if (len < sizeof(struct sctp_paddrthlds)) return -EINVAL; len = sizeof(struct sctp_paddrthlds); if (copy_from_user(&val, (struct sctp_paddrthlds __user *)optval, len)) return -EFAULT; if (sctp_is_any(sk, (const union sctp_addr *)&val.spt_address)) { asoc = sctp_id2assoc(sk, val.spt_assoc_id); if (!asoc) return -ENOENT; val.spt_pathpfthld = asoc->pf_retrans; val.spt_pathmaxrxt = asoc->pathmaxrxt; } else { trans = sctp_addr_id2transport(sk, &val.spt_address, val.spt_assoc_id); if (!trans) return -ENOENT; val.spt_pathmaxrxt = trans->pathmaxrxt; val.spt_pathpfthld = trans->pf_retrans; } if (put_user(len, optlen) || copy_to_user(optval, &val, len)) return -EFAULT; return 0; } /* * SCTP_GET_ASSOC_STATS * * This option retrieves local per endpoint statistics. It is modeled * after OpenSolaris' implementation */ static int sctp_getsockopt_assoc_stats(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_assoc_stats sas; struct sctp_association *asoc = NULL; /* User must provide at least the assoc id */ if (len < sizeof(sctp_assoc_t)) return -EINVAL; /* Allow the struct to grow and fill in as much as possible */ len = min_t(size_t, len, sizeof(sas)); if (copy_from_user(&sas, optval, len)) return -EFAULT; asoc = sctp_id2assoc(sk, sas.sas_assoc_id); if (!asoc) return -EINVAL; sas.sas_rtxchunks = asoc->stats.rtxchunks; sas.sas_gapcnt = asoc->stats.gapcnt; sas.sas_outofseqtsns = asoc->stats.outofseqtsns; sas.sas_osacks = asoc->stats.osacks; sas.sas_isacks = asoc->stats.isacks; sas.sas_octrlchunks = asoc->stats.octrlchunks; sas.sas_ictrlchunks = asoc->stats.ictrlchunks; sas.sas_oodchunks = asoc->stats.oodchunks; sas.sas_iodchunks = asoc->stats.iodchunks; sas.sas_ouodchunks = asoc->stats.ouodchunks; sas.sas_iuodchunks = asoc->stats.iuodchunks; sas.sas_idupchunks = asoc->stats.idupchunks; sas.sas_opackets = asoc->stats.opackets; sas.sas_ipackets = asoc->stats.ipackets; /* New high max rto observed, will return 0 if not a single * RTO update took place. 
obs_rto_ipaddr will be bogus * in such a case */ sas.sas_maxrto = asoc->stats.max_obs_rto; memcpy(&sas.sas_obs_rto_ipaddr, &asoc->stats.obs_rto_ipaddr, sizeof(struct sockaddr_storage)); /* Mark beginning of a new observation period */ asoc->stats.max_obs_rto = asoc->rto_min; if (put_user(len, optlen)) return -EFAULT; pr_debug("%s: len:%d, assoc_id:%d\n", __func__, len, sas.sas_assoc_id); if (copy_to_user(optval, &sas, len)) return -EFAULT; return 0; } static int sctp_getsockopt_recvrcvinfo(struct sock *sk, int len, char __user *optval, int __user *optlen) { int val = 0; if (len < sizeof(int)) return -EINVAL; len = sizeof(int); if (sctp_sk(sk)->recvrcvinfo) val = 1; if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &val, len)) return -EFAULT; return 0; } static int sctp_getsockopt_recvnxtinfo(struct sock *sk, int len, char __user *optval, int __user *optlen) { int val = 0; if (len < sizeof(int)) return -EINVAL; len = sizeof(int); if (sctp_sk(sk)->recvnxtinfo) val = 1; if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &val, len)) return -EFAULT; return 0; } static int sctp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen) { int retval = 0; int len; pr_debug("%s: sk:%p, optname:%d\n", __func__, sk, optname); /* I can hardly begin to describe how wrong this is. This is * so broken as to be worse than useless. The API draft * REALLY is NOT helpful here... I am not convinced that the * semantics of getsockopt() with a level OTHER THAN SOL_SCTP * are at all well-founded. */ if (level != SOL_SCTP) { struct sctp_af *af = sctp_sk(sk)->pf->af; retval = af->getsockopt(sk, level, optname, optval, optlen); return retval; } if (get_user(len, optlen)) return -EFAULT; lock_sock(sk); switch (optname) { case SCTP_STATUS: retval = sctp_getsockopt_sctp_status(sk, len, optval, optlen); break; case SCTP_DISABLE_FRAGMENTS: retval = sctp_getsockopt_disable_fragments(sk, len, optval, optlen); break; case SCTP_EVENTS: retval = sctp_getsockopt_events(sk, len, optval, optlen); break; case SCTP_AUTOCLOSE: retval = sctp_getsockopt_autoclose(sk, len, optval, optlen); break; case SCTP_SOCKOPT_PEELOFF: retval = sctp_getsockopt_peeloff(sk, len, optval, optlen); break; case SCTP_PEER_ADDR_PARAMS: retval = sctp_getsockopt_peer_addr_params(sk, len, optval, optlen); break; case SCTP_DELAYED_SACK: retval = sctp_getsockopt_delayed_ack(sk, len, optval, optlen); break; case SCTP_INITMSG: retval = sctp_getsockopt_initmsg(sk, len, optval, optlen); break; case SCTP_GET_PEER_ADDRS: retval = sctp_getsockopt_peer_addrs(sk, len, optval, optlen); break; case SCTP_GET_LOCAL_ADDRS: retval = sctp_getsockopt_local_addrs(sk, len, optval, optlen); break; case SCTP_SOCKOPT_CONNECTX3: retval = sctp_getsockopt_connectx3(sk, len, optval, optlen); break; case SCTP_DEFAULT_SEND_PARAM: retval = sctp_getsockopt_default_send_param(sk, len, optval, optlen); break; case SCTP_DEFAULT_SNDINFO: retval = sctp_getsockopt_default_sndinfo(sk, len, optval, optlen); break; case SCTP_PRIMARY_ADDR: retval = sctp_getsockopt_primary_addr(sk, len, optval, optlen); break; case SCTP_NODELAY: retval = sctp_getsockopt_nodelay(sk, len, optval, optlen); break; case SCTP_RTOINFO: retval = sctp_getsockopt_rtoinfo(sk, len, optval, optlen); break; case SCTP_ASSOCINFO: retval = sctp_getsockopt_associnfo(sk, len, optval, optlen); break; case SCTP_I_WANT_MAPPED_V4_ADDR: retval = sctp_getsockopt_mappedv4(sk, len, optval, optlen); break; case SCTP_MAXSEG: retval = sctp_getsockopt_maxseg(sk, len, optval, 
optlen); break; case SCTP_GET_PEER_ADDR_INFO: retval = sctp_getsockopt_peer_addr_info(sk, len, optval, optlen); break; case SCTP_ADAPTATION_LAYER: retval = sctp_getsockopt_adaptation_layer(sk, len, optval, optlen); break; case SCTP_CONTEXT: retval = sctp_getsockopt_context(sk, len, optval, optlen); break; case SCTP_FRAGMENT_INTERLEAVE: retval = sctp_getsockopt_fragment_interleave(sk, len, optval, optlen); break; case SCTP_PARTIAL_DELIVERY_POINT: retval = sctp_getsockopt_partial_delivery_point(sk, len, optval, optlen); break; case SCTP_MAX_BURST: retval = sctp_getsockopt_maxburst(sk, len, optval, optlen); break; case SCTP_AUTH_KEY: case SCTP_AUTH_CHUNK: case SCTP_AUTH_DELETE_KEY: retval = -EOPNOTSUPP; break; case SCTP_HMAC_IDENT: retval = sctp_getsockopt_hmac_ident(sk, len, optval, optlen); break; case SCTP_AUTH_ACTIVE_KEY: retval = sctp_getsockopt_active_key(sk, len, optval, optlen); break; case SCTP_PEER_AUTH_CHUNKS: retval = sctp_getsockopt_peer_auth_chunks(sk, len, optval, optlen); break; case SCTP_LOCAL_AUTH_CHUNKS: retval = sctp_getsockopt_local_auth_chunks(sk, len, optval, optlen); break; case SCTP_GET_ASSOC_NUMBER: retval = sctp_getsockopt_assoc_number(sk, len, optval, optlen); break; case SCTP_GET_ASSOC_ID_LIST: retval = sctp_getsockopt_assoc_ids(sk, len, optval, optlen); break; case SCTP_AUTO_ASCONF: retval = sctp_getsockopt_auto_asconf(sk, len, optval, optlen); break; case SCTP_PEER_ADDR_THLDS: retval = sctp_getsockopt_paddr_thresholds(sk, optval, len, optlen); break; case SCTP_GET_ASSOC_STATS: retval = sctp_getsockopt_assoc_stats(sk, len, optval, optlen); break; case SCTP_RECVRCVINFO: retval = sctp_getsockopt_recvrcvinfo(sk, len, optval, optlen); break; case SCTP_RECVNXTINFO: retval = sctp_getsockopt_recvnxtinfo(sk, len, optval, optlen); break; default: retval = -ENOPROTOOPT; break; } release_sock(sk); return retval; } static void sctp_hash(struct sock *sk) { /* STUB */ } static void sctp_unhash(struct sock *sk) { /* STUB */ } /* Check if port is acceptable. Possibly find first available port. * * The port hash table (contained in the 'global' SCTP protocol storage * returned by struct sctp_protocol *sctp_get_protocol()). The hash * table is an array of 4096 lists (sctp_bind_hashbucket). Each * list (the list number is the port number hashed out, so as you * would expect from a hash function, all the ports in a given list have * such a number that hashes out to the same list number; you were * expecting that, right?); so each list has a set of ports, with a * link to the socket (struct sock) that uses it, the port number and * a fastreuse flag (FIXME: NPI ipg). */ static struct sctp_bind_bucket *sctp_bucket_create( struct sctp_bind_hashbucket *head, struct net *, unsigned short snum); static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr) { struct sctp_bind_hashbucket *head; /* hash list */ struct sctp_bind_bucket *pp; unsigned short snum; int ret; snum = ntohs(addr->v4.sin_port); pr_debug("%s: begins, snum:%d\n", __func__, snum); local_bh_disable(); if (snum == 0) { /* Search for an available port. 
*/ int low, high, remaining, index; unsigned int rover; struct net *net = sock_net(sk); inet_get_local_port_range(net, &low, &high); remaining = (high - low) + 1; rover = prandom_u32() % remaining + low; do { rover++; if ((rover < low) || (rover > high)) rover = low; if (inet_is_local_reserved_port(net, rover)) continue; index = sctp_phashfn(sock_net(sk), rover); head = &sctp_port_hashtable[index]; spin_lock(&head->lock); sctp_for_each_hentry(pp, &head->chain) if ((pp->port == rover) && net_eq(sock_net(sk), pp->net)) goto next; break; next: spin_unlock(&head->lock); } while (--remaining > 0); /* Exhausted local port range during search? */ ret = 1; if (remaining <= 0) goto fail; /* OK, here is the one we will use. HEAD (the port * hash table list entry) is non-NULL and we hold it's * mutex. */ snum = rover; } else { /* We are given an specific port number; we verify * that it is not being used. If it is used, we will * exahust the search in the hash list corresponding * to the port number (snum) - we detect that with the * port iterator, pp being NULL. */ head = &sctp_port_hashtable[sctp_phashfn(sock_net(sk), snum)]; spin_lock(&head->lock); sctp_for_each_hentry(pp, &head->chain) { if ((pp->port == snum) && net_eq(pp->net, sock_net(sk))) goto pp_found; } } pp = NULL; goto pp_not_found; pp_found: if (!hlist_empty(&pp->owner)) { /* We had a port hash table hit - there is an * available port (pp != NULL) and it is being * used by other socket (pp->owner not empty); that other * socket is going to be sk2. */ int reuse = sk->sk_reuse; struct sock *sk2; pr_debug("%s: found a possible match\n", __func__); if (pp->fastreuse && sk->sk_reuse && sk->sk_state != SCTP_SS_LISTENING) goto success; /* Run through the list of sockets bound to the port * (pp->port) [via the pointers bind_next and * bind_pprev in the struct sock *sk2 (pp->sk)]. On each one, * we get the endpoint they describe and run through * the endpoint's list of IP (v4 or v6) addresses, * comparing each of the addresses with the address of * the socket sk. If we find a match, then that means * that this port/socket (sk) combination are already * in an endpoint. */ sk_for_each_bound(sk2, &pp->owner) { struct sctp_endpoint *ep2; ep2 = sctp_sk(sk2)->ep; if (sk == sk2 || (reuse && sk2->sk_reuse && sk2->sk_state != SCTP_SS_LISTENING)) continue; if (sctp_bind_addr_conflict(&ep2->base.bind_addr, addr, sctp_sk(sk2), sctp_sk(sk))) { ret = (long)sk2; goto fail_unlock; } } pr_debug("%s: found a match\n", __func__); } pp_not_found: /* If there was a hash table miss, create a new port. */ ret = 1; if (!pp && !(pp = sctp_bucket_create(head, sock_net(sk), snum))) goto fail_unlock; /* In either case (hit or miss), make sure fastreuse is 1 only * if sk->sk_reuse is too (that is, if the caller requested * SO_REUSEADDR on this socket -sk-). */ if (hlist_empty(&pp->owner)) { if (sk->sk_reuse && sk->sk_state != SCTP_SS_LISTENING) pp->fastreuse = 1; else pp->fastreuse = 0; } else if (pp->fastreuse && (!sk->sk_reuse || sk->sk_state == SCTP_SS_LISTENING)) pp->fastreuse = 0; /* We are set, so fill up all the data in the hash table * entry, tie the socket list information with the rest of the * sockets FIXME: Blurry, NPI (ipg). */ success: if (!sctp_sk(sk)->bind_hash) { inet_sk(sk)->inet_num = snum; sk_add_bind_node(sk, &pp->owner); sctp_sk(sk)->bind_hash = pp; } ret = 0; fail_unlock: spin_unlock(&head->lock); fail: local_bh_enable(); return ret; } /* Assign a 'snum' port to the socket. If snum == 0, an ephemeral * port is requested. 
*/ static int sctp_get_port(struct sock *sk, unsigned short snum) { union sctp_addr addr; struct sctp_af *af = sctp_sk(sk)->pf->af; /* Set up a dummy address struct from the sk. */ af->from_sk(&addr, sk); addr.v4.sin_port = htons(snum); /* Note: sk->sk_num gets filled in if ephemeral port request. */ return !!sctp_get_port_local(sk, &addr); } /* * Move a socket to LISTENING state. */ static int sctp_listen_start(struct sock *sk, int backlog) { struct sctp_sock *sp = sctp_sk(sk); struct sctp_endpoint *ep = sp->ep; struct crypto_hash *tfm = NULL; char alg[32]; /* Allocate HMAC for generating cookie. */ if (!sp->hmac && sp->sctp_hmac_alg) { sprintf(alg, "hmac(%s)", sp->sctp_hmac_alg); tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC); if (IS_ERR(tfm)) { net_info_ratelimited("failed to load transform for %s: %ld\n", sp->sctp_hmac_alg, PTR_ERR(tfm)); return -ENOSYS; } sctp_sk(sk)->hmac = tfm; } /* * If a bind() or sctp_bindx() is not called prior to a listen() * call that allows new associations to be accepted, the system * picks an ephemeral port and will choose an address set equivalent * to binding with a wildcard address. * * This is not currently spelled out in the SCTP sockets * extensions draft, but follows the practice as seen in TCP * sockets. * */ sk->sk_state = SCTP_SS_LISTENING; if (!ep->base.bind_addr.port) { if (sctp_autobind(sk)) return -EAGAIN; } else { if (sctp_get_port(sk, inet_sk(sk)->inet_num)) { sk->sk_state = SCTP_SS_CLOSED; return -EADDRINUSE; } } sk->sk_max_ack_backlog = backlog; sctp_hash_endpoint(ep); return 0; } /* * 4.1.3 / 5.1.3 listen() * * By default, new associations are not accepted for UDP style sockets. * An application uses listen() to mark a socket as being able to * accept new associations. * * On TCP style sockets, applications use listen() to ready the SCTP * endpoint for accepting inbound associations. * * On both types of endpoints a backlog of '0' disables listening. * * Move a socket to LISTENING state. */ int sctp_inet_listen(struct socket *sock, int backlog) { struct sock *sk = sock->sk; struct sctp_endpoint *ep = sctp_sk(sk)->ep; int err = -EINVAL; if (unlikely(backlog < 0)) return err; lock_sock(sk); /* Peeled-off sockets are not allowed to listen(). */ if (sctp_style(sk, UDP_HIGH_BANDWIDTH)) goto out; if (sock->state != SS_UNCONNECTED) goto out; /* If backlog is zero, disable listening. */ if (!backlog) { if (sctp_sstate(sk, CLOSED)) goto out; err = 0; sctp_unhash_endpoint(ep); sk->sk_state = SCTP_SS_CLOSED; if (sk->sk_reuse) sctp_sk(sk)->bind_hash->fastreuse = 1; goto out; } /* If we are already listening, just update the backlog */ if (sctp_sstate(sk, LISTENING)) sk->sk_max_ack_backlog = backlog; else { err = sctp_listen_start(sk, backlog); if (err) goto out; } err = 0; out: release_sock(sk); return err; } /* * This function is done by modeling the current datagram_poll() and the * tcp_poll(). Note that, based on these implementations, we don't * lock the socket in this function, even though it seems that, * ideally, locking or some other mechanisms can be used to ensure * the integrity of the counters (sndbuf and wmem_alloc) used * in this place. We assume that we don't need locks either until proven * otherwise. * * Another thing to note is that we include the Async I/O support * here, again, by modeling the current TCP/UDP code. We don't have * a good way to test with it yet. 
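 *
 * Illustrative userspace sketch (not part of this file; "fd", "buf" and
 * "timeout_ms" are placeholders, and sctp_recvmsg() is assumed from
 * lksctp-tools). On a one-to-many socket POLLIN covers data and
 * notifications from any association, while POLLOUT reflects the
 * socket-wide send buffer accounting described above:
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLOUT };
 *	struct sctp_sndrcvinfo sinfo;
 *	int flags = 0;
 *
 *	if (poll(&pfd, 1, timeout_ms) > 0 && (pfd.revents & POLLIN))
 *		sctp_recvmsg(fd, buf, sizeof(buf), NULL, NULL,
 *			     &sinfo, &flags);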
*/ unsigned int sctp_poll(struct file *file, struct socket *sock, poll_table *wait) { struct sock *sk = sock->sk; struct sctp_sock *sp = sctp_sk(sk); unsigned int mask; poll_wait(file, sk_sleep(sk), wait); /* A TCP-style listening socket becomes readable when the accept queue * is not empty. */ if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)) return (!list_empty(&sp->ep->asocs)) ? (POLLIN | POLLRDNORM) : 0; mask = 0; /* Is there any exceptional events? */ if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) mask |= POLLERR | (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0); if (sk->sk_shutdown & RCV_SHUTDOWN) mask |= POLLRDHUP | POLLIN | POLLRDNORM; if (sk->sk_shutdown == SHUTDOWN_MASK) mask |= POLLHUP; /* Is it readable? Reconsider this code with TCP-style support. */ if (!skb_queue_empty(&sk->sk_receive_queue)) mask |= POLLIN | POLLRDNORM; /* The association is either gone or not ready. */ if (!sctp_style(sk, UDP) && sctp_sstate(sk, CLOSED)) return mask; /* Is it writable? */ if (sctp_writeable(sk)) { mask |= POLLOUT | POLLWRNORM; } else { set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); /* * Since the socket is not locked, the buffer * might be made available after the writeable check and * before the bit is set. This could cause a lost I/O * signal. tcp_poll() has a race breaker for this race * condition. Based on their implementation, we put * in the following code to cover it as well. */ if (sctp_writeable(sk)) mask |= POLLOUT | POLLWRNORM; } return mask; } /******************************************************************** * 2nd Level Abstractions ********************************************************************/ static struct sctp_bind_bucket *sctp_bucket_create( struct sctp_bind_hashbucket *head, struct net *net, unsigned short snum) { struct sctp_bind_bucket *pp; pp = kmem_cache_alloc(sctp_bucket_cachep, GFP_ATOMIC); if (pp) { SCTP_DBG_OBJCNT_INC(bind_bucket); pp->port = snum; pp->fastreuse = 0; INIT_HLIST_HEAD(&pp->owner); pp->net = net; hlist_add_head(&pp->node, &head->chain); } return pp; } /* Caller must hold hashbucket lock for this tb with local BH disabled */ static void sctp_bucket_destroy(struct sctp_bind_bucket *pp) { if (pp && hlist_empty(&pp->owner)) { __hlist_del(&pp->node); kmem_cache_free(sctp_bucket_cachep, pp); SCTP_DBG_OBJCNT_DEC(bind_bucket); } } /* Release this socket's reference to a local port. */ static inline void __sctp_put_port(struct sock *sk) { struct sctp_bind_hashbucket *head = &sctp_port_hashtable[sctp_phashfn(sock_net(sk), inet_sk(sk)->inet_num)]; struct sctp_bind_bucket *pp; spin_lock(&head->lock); pp = sctp_sk(sk)->bind_hash; __sk_del_bind_node(sk); sctp_sk(sk)->bind_hash = NULL; inet_sk(sk)->inet_num = 0; sctp_bucket_destroy(pp); spin_unlock(&head->lock); } void sctp_put_port(struct sock *sk) { local_bh_disable(); __sctp_put_port(sk); local_bh_enable(); } /* * The system picks an ephemeral port and choose an address set equivalent * to binding with a wildcard address. * One of those addresses will be the primary address for the association. * This automatically enables the multihoming capability of SCTP. */ static int sctp_autobind(struct sock *sk) { union sctp_addr autoaddr; struct sctp_af *af; __be16 port; /* Initialize a local sockaddr structure to INADDR_ANY. */ af = sctp_sk(sk)->pf->af; port = htons(inet_sk(sk)->inet_num); af->inaddr_any(&autoaddr, port); return sctp_do_bind(sk, &autoaddr, af->sockaddr_len); } /* Parse out IPPROTO_SCTP CMSG headers. Perform only minimal validation. 
* * From RFC 2292 * 4.2 The cmsghdr Structure * * * When ancillary data is sent or received, any number of ancillary data * objects can be specified by the msg_control and msg_controllen members of * the msghdr structure, because each object is preceded by * a cmsghdr structure defining the object's length (the cmsg_len member). * Historically Berkeley-derived implementations have passed only one object * at a time, but this API allows multiple objects to be * passed in a single call to sendmsg() or recvmsg(). The following example * shows two ancillary data objects in a control buffer. * * |<--------------------------- msg_controllen -------------------------->| * | | * * |<----- ancillary data object ----->|<----- ancillary data object ----->| * * |<---------- CMSG_SPACE() --------->|<---------- CMSG_SPACE() --------->| * | | | * * |<---------- cmsg_len ---------->| |<--------- cmsg_len ----------->| | * * |<--------- CMSG_LEN() --------->| |<-------- CMSG_LEN() ---------->| | * | | | | | * * +-----+-----+-----+--+-----------+--+-----+-----+-----+--+-----------+--+ * |cmsg_|cmsg_|cmsg_|XX| |XX|cmsg_|cmsg_|cmsg_|XX| |XX| * * |len |level|type |XX|cmsg_data[]|XX|len |level|type |XX|cmsg_data[]|XX| * * +-----+-----+-----+--+-----------+--+-----+-----+-----+--+-----------+--+ * ^ * | * * msg_control * points here */ static int sctp_msghdr_parse(const struct msghdr *msg, sctp_cmsgs_t *cmsgs) { struct cmsghdr *cmsg; struct msghdr *my_msg = (struct msghdr *)msg; for_each_cmsghdr(cmsg, my_msg) { if (!CMSG_OK(my_msg, cmsg)) return -EINVAL; /* Should we parse this header or ignore? */ if (cmsg->cmsg_level != IPPROTO_SCTP) continue; /* Strictly check lengths following example in SCM code. */ switch (cmsg->cmsg_type) { case SCTP_INIT: /* SCTP Socket API Extension * 5.3.1 SCTP Initiation Structure (SCTP_INIT) * * This cmsghdr structure provides information for * initializing new SCTP associations with sendmsg(). * The SCTP_INITMSG socket option uses this same data * structure. This structure is not used for * recvmsg(). * * cmsg_level cmsg_type cmsg_data[] * ------------ ------------ ---------------------- * IPPROTO_SCTP SCTP_INIT struct sctp_initmsg */ if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct sctp_initmsg))) return -EINVAL; cmsgs->init = CMSG_DATA(cmsg); break; case SCTP_SNDRCV: /* SCTP Socket API Extension * 5.3.2 SCTP Header Information Structure(SCTP_SNDRCV) * * This cmsghdr structure specifies SCTP options for * sendmsg() and describes SCTP header information * about a received message through recvmsg(). * * cmsg_level cmsg_type cmsg_data[] * ------------ ------------ ---------------------- * IPPROTO_SCTP SCTP_SNDRCV struct sctp_sndrcvinfo */ if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct sctp_sndrcvinfo))) return -EINVAL; cmsgs->srinfo = CMSG_DATA(cmsg); if (cmsgs->srinfo->sinfo_flags & ~(SCTP_UNORDERED | SCTP_ADDR_OVER | SCTP_ABORT | SCTP_EOF)) return -EINVAL; break; case SCTP_SNDINFO: /* SCTP Socket API Extension * 5.3.4 SCTP Send Information Structure (SCTP_SNDINFO) * * This cmsghdr structure specifies SCTP options for * sendmsg(). This structure and SCTP_RCVINFO replaces * SCTP_SNDRCV which has been deprecated. 
* * cmsg_level cmsg_type cmsg_data[] * ------------ ------------ --------------------- * IPPROTO_SCTP SCTP_SNDINFO struct sctp_sndinfo */ if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct sctp_sndinfo))) return -EINVAL; cmsgs->sinfo = CMSG_DATA(cmsg); if (cmsgs->sinfo->snd_flags & ~(SCTP_UNORDERED | SCTP_ADDR_OVER | SCTP_ABORT | SCTP_EOF)) return -EINVAL; break; default: return -EINVAL; } } return 0; } /* * Wait for a packet.. * Note: This function is the same function as in core/datagram.c * with a few modifications to make lksctp work. */ static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p) { int error; DEFINE_WAIT(wait); prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); /* Socket errors? */ error = sock_error(sk); if (error) goto out; if (!skb_queue_empty(&sk->sk_receive_queue)) goto ready; /* Socket shut down? */ if (sk->sk_shutdown & RCV_SHUTDOWN) goto out; /* Sequenced packets can come disconnected. If so we report the * problem. */ error = -ENOTCONN; /* Is there a good reason to think that we may receive some data? */ if (list_empty(&sctp_sk(sk)->ep->asocs) && !sctp_sstate(sk, LISTENING)) goto out; /* Handle signals. */ if (signal_pending(current)) goto interrupted; /* Let another process have a go. Since we are going to sleep * anyway. Note: This may cause odd behaviors if the message * does not fit in the user's buffer, but this seems to be the * only way to honor MSG_DONTWAIT realistically. */ release_sock(sk); *timeo_p = schedule_timeout(*timeo_p); lock_sock(sk); ready: finish_wait(sk_sleep(sk), &wait); return 0; interrupted: error = sock_intr_errno(*timeo_p); out: finish_wait(sk_sleep(sk), &wait); *err = error; return error; } /* Receive a datagram. * Note: This is pretty much the same routine as in core/datagram.c * with a few changes to make lksctp work. */ struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags, int noblock, int *err) { int error; struct sk_buff *skb; long timeo; timeo = sock_rcvtimeo(sk, noblock); pr_debug("%s: timeo:%ld, max:%ld\n", __func__, timeo, MAX_SCHEDULE_TIMEOUT); do { /* Again only user level code calls this function, * so nothing interrupt level * will suddenly eat the receive_queue. * * Look at current nfs client by the way... * However, this function was correct in any case. 8) */ if (flags & MSG_PEEK) { spin_lock_bh(&sk->sk_receive_queue.lock); skb = skb_peek(&sk->sk_receive_queue); if (skb) atomic_inc(&skb->users); spin_unlock_bh(&sk->sk_receive_queue.lock); } else { skb = skb_dequeue(&sk->sk_receive_queue); } if (skb) return skb; /* Caller is allowed not to check sk->sk_err before calling. */ error = sock_error(sk); if (error) goto no_packet; if (sk->sk_shutdown & RCV_SHUTDOWN) break; if (sk_can_busy_loop(sk) && sk_busy_loop(sk, noblock)) continue; /* User doesn't want to wait. */ error = -EAGAIN; if (!timeo) goto no_packet; } while (sctp_wait_for_packet(sk, err, &timeo) == 0); return NULL; no_packet: *err = error; return NULL; } /* If sndbuf has changed, wake up per association sndbuf waiters. */ static void __sctp_write_space(struct sctp_association *asoc) { struct sock *sk = asoc->base.sk; struct socket *sock = sk->sk_socket; if ((sctp_wspace(asoc) > 0) && sock) { if (waitqueue_active(&asoc->wait)) wake_up_interruptible(&asoc->wait); if (sctp_writeable(sk)) { wait_queue_head_t *wq = sk_sleep(sk); if (wq && waitqueue_active(wq)) wake_up_interruptible(wq); /* Note that we try to include the Async I/O support * here by modeling from the current TCP/UDP code. * We have not tested with it yet. 
*/ if (!(sk->sk_shutdown & SEND_SHUTDOWN)) sock_wake_async(sock, SOCK_WAKE_SPACE, POLL_OUT); } } } static void sctp_wake_up_waiters(struct sock *sk, struct sctp_association *asoc) { struct sctp_association *tmp = asoc; /* We do accounting for the sndbuf space per association, * so we only need to wake our own association. */ if (asoc->ep->sndbuf_policy) return __sctp_write_space(asoc); /* If association goes down and is just flushing its * outq, then just normally notify others. */ if (asoc->base.dead) return sctp_write_space(sk); /* Accounting for the sndbuf space is per socket, so we * need to wake up others, try to be fair and in case of * other associations, let them have a go first instead * of just doing a sctp_write_space() call. * * Note that we reach sctp_wake_up_waiters() only when * associations free up queued chunks, thus we are under * lock and the list of associations on a socket is * guaranteed not to change. */ for (tmp = list_next_entry(tmp, asocs); 1; tmp = list_next_entry(tmp, asocs)) { /* Manually skip the head element. */ if (&tmp->asocs == &((sctp_sk(sk))->ep->asocs)) continue; /* Wake up association. */ __sctp_write_space(tmp); /* We've reached the end. */ if (tmp == asoc) break; } } /* Do accounting for the sndbuf space. * Decrement the used sndbuf space of the corresponding association by the * data size which was just transmitted(freed). */ static void sctp_wfree(struct sk_buff *skb) { struct sctp_chunk *chunk = skb_shinfo(skb)->destructor_arg; struct sctp_association *asoc = chunk->asoc; struct sock *sk = asoc->base.sk; asoc->sndbuf_used -= SCTP_DATA_SNDSIZE(chunk) + sizeof(struct sk_buff) + sizeof(struct sctp_chunk); atomic_sub(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc); /* * This undoes what is done via sctp_set_owner_w and sk_mem_charge */ sk->sk_wmem_queued -= skb->truesize; sk_mem_uncharge(sk, skb->truesize); sock_wfree(skb); sctp_wake_up_waiters(sk, asoc); sctp_association_put(asoc); } /* Do accounting for the receive space on the socket. * Accounting for the association is done in ulpevent.c * We set this as a destructor for the cloned data skbs so that * accounting is done at the correct time. */ void sctp_sock_rfree(struct sk_buff *skb) { struct sock *sk = skb->sk; struct sctp_ulpevent *event = sctp_skb2event(skb); atomic_sub(event->rmem_len, &sk->sk_rmem_alloc); /* * Mimic the behavior of sock_rfree */ sk_mem_uncharge(sk, event->rmem_len); } /* Helper function to wait for space in the sndbuf. */ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p, size_t msg_len) { struct sock *sk = asoc->base.sk; int err = 0; long current_timeo = *timeo_p; DEFINE_WAIT(wait); pr_debug("%s: asoc:%p, timeo:%ld, msg_len:%zu\n", __func__, asoc, *timeo_p, msg_len); /* Increment the association's refcnt. */ sctp_association_hold(asoc); /* Wait on the association specific sndbuf space. */ for (;;) { prepare_to_wait_exclusive(&asoc->wait, &wait, TASK_INTERRUPTIBLE); if (!*timeo_p) goto do_nonblock; if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING || asoc->base.dead) goto do_error; if (signal_pending(current)) goto do_interrupted; if (msg_len <= sctp_wspace(asoc)) break; /* Let another process have a go. Since we are going * to sleep anyway. */ release_sock(sk); current_timeo = schedule_timeout(current_timeo); BUG_ON(sk != asoc->base.sk); lock_sock(sk); *timeo_p = current_timeo; } out: finish_wait(&asoc->wait, &wait); /* Release the association's refcnt. 
*/ sctp_association_put(asoc); return err; do_error: err = -EPIPE; goto out; do_interrupted: err = sock_intr_errno(*timeo_p); goto out; do_nonblock: err = -EAGAIN; goto out; } void sctp_data_ready(struct sock *sk) { struct socket_wq *wq; rcu_read_lock(); wq = rcu_dereference(sk->sk_wq); if (wq_has_sleeper(wq)) wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLRDNORM | POLLRDBAND); sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); rcu_read_unlock(); } /* If socket sndbuf has changed, wake up all per association waiters. */ void sctp_write_space(struct sock *sk) { struct sctp_association *asoc; /* Wake up the tasks in each wait queue. */ list_for_each_entry(asoc, &((sctp_sk(sk))->ep->asocs), asocs) { __sctp_write_space(asoc); } } /* Is there any sndbuf space available on the socket? * * Note that sk_wmem_alloc is the sum of the send buffers on all of the * associations on the same socket. For a UDP-style socket with * multiple associations, it is possible for it to be "unwriteable" * prematurely. I assume that this is acceptable because * a premature "unwriteable" is better than an accidental "writeable" which * would cause an unwanted block under certain circumstances. For the 1-1 * UDP-style sockets or TCP-style sockets, this code should work. * - Daisy */ static int sctp_writeable(struct sock *sk) { int amt = 0; amt = sk->sk_sndbuf - sk_wmem_alloc_get(sk); if (amt < 0) amt = 0; return amt; } /* Wait for an association to go into ESTABLISHED state. If timeout is 0, * returns immediately with EINPROGRESS. */ static int sctp_wait_for_connect(struct sctp_association *asoc, long *timeo_p) { struct sock *sk = asoc->base.sk; int err = 0; long current_timeo = *timeo_p; DEFINE_WAIT(wait); pr_debug("%s: asoc:%p, timeo:%ld\n", __func__, asoc, *timeo_p); /* Increment the association's refcnt. */ sctp_association_hold(asoc); for (;;) { prepare_to_wait_exclusive(&asoc->wait, &wait, TASK_INTERRUPTIBLE); if (!*timeo_p) goto do_nonblock; if (sk->sk_shutdown & RCV_SHUTDOWN) break; if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING || asoc->base.dead) goto do_error; if (signal_pending(current)) goto do_interrupted; if (sctp_state(asoc, ESTABLISHED)) break; /* Let another process have a go. Since we are going * to sleep anyway. */ release_sock(sk); current_timeo = schedule_timeout(current_timeo); lock_sock(sk); *timeo_p = current_timeo; } out: finish_wait(&asoc->wait, &wait); /* Release the association's refcnt. 
*/ sctp_association_put(asoc); return err; do_error: if (asoc->init_err_counter + 1 > asoc->max_init_attempts) err = -ETIMEDOUT; else err = -ECONNREFUSED; goto out; do_interrupted: err = sock_intr_errno(*timeo_p); goto out; do_nonblock: err = -EINPROGRESS; goto out; } static int sctp_wait_for_accept(struct sock *sk, long timeo) { struct sctp_endpoint *ep; int err = 0; DEFINE_WAIT(wait); ep = sctp_sk(sk)->ep; for (;;) { prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); if (list_empty(&ep->asocs)) { release_sock(sk); timeo = schedule_timeout(timeo); lock_sock(sk); } err = -EINVAL; if (!sctp_sstate(sk, LISTENING)) break; err = 0; if (!list_empty(&ep->asocs)) break; err = sock_intr_errno(timeo); if (signal_pending(current)) break; err = -EAGAIN; if (!timeo) break; } finish_wait(sk_sleep(sk), &wait); return err; } static void sctp_wait_for_close(struct sock *sk, long timeout) { DEFINE_WAIT(wait); do { prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); if (list_empty(&sctp_sk(sk)->ep->asocs)) break; release_sock(sk); timeout = schedule_timeout(timeout); lock_sock(sk); } while (!signal_pending(current) && timeout); finish_wait(sk_sleep(sk), &wait); } static void sctp_skb_set_owner_r_frag(struct sk_buff *skb, struct sock *sk) { struct sk_buff *frag; if (!skb->data_len) goto done; /* Don't forget the fragments. */ skb_walk_frags(skb, frag) sctp_skb_set_owner_r_frag(frag, sk); done: sctp_skb_set_owner_r(skb, sk); } void sctp_copy_sock(struct sock *newsk, struct sock *sk, struct sctp_association *asoc) { struct inet_sock *inet = inet_sk(sk); struct inet_sock *newinet; newsk->sk_type = sk->sk_type; newsk->sk_bound_dev_if = sk->sk_bound_dev_if; newsk->sk_flags = sk->sk_flags; newsk->sk_no_check_tx = sk->sk_no_check_tx; newsk->sk_no_check_rx = sk->sk_no_check_rx; newsk->sk_reuse = sk->sk_reuse; newsk->sk_shutdown = sk->sk_shutdown; newsk->sk_destruct = sctp_destruct_sock; newsk->sk_family = sk->sk_family; newsk->sk_protocol = IPPROTO_SCTP; newsk->sk_backlog_rcv = sk->sk_prot->backlog_rcv; newsk->sk_sndbuf = sk->sk_sndbuf; newsk->sk_rcvbuf = sk->sk_rcvbuf; newsk->sk_lingertime = sk->sk_lingertime; newsk->sk_rcvtimeo = sk->sk_rcvtimeo; newsk->sk_sndtimeo = sk->sk_sndtimeo; newinet = inet_sk(newsk); /* Initialize sk's sport, dport, rcv_saddr and daddr for * getsockname() and getpeername() */ newinet->inet_sport = inet->inet_sport; newinet->inet_saddr = inet->inet_saddr; newinet->inet_rcv_saddr = inet->inet_rcv_saddr; newinet->inet_dport = htons(asoc->peer.port); newinet->pmtudisc = inet->pmtudisc; newinet->inet_id = asoc->next_tsn ^ jiffies; newinet->uc_ttl = inet->uc_ttl; newinet->mc_loop = 1; newinet->mc_ttl = 1; newinet->mc_index = 0; newinet->mc_list = NULL; } /* Populate the fields of the newsk from the oldsk and migrate the assoc * and its messages to the newsk. */ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk, struct sctp_association *assoc, sctp_socket_type_t type) { struct sctp_sock *oldsp = sctp_sk(oldsk); struct sctp_sock *newsp = sctp_sk(newsk); struct sctp_bind_bucket *pp; /* hash list port iterator */ struct sctp_endpoint *newep = newsp->ep; struct sk_buff *skb, *tmp; struct sctp_ulpevent *event; struct sctp_bind_hashbucket *head; struct list_head tmplist; /* Migrate socket buffer sizes and all the socket level options to the * new socket. */ newsk->sk_sndbuf = oldsk->sk_sndbuf; newsk->sk_rcvbuf = oldsk->sk_rcvbuf; /* Brute force copy old sctp opt. 
*/ if (oldsp->do_auto_asconf) { memcpy(&tmplist, &newsp->auto_asconf_list, sizeof(tmplist)); inet_sk_copy_descendant(newsk, oldsk); memcpy(&newsp->auto_asconf_list, &tmplist, sizeof(tmplist)); } else inet_sk_copy_descendant(newsk, oldsk); /* Restore the ep value that was overwritten with the above structure * copy. */ newsp->ep = newep; newsp->hmac = NULL; /* Hook this new socket in to the bind_hash list. */ head = &sctp_port_hashtable[sctp_phashfn(sock_net(oldsk), inet_sk(oldsk)->inet_num)]; local_bh_disable(); spin_lock(&head->lock); pp = sctp_sk(oldsk)->bind_hash; sk_add_bind_node(newsk, &pp->owner); sctp_sk(newsk)->bind_hash = pp; inet_sk(newsk)->inet_num = inet_sk(oldsk)->inet_num; spin_unlock(&head->lock); local_bh_enable(); /* Copy the bind_addr list from the original endpoint to the new * endpoint so that we can handle restarts properly */ sctp_bind_addr_dup(&newsp->ep->base.bind_addr, &oldsp->ep->base.bind_addr, GFP_KERNEL); /* Move any messages in the old socket's receive queue that are for the * peeled off association to the new socket's receive queue. */ sctp_skb_for_each(skb, &oldsk->sk_receive_queue, tmp) { event = sctp_skb2event(skb); if (event->asoc == assoc) { __skb_unlink(skb, &oldsk->sk_receive_queue); __skb_queue_tail(&newsk->sk_receive_queue, skb); sctp_skb_set_owner_r_frag(skb, newsk); } } /* Clean up any messages pending delivery due to partial * delivery. Three cases: * 1) No partial deliver; no work. * 2) Peeling off partial delivery; keep pd_lobby in new pd_lobby. * 3) Peeling off non-partial delivery; move pd_lobby to receive_queue. */ skb_queue_head_init(&newsp->pd_lobby); atomic_set(&sctp_sk(newsk)->pd_mode, assoc->ulpq.pd_mode); if (atomic_read(&sctp_sk(oldsk)->pd_mode)) { struct sk_buff_head *queue; /* Decide which queue to move pd_lobby skbs to. */ if (assoc->ulpq.pd_mode) { queue = &newsp->pd_lobby; } else queue = &newsk->sk_receive_queue; /* Walk through the pd_lobby, looking for skbs that * need moved to the new socket. */ sctp_skb_for_each(skb, &oldsp->pd_lobby, tmp) { event = sctp_skb2event(skb); if (event->asoc == assoc) { __skb_unlink(skb, &oldsp->pd_lobby); __skb_queue_tail(queue, skb); sctp_skb_set_owner_r_frag(skb, newsk); } } /* Clear up any skbs waiting for the partial * delivery to finish. */ if (assoc->ulpq.pd_mode) sctp_clear_pd(oldsk, NULL); } sctp_skb_for_each(skb, &assoc->ulpq.reasm, tmp) sctp_skb_set_owner_r_frag(skb, newsk); sctp_skb_for_each(skb, &assoc->ulpq.lobby, tmp) sctp_skb_set_owner_r_frag(skb, newsk); /* Set the type of socket to indicate that it is peeled off from the * original UDP-style socket or created with the accept() call on a * TCP-style socket.. */ newsp->type = type; /* Mark the new socket "in-use" by the user so that any packets * that may arrive on the association after we've moved it are * queued to the backlog. This prevents a potential race between * backlog processing on the old socket and new-packet processing * on the new socket. * * The caller has just allocated newsk so we can guarantee that other * paths won't try to lock it and then oldsk. */ lock_sock_nested(newsk, SINGLE_DEPTH_NESTING); sctp_assoc_migrate(assoc, newsk); /* If the association on the newsk is already closed before accept() * is called, set RCV_SHUTDOWN flag. */ if (sctp_state(assoc, CLOSED) && sctp_style(newsk, TCP)) newsk->sk_shutdown |= RCV_SHUTDOWN; newsk->sk_state = SCTP_SS_ESTABLISHED; release_sock(newsk); } /* This proto struct describes the ULP interface for SCTP. 
*/ struct proto sctp_prot = { .name = "SCTP", .owner = THIS_MODULE, .close = sctp_close, .connect = sctp_connect, .disconnect = sctp_disconnect, .accept = sctp_accept, .ioctl = sctp_ioctl, .init = sctp_init_sock, .destroy = sctp_destroy_sock, .shutdown = sctp_shutdown, .setsockopt = sctp_setsockopt, .getsockopt = sctp_getsockopt, .sendmsg = sctp_sendmsg, .recvmsg = sctp_recvmsg, .bind = sctp_bind, .backlog_rcv = sctp_backlog_rcv, .hash = sctp_hash, .unhash = sctp_unhash, .get_port = sctp_get_port, .obj_size = sizeof(struct sctp_sock), .sysctl_mem = sysctl_sctp_mem, .sysctl_rmem = sysctl_sctp_rmem, .sysctl_wmem = sysctl_sctp_wmem, .memory_pressure = &sctp_memory_pressure, .enter_memory_pressure = sctp_enter_memory_pressure, .memory_allocated = &sctp_memory_allocated, .sockets_allocated = &sctp_sockets_allocated, }; #if IS_ENABLED(CONFIG_IPV6) struct proto sctpv6_prot = { .name = "SCTPv6", .owner = THIS_MODULE, .close = sctp_close, .connect = sctp_connect, .disconnect = sctp_disconnect, .accept = sctp_accept, .ioctl = sctp_ioctl, .init = sctp_init_sock, .destroy = sctp_destroy_sock, .shutdown = sctp_shutdown, .setsockopt = sctp_setsockopt, .getsockopt = sctp_getsockopt, .sendmsg = sctp_sendmsg, .recvmsg = sctp_recvmsg, .bind = sctp_bind, .backlog_rcv = sctp_backlog_rcv, .hash = sctp_hash, .unhash = sctp_unhash, .get_port = sctp_get_port, .obj_size = sizeof(struct sctp6_sock), .sysctl_mem = sysctl_sctp_mem, .sysctl_rmem = sysctl_sctp_rmem, .sysctl_wmem = sysctl_sctp_wmem, .memory_pressure = &sctp_memory_pressure, .enter_memory_pressure = sctp_enter_memory_pressure, .memory_allocated = &sctp_memory_allocated, .sockets_allocated = &sctp_sockets_allocated, }; #endif /* IS_ENABLED(CONFIG_IPV6) */
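/*
 * Illustrative sketch (added for exposition, not part of the original file):
 * the sleep/wake pattern shared by sctp_wait_for_connect(),
 * sctp_wait_for_accept() and sctp_wait_for_close() above.  The socket lock
 * is dropped around schedule_timeout() so that softirq/backlog processing
 * can make the awaited condition true; wait_cond() is only a placeholder
 * for whatever state test the real callers perform.
 */
static int sctp_wait_pattern_sketch(struct sock *sk, wait_queue_head_t *waitq,
				    long *timeo_p,
				    bool (*wait_cond)(struct sock *))
{
	DEFINE_WAIT(wait);
	int err = 0;

	for (;;) {
		prepare_to_wait_exclusive(waitq, &wait, TASK_INTERRUPTIBLE);
		if (wait_cond(sk))
			break;				/* condition satisfied */
		if (signal_pending(current)) {
			err = sock_intr_errno(*timeo_p);
			break;
		}
		if (!*timeo_p) {
			err = -EAGAIN;			/* non-blocking caller */
			break;
		}
		/* Let others run while we sleep, as the real waiters do. */
		release_sock(sk);
		*timeo_p = schedule_timeout(*timeo_p);
		lock_sock(sk);
	}
	finish_wait(waitq, &wait);
	return err;
}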
./CrossVul/dataset_final_sorted/CWE-362/c/bad_1578_2
crossvul-cpp_data_bad_1865_0
/*
 * Functions to sequence FLUSH and FUA writes.
 *
 * Copyright (C) 2011		Max Planck Institute for Gravitational Physics
 * Copyright (C) 2011		Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 *
 * REQ_{FLUSH|FUA} requests are decomposed to sequences consisted of three
 * optional steps - PREFLUSH, DATA and POSTFLUSH - according to the request
 * properties and hardware capability.
 *
 * If a request doesn't have data, only REQ_FLUSH makes sense, which
 * indicates a simple flush request.  If there is data, REQ_FLUSH indicates
 * that the device cache should be flushed before the data is executed, and
 * REQ_FUA means that the data must be on non-volatile media on request
 * completion.
 *
 * If the device doesn't have writeback cache, FLUSH and FUA don't make any
 * difference.  The requests are either completed immediately if there's no
 * data or executed as normal requests otherwise.
 *
 * If the device has writeback cache and supports FUA, REQ_FLUSH is
 * translated to PREFLUSH but REQ_FUA is passed down directly with DATA.
 *
 * If the device has writeback cache and doesn't support FUA, REQ_FLUSH is
 * translated to PREFLUSH and REQ_FUA to POSTFLUSH.
 *
 * The actual execution of flush is double buffered.  Whenever a request
 * needs to execute PRE or POSTFLUSH, it queues at
 * fq->flush_queue[fq->flush_pending_idx].  Once certain criteria are met, a
 * flush is issued and the pending_idx is toggled.  When the flush
 * completes, all the requests which were pending are proceeded to the next
 * step.  This allows arbitrary merging of different types of FLUSH/FUA
 * requests.
 *
 * Currently, the following conditions are used to determine when to issue
 * flush.
 *
 * C1. At any given time, only one flush shall be in progress.  This makes
 *     double buffering sufficient.
 *
 * C2. Flush is deferred if any request is executing DATA of its sequence.
 *     This avoids issuing separate POSTFLUSHes for requests which shared
 *     PREFLUSH.
 *
 * C3. The second condition is ignored if there is a request which has
 *     waited longer than FLUSH_PENDING_TIMEOUT.  This is to avoid
 *     starvation in the unlikely case where there are continuous stream of
 *     FUA (without FLUSH) requests.
 *
 * For devices which support FUA, it isn't clear whether C2 (and thus C3)
 * is beneficial.
 *
 * Note that a sequenced FLUSH/FUA request with DATA is completed twice.
 * Once while executing DATA and again after the whole sequence is
 * complete.  The first completion updates the contained bio but doesn't
 * finish it so that the bio submitter is notified only after the whole
 * sequence is complete.  This is implemented by testing REQ_FLUSH_SEQ in
 * req_bio_endio().
 *
 * The above peculiarity requires that each FLUSH/FUA request has only one
 * bio attached to it, which is guaranteed as they aren't allowed to be
 * merged in the usual way.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>
#include <linux/blk-mq.h>

#include "blk.h"
#include "blk-mq.h"

/* FLUSH/FUA sequences */
enum {
	REQ_FSEQ_PREFLUSH	= (1 << 0), /* pre-flushing in progress */
	REQ_FSEQ_DATA		= (1 << 1), /* data write in progress */
	REQ_FSEQ_POSTFLUSH	= (1 << 2), /* post-flushing in progress */
	REQ_FSEQ_DONE		= (1 << 3),

	REQ_FSEQ_ACTIONS	= REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA |
				  REQ_FSEQ_POSTFLUSH,

	/*
	 * If flush has been pending longer than the following timeout,
	 * it's issued even if flush_data requests are still in flight.
*/ FLUSH_PENDING_TIMEOUT = 5 * HZ, }; static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq); static unsigned int blk_flush_policy(unsigned int fflags, struct request *rq) { unsigned int policy = 0; if (blk_rq_sectors(rq)) policy |= REQ_FSEQ_DATA; if (fflags & REQ_FLUSH) { if (rq->cmd_flags & REQ_FLUSH) policy |= REQ_FSEQ_PREFLUSH; if (!(fflags & REQ_FUA) && (rq->cmd_flags & REQ_FUA)) policy |= REQ_FSEQ_POSTFLUSH; } return policy; } static unsigned int blk_flush_cur_seq(struct request *rq) { return 1 << ffz(rq->flush.seq); } static void blk_flush_restore_request(struct request *rq) { /* * After flush data completion, @rq->bio is %NULL but we need to * complete the bio again. @rq->biotail is guaranteed to equal the * original @rq->bio. Restore it. */ rq->bio = rq->biotail; /* make @rq a normal request */ rq->cmd_flags &= ~REQ_FLUSH_SEQ; rq->end_io = rq->flush.saved_end_io; } static bool blk_flush_queue_rq(struct request *rq, bool add_front) { if (rq->q->mq_ops) { struct request_queue *q = rq->q; blk_mq_add_to_requeue_list(rq, add_front); blk_mq_kick_requeue_list(q); return false; } else { if (add_front) list_add(&rq->queuelist, &rq->q->queue_head); else list_add_tail(&rq->queuelist, &rq->q->queue_head); return true; } } /** * blk_flush_complete_seq - complete flush sequence * @rq: FLUSH/FUA request being sequenced * @fq: flush queue * @seq: sequences to complete (mask of %REQ_FSEQ_*, can be zero) * @error: whether an error occurred * * @rq just completed @seq part of its flush sequence, record the * completion and trigger the next step. * * CONTEXT: * spin_lock_irq(q->queue_lock or fq->mq_flush_lock) * * RETURNS: * %true if requests were added to the dispatch queue, %false otherwise. */ static bool blk_flush_complete_seq(struct request *rq, struct blk_flush_queue *fq, unsigned int seq, int error) { struct request_queue *q = rq->q; struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx]; bool queued = false, kicked; BUG_ON(rq->flush.seq & seq); rq->flush.seq |= seq; if (likely(!error)) seq = blk_flush_cur_seq(rq); else seq = REQ_FSEQ_DONE; switch (seq) { case REQ_FSEQ_PREFLUSH: case REQ_FSEQ_POSTFLUSH: /* queue for flush */ if (list_empty(pending)) fq->flush_pending_since = jiffies; list_move_tail(&rq->flush.list, pending); break; case REQ_FSEQ_DATA: list_move_tail(&rq->flush.list, &fq->flush_data_in_flight); queued = blk_flush_queue_rq(rq, true); break; case REQ_FSEQ_DONE: /* * @rq was previously adjusted by blk_flush_issue() for * flush sequencing and may already have gone through the * flush data request completion path. Restore @rq for * normal completion and end it. 
*/ BUG_ON(!list_empty(&rq->queuelist)); list_del_init(&rq->flush.list); blk_flush_restore_request(rq); if (q->mq_ops) blk_mq_end_request(rq, error); else __blk_end_request_all(rq, error); break; default: BUG(); } kicked = blk_kick_flush(q, fq); return kicked | queued; } static void flush_end_io(struct request *flush_rq, int error) { struct request_queue *q = flush_rq->q; struct list_head *running; bool queued = false; struct request *rq, *n; unsigned long flags = 0; struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx); if (q->mq_ops) { spin_lock_irqsave(&fq->mq_flush_lock, flags); flush_rq->tag = -1; } running = &fq->flush_queue[fq->flush_running_idx]; BUG_ON(fq->flush_pending_idx == fq->flush_running_idx); /* account completion of the flush request */ fq->flush_running_idx ^= 1; if (!q->mq_ops) elv_completed_request(q, flush_rq); /* and push the waiting requests to the next stage */ list_for_each_entry_safe(rq, n, running, flush.list) { unsigned int seq = blk_flush_cur_seq(rq); BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH); queued |= blk_flush_complete_seq(rq, fq, seq, error); } /* * Kick the queue to avoid stall for two cases: * 1. Moving a request silently to empty queue_head may stall the * queue. * 2. When flush request is running in non-queueable queue, the * queue is hold. Restart the queue after flush request is finished * to avoid stall. * This function is called from request completion path and calling * directly into request_fn may confuse the driver. Always use * kblockd. */ if (queued || fq->flush_queue_delayed) { WARN_ON(q->mq_ops); blk_run_queue_async(q); } fq->flush_queue_delayed = 0; if (q->mq_ops) spin_unlock_irqrestore(&fq->mq_flush_lock, flags); } /** * blk_kick_flush - consider issuing flush request * @q: request_queue being kicked * @fq: flush queue * * Flush related states of @q have changed, consider issuing flush request. * Please read the comment at the top of this file for more info. * * CONTEXT: * spin_lock_irq(q->queue_lock or fq->mq_flush_lock) * * RETURNS: * %true if flush was issued, %false otherwise. */ static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq) { struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx]; struct request *first_rq = list_first_entry(pending, struct request, flush.list); struct request *flush_rq = fq->flush_rq; /* C1 described at the top of this file */ if (fq->flush_pending_idx != fq->flush_running_idx || list_empty(pending)) return false; /* C2 and C3 */ if (!list_empty(&fq->flush_data_in_flight) && time_before(jiffies, fq->flush_pending_since + FLUSH_PENDING_TIMEOUT)) return false; /* * Issue flush and toggle pending_idx. This makes pending_idx * different from running_idx, which means flush is in flight. */ fq->flush_pending_idx ^= 1; blk_rq_init(q, flush_rq); /* * Borrow tag from the first request since they can't * be in flight at the same time. */ if (q->mq_ops) { flush_rq->mq_ctx = first_rq->mq_ctx; flush_rq->tag = first_rq->tag; } flush_rq->cmd_type = REQ_TYPE_FS; flush_rq->cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ; flush_rq->rq_disk = first_rq->rq_disk; flush_rq->end_io = flush_end_io; return blk_flush_queue_rq(flush_rq, false); } static void flush_data_end_io(struct request *rq, int error) { struct request_queue *q = rq->q; struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL); /* * After populating an empty queue, kick it to avoid stall. Read * the comment in flush_end_io(). 
*/ if (blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error)) blk_run_queue_async(q); } static void mq_flush_data_end_io(struct request *rq, int error) { struct request_queue *q = rq->q; struct blk_mq_hw_ctx *hctx; struct blk_mq_ctx *ctx = rq->mq_ctx; unsigned long flags; struct blk_flush_queue *fq = blk_get_flush_queue(q, ctx); hctx = q->mq_ops->map_queue(q, ctx->cpu); /* * After populating an empty queue, kick it to avoid stall. Read * the comment in flush_end_io(). */ spin_lock_irqsave(&fq->mq_flush_lock, flags); if (blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error)) blk_mq_run_hw_queue(hctx, true); spin_unlock_irqrestore(&fq->mq_flush_lock, flags); } /** * blk_insert_flush - insert a new FLUSH/FUA request * @rq: request to insert * * To be called from __elv_add_request() for %ELEVATOR_INSERT_FLUSH insertions. * or __blk_mq_run_hw_queue() to dispatch request. * @rq is being submitted. Analyze what needs to be done and put it on the * right queue. * * CONTEXT: * spin_lock_irq(q->queue_lock) in !mq case */ void blk_insert_flush(struct request *rq) { struct request_queue *q = rq->q; unsigned int fflags = q->flush_flags; /* may change, cache */ unsigned int policy = blk_flush_policy(fflags, rq); struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx); /* * @policy now records what operations need to be done. Adjust * REQ_FLUSH and FUA for the driver. */ rq->cmd_flags &= ~REQ_FLUSH; if (!(fflags & REQ_FUA)) rq->cmd_flags &= ~REQ_FUA; /* * An empty flush handed down from a stacking driver may * translate into nothing if the underlying device does not * advertise a write-back cache. In this case, simply * complete the request. */ if (!policy) { if (q->mq_ops) blk_mq_end_request(rq, 0); else __blk_end_bidi_request(rq, 0, 0, 0); return; } BUG_ON(rq->bio != rq->biotail); /*assumes zero or single bio rq */ /* * If there's data but flush is not necessary, the request can be * processed directly without going through flush machinery. Queue * for normal execution. */ if ((policy & REQ_FSEQ_DATA) && !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) { if (q->mq_ops) { blk_mq_insert_request(rq, false, false, true); } else list_add_tail(&rq->queuelist, &q->queue_head); return; } /* * @rq should go through flush machinery. Mark it part of flush * sequence and submit for further processing. */ memset(&rq->flush, 0, sizeof(rq->flush)); INIT_LIST_HEAD(&rq->flush.list); rq->cmd_flags |= REQ_FLUSH_SEQ; rq->flush.saved_end_io = rq->end_io; /* Usually NULL */ if (q->mq_ops) { rq->end_io = mq_flush_data_end_io; spin_lock_irq(&fq->mq_flush_lock); blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0); spin_unlock_irq(&fq->mq_flush_lock); return; } rq->end_io = flush_data_end_io; blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0); } /** * blkdev_issue_flush - queue a flush * @bdev: blockdev to issue flush for * @gfp_mask: memory allocation flags (for bio_alloc) * @error_sector: error sector * * Description: * Issue a flush for the block device in question. Caller can supply * room for storing the error offset in case of a flush error, if they * wish to. If WAIT flag is not passed then caller may check only what * request was pushed in some internal queue for later handling. 
*/ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask, sector_t *error_sector) { struct request_queue *q; struct bio *bio; int ret = 0; if (bdev->bd_disk == NULL) return -ENXIO; q = bdev_get_queue(bdev); if (!q) return -ENXIO; /* * some block devices may not have their queue correctly set up here * (e.g. loop device without a backing file) and so issuing a flush * here will panic. Ensure there is a request function before issuing * the flush. */ if (!q->make_request_fn) return -ENXIO; bio = bio_alloc(gfp_mask, 0); bio->bi_bdev = bdev; ret = submit_bio_wait(WRITE_FLUSH, bio); /* * The driver must store the error location in ->bi_sector, if * it supports it. For non-stacked drivers, this should be * copied from blk_rq_pos(rq). */ if (error_sector) *error_sector = bio->bi_iter.bi_sector; bio_put(bio); return ret; } EXPORT_SYMBOL(blkdev_issue_flush); struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q, int node, int cmd_size) { struct blk_flush_queue *fq; int rq_sz = sizeof(struct request); fq = kzalloc_node(sizeof(*fq), GFP_KERNEL, node); if (!fq) goto fail; if (q->mq_ops) { spin_lock_init(&fq->mq_flush_lock); rq_sz = round_up(rq_sz + cmd_size, cache_line_size()); } fq->flush_rq = kzalloc_node(rq_sz, GFP_KERNEL, node); if (!fq->flush_rq) goto fail_rq; INIT_LIST_HEAD(&fq->flush_queue[0]); INIT_LIST_HEAD(&fq->flush_queue[1]); INIT_LIST_HEAD(&fq->flush_data_in_flight); return fq; fail_rq: kfree(fq); fail: return NULL; } void blk_free_flush_queue(struct blk_flush_queue *fq) { /* bio based request queue hasn't flush queue */ if (!fq) return; kfree(fq->flush_rq); kfree(fq); }
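/*
 * Illustrative sketch (added for exposition, not part of the original file):
 * how a filesystem or stacking driver might call blkdev_issue_flush() above
 * to push a device's volatile write cache to stable storage.  'bdev' is
 * assumed to be an already opened block device; the helper name is made up.
 */
static int flush_device_cache_sketch(struct block_device *bdev)
{
	sector_t error_sector = 0;
	int err;

	/* Same signature as blkdev_issue_flush() defined above. */
	err = blkdev_issue_flush(bdev, GFP_KERNEL, &error_sector);
	if (err)
		pr_warn("cache flush failed: %d\n", err);
	return err;
}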
./CrossVul/dataset_final_sorted/CWE-362/c/bad_1865_0
crossvul-cpp_data_bad_5222_4
/* Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; version 2 of the License.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */

/* Describe, check and repair of MyISAM tables */

/*
  About checksum calculation.

  There are two types of checksums. Table checksum and row checksum.

  Row checksum is an additional byte at the end of dynamic length
  records. It must be calculated if the table is configured for them.
  Otherwise they must not be used. The variable
  MYISAM_SHARE::calc_checksum determines if row checksums are used.
  MI_INFO::checksum is used as temporary storage during row handling.
  For parallel repair we must assure that only one thread can use this
  variable. There is no problem on the write side as this is done by one
  thread only. But when checking a record after read this could go
  wrong. But since all threads read through a common read buffer, it is
  sufficient if only one thread checks it.

  Table checksum is an eight byte value in the header of the index
  file. It can be calculated even if row checksums are not used. The
  variable MI_CHECK::glob_crc is calculated over all records.
  MI_SORT_PARAM::calc_checksum determines if this should be done. This
  variable is not part of MI_CHECK because it must be set per thread for
  parallel repair. The global glob_crc must be changed by one thread
  only. And it is sufficient to calculate the checksum once only.
*/ #include "ftdefs.h" #include <m_ctype.h> #include <stdarg.h> #include <my_getopt.h> #ifdef HAVE_SYS_VADVISE_H #include <sys/vadvise.h> #endif #ifdef HAVE_SYS_MMAN_H #include <sys/mman.h> #endif #include "rt_index.h" /* Functions defined in this file */ static int check_k_link(MI_CHECK *param, MI_INFO *info,uint nr); static int chk_index(MI_CHECK *param, MI_INFO *info,MI_KEYDEF *keyinfo, my_off_t page, uchar *buff, ha_rows *keys, ha_checksum *key_checksum, uint level); static uint isam_key_length(MI_INFO *info,MI_KEYDEF *keyinfo); static ha_checksum calc_checksum(ha_rows count); static int writekeys(MI_SORT_PARAM *sort_param); static int sort_one_index(MI_CHECK *param, MI_INFO *info,MI_KEYDEF *keyinfo, my_off_t pagepos, File new_file); static int sort_key_read(MI_SORT_PARAM *sort_param,void *key); static int sort_ft_key_read(MI_SORT_PARAM *sort_param,void *key); static int sort_get_next_record(MI_SORT_PARAM *sort_param); static int sort_key_cmp(MI_SORT_PARAM *sort_param, const void *a,const void *b); static int sort_ft_key_write(MI_SORT_PARAM *sort_param, const void *a); static int sort_key_write(MI_SORT_PARAM *sort_param, const void *a); static my_off_t get_record_for_key(MI_INFO *info,MI_KEYDEF *keyinfo, uchar *key); static int sort_insert_key(MI_SORT_PARAM *sort_param, reg1 SORT_KEY_BLOCKS *key_block, uchar *key, my_off_t prev_block); static int sort_delete_record(MI_SORT_PARAM *sort_param); /*static int flush_pending_blocks(MI_CHECK *param);*/ static SORT_KEY_BLOCKS *alloc_key_blocks(MI_CHECK *param, uint blocks, uint buffer_length); static ha_checksum mi_byte_checksum(const uchar *buf, uint length); static void set_data_file_type(SORT_INFO *sort_info, MYISAM_SHARE *share); static HA_KEYSEG *ha_find_null(HA_KEYSEG *keyseg, uchar *a); void myisamchk_init(MI_CHECK *param) { bzero((uchar*) param,sizeof(*param)); param->opt_follow_links=1; param->keys_in_use= ~(ulonglong) 0; param->search_after_block=HA_OFFSET_ERROR; param->auto_increment_value= 0; param->use_buffers=USE_BUFFER_INIT; param->read_buffer_length=READ_BUFFER_INIT; param->write_buffer_length=READ_BUFFER_INIT; param->sort_buffer_length=SORT_BUFFER_INIT; param->sort_key_blocks=BUFFERS_WHEN_SORTING; param->tmpfile_createflag=O_RDWR | O_TRUNC | O_EXCL; param->myf_rw=MYF(MY_NABP | MY_WME | MY_WAIT_IF_FULL); param->start_check_pos=0; param->max_record_length= LONGLONG_MAX; param->key_cache_block_size= KEY_CACHE_BLOCK_SIZE; param->stats_method= MI_STATS_METHOD_NULLS_NOT_EQUAL; param->need_print_msg_lock= 0; } /* Check the status flags for the table */ int chk_status(MI_CHECK *param, register MI_INFO *info) { MYISAM_SHARE *share=info->s; if (mi_is_crashed_on_repair(info)) mi_check_print_warning(param, "Table is marked as crashed and last repair failed"); else if (mi_is_crashed(info)) mi_check_print_warning(param, "Table is marked as crashed"); if (share->state.open_count != (uint) (info->s->global_changed ? 1 : 0)) { /* Don't count this as a real warning, as check can correct this ! */ uint save=param->warning_printed; mi_check_print_warning(param, share->state.open_count==1 ? 
"%d client is using or hasn't closed the table properly" : "%d clients are using or haven't closed the table properly", share->state.open_count); /* If this will be fixed by the check, forget the warning */ if (param->testflag & T_UPDATE_STATE) param->warning_printed=save; } return 0; } /* Check delete links */ int chk_del(MI_CHECK *param, register MI_INFO *info, uint test_flag) { reg2 ha_rows i; uint delete_link_length; my_off_t empty, next_link, old_link= 0; char buff[22],buff2[22]; DBUG_ENTER("chk_del"); param->record_checksum=0; delete_link_length=((info->s->options & HA_OPTION_PACK_RECORD) ? 20 : info->s->rec_reflength+1); if (!(test_flag & T_SILENT)) puts("- check record delete-chain"); next_link=info->s->state.dellink; if (info->state->del == 0) { if (test_flag & T_VERBOSE) { puts("No recordlinks"); } } else { if (test_flag & T_VERBOSE) printf("Recordlinks: "); empty=0; for (i= info->state->del ; i > 0L && next_link != HA_OFFSET_ERROR ; i--) { if (*killed_ptr(param)) DBUG_RETURN(1); if (test_flag & T_VERBOSE) printf(" %9s",llstr(next_link,buff)); if (next_link >= info->state->data_file_length) goto wrong; if (mysql_file_pread(info->dfile, (uchar*) buff, delete_link_length, next_link, MYF(MY_NABP))) { if (test_flag & T_VERBOSE) puts(""); mi_check_print_error(param,"Can't read delete-link at filepos: %s", llstr(next_link,buff)); DBUG_RETURN(1); } if (*buff != '\0') { if (test_flag & T_VERBOSE) puts(""); mi_check_print_error(param,"Record at pos: %s is not remove-marked", llstr(next_link,buff)); goto wrong; } if (info->s->options & HA_OPTION_PACK_RECORD) { my_off_t prev_link=mi_sizekorr(buff+12); if (empty && prev_link != old_link) { if (test_flag & T_VERBOSE) puts(""); mi_check_print_error(param,"Deleted block at %s doesn't point back at previous delete link",llstr(next_link,buff2)); goto wrong; } old_link=next_link; next_link=mi_sizekorr(buff+4); empty+=mi_uint3korr(buff+1); } else { param->record_checksum+=(ha_checksum) next_link; next_link=_mi_rec_pos(info->s,(uchar*) buff+1); empty+=info->s->base.pack_reclength; } } if (test_flag & T_VERBOSE) puts("\n"); if (empty != info->state->empty) { mi_check_print_warning(param, "Found %s deleted space in delete link chain. Should be %s", llstr(empty,buff2), llstr(info->state->empty,buff)); } if (next_link != HA_OFFSET_ERROR) { mi_check_print_error(param, "Found more than the expected %s deleted rows in delete link chain", llstr(info->state->del, buff)); goto wrong; } if (i != 0) { mi_check_print_error(param, "Found %s deleted rows in delete link chain. 
Should be %s", llstr(info->state->del - i, buff2), llstr(info->state->del, buff)); goto wrong; } } DBUG_RETURN(0); wrong: param->testflag|=T_RETRY_WITHOUT_QUICK; if (test_flag & T_VERBOSE) puts(""); mi_check_print_error(param,"record delete-link-chain corrupted"); DBUG_RETURN(1); } /* chk_del */ /* Check delete links in index file */ static int check_k_link(MI_CHECK *param, register MI_INFO *info, uint nr) { my_off_t next_link; uint block_size=(nr+1)*MI_MIN_KEY_BLOCK_LENGTH; ha_rows records; char llbuff[21], llbuff2[21]; uchar *buff; DBUG_ENTER("check_k_link"); DBUG_PRINT("enter", ("block_size: %u", block_size)); if (param->testflag & T_VERBOSE) printf("block_size %4u:", block_size); /* purecov: tested */ next_link=info->s->state.key_del[nr]; records= (ha_rows) (info->state->key_file_length / block_size); while (next_link != HA_OFFSET_ERROR && records > 0) { if (*killed_ptr(param)) DBUG_RETURN(1); if (param->testflag & T_VERBOSE) printf("%16s",llstr(next_link,llbuff)); /* Key blocks must lay within the key file length entirely. */ if (next_link + block_size > info->state->key_file_length) { /* purecov: begin tested */ mi_check_print_error(param, "Invalid key block position: %s " "key block size: %u file_length: %s", llstr(next_link, llbuff), block_size, llstr(info->state->key_file_length, llbuff2)); DBUG_RETURN(1); /* purecov: end */ } /* Key blocks must be aligned at MI_MIN_KEY_BLOCK_LENGTH. */ if (next_link & (MI_MIN_KEY_BLOCK_LENGTH - 1)) { /* purecov: begin tested */ mi_check_print_error(param, "Mis-aligned key block: %s " "minimum key block length: %u", llstr(next_link, llbuff), MI_MIN_KEY_BLOCK_LENGTH); DBUG_RETURN(1); /* purecov: end */ } /* Read the key block with MI_MIN_KEY_BLOCK_LENGTH to find next link. If the key cache block size is smaller than block_size, we can so avoid unecessary eviction of cache block. */ if (!(buff=key_cache_read(info->s->key_cache, info->s->kfile, next_link, DFLT_INIT_HITS, (uchar*) info->buff, MI_MIN_KEY_BLOCK_LENGTH, MI_MIN_KEY_BLOCK_LENGTH, 1))) { /* purecov: begin tested */ mi_check_print_error(param, "key cache read error for block: %s", llstr(next_link,llbuff)); DBUG_RETURN(1); /* purecov: end */ } next_link=mi_sizekorr(buff); records--; param->key_file_blocks+=block_size; } if (param->testflag & T_VERBOSE) { if (next_link != HA_OFFSET_ERROR) printf("%16s\n",llstr(next_link,llbuff)); else puts(""); } DBUG_RETURN (next_link != HA_OFFSET_ERROR); } /* check_k_link */ /* Check sizes of files */ int chk_size(MI_CHECK *param, register MI_INFO *info) { int error=0; register my_off_t skr,size; char buff[22],buff2[22]; DBUG_ENTER("chk_size"); if (!(param->testflag & T_SILENT)) puts("- check file-size"); /* The following is needed if called externally (not from myisamchk) */ flush_key_blocks(info->s->key_cache, info->s->kfile, FLUSH_FORCE_WRITE); size= mysql_file_seek(info->s->kfile, 0L, MY_SEEK_END, MYF(MY_THREADSAFE)); if ((skr=(my_off_t) info->state->key_file_length) != size) { /* Don't give error if file generated by myisampack */ if (skr > size && mi_is_any_key_active(info->s->state.key_map)) { error=1; mi_check_print_error(param, "Size of indexfile is: %-8s Should be: %s", llstr(size,buff), llstr(skr,buff2)); } else mi_check_print_warning(param, "Size of indexfile is: %-8s Should be: %s", llstr(size,buff), llstr(skr,buff2)); } if (!(param->testflag & T_VERY_SILENT) && ! 
(info->s->options & HA_OPTION_COMPRESS_RECORD) && ulonglong2double(info->state->key_file_length) > ulonglong2double(info->s->base.margin_key_file_length)*0.9) mi_check_print_warning(param,"Keyfile is almost full, %10s of %10s used", llstr(info->state->key_file_length,buff), llstr(info->s->base.max_key_file_length-1,buff)); size= mysql_file_seek(info->dfile, 0L, MY_SEEK_END, MYF(0)); skr=(my_off_t) info->state->data_file_length; if (info->s->options & HA_OPTION_COMPRESS_RECORD) skr+= MEMMAP_EXTRA_MARGIN; #ifdef USE_RELOC if (info->data_file_type == STATIC_RECORD && skr < (my_off_t) info->s->base.reloc*info->s->base.min_pack_length) skr=(my_off_t) info->s->base.reloc*info->s->base.min_pack_length; #endif if (skr != size) { info->state->data_file_length=size; /* Skip other errors */ if (skr > size && skr != size + MEMMAP_EXTRA_MARGIN) { error=1; mi_check_print_error(param,"Size of datafile is: %-9s Should be: %s", llstr(size,buff), llstr(skr,buff2)); param->testflag|=T_RETRY_WITHOUT_QUICK; } else { mi_check_print_warning(param, "Size of datafile is: %-9s Should be: %s", llstr(size,buff), llstr(skr,buff2)); } } if (!(param->testflag & T_VERY_SILENT) && !(info->s->options & HA_OPTION_COMPRESS_RECORD) && ulonglong2double(info->state->data_file_length) > (ulonglong2double(info->s->base.max_data_file_length)*0.9)) mi_check_print_warning(param, "Datafile is almost full, %10s of %10s used", llstr(info->state->data_file_length,buff), llstr(info->s->base.max_data_file_length-1,buff2)); DBUG_RETURN(error); } /* chk_size */ /* Check keys */ int chk_key(MI_CHECK *param, register MI_INFO *info) { uint key,found_keys=0,full_text_keys=0,result=0; ha_rows keys; ha_checksum old_record_checksum,init_checksum; my_off_t all_keydata,all_totaldata,key_totlength,length; ulong *rec_per_key_part; MYISAM_SHARE *share=info->s; MI_KEYDEF *keyinfo; char buff[22],buff2[22]; DBUG_ENTER("chk_key"); if (!(param->testflag & T_SILENT)) puts("- check key delete-chain"); param->key_file_blocks=info->s->base.keystart; for (key=0 ; key < info->s->state.header.max_block_size_index ; key++) if (check_k_link(param,info,key)) { if (param->testflag & T_VERBOSE) puts(""); mi_check_print_error(param,"key delete-link-chain corrupted"); DBUG_RETURN(-1); } if (!(param->testflag & T_SILENT)) puts("- check index reference"); all_keydata=all_totaldata=key_totlength=0; old_record_checksum=0; init_checksum=param->record_checksum; if (!(share->options & (HA_OPTION_PACK_RECORD | HA_OPTION_COMPRESS_RECORD))) old_record_checksum=calc_checksum(info->state->records+info->state->del-1)* share->base.pack_reclength; rec_per_key_part= param->rec_per_key_part; for (key= 0,keyinfo= &share->keyinfo[0]; key < share->base.keys ; rec_per_key_part+=keyinfo->keysegs, key++, keyinfo++) { param->key_crc[key]=0; if (! 
mi_is_key_active(share->state.key_map, key)) { /* Remember old statistics for key */ memcpy((char*) rec_per_key_part, (char*) (share->state.rec_per_key_part + (uint) (rec_per_key_part - param->rec_per_key_part)), keyinfo->keysegs*sizeof(*rec_per_key_part)); continue; } found_keys++; param->record_checksum=init_checksum; bzero((char*) &param->unique_count,sizeof(param->unique_count)); bzero((char*) &param->notnull_count,sizeof(param->notnull_count)); if ((!(param->testflag & T_SILENT))) printf ("- check data record references index: %d\n",key+1); if (keyinfo->flag & (HA_FULLTEXT | HA_SPATIAL)) full_text_keys++; if (share->state.key_root[key] == HA_OFFSET_ERROR && (info->state->records == 0 || keyinfo->flag & HA_FULLTEXT)) goto do_stat; if (!_mi_fetch_keypage(info,keyinfo,share->state.key_root[key], DFLT_INIT_HITS,info->buff,0)) { mi_check_print_error(param,"Can't read indexpage from filepos: %s", llstr(share->state.key_root[key],buff)); if (!(param->testflag & T_INFO)) DBUG_RETURN(-1); result= -1; continue; } param->key_file_blocks+=keyinfo->block_length; keys=0; param->keydata=param->totaldata=0; param->key_blocks=0; param->max_level=0; if (chk_index(param,info,keyinfo,share->state.key_root[key],info->buff, &keys, param->key_crc+key,1)) DBUG_RETURN(-1); if(!(keyinfo->flag & (HA_FULLTEXT | HA_SPATIAL))) { if (keys != info->state->records) { mi_check_print_error(param,"Found %s keys of %s",llstr(keys,buff), llstr(info->state->records,buff2)); if (!(param->testflag & T_INFO)) DBUG_RETURN(-1); result= -1; continue; } if (found_keys - full_text_keys == 1 && ((share->options & (HA_OPTION_PACK_RECORD | HA_OPTION_COMPRESS_RECORD)) || (param->testflag & T_DONT_CHECK_CHECKSUM))) old_record_checksum=param->record_checksum; else if (old_record_checksum != param->record_checksum) { if (key) mi_check_print_error(param,"Key %u doesn't point at same records that key 1", key+1); else mi_check_print_error(param,"Key 1 doesn't point at all records"); if (!(param->testflag & T_INFO)) DBUG_RETURN(-1); result= -1; continue; } } if ((uint) share->base.auto_key -1 == key) { /* Check that auto_increment key is bigger than max key value */ ulonglong auto_increment; info->lastinx=key; _mi_read_key_record(info, 0L, info->rec_buff); auto_increment= retrieve_auto_increment(info, info->rec_buff); if (auto_increment > info->s->state.auto_increment) { mi_check_print_warning(param, "Auto-increment value: %s is smaller " "than max used value: %s", llstr(info->s->state.auto_increment,buff2), llstr(auto_increment, buff)); } if (param->testflag & T_AUTO_INC) { set_if_bigger(info->s->state.auto_increment, auto_increment); set_if_bigger(info->s->state.auto_increment, param->auto_increment_value); } /* Check that there isn't a row with auto_increment = 0 in the table */ mi_extra(info,HA_EXTRA_KEYREAD,0); bzero(info->lastkey,keyinfo->seg->length); if (!mi_rkey(info, info->rec_buff, key, (const uchar*) info->lastkey, (key_part_map)1, HA_READ_KEY_EXACT)) { /* Don't count this as a real warning, as myisamchk can't correct it */ uint save=param->warning_printed; mi_check_print_warning(param, "Found row where the auto_increment " "column has the value 0"); param->warning_printed=save; } mi_extra(info,HA_EXTRA_NO_KEYREAD,0); } length=(my_off_t) isam_key_length(info,keyinfo)*keys + param->key_blocks*2; if (param->testflag & T_INFO && param->totaldata != 0L && keys != 0L) printf("Key: %2d: Keyblocks used: %3d%% Packed: %4d%% Max levels: %2d\n", key+1, (int) (my_off_t2double(param->keydata)*100.0/my_off_t2double(param->totaldata)), (int) 
((my_off_t2double(length) - my_off_t2double(param->keydata))*100.0/ my_off_t2double(length)), param->max_level); all_keydata+=param->keydata; all_totaldata+=param->totaldata; key_totlength+=length; do_stat: if (param->testflag & T_STATISTICS) update_key_parts(keyinfo, rec_per_key_part, param->unique_count, param->stats_method == MI_STATS_METHOD_IGNORE_NULLS? param->notnull_count: NULL, (ulonglong)info->state->records); } if (param->testflag & T_INFO) { if (all_totaldata != 0L && found_keys > 0) printf("Total: Keyblocks used: %3d%% Packed: %4d%%\n\n", (int) (my_off_t2double(all_keydata)*100.0/ my_off_t2double(all_totaldata)), (int) ((my_off_t2double(key_totlength) - my_off_t2double(all_keydata))*100.0/ my_off_t2double(key_totlength))); else if (all_totaldata != 0L && mi_is_any_key_active(share->state.key_map)) puts(""); } if (param->key_file_blocks != info->state->key_file_length && param->keys_in_use != ~(ulonglong) 0) mi_check_print_warning(param, "Some data are unreferenced in keyfile"); if (found_keys != full_text_keys) param->record_checksum=old_record_checksum-init_checksum; /* Remove delete links */ else param->record_checksum=0; DBUG_RETURN(result); } /* chk_key */ static int chk_index_down(MI_CHECK *param, MI_INFO *info, MI_KEYDEF *keyinfo, my_off_t page, uchar *buff, ha_rows *keys, ha_checksum *key_checksum, uint level) { char llbuff[22],llbuff2[22]; DBUG_ENTER("chk_index_down"); /* Key blocks must lay within the key file length entirely. */ if (page + keyinfo->block_length > info->state->key_file_length) { /* purecov: begin tested */ /* Give it a chance to fit in the real file size. */ my_off_t max_length= mysql_file_seek(info->s->kfile, 0L, MY_SEEK_END, MYF(MY_THREADSAFE)); mi_check_print_error(param, "Invalid key block position: %s " "key block size: %u file_length: %s", llstr(page, llbuff), keyinfo->block_length, llstr(info->state->key_file_length, llbuff2)); if (page + keyinfo->block_length > max_length) goto err; /* Fix the remebered key file length. */ info->state->key_file_length= (max_length & ~ (my_off_t) (keyinfo->block_length - 1)); /* purecov: end */ } /* Key blocks must be aligned at MI_MIN_KEY_BLOCK_LENGTH. */ if (page & (MI_MIN_KEY_BLOCK_LENGTH - 1)) { /* purecov: begin tested */ mi_check_print_error(param, "Mis-aligned key block: %s " "minimum key block length: %u", llstr(page, llbuff), MI_MIN_KEY_BLOCK_LENGTH); goto err; /* purecov: end */ } if (!_mi_fetch_keypage(info,keyinfo,page, DFLT_INIT_HITS,buff,0)) { mi_check_print_error(param,"Can't read key from filepos: %s", llstr(page,llbuff)); goto err; } param->key_file_blocks+=keyinfo->block_length; if (chk_index(param,info,keyinfo,page,buff,keys,key_checksum,level)) goto err; DBUG_RETURN(0); /* purecov: begin tested */ err: DBUG_RETURN(1); /* purecov: end */ } /* "Ignore NULLs" statistics collection method: process first index tuple. SYNOPSIS mi_collect_stats_nonulls_first() keyseg IN Array of key part descriptions notnull INOUT Array, notnull[i] = (number of {keypart1...keypart_i} tuples that don't contain NULLs) key IN Key values tuple DESCRIPTION Process the first index tuple - find out which prefix tuples don't contain NULLs, and update the array of notnull counters accordingly. */ static void mi_collect_stats_nonulls_first(HA_KEYSEG *keyseg, ulonglong *notnull, uchar *key) { uint first_null, kp; first_null= (uint) (ha_find_null(keyseg, key) - keyseg); /* All prefix tuples that don't include keypart_{first_null} are not-null tuples (and all others aren't), increment counters for them. 
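For example, for a three-part key whose values are (1, 7, NULL),
ha_find_null() stops at the third keypart, so first_null is 2 and only
notnull[0] and notnull[1] are incremented.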
*/ for (kp= 0; kp < first_null; kp++) notnull[kp]++; } /* "Ignore NULLs" statistics collection method: process next index tuple. SYNOPSIS mi_collect_stats_nonulls_next() keyseg IN Array of key part descriptions notnull INOUT Array, notnull[i] = (number of {keypart1...keypart_i} tuples that don't contain NULLs) prev_key IN Previous key values tuple last_key IN Next key values tuple DESCRIPTION Process the next index tuple: 1. Find out which prefix tuples of last_key don't contain NULLs, and update the array of notnull counters accordingly. 2. Find the first keypart number where the prev_key and last_key tuples are different(A), or last_key has NULL value(B), and return it, so the caller can count number of unique tuples for each key prefix. We don't need (B) to be counted, and that is compensated back in update_key_parts(). RETURN 1 + number of first keypart where values differ or last_key tuple has NULL */ static int mi_collect_stats_nonulls_next(HA_KEYSEG *keyseg, ulonglong *notnull, uchar *prev_key, uchar *last_key) { uint diffs[2]; uint first_null_seg, kp; HA_KEYSEG *seg; /* Find the first keypart where values are different or either of them is NULL. We get results in diffs array: diffs[0]= 1 + number of first different keypart diffs[1]=offset: (last_key + diffs[1]) points to first value in last_key that is NULL or different from corresponding value in prev_key. */ ha_key_cmp(keyseg, prev_key, last_key, USE_WHOLE_KEY, SEARCH_FIND | SEARCH_NULL_ARE_NOT_EQUAL, diffs); seg= keyseg + diffs[0] - 1; /* Find first NULL in last_key */ first_null_seg= (uint) (ha_find_null(seg, last_key + diffs[1]) - keyseg); for (kp= 0; kp < first_null_seg; kp++) notnull[kp]++; /* Return 1+ number of first key part where values differ. Don't care if these were NULLs and not .... We compensate for that in update_key_parts. 
*/ return diffs[0]; } /* Check if index is ok */ static int chk_index(MI_CHECK *param, MI_INFO *info, MI_KEYDEF *keyinfo, my_off_t page, uchar *buff, ha_rows *keys, ha_checksum *key_checksum, uint level) { int flag; uint used_length,comp_flag,nod_flag,key_length=0; uchar key[HA_MAX_POSSIBLE_KEY_BUFF],*temp_buff,*keypos,*old_keypos,*endpos; my_off_t next_page,record; char llbuff[22]; uint diff_pos[2]; DBUG_ENTER("chk_index"); DBUG_DUMP("buff",(uchar*) buff,mi_getint(buff)); /* TODO: implement appropriate check for RTree keys */ if (keyinfo->flag & HA_SPATIAL) DBUG_RETURN(0); if (!(temp_buff=(uchar*) my_alloca((uint) keyinfo->block_length))) { mi_check_print_error(param,"Not enough memory for keyblock"); DBUG_RETURN(-1); } if (keyinfo->flag & HA_NOSAME) comp_flag=SEARCH_FIND | SEARCH_UPDATE; /* Not real duplicates */ else comp_flag=SEARCH_SAME; /* Keys in positionorder */ nod_flag=mi_test_if_nod(buff); used_length=mi_getint(buff); keypos=buff+2+nod_flag; endpos=buff+used_length; param->keydata+=used_length; param->totaldata+=keyinfo->block_length; /* INFO */ param->key_blocks++; if (level > param->max_level) param->max_level=level; if (used_length > keyinfo->block_length) { mi_check_print_error(param,"Wrong pageinfo at page: %s", llstr(page,llbuff)); goto err; } for ( ;; ) { if (*killed_ptr(param)) goto err; memcpy((char*) info->lastkey,(char*) key,key_length); info->lastkey_length=key_length; if (nod_flag) { next_page=_mi_kpos(nod_flag,keypos); if (chk_index_down(param,info,keyinfo,next_page, temp_buff,keys,key_checksum,level+1)) goto err; } old_keypos=keypos; if (keypos >= endpos || (key_length=(*keyinfo->get_key)(keyinfo,nod_flag,&keypos,key)) == 0) break; if (keypos > endpos) { mi_check_print_error(param,"Wrong key block length at page: %s",llstr(page,llbuff)); goto err; } if ((*keys)++ && (flag=ha_key_cmp(keyinfo->seg,info->lastkey,key,key_length, comp_flag, diff_pos)) >=0) { DBUG_DUMP("old",(uchar*) info->lastkey, info->lastkey_length); DBUG_DUMP("new",(uchar*) key, key_length); DBUG_DUMP("new_in_page",(uchar*) old_keypos,(uint) (keypos-old_keypos)); if (comp_flag & SEARCH_FIND && flag == 0) mi_check_print_error(param,"Found duplicated key at page %s",llstr(page,llbuff)); else mi_check_print_error(param,"Key in wrong position at page %s",llstr(page,llbuff)); goto err; } if (param->testflag & T_STATISTICS) { if (*keys != 1L) /* not first_key */ { if (param->stats_method == MI_STATS_METHOD_NULLS_NOT_EQUAL) ha_key_cmp(keyinfo->seg,info->lastkey,key,USE_WHOLE_KEY, SEARCH_FIND | SEARCH_NULL_ARE_NOT_EQUAL, diff_pos); else if (param->stats_method == MI_STATS_METHOD_IGNORE_NULLS) { diff_pos[0]= mi_collect_stats_nonulls_next(keyinfo->seg, param->notnull_count, info->lastkey, key); } param->unique_count[diff_pos[0]-1]++; } else { if (param->stats_method == MI_STATS_METHOD_IGNORE_NULLS) mi_collect_stats_nonulls_first(keyinfo->seg, param->notnull_count, key); } } (*key_checksum)+= mi_byte_checksum((uchar*) key, key_length- info->s->rec_reflength); record= _mi_dpos(info,0,key+key_length); if (keyinfo->flag & HA_FULLTEXT) /* special handling for ft2 */ { uint off; int subkeys; get_key_full_length_rdonly(off, key); subkeys=ft_sintXkorr(key+off); if (subkeys < 0) { ha_rows tmp_keys=0; if (chk_index_down(param,info,&info->s->ft2_keyinfo,record, temp_buff,&tmp_keys,key_checksum,1)) goto err; if (tmp_keys + subkeys) { mi_check_print_error(param, "Number of words in the 2nd level tree " "does not match the number in the header. 
" "Parent word in on the page %s, offset %u", llstr(page,llbuff), (uint) (old_keypos-buff)); goto err; } (*keys)+=tmp_keys-1; continue; } /* fall through */ } if (record >= info->state->data_file_length) { #ifndef DBUG_OFF char llbuff2[22], llbuff3[22]; #endif mi_check_print_error(param,"Found key at page %s that points to record outside datafile",llstr(page,llbuff)); DBUG_PRINT("test",("page: %s record: %s filelength: %s", llstr(page,llbuff),llstr(record,llbuff2), llstr(info->state->data_file_length,llbuff3))); DBUG_DUMP("key",(uchar*) key,key_length); DBUG_DUMP("new_in_page",(uchar*) old_keypos,(uint) (keypos-old_keypos)); goto err; } param->record_checksum+=(ha_checksum) record; } if (keypos != endpos) { mi_check_print_error(param,"Keyblock size at page %s is not correct. Block length: %d key length: %d", llstr(page,llbuff), used_length, (keypos - buff)); goto err; } my_afree((uchar*) temp_buff); DBUG_RETURN(0); err: my_afree((uchar*) temp_buff); DBUG_RETURN(1); } /* chk_index */ /* Calculate a checksum of 1+2+3+4...N = N*(N+1)/2 without overflow */ static ha_checksum calc_checksum(ha_rows count) { ulonglong sum,a,b; DBUG_ENTER("calc_checksum"); sum=0; a=count; b=count+1; if (a & 1) b>>=1; else a>>=1; while (b) { if (b & 1) sum+=a; a<<=1; b>>=1; } DBUG_PRINT("exit",("sum: %lx",(ulong) sum)); DBUG_RETURN((ha_checksum) sum); } /* calc_checksum */ /* Calc length of key in normal isam */ static uint isam_key_length(MI_INFO *info, register MI_KEYDEF *keyinfo) { uint length; HA_KEYSEG *keyseg; DBUG_ENTER("isam_key_length"); length= info->s->rec_reflength; for (keyseg=keyinfo->seg ; keyseg->type ; keyseg++) length+= keyseg->length; DBUG_PRINT("exit",("length: %d",length)); DBUG_RETURN(length); } /* key_length */ /* Check that record-link is ok */ int chk_data_link(MI_CHECK *param, MI_INFO *info,int extend) { int error,got_error,flag; uint key,UNINIT_VAR(left_length),b_type,field; ha_rows records,del_blocks; my_off_t used,empty,pos,splits,UNINIT_VAR(start_recpos), del_length,link_used,start_block; uchar *record= 0, *UNINIT_VAR(to); char llbuff[22],llbuff2[22],llbuff3[22]; ha_checksum intern_record_checksum; ha_checksum key_checksum[HA_MAX_POSSIBLE_KEY]; my_bool static_row_size; MI_KEYDEF *keyinfo; MI_BLOCK_INFO block_info; DBUG_ENTER("chk_data_link"); if (!(param->testflag & T_SILENT)) { if (extend) puts("- check records and index references"); else puts("- check record links"); } if (!mi_alloc_rec_buff(info, -1, &record)) { mi_check_print_error(param,"Not enough memory for record"); DBUG_RETURN(-1); } records=del_blocks=0; used=link_used=splits=del_length=0; intern_record_checksum=param->glob_crc=0; got_error=error=0; empty=info->s->pack.header_length; /* Check how to calculate checksum of rows */ static_row_size=1; if (info->s->data_file_type == COMPRESSED_RECORD) { for (field=0 ; field < info->s->base.fields ; field++) { if (info->s->rec[field].base_type == FIELD_BLOB || info->s->rec[field].base_type == FIELD_VARCHAR) { static_row_size=0; break; } } } pos=my_b_tell(&param->read_cache); bzero((char*) key_checksum, info->s->base.keys * sizeof(key_checksum[0])); while (pos < info->state->data_file_length) { if (*killed_ptr(param)) goto err2; switch (info->s->data_file_type) { case STATIC_RECORD: if (my_b_read(&param->read_cache,(uchar*) record, info->s->base.pack_reclength)) goto err; start_recpos=pos; pos+=info->s->base.pack_reclength; splits++; if (*record == '\0') { del_blocks++; del_length+=info->s->base.pack_reclength; continue; /* Record removed */ } param->glob_crc+= 
mi_static_checksum(info,record); used+=info->s->base.pack_reclength; break; case DYNAMIC_RECORD: flag=block_info.second_read=0; block_info.next_filepos=pos; do { if (_mi_read_cache(&param->read_cache,(uchar*) block_info.header, (start_block=block_info.next_filepos), sizeof(block_info.header), (flag ? 0 : READING_NEXT) | READING_HEADER)) goto err; if (start_block & (MI_DYN_ALIGN_SIZE-1)) { mi_check_print_error(param,"Wrong aligned block at %s", llstr(start_block,llbuff)); goto err2; } b_type=_mi_get_block_info(&block_info,-1,start_block); if (b_type & (BLOCK_DELETED | BLOCK_ERROR | BLOCK_SYNC_ERROR | BLOCK_FATAL_ERROR)) { if (b_type & BLOCK_SYNC_ERROR) { if (flag) { mi_check_print_error(param,"Unexpected byte: %d at link: %s", (int) block_info.header[0], llstr(start_block,llbuff)); goto err2; } pos=block_info.filepos+block_info.block_len; goto next; } if (b_type & BLOCK_DELETED) { if (block_info.block_len < info->s->base.min_block_length) { mi_check_print_error(param, "Deleted block with impossible length %lu at %s", block_info.block_len,llstr(pos,llbuff)); goto err2; } if ((block_info.next_filepos != HA_OFFSET_ERROR && block_info.next_filepos >= info->state->data_file_length) || (block_info.prev_filepos != HA_OFFSET_ERROR && block_info.prev_filepos >= info->state->data_file_length)) { mi_check_print_error(param,"Delete link points outside datafile at %s", llstr(pos,llbuff)); goto err2; } del_blocks++; del_length+=block_info.block_len; pos=block_info.filepos+block_info.block_len; splits++; goto next; } mi_check_print_error(param,"Wrong bytesec: %d-%d-%d at linkstart: %s", block_info.header[0],block_info.header[1], block_info.header[2], llstr(start_block,llbuff)); goto err2; } if (info->state->data_file_length < block_info.filepos+ block_info.block_len) { mi_check_print_error(param, "Recordlink that points outside datafile at %s", llstr(pos,llbuff)); got_error=1; break; } splits++; if (!flag++) /* First block */ { start_recpos=pos; pos=block_info.filepos+block_info.block_len; if (block_info.rec_len > (uint) info->s->base.max_pack_length) { mi_check_print_error(param,"Found too long record (%lu) at %s", (ulong) block_info.rec_len, llstr(start_recpos,llbuff)); got_error=1; break; } if (info->s->base.blobs) { if (!(to= mi_alloc_rec_buff(info, block_info.rec_len, &info->rec_buff))) { mi_check_print_error(param, "Not enough memory (%lu) for blob at %s", (ulong) block_info.rec_len, llstr(start_recpos,llbuff)); got_error=1; break; } } else to= info->rec_buff; left_length=block_info.rec_len; } if (left_length < block_info.data_len) { mi_check_print_error(param,"Found too long record (%lu) at %s", (ulong) block_info.data_len, llstr(start_recpos,llbuff)); got_error=1; break; } if (_mi_read_cache(&param->read_cache,(uchar*) to,block_info.filepos, (uint) block_info.data_len, flag == 1 ? READING_NEXT : 0)) goto err; to+=block_info.data_len; link_used+= block_info.filepos-start_block; used+= block_info.filepos - start_block + block_info.data_len; empty+=block_info.block_len-block_info.data_len; left_length-=block_info.data_len; if (left_length) { if (b_type & BLOCK_LAST) { mi_check_print_error(param, "Wrong record length %s of %s at %s", llstr(block_info.rec_len-left_length,llbuff), llstr(block_info.rec_len, llbuff2), llstr(start_recpos,llbuff3)); got_error=1; break; } if (info->state->data_file_length < block_info.next_filepos) { mi_check_print_error(param, "Found next-recordlink that points outside datafile at %s", llstr(block_info.filepos,llbuff)); got_error=1; break; } } } while (left_length); if (! 
got_error) { if (_mi_rec_unpack(info,record,info->rec_buff,block_info.rec_len) == MY_FILE_ERROR) { mi_check_print_error(param,"Found wrong record at %s", llstr(start_recpos,llbuff)); got_error=1; } else { info->checksum=mi_checksum(info,record); if (param->testflag & (T_EXTEND | T_MEDIUM | T_VERBOSE)) { if (_mi_rec_check(info,record, info->rec_buff,block_info.rec_len, test(info->s->calc_checksum))) { mi_check_print_error(param,"Found wrong packed record at %s", llstr(start_recpos,llbuff)); got_error=1; } } if (!got_error) param->glob_crc+= info->checksum; } } else if (!flag) pos=block_info.filepos+block_info.block_len; break; case COMPRESSED_RECORD: if (_mi_read_cache(&param->read_cache,(uchar*) block_info.header, pos, info->s->pack.ref_length, READING_NEXT)) goto err; start_recpos=pos; splits++; (void) _mi_pack_get_block_info(info, &info->bit_buff, &block_info, &info->rec_buff, -1, start_recpos); pos=block_info.filepos+block_info.rec_len; if (block_info.rec_len < (uint) info->s->min_pack_length || block_info.rec_len > (uint) info->s->max_pack_length) { mi_check_print_error(param, "Found block with wrong recordlength: %d at %s", block_info.rec_len, llstr(start_recpos,llbuff)); got_error=1; break; } if (_mi_read_cache(&param->read_cache,(uchar*) info->rec_buff, block_info.filepos, block_info.rec_len, READING_NEXT)) goto err; if (_mi_pack_rec_unpack(info, &info->bit_buff, record, info->rec_buff, block_info.rec_len)) { mi_check_print_error(param,"Found wrong record at %s", llstr(start_recpos,llbuff)); got_error=1; } if (static_row_size) param->glob_crc+= mi_static_checksum(info,record); else param->glob_crc+= mi_checksum(info,record); link_used+= (block_info.filepos - start_recpos); used+= (pos-start_recpos); break; case BLOCK_RECORD: assert(0); /* Impossible */ } /* switch */ if (! got_error) { intern_record_checksum+=(ha_checksum) start_recpos; records++; if (param->testflag & T_WRITE_LOOP && records % WRITE_COUNT == 0) { printf("%s\r", llstr(records,llbuff)); (void) fflush(stdout); } /* Check if keys match the record */ for (key=0,keyinfo= info->s->keyinfo; key < info->s->base.keys; key++,keyinfo++) { if (mi_is_key_active(info->s->state.key_map, key)) { if(!(keyinfo->flag & HA_FULLTEXT)) { uint key_length=_mi_make_key(info,key,info->lastkey,record, start_recpos); if (extend) { /* We don't need to lock the key tree here as we don't allow concurrent threads when running myisamchk */ int search_result= #ifdef HAVE_RTREE_KEYS (keyinfo->flag & HA_SPATIAL) ? 
rtree_find_first(info, key, info->lastkey, key_length, MBR_EQUAL | MBR_DATA) : #endif _mi_search(info,keyinfo,info->lastkey,key_length, SEARCH_SAME, info->s->state.key_root[key]); if (search_result) { mi_check_print_error(param,"Record at: %10s " "Can't find key for index: %2d", llstr(start_recpos,llbuff),key+1); if (error++ > MAXERR || !(param->testflag & T_VERBOSE)) goto err2; } } else key_checksum[key]+=mi_byte_checksum((uchar*) info->lastkey, key_length); } } } } else { got_error=0; if (error++ > MAXERR || !(param->testflag & T_VERBOSE)) goto err2; } next:; /* Next record */ } if (param->testflag & T_WRITE_LOOP) { (void) fputs(" \r",stdout); (void) fflush(stdout); } if (records != info->state->records) { mi_check_print_error(param,"Record-count is not ok; is %-10s Should be: %s", llstr(records,llbuff), llstr(info->state->records,llbuff2)); error=1; } else if (param->record_checksum && param->record_checksum != intern_record_checksum) { mi_check_print_error(param, "Keypointers and record positions doesn't match"); error=1; } else if (param->glob_crc != info->state->checksum && (info->s->options & (HA_OPTION_CHECKSUM | HA_OPTION_COMPRESS_RECORD))) { mi_check_print_warning(param, "Record checksum is not the same as checksum stored in the index file\n"); error=1; } else if (!extend) { for (key=0 ; key < info->s->base.keys; key++) { if (key_checksum[key] != param->key_crc[key] && !(info->s->keyinfo[key].flag & (HA_FULLTEXT | HA_SPATIAL))) { mi_check_print_error(param,"Checksum for key: %2d doesn't match checksum for records", key+1); error=1; } } } if (del_length != info->state->empty) { mi_check_print_warning(param, "Found %s deleted space. Should be %s", llstr(del_length,llbuff2), llstr(info->state->empty,llbuff)); } if (used+empty+del_length != info->state->data_file_length) { mi_check_print_warning(param, "Found %s record-data and %s unused data and %s deleted-data", llstr(used,llbuff),llstr(empty,llbuff2), llstr(del_length,llbuff3)); mi_check_print_warning(param, "Total %s, Should be: %s", llstr((used+empty+del_length),llbuff), llstr(info->state->data_file_length,llbuff2)); } if (del_blocks != info->state->del) { mi_check_print_warning(param, "Found %10s deleted blocks Should be: %s", llstr(del_blocks,llbuff), llstr(info->state->del,llbuff2)); } if (splits != info->s->state.split) { mi_check_print_warning(param, "Found %10s key parts. Should be: %s", llstr(splits,llbuff), llstr(info->s->state.split,llbuff2)); } if (param->testflag & T_INFO) { if (param->warning_printed || param->error_printed) puts(""); if (used != 0 && ! param->error_printed) { printf("Records:%18s M.recordlength:%9lu Packed:%14.0f%%\n", llstr(records,llbuff), (long)((used-link_used)/records), (info->s->base.blobs ? 0.0 : (ulonglong2double((ulonglong) info->s->base.reclength*records)- my_off_t2double(used))/ ulonglong2double((ulonglong) info->s->base.reclength*records)*100.0)); printf("Recordspace used:%9.0f%% Empty space:%12d%% Blocks/Record: %6.2f\n", (ulonglong2double(used-link_used)/ulonglong2double(used-link_used+empty)*100.0), (!records ? 
100 : (int) (ulonglong2double(del_length+empty)/ my_off_t2double(used)*100.0)), ulonglong2double(splits - del_blocks) / records); } printf("Record blocks:%12s Delete blocks:%10s\n", llstr(splits-del_blocks,llbuff),llstr(del_blocks,llbuff2)); printf("Record data: %12s Deleted data: %10s\n", llstr(used-link_used,llbuff),llstr(del_length,llbuff2)); printf("Lost space: %12s Linkdata: %10s\n", llstr(empty,llbuff),llstr(link_used,llbuff2)); } my_free(mi_get_rec_buff_ptr(info, record)); DBUG_RETURN (error); err: mi_check_print_error(param,"got error: %d when reading datafile at record: %s",my_errno, llstr(records,llbuff)); err2: my_free(mi_get_rec_buff_ptr(info, record)); param->testflag|=T_RETRY_WITHOUT_QUICK; DBUG_RETURN(1); } /* chk_data_link */ /** @brief Drop all indexes @param[in] param check parameters @param[in] info MI_INFO handle @param[in] force whether to force dropping all indexes @return status @retval 0 OK @retval != 0 Error @note Once allocated, index blocks remain part of the key file forever. When indexes are disabled, no block is freed. When enabling indexes, no block is freed either. The new indexes are created from new blocks. (Bug #4692) Before recreating formerly disabled indexes, the unused blocks must be freed. There are two options to do this: - Follow the tree of disabled indexes, add all blocks to the deleted blocks chain. Would require a lot of random I/O. - Drop all blocks by clearing all index root pointers and all delete chain pointers and resetting key_file_length to the end of the index file header. This requires recreating all indexes, even those that may still be intact. The second method is probably faster in most cases. When disabling indexes, MySQL disables either all indexes or all non-unique indexes. When MySQL [re-]enables disabled indexes (T_CREATE_MISSING_KEYS), then we either have "lost" blocks in the index file, or there are no non-unique indexes. In the latter case, mi_repair*() would not be called as there would be no disabled indexes. If there were more unique indexes than disabled (non-unique) indexes, we could use the first method. But this is not implemented yet. For now we drop and recreate all indexes when repair is called. However, there is an exception. Sometimes MySQL disables non-unique indexes when the table is empty (e.g. when copying a table in mysql_alter_table()). When enabling the non-unique indexes, they are still empty. So there is no index block that can be lost. This optimization is implemented in this function. Note that in normal repair (T_CREATE_MISSING_KEYS not set) we recreate all enabled indexes unconditionally. We do not change the key_map. Otherwise we invert the key map temporarily (outside of this function) and recreate the then "seemingly" enabled indexes. When we cannot use the optimization, and drop all indexes, we pretend that all indexes were disabled. By the inversion, we will then recreate all indexes. */ static int mi_drop_all_indexes(MI_CHECK *param, MI_INFO *info, my_bool force) { MYISAM_SHARE *share= info->s; MI_STATE_INFO *state= &share->state; uint i; int error; DBUG_ENTER("mi_drop_all_indexes"); /* If any of the disabled indexes has a key block assigned, we must drop and recreate all indexes to avoid losing index blocks. If we want to recreate disabled indexes only _and_ all of these indexes are empty, we don't need to recreate the existing indexes.
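As an illustrative summary only (the exact checks follow below): when T_CREATE_MISSING_KEYS is set and every disabled key still has key_root[i] == HA_OFFSET_ERROR, we merely flush the key cache and let mi_repair*() build the missing keys; in every other case, including force != 0, all key roots and delete chains are cleared and every index is rebuilt.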
*/ if (!force && (param->testflag & T_CREATE_MISSING_KEYS)) { DBUG_PRINT("repair", ("creating missing indexes")); for (i= 0; i < share->base.keys; i++) { DBUG_PRINT("repair", ("index #: %u key_root: 0x%lx active: %d", i, (long) state->key_root[i], mi_is_key_active(state->key_map, i))); if ((state->key_root[i] != HA_OFFSET_ERROR) && !mi_is_key_active(state->key_map, i)) { /* This index has at least one key block and it is disabled. We would lose its block(s) if would just recreate it. So we need to drop and recreate all indexes. */ DBUG_PRINT("repair", ("nonempty and disabled: recreate all")); break; } } if (i >= share->base.keys) { /* All of the disabled indexes are empty. We can just recreate them. Flush dirty blocks of this index file from key cache and remove all blocks of this index file from key cache. */ DBUG_PRINT("repair", ("all disabled are empty: create missing")); error= flush_key_blocks(share->key_cache, share->kfile, FLUSH_FORCE_WRITE); goto end; } /* We do now drop all indexes and declare them disabled. With the T_CREATE_MISSING_KEYS flag, mi_repair*() will recreate all disabled indexes and enable them. */ mi_clear_all_keys_active(state->key_map); DBUG_PRINT("repair", ("declared all indexes disabled")); } /* Remove all key blocks of this index file from key cache. */ if ((error= flush_key_blocks(share->key_cache, share->kfile, FLUSH_IGNORE_CHANGED))) goto end; /* purecov: inspected */ /* Clear index root block pointers. */ for (i= 0; i < share->base.keys; i++) state->key_root[i]= HA_OFFSET_ERROR; /* Clear the delete chains. */ for (i= 0; i < state->header.max_block_size_index; i++) state->key_del[i]= HA_OFFSET_ERROR; /* Reset index file length to end of index file header. */ info->state->key_file_length= share->base.keystart; DBUG_PRINT("repair", ("dropped all indexes")); /* error= 0; set by last (error= flush_key_bocks()). */ end: DBUG_RETURN(error); } /* Recover old table by reading each record and writing all keys */ /* Save new datafile-name in temp_filename */ int mi_repair(MI_CHECK *param, register MI_INFO *info, char * name, int rep_quick) { int error,got_error; ha_rows start_records,new_header_length; my_off_t del; File new_file; MYISAM_SHARE *share=info->s; char llbuff[22],llbuff2[22]; SORT_INFO sort_info; MI_SORT_PARAM sort_param; DBUG_ENTER("mi_repair"); bzero((char *)&sort_info, sizeof(sort_info)); bzero((char *)&sort_param, sizeof(sort_param)); start_records=info->state->records; new_header_length=(param->testflag & T_UNPACK) ? 
0L : share->pack.header_length; got_error=1; new_file= -1; sort_param.sort_info=&sort_info; if (!(param->testflag & T_SILENT)) { printf("- recovering (with keycache) MyISAM-table '%s'\n",name); printf("Data records: %s\n", llstr(info->state->records,llbuff)); } param->testflag|=T_REP; /* for easy checking */ if (info->s->options & (HA_OPTION_CHECKSUM | HA_OPTION_COMPRESS_RECORD)) param->testflag|=T_CALC_CHECKSUM; DBUG_ASSERT(param->use_buffers < SIZE_T_MAX); if (!param->using_global_keycache) (void) init_key_cache(dflt_key_cache, param->key_cache_block_size, param->use_buffers, 0, 0); if (init_io_cache(&param->read_cache,info->dfile, (uint) param->read_buffer_length, READ_CACHE,share->pack.header_length,1,MYF(MY_WME))) { bzero(&info->rec_cache,sizeof(info->rec_cache)); goto err; } if (!rep_quick) if (init_io_cache(&info->rec_cache,-1,(uint) param->write_buffer_length, WRITE_CACHE, new_header_length, 1, MYF(MY_WME | MY_WAIT_IF_FULL))) goto err; info->opt_flag|=WRITE_CACHE_USED; if (!mi_alloc_rec_buff(info, -1, &sort_param.record) || !mi_alloc_rec_buff(info, -1, &sort_param.rec_buff)) { mi_check_print_error(param, "Not enough memory for extra record"); goto err; } if (!rep_quick) { /* Get real path for data file */ if ((new_file= mysql_file_create(mi_key_file_datatmp, fn_format(param->temp_filename, share->data_file_name, "", DATA_TMP_EXT, 2+4), 0, param->tmpfile_createflag, MYF(0))) < 0) { mi_check_print_error(param,"Can't create new tempfile: '%s'", param->temp_filename); goto err; } if (new_header_length && filecopy(param,new_file,info->dfile,0L,new_header_length, "datafile-header")) goto err; info->s->state.dellink= HA_OFFSET_ERROR; info->rec_cache.file=new_file; if (param->testflag & T_UNPACK) { share->options&= ~HA_OPTION_COMPRESS_RECORD; mi_int2store(share->state.header.options,share->options); } } sort_info.info=info; sort_info.param = param; sort_param.read_cache=param->read_cache; sort_param.pos=sort_param.max_pos=share->pack.header_length; sort_param.filepos=new_header_length; param->read_cache.end_of_file=sort_info.filelength= mysql_file_seek(info->dfile, 0L, MY_SEEK_END, MYF(0)); sort_info.dupp=0; sort_param.fix_datafile= (my_bool) (! rep_quick); sort_param.master=1; sort_info.max_records= ~(ha_rows) 0; set_data_file_type(&sort_info, share); del=info->state->del; info->state->records=info->state->del=share->state.split=0; info->state->empty=0; param->glob_crc=0; if (param->testflag & T_CALC_CHECKSUM) sort_param.calc_checksum= 1; info->update= (short) (HA_STATE_CHANGED | HA_STATE_ROW_CHANGED); /* This function always recreates all enabled indexes. */ if (param->testflag & T_CREATE_MISSING_KEYS) mi_set_all_keys_active(share->state.key_map, share->base.keys); mi_drop_all_indexes(param, info, TRUE); lock_memory(param); /* Everything is alloced */ /* Re-create all keys, which are set in key_map. 
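Each record returned by sort_get_next_record() gets its keys inserted through writekeys(); on a duplicate-key error the record is reported, counted in sort_info.dupp and skipped, unless we run quick (-q) without T_FORCE_UNIQUENESS, in which case we abort and ask for a retry without -q.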
*/ while (!(error=sort_get_next_record(&sort_param))) { if (writekeys(&sort_param)) { if (my_errno != HA_ERR_FOUND_DUPP_KEY) goto err; DBUG_DUMP("record",(uchar*) sort_param.record,share->base.pack_reclength); mi_check_print_info(param,"Duplicate key %2d for record at %10s against new record at %10s", info->errkey+1, llstr(sort_param.start_recpos,llbuff), llstr(info->dupp_key_pos,llbuff2)); if (param->testflag & T_VERBOSE) { (void) _mi_make_key(info,(uint) info->errkey,info->lastkey, sort_param.record,0L); _mi_print_key(stdout,share->keyinfo[info->errkey].seg,info->lastkey, USE_WHOLE_KEY); } sort_info.dupp++; if ((param->testflag & (T_FORCE_UNIQUENESS|T_QUICK)) == T_QUICK) { param->testflag|=T_RETRY_WITHOUT_QUICK; param->error_printed=1; goto err; } continue; } if (sort_write_record(&sort_param)) goto err; } if (error > 0 || write_data_suffix(&sort_info, (my_bool)!rep_quick) || flush_io_cache(&info->rec_cache) || param->read_cache.error < 0) goto err; if (param->testflag & T_WRITE_LOOP) { (void) fputs(" \r",stdout); (void) fflush(stdout); } if (mysql_file_chsize(share->kfile, info->state->key_file_length, 0, MYF(0))) { mi_check_print_warning(param, "Can't change size of indexfile, error: %d", my_errno); goto err; } if (rep_quick && del+sort_info.dupp != info->state->del) { mi_check_print_error(param,"Couldn't fix table with quick recovery: Found wrong number of deleted records"); mi_check_print_error(param,"Run recovery again without -q"); got_error=1; param->retry_repair=1; param->testflag|=T_RETRY_WITHOUT_QUICK; goto err; } if (param->testflag & T_SAFE_REPAIR) { /* Don't repair if we loosed more than one row */ if (info->state->records+1 < start_records) { info->state->records=start_records; got_error=1; goto err; } } if (!rep_quick) { mysql_file_close(info->dfile, MYF(0)); info->dfile=new_file; info->state->data_file_length=sort_param.filepos; share->state.version=(ulong) time((time_t*) 0); /* Force reopen */ } else { info->state->data_file_length=sort_param.max_pos; } if (param->testflag & T_CALC_CHECKSUM) info->state->checksum=param->glob_crc; if (!(param->testflag & T_SILENT)) { if (start_records != info->state->records) printf("Data records: %s\n", llstr(info->state->records,llbuff)); if (sort_info.dupp) mi_check_print_warning(param, "%s records have been removed", llstr(sort_info.dupp,llbuff)); } got_error=0; /* If invoked by external program that uses thr_lock */ if (&share->state.state != info->state) memcpy( &share->state.state, info->state, sizeof(*info->state)); err: if (!got_error) { /* Replace the actual file with the temporary file */ if (new_file >= 0) { mysql_file_close(new_file, MYF(0)); info->dfile=new_file= -1; /* On Windows, the old data file cannot be deleted if it is either open, or memory mapped. Closing the file won't remove the memory map implicilty on Windows. We closed the data file, but we keep the MyISAM table open. A memory map will be closed on the final mi_close() only. So we need to unmap explicitly here. After renaming the new file under the hook, we couldn't use the map of the old file any more anyway. */ if (info->s->file_map) { (void) my_munmap((char*) info->s->file_map, (size_t) info->s->mmaped_length); info->s->file_map= NULL; } if (change_to_newfile(share->data_file_name, MI_NAME_DEXT, DATA_TMP_EXT, (param->testflag & T_BACKUP_DATA ? MYF(MY_REDEL_MAKE_BACKUP): MYF(0))) || mi_open_datafile(info,share,name,-1)) got_error=1; param->retry_repair= 0; } } if (got_error) { if (! 
param->error_printed) mi_check_print_error(param,"%d for record at pos %s",my_errno, llstr(sort_param.start_recpos,llbuff)); if (new_file >= 0) { (void) mysql_file_close(new_file, MYF(0)); (void) mysql_file_delete(mi_key_file_datatmp, param->temp_filename, MYF(MY_WME)); info->rec_cache.file=-1; /* don't flush data to new_file, it's closed */ } mi_mark_crashed_on_repair(info); } my_free(mi_get_rec_buff_ptr(info, sort_param.rec_buff)); my_free(mi_get_rec_buff_ptr(info, sort_param.record)); my_free(sort_info.buff); (void) end_io_cache(&param->read_cache); info->opt_flag&= ~(READ_CACHE_USED | WRITE_CACHE_USED); (void) end_io_cache(&info->rec_cache); got_error|=flush_blocks(param, share->key_cache, share->kfile); if (!got_error && param->testflag & T_UNPACK) { share->state.header.options[0]&= (uchar) ~HA_OPTION_COMPRESS_RECORD; share->pack.header_length=0; share->data_file_type=sort_info.new_data_file_type; } share->state.changed|= (STATE_NOT_OPTIMIZED_KEYS | STATE_NOT_SORTED_PAGES | STATE_NOT_ANALYZED); DBUG_RETURN(got_error); } /* Uppate keyfile when doing repair */ static int writekeys(MI_SORT_PARAM *sort_param) { register uint i; uchar *key; MI_INFO *info= sort_param->sort_info->info; uchar *buff= sort_param->record; my_off_t filepos= sort_param->filepos; DBUG_ENTER("writekeys"); key=info->lastkey+info->s->base.max_key_length; for (i=0 ; i < info->s->base.keys ; i++) { if (mi_is_key_active(info->s->state.key_map, i)) { if (info->s->keyinfo[i].flag & HA_FULLTEXT ) { if (_mi_ft_add(info, i, key, buff, filepos)) goto err; } #ifdef HAVE_SPATIAL else if (info->s->keyinfo[i].flag & HA_SPATIAL) { uint key_length=_mi_make_key(info,i,key,buff,filepos); if (rtree_insert(info, i, key, key_length)) goto err; } #endif /*HAVE_SPATIAL*/ else { uint key_length=_mi_make_key(info,i,key,buff,filepos); if (_mi_ck_write(info,i,key,key_length)) goto err; } } } DBUG_RETURN(0); err: if (my_errno == HA_ERR_FOUND_DUPP_KEY) { info->errkey=(int) i; /* This key was found */ while ( i-- > 0 ) { if (mi_is_key_active(info->s->state.key_map, i)) { if (info->s->keyinfo[i].flag & HA_FULLTEXT) { if (_mi_ft_del(info,i, key,buff,filepos)) break; } else { uint key_length=_mi_make_key(info,i,key,buff,filepos); if (_mi_ck_delete(info,i,key,key_length)) break; } } } } /* Remove checksum that was added to glob_crc in sort_get_next_record */ if (sort_param->calc_checksum) sort_param->sort_info->param->glob_crc-= info->checksum; DBUG_PRINT("error",("errno: %d",my_errno)); DBUG_RETURN(-1); } /* writekeys */ /* Change all key-pointers that points to a records */ int movepoint(register MI_INFO *info, uchar *record, my_off_t oldpos, my_off_t newpos, uint prot_key) { register uint i; uchar *key; uint key_length; DBUG_ENTER("movepoint"); key=info->lastkey+info->s->base.max_key_length; for (i=0 ; i < info->s->base.keys; i++) { if (i != prot_key && mi_is_key_active(info->s->state.key_map, i)) { key_length=_mi_make_key(info,i,key,record,oldpos); if (info->s->keyinfo[i].flag & HA_NOSAME) { /* Change pointer direct */ uint nod_flag; MI_KEYDEF *keyinfo; keyinfo=info->s->keyinfo+i; if (_mi_search(info,keyinfo,key,USE_WHOLE_KEY, (uint) (SEARCH_SAME | SEARCH_SAVE_BUFF), info->s->state.key_root[i])) DBUG_RETURN(-1); nod_flag=mi_test_if_nod(info->buff); _mi_dpointer(info,info->int_keypos-nod_flag- info->s->rec_reflength,newpos); if (_mi_write_keypage(info,keyinfo,info->last_keypage, DFLT_INIT_HITS,info->buff)) DBUG_RETURN(-1); } else { /* Change old key to new */ if (_mi_ck_delete(info,i,key,key_length)) DBUG_RETURN(-1); 
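/* Insert the key again, now pointing at the new record position. */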
key_length=_mi_make_key(info,i,key,record,newpos); if (_mi_ck_write(info,i,key,key_length)) DBUG_RETURN(-1); } } } DBUG_RETURN(0); } /* movepoint */ /* Tell system that we want all memory for our cache */ void lock_memory(MI_CHECK *param __attribute__((unused))) { #ifdef SUN_OS /* Key-caching thrashes on sun 4.1 */ if (param->opt_lock_memory) { int success = mlockall(MCL_CURRENT); /* or plock(DATLOCK); */ if (geteuid() == 0 && success != 0) mi_check_print_warning(param, "Failed to lock memory. errno %d",my_errno); } #endif } /* lock_memory */ /* Flush all changed blocks to disk */ int flush_blocks(MI_CHECK *param, KEY_CACHE *key_cache, File file) { if (flush_key_blocks(key_cache, file, FLUSH_RELEASE)) { mi_check_print_error(param,"%d when trying to write bufferts",my_errno); return(1); } if (!param->using_global_keycache) end_key_cache(key_cache,1); return 0; } /* flush_blocks */ /* Sort index for more efficient reads */ int mi_sort_index(MI_CHECK *param, register MI_INFO *info, char * name) { reg2 uint key; reg1 MI_KEYDEF *keyinfo; File new_file; my_off_t index_pos[HA_MAX_POSSIBLE_KEY]; uint r_locks,w_locks; int old_lock; MYISAM_SHARE *share=info->s; MI_STATE_INFO old_state; DBUG_ENTER("mi_sort_index"); /* cannot sort index files with R-tree indexes */ for (key= 0,keyinfo= &share->keyinfo[0]; key < share->base.keys ; key++,keyinfo++) if (keyinfo->key_alg == HA_KEY_ALG_RTREE) DBUG_RETURN(0); if (!(param->testflag & T_SILENT)) printf("- Sorting index for MyISAM-table '%s'\n",name); /* Get real path for index file */ fn_format(param->temp_filename,name,"", MI_NAME_IEXT,2+4+32); if ((new_file= mysql_file_create(mi_key_file_datatmp, fn_format(param->temp_filename, param->temp_filename, "", INDEX_TMP_EXT, 2+4), 0, param->tmpfile_createflag, MYF(0))) <= 0) { mi_check_print_error(param,"Can't create new tempfile: '%s'", param->temp_filename); DBUG_RETURN(-1); } if (filecopy(param, new_file,share->kfile,0L, (ulong) share->base.keystart, "headerblock")) goto err; param->new_file_pos=share->base.keystart; for (key= 0,keyinfo= &share->keyinfo[0]; key < share->base.keys ; key++,keyinfo++) { if (!
mi_is_key_active(info->s->state.key_map, key)) continue; if (share->state.key_root[key] != HA_OFFSET_ERROR) { index_pos[key]=param->new_file_pos; /* Write first block here */ if (sort_one_index(param,info,keyinfo,share->state.key_root[key], new_file)) goto err; } else index_pos[key]= HA_OFFSET_ERROR; /* No blocks */ } /* Flush key cache for this file if we are calling this outside myisamchk */ flush_key_blocks(share->key_cache,share->kfile, FLUSH_IGNORE_CHANGED); share->state.version=(ulong) time((time_t*) 0); old_state= share->state; /* save state if not stored */ r_locks= share->r_locks; w_locks= share->w_locks; old_lock= info->lock_type; /* Put same locks as old file */ share->r_locks= share->w_locks= share->tot_locks= 0; (void) _mi_writeinfo(info,WRITEINFO_UPDATE_KEYFILE); (void) mysql_file_close(share->kfile, MYF(MY_WME)); share->kfile = -1; (void) mysql_file_close(new_file, MYF(MY_WME)); if (change_to_newfile(share->index_file_name, MI_NAME_IEXT, INDEX_TMP_EXT, MYF(0)) || mi_open_keyfile(share)) goto err2; info->lock_type= F_UNLCK; /* Force mi_readinfo to lock */ _mi_readinfo(info,F_WRLCK,0); /* Will lock the table */ info->lock_type= old_lock; share->r_locks= r_locks; share->w_locks= w_locks; share->tot_locks= r_locks+w_locks; share->state= old_state; /* Restore old state */ info->state->key_file_length=param->new_file_pos; info->update= (short) (HA_STATE_CHANGED | HA_STATE_ROW_CHANGED); for (key=0 ; key < info->s->base.keys ; key++) info->s->state.key_root[key]=index_pos[key]; for (key=0 ; key < info->s->state.header.max_block_size_index ; key++) info->s->state.key_del[key]= HA_OFFSET_ERROR; info->s->state.changed&= ~STATE_NOT_SORTED_PAGES; DBUG_RETURN(0); err: (void) mysql_file_close(new_file, MYF(MY_WME)); err2: (void) mysql_file_delete(mi_key_file_datatmp, param->temp_filename, MYF(MY_WME)); DBUG_RETURN(-1); } /* mi_sort_index */ /* Sort records recursive using one index */ static int sort_one_index(MI_CHECK *param, MI_INFO *info, MI_KEYDEF *keyinfo, my_off_t pagepos, File new_file) { uint length,nod_flag,used_length, key_length; uchar *buff,*keypos,*endpos; uchar key[HA_MAX_POSSIBLE_KEY_BUFF]; my_off_t new_page_pos,next_page; char llbuff[22]; DBUG_ENTER("sort_one_index"); /* cannot walk over R-tree indices */ DBUG_ASSERT(keyinfo->key_alg != HA_KEY_ALG_RTREE); new_page_pos=param->new_file_pos; param->new_file_pos+=keyinfo->block_length; if (!(buff=(uchar*) my_alloca((uint) keyinfo->block_length))) { mi_check_print_error(param,"Not enough memory for key block"); DBUG_RETURN(-1); } if (!_mi_fetch_keypage(info,keyinfo,pagepos,DFLT_INIT_HITS,buff,0)) { mi_check_print_error(param,"Can't read key block from filepos: %s", llstr(pagepos,llbuff)); goto err; } if ((nod_flag=mi_test_if_nod(buff)) || keyinfo->flag & HA_FULLTEXT) { used_length=mi_getint(buff); keypos=buff+2+nod_flag; endpos=buff+used_length; for ( ;; ) { if (nod_flag) { next_page=_mi_kpos(nod_flag,keypos); _mi_kpointer(info,keypos-nod_flag,param->new_file_pos); /* Save new pos */ if (sort_one_index(param,info,keyinfo,next_page,new_file)) { DBUG_PRINT("error", ("From page: %ld, keyoffset: %lu used_length: %d", (ulong) pagepos, (ulong) (keypos - buff), (int) used_length)); DBUG_DUMP("buff",(uchar*) buff,used_length); goto err; } } if (keypos >= endpos || (key_length=(*keyinfo->get_key)(keyinfo,nod_flag,&keypos,key)) == 0) break; DBUG_ASSERT(keypos <= endpos); if (keyinfo->flag & HA_FULLTEXT) { uint off; int subkeys; get_key_full_length_rdonly(off, key); subkeys=ft_sintXkorr(key+off); if (subkeys < 0) { next_page= 
_mi_dpos(info,0,key+key_length); _mi_dpointer(info,keypos-nod_flag-info->s->rec_reflength, param->new_file_pos); /* Save new pos */ if (sort_one_index(param,info,&info->s->ft2_keyinfo, next_page,new_file)) goto err; } } } } /* Fill block with zero and write it to the new index file */ length=mi_getint(buff); bzero((uchar*) buff+length,keyinfo->block_length-length); if (mysql_file_pwrite(new_file, (uchar*) buff, (uint) keyinfo->block_length, new_page_pos, MYF(MY_NABP | MY_WAIT_IF_FULL))) { mi_check_print_error(param,"Can't write indexblock, error: %d",my_errno); goto err; } my_afree((uchar*) buff); DBUG_RETURN(0); err: my_afree((uchar*) buff); DBUG_RETURN(1); } /* sort_one_index */ /* Let temporary file replace old file. This assumes that the new file was created in the same directory as given by realpath(filename). This will ensure that any symlinks that are used will still work. Copy stats from old file to new file, deletes orignal and changes new file name to old file name */ int change_to_newfile(const char * filename, const char * old_ext, const char * new_ext, myf MyFlags) { char old_filename[FN_REFLEN],new_filename[FN_REFLEN]; /* Get real path to filename */ (void) fn_format(old_filename,filename,"",old_ext,2+4+32); return my_redel(old_filename, fn_format(new_filename,old_filename,"",new_ext,2+4), MYF(MY_WME | MY_LINK_WARNING | MyFlags)); } /* change_to_newfile */ /* Locks a whole file */ /* Gives an error-message if file can't be locked */ int lock_file(MI_CHECK *param, File file, my_off_t start, int lock_type, const char *filetype, const char *filename) { if (my_lock(file,lock_type,start,F_TO_EOF, param->testflag & T_WAIT_FOREVER ? MYF(MY_SEEK_NOT_DONE) : MYF(MY_SEEK_NOT_DONE | MY_DONT_WAIT))) { mi_check_print_error(param," %d when locking %s '%s'",my_errno,filetype,filename); param->error_printed=2; /* Don't give that data is crashed */ return 1; } return 0; } /* lock_file */ /* Copy a block between two files */ int filecopy(MI_CHECK *param, File to,File from,my_off_t start, my_off_t length, const char *type) { char tmp_buff[IO_SIZE],*buff; ulong buff_length; DBUG_ENTER("filecopy"); buff_length=(ulong) min(param->write_buffer_length,length); if (!(buff=my_malloc(buff_length,MYF(0)))) { buff=tmp_buff; buff_length=IO_SIZE; } mysql_file_seek(from, start, MY_SEEK_SET, MYF(0)); while (length > buff_length) { if (mysql_file_read(from, (uchar*) buff, buff_length, MYF(MY_NABP)) || mysql_file_write(to, (uchar*) buff, buff_length, param->myf_rw)) goto err; length-= buff_length; } if (mysql_file_read(from, (uchar*) buff, (uint) length, MYF(MY_NABP)) || mysql_file_write(to, (uchar*) buff, (uint) length, param->myf_rw)) goto err; if (buff != tmp_buff) my_free(buff); DBUG_RETURN(0); err: if (buff != tmp_buff) my_free(buff); mi_check_print_error(param,"Can't copy %s to tempfile, error %d", type,my_errno); DBUG_RETURN(1); } /* Repair table or given index using sorting SYNOPSIS mi_repair_by_sort() param Repair parameters info MyISAM handler to repair name Name of table (for warnings) rep_quick set to <> 0 if we should not change data file RESULT 0 ok <>0 Error */ int mi_repair_by_sort(MI_CHECK *param, register MI_INFO *info, const char * name, int rep_quick) { int got_error; uint i; ulong length; ha_rows start_records; my_off_t new_header_length,del; File new_file; MI_SORT_PARAM sort_param; MYISAM_SHARE *share=info->s; HA_KEYSEG *keyseg; ulong *rec_per_key_part; char llbuff[22]; SORT_INFO sort_info; ulonglong UNINIT_VAR(key_map); DBUG_ENTER("mi_repair_by_sort"); start_records=info->state->records; 
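/* Remember the original record count; it is used for the final report and by the T_SAFE_REPAIR check against losing rows. */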
got_error=1; new_file= -1; new_header_length=(param->testflag & T_UNPACK) ? 0 : share->pack.header_length; if (!(param->testflag & T_SILENT)) { printf("- recovering (with sort) MyISAM-table '%s'\n",name); printf("Data records: %s\n", llstr(start_records,llbuff)); } param->testflag|=T_REP; /* for easy checking */ if (info->s->options & (HA_OPTION_CHECKSUM | HA_OPTION_COMPRESS_RECORD)) param->testflag|=T_CALC_CHECKSUM; bzero((char*)&sort_info,sizeof(sort_info)); bzero((char *)&sort_param, sizeof(sort_param)); if (!(sort_info.key_block= alloc_key_blocks(param, (uint) param->sort_key_blocks, share->base.max_key_block_length)) || init_io_cache(&param->read_cache,info->dfile, (uint) param->read_buffer_length, READ_CACHE,share->pack.header_length,1,MYF(MY_WME)) || (! rep_quick && init_io_cache(&info->rec_cache,info->dfile, (uint) param->write_buffer_length, WRITE_CACHE,new_header_length,1, MYF(MY_WME | MY_WAIT_IF_FULL) & param->myf_rw))) goto err; sort_info.key_block_end=sort_info.key_block+param->sort_key_blocks; info->opt_flag|=WRITE_CACHE_USED; info->rec_cache.file=info->dfile; /* for sort_delete_record */ if (!mi_alloc_rec_buff(info, -1, &sort_param.record) || !mi_alloc_rec_buff(info, -1, &sort_param.rec_buff)) { mi_check_print_error(param, "Not enough memory for extra record"); goto err; } if (!rep_quick) { /* Get real path for data file */ if ((new_file= mysql_file_create(mi_key_file_datatmp, fn_format(param->temp_filename, share->data_file_name, "", DATA_TMP_EXT, 2+4), 0, param->tmpfile_createflag, MYF(0))) < 0) { mi_check_print_error(param,"Can't create new tempfile: '%s'", param->temp_filename); goto err; } if (new_header_length && filecopy(param, new_file,info->dfile,0L,new_header_length, "datafile-header")) goto err; if (param->testflag & T_UNPACK) { share->options&= ~HA_OPTION_COMPRESS_RECORD; mi_int2store(share->state.header.options,share->options); } share->state.dellink= HA_OFFSET_ERROR; info->rec_cache.file=new_file; } info->update= (short) (HA_STATE_CHANGED | HA_STATE_ROW_CHANGED); /* Optionally drop indexes and optionally modify the key_map. */ mi_drop_all_indexes(param, info, FALSE); key_map= share->state.key_map; if (param->testflag & T_CREATE_MISSING_KEYS) { /* Invert the copied key_map to recreate all disabled indexes. */ key_map= ~key_map; } sort_info.info=info; sort_info.param = param; set_data_file_type(&sort_info, share); sort_param.filepos=new_header_length; sort_info.dupp=0; sort_info.buff=0; param->read_cache.end_of_file=sort_info.filelength= mysql_file_seek(param->read_cache.file, 0L, MY_SEEK_END, MYF(0)); sort_param.wordlist=NULL; init_alloc_root(&sort_param.wordroot, FTPARSER_MEMROOT_ALLOC_SIZE, 0); if (share->data_file_type == DYNAMIC_RECORD) length=max(share->base.min_pack_length+1,share->base.min_block_length); else if (share->data_file_type == COMPRESSED_RECORD) length=share->base.min_block_length; else length=share->base.pack_reclength; sort_info.max_records= ((param->testflag & T_CREATE_MISSING_KEYS) ? info->state->records : (ha_rows) (sort_info.filelength/length+1)); sort_param.key_cmp=sort_key_cmp; sort_param.lock_in_memory=lock_memory; sort_param.tmpdir=param->tmpdir; sort_param.sort_info=&sort_info; sort_param.fix_datafile= (my_bool) (! 
rep_quick); sort_param.master =1; del=info->state->del; param->glob_crc=0; if (param->testflag & T_CALC_CHECKSUM) sort_param.calc_checksum= 1; rec_per_key_part= param->rec_per_key_part; for (sort_param.key=0 ; sort_param.key < share->base.keys ; rec_per_key_part+=sort_param.keyinfo->keysegs, sort_param.key++) { sort_param.read_cache=param->read_cache; sort_param.keyinfo=share->keyinfo+sort_param.key; sort_param.seg=sort_param.keyinfo->seg; /* Skip this index if it is marked disabled in the copied (and possibly inverted) key_map. */ if (! mi_is_key_active(key_map, sort_param.key)) { /* Remember old statistics for key */ memcpy((char*) rec_per_key_part, (char*) (share->state.rec_per_key_part + (uint) (rec_per_key_part - param->rec_per_key_part)), sort_param.keyinfo->keysegs*sizeof(*rec_per_key_part)); DBUG_PRINT("repair", ("skipping seemingly disabled index #: %u", sort_param.key)); continue; } if ((!(param->testflag & T_SILENT))) printf ("- Fixing index %d\n",sort_param.key+1); sort_param.max_pos=sort_param.pos=share->pack.header_length; keyseg=sort_param.seg; bzero((char*) sort_param.unique,sizeof(sort_param.unique)); sort_param.key_length=share->rec_reflength; for (i=0 ; keyseg[i].type != HA_KEYTYPE_END; i++) { sort_param.key_length+=keyseg[i].length; if (keyseg[i].flag & HA_SPACE_PACK) sort_param.key_length+=get_pack_length(keyseg[i].length); if (keyseg[i].flag & (HA_BLOB_PART | HA_VAR_LENGTH_PART)) sort_param.key_length+=2 + test(keyseg[i].length >= 127); if (keyseg[i].flag & HA_NULL_PART) sort_param.key_length++; } info->state->records=info->state->del=share->state.split=0; info->state->empty=0; if (sort_param.keyinfo->flag & HA_FULLTEXT) { uint ft_max_word_len_for_sort=FT_MAX_WORD_LEN_FOR_SORT* sort_param.keyinfo->seg->charset->mbmaxlen; sort_param.key_length+=ft_max_word_len_for_sort-HA_FT_MAXBYTELEN; /* fulltext indexes may have much more entries than the number of rows in the table. We estimate the number here. */ if (sort_param.keyinfo->parser == &ft_default_parser) { /* for built-in parser the number of generated index entries cannot be larger than the size of the data file divided by the minimal word's length */ sort_info.max_records= (ha_rows) (sort_info.filelength/ft_min_word_len+1); } else { /* for external plugin parser we cannot tell anything at all :( so, we'll use all the sort memory and start from ~10 buffpeks. (see _create_index_by_sort) */ sort_info.max_records= 10 * max(param->sort_buffer_length, MIN_SORT_BUFFER) / sort_param.key_length; } sort_param.key_read=sort_ft_key_read; sort_param.key_write=sort_ft_key_write; } else { sort_param.key_read=sort_key_read; sort_param.key_write=sort_key_write; } if (_create_index_by_sort(&sort_param, (my_bool) (!(param->testflag & T_VERBOSE)), param->sort_buffer_length)) { param->retry_repair=1; goto err; } /* No need to calculate checksum again. */ sort_param.calc_checksum= 0; free_root(&sort_param.wordroot, MYF(0)); /* Set for next loop */ sort_info.max_records= (ha_rows) info->state->records; if (param->testflag & T_STATISTICS) update_key_parts(sort_param.keyinfo, rec_per_key_part, sort_param.unique, param->stats_method == MI_STATS_METHOD_IGNORE_NULLS? sort_param.notnull: NULL, (ulonglong) info->state->records); /* Enable this index in the permanent (not the copied) key_map. 
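The copied (and possibly inverted) key_map above only selects which indexes this pass rebuilds; the share's key_map is the one that is kept.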
*/ mi_set_key_active(share->state.key_map, sort_param.key); DBUG_PRINT("repair", ("set enabled index #: %u", sort_param.key)); if (sort_param.fix_datafile) { param->read_cache.end_of_file=sort_param.filepos; if (write_data_suffix(&sort_info,1) || end_io_cache(&info->rec_cache)) goto err; if (param->testflag & T_SAFE_REPAIR) { /* Don't repair if we loosed more than one row */ if (info->state->records+1 < start_records) { info->state->records=start_records; goto err; } } share->state.state.data_file_length = info->state->data_file_length= sort_param.filepos; /* Only whole records */ share->state.version=(ulong) time((time_t*) 0); mysql_file_close(info->dfile, MYF(0)); info->dfile=new_file; share->data_file_type=sort_info.new_data_file_type; share->pack.header_length=(ulong) new_header_length; sort_param.fix_datafile=0; } else info->state->data_file_length=sort_param.max_pos; param->read_cache.file=info->dfile; /* re-init read cache */ reinit_io_cache(&param->read_cache,READ_CACHE,share->pack.header_length, 1,1); } if (param->testflag & T_WRITE_LOOP) { (void) fputs(" \r",stdout); (void) fflush(stdout); } if (rep_quick && del+sort_info.dupp != info->state->del) { mi_check_print_error(param,"Couldn't fix table with quick recovery: Found wrong number of deleted records"); mi_check_print_error(param,"Run recovery again without -q"); got_error=1; param->retry_repair=1; param->testflag|=T_RETRY_WITHOUT_QUICK; goto err; } if (rep_quick & T_FORCE_UNIQUENESS) { my_off_t skr=info->state->data_file_length+ (share->options & HA_OPTION_COMPRESS_RECORD ? MEMMAP_EXTRA_MARGIN : 0); #ifdef USE_RELOC if (share->data_file_type == STATIC_RECORD && skr < share->base.reloc*share->base.min_pack_length) skr=share->base.reloc*share->base.min_pack_length; #endif if (skr != sort_info.filelength) if (mysql_file_chsize(info->dfile, skr, 0, MYF(0))) mi_check_print_warning(param, "Can't change size of datafile, error: %d", my_errno); } if (param->testflag & T_CALC_CHECKSUM) info->state->checksum=param->glob_crc; if (mysql_file_chsize(share->kfile, info->state->key_file_length, 0, MYF(0))) mi_check_print_warning(param, "Can't change size of indexfile, error: %d", my_errno); if (!(param->testflag & T_SILENT)) { if (start_records != info->state->records) printf("Data records: %s\n", llstr(info->state->records,llbuff)); if (sort_info.dupp) mi_check_print_warning(param, "%s records have been removed", llstr(sort_info.dupp,llbuff)); } got_error=0; if (&share->state.state != info->state) memcpy( &share->state.state, info->state, sizeof(*info->state)); err: got_error|= flush_blocks(param, share->key_cache, share->kfile); (void) end_io_cache(&info->rec_cache); if (!got_error) { /* Replace the actual file with the temporary file */ if (new_file >= 0) { mysql_file_close(new_file, MYF(0)); info->dfile=new_file= -1; if (change_to_newfile(share->data_file_name,MI_NAME_DEXT, DATA_TMP_EXT, (param->testflag & T_BACKUP_DATA ? MYF(MY_REDEL_MAKE_BACKUP): MYF(0))) || mi_open_datafile(info,share,name,-1)) got_error=1; } } if (got_error) { if (! 
param->error_printed) mi_check_print_error(param,"%d when fixing table",my_errno); if (new_file >= 0) { (void) mysql_file_close(new_file, MYF(0)); (void) mysql_file_delete(mi_key_file_datatmp, param->temp_filename, MYF(MY_WME)); if (info->dfile == new_file) /* Retry with key cache */ if (unlikely(mi_open_datafile(info, share, name, -1))) param->retry_repair= 0; /* Safety */ } mi_mark_crashed_on_repair(info); } else if (key_map == share->state.key_map) share->state.changed&= ~STATE_NOT_OPTIMIZED_KEYS; share->state.changed|=STATE_NOT_SORTED_PAGES; my_free(mi_get_rec_buff_ptr(info, sort_param.rec_buff)); my_free(mi_get_rec_buff_ptr(info, sort_param.record)); my_free(sort_info.key_block); my_free(sort_info.ft_buf); my_free(sort_info.buff); (void) end_io_cache(&param->read_cache); info->opt_flag&= ~(READ_CACHE_USED | WRITE_CACHE_USED); if (!got_error && (param->testflag & T_UNPACK)) { share->state.header.options[0]&= (uchar) ~HA_OPTION_COMPRESS_RECORD; share->pack.header_length=0; } DBUG_RETURN(got_error); } /* Threaded repair of table using sorting SYNOPSIS mi_repair_parallel() param Repair parameters info MyISAM handler to repair name Name of table (for warnings) rep_quick set to <> 0 if we should not change data file DESCRIPTION Same as mi_repair_by_sort but do it multithreaded Each key is handled by a separate thread. TODO: make a number of threads a parameter In parallel repair we use one thread per index. There are two modes: Quick Only the indexes are rebuilt. All threads share a read buffer. Every thread that needs fresh data in the buffer enters the shared cache lock. The last thread joining the lock reads the buffer from the data file and wakes all other threads. Non-quick The data file is rebuilt and all indexes are rebuilt to point to the new record positions. One thread is the master thread. It reads from the old data file and writes to the new data file. It also creates one of the indexes. The other threads read from a buffer which is filled by the master. If they need fresh data, they enter the shared cache lock. If the masters write buffer is full, it flushes it to the new data file and enters the shared cache lock too. When all threads joined in the lock, the master copies its write buffer to the read buffer for the other threads and wakes them. RESULT 0 ok <>0 Error */ int mi_repair_parallel(MI_CHECK *param, register MI_INFO *info, const char * name, int rep_quick) { int got_error; uint i,key, total_key_length, istep; ulong rec_length; ha_rows start_records; my_off_t new_header_length,del; File new_file; MI_SORT_PARAM *sort_param=0; MYISAM_SHARE *share=info->s; ulong *rec_per_key_part; HA_KEYSEG *keyseg; char llbuff[22]; IO_CACHE new_data_cache; /* For non-quick repair. */ IO_CACHE_SHARE io_share; SORT_INFO sort_info; ulonglong UNINIT_VAR(key_map); pthread_attr_t thr_attr; ulong max_pack_reclength; int error; DBUG_ENTER("mi_repair_parallel"); start_records=info->state->records; got_error=1; new_file= -1; new_header_length=(param->testflag & T_UNPACK) ? 0 : share->pack.header_length; if (!(param->testflag & T_SILENT)) { printf("- parallel recovering (with sort) MyISAM-table '%s'\n",name); printf("Data records: %s\n", llstr(start_records,llbuff)); } param->testflag|=T_REP; /* for easy checking */ if (info->s->options & (HA_OPTION_CHECKSUM | HA_OPTION_COMPRESS_RECORD)) param->testflag|=T_CALC_CHECKSUM; /* Quick repair (not touching data file, rebuilding indexes): { Read cache is (MI_CHECK *param)->read_cache using info->dfile. 
} Non-quick repair (rebuilding data file and indexes): { Master thread: Read cache is (MI_CHECK *param)->read_cache using info->dfile. Write cache is (MI_INFO *info)->rec_cache using new_file. Slave threads: Read cache is new_data_cache synced to master rec_cache. The final assignment of the filedescriptor for rec_cache is done after the cache creation. Don't check file size on new_data_cache, as the resulting file size is not known yet. As rec_cache and new_data_cache are synced, write_buffer_length is used for the read cache 'new_data_cache'. Both start at the same position 'new_header_length'. } */ DBUG_PRINT("info", ("is quick repair: %d", rep_quick)); bzero((char*)&sort_info,sizeof(sort_info)); /* Initialize pthread structures before goto err. */ mysql_mutex_init(mi_key_mutex_MI_SORT_INFO_mutex, &sort_info.mutex, MY_MUTEX_INIT_FAST); mysql_cond_init(mi_key_cond_MI_SORT_INFO_cond, &sort_info.cond, 0); mysql_mutex_init(mi_key_mutex_MI_CHECK_print_msg, &param->print_msg_mutex, MY_MUTEX_INIT_FAST); param->need_print_msg_lock= 1; if (!(sort_info.key_block= alloc_key_blocks(param, (uint) param->sort_key_blocks, share->base.max_key_block_length)) || init_io_cache(&param->read_cache, info->dfile, (uint) param->read_buffer_length, READ_CACHE, share->pack.header_length, 1, MYF(MY_WME)) || (!rep_quick && (init_io_cache(&info->rec_cache, info->dfile, (uint) param->write_buffer_length, WRITE_CACHE, new_header_length, 1, MYF(MY_WME | MY_WAIT_IF_FULL) & param->myf_rw) || init_io_cache(&new_data_cache, -1, (uint) param->write_buffer_length, READ_CACHE, new_header_length, 1, MYF(MY_WME | MY_DONT_CHECK_FILESIZE))))) goto err; sort_info.key_block_end=sort_info.key_block+param->sort_key_blocks; info->opt_flag|=WRITE_CACHE_USED; info->rec_cache.file=info->dfile; /* for sort_delete_record */ if (!rep_quick) { /* Get real path for data file */ if ((new_file= mysql_file_create(mi_key_file_datatmp, fn_format(param->temp_filename, share->data_file_name, "", DATA_TMP_EXT, 2+4), 0, param->tmpfile_createflag, MYF(0))) < 0) { mi_check_print_error(param,"Can't create new tempfile: '%s'", param->temp_filename); goto err; } if (new_header_length && filecopy(param, new_file,info->dfile,0L,new_header_length, "datafile-header")) goto err; if (param->testflag & T_UNPACK) { share->options&= ~HA_OPTION_COMPRESS_RECORD; mi_int2store(share->state.header.options,share->options); } share->state.dellink= HA_OFFSET_ERROR; info->rec_cache.file=new_file; } info->update= (short) (HA_STATE_CHANGED | HA_STATE_ROW_CHANGED); /* Optionally drop indexes and optionally modify the key_map. */ mi_drop_all_indexes(param, info, FALSE); key_map= share->state.key_map; if (param->testflag & T_CREATE_MISSING_KEYS) { /* Invert the copied key_map to recreate all disabled indexes. */ key_map= ~key_map; } sort_info.info=info; sort_info.param = param; set_data_file_type(&sort_info, share); sort_info.dupp=0; sort_info.buff=0; param->read_cache.end_of_file=sort_info.filelength= mysql_file_seek(param->read_cache.file, 0L, MY_SEEK_END, MYF(0)); if (share->data_file_type == DYNAMIC_RECORD) rec_length=max(share->base.min_pack_length+1,share->base.min_block_length); else if (share->data_file_type == COMPRESSED_RECORD) rec_length=share->base.min_block_length; else rec_length=share->base.pack_reclength; /* +1 below is required hack for parallel repair mode. The info->state->records value, that is compared later to sort_info.max_records and cannot exceed it, is increased in sort_key_write. 
In mi_repair_by_sort, sort_key_write is called after sort_key_read, where the comparison is performed, but in parallel mode master thread can call sort_key_write before some other repair thread calls sort_key_read. Furthermore I'm not even sure +1 would be enough. May be sort_info.max_records shold be always set to max value in parallel mode. */ sort_info.max_records= ((param->testflag & T_CREATE_MISSING_KEYS) ? info->state->records + 1: (ha_rows) (sort_info.filelength/rec_length+1)); del=info->state->del; param->glob_crc=0; /* for compressed tables */ max_pack_reclength= share->base.pack_reclength; if (share->options & HA_OPTION_COMPRESS_RECORD) set_if_bigger(max_pack_reclength, share->max_pack_length); if (!(sort_param=(MI_SORT_PARAM *) my_malloc((uint) share->base.keys * (sizeof(MI_SORT_PARAM) + max_pack_reclength), MYF(MY_ZEROFILL)))) { mi_check_print_error(param,"Not enough memory for key!"); goto err; } total_key_length=0; rec_per_key_part= param->rec_per_key_part; info->state->records=info->state->del=share->state.split=0; info->state->empty=0; for (i=key=0, istep=1 ; key < share->base.keys ; rec_per_key_part+=sort_param[i].keyinfo->keysegs, i+=istep, key++) { sort_param[i].key=key; sort_param[i].keyinfo=share->keyinfo+key; sort_param[i].seg=sort_param[i].keyinfo->seg; /* Skip this index if it is marked disabled in the copied (and possibly inverted) key_map. */ if (! mi_is_key_active(key_map, key)) { /* Remember old statistics for key */ memcpy((char*) rec_per_key_part, (char*) (share->state.rec_per_key_part+ (uint) (rec_per_key_part - param->rec_per_key_part)), sort_param[i].keyinfo->keysegs*sizeof(*rec_per_key_part)); istep=0; continue; } istep=1; if ((!(param->testflag & T_SILENT))) printf ("- Fixing index %d\n",key+1); if (sort_param[i].keyinfo->flag & HA_FULLTEXT) { sort_param[i].key_read=sort_ft_key_read; sort_param[i].key_write=sort_ft_key_write; } else { sort_param[i].key_read=sort_key_read; sort_param[i].key_write=sort_key_write; } sort_param[i].key_cmp=sort_key_cmp; sort_param[i].lock_in_memory=lock_memory; sort_param[i].tmpdir=param->tmpdir; sort_param[i].sort_info=&sort_info; sort_param[i].master=0; sort_param[i].fix_datafile=0; sort_param[i].calc_checksum= 0; sort_param[i].filepos=new_header_length; sort_param[i].max_pos=sort_param[i].pos=share->pack.header_length; sort_param[i].record= (((uchar *)(sort_param+share->base.keys))+ (max_pack_reclength * i)); if (!mi_alloc_rec_buff(info, -1, &sort_param[i].rec_buff)) { mi_check_print_error(param,"Not enough memory!"); goto err; } sort_param[i].key_length=share->rec_reflength; for (keyseg=sort_param[i].seg; keyseg->type != HA_KEYTYPE_END; keyseg++) { sort_param[i].key_length+=keyseg->length; if (keyseg->flag & HA_SPACE_PACK) sort_param[i].key_length+=get_pack_length(keyseg->length); if (keyseg->flag & (HA_BLOB_PART | HA_VAR_LENGTH_PART)) sort_param[i].key_length+=2 + test(keyseg->length >= 127); if (keyseg->flag & HA_NULL_PART) sort_param[i].key_length++; } total_key_length+=sort_param[i].key_length; if (sort_param[i].keyinfo->flag & HA_FULLTEXT) { uint ft_max_word_len_for_sort=FT_MAX_WORD_LEN_FOR_SORT* sort_param[i].keyinfo->seg->charset->mbmaxlen; sort_param[i].key_length+=ft_max_word_len_for_sort-HA_FT_MAXBYTELEN; init_alloc_root(&sort_param[i].wordroot, FTPARSER_MEMROOT_ALLOC_SIZE, 0); } } sort_info.total_keys=i; sort_param[0].master= 1; sort_param[0].fix_datafile= (my_bool)(! 
rep_quick); sort_param[0].calc_checksum= test(param->testflag & T_CALC_CHECKSUM); if (!ftparser_alloc_param(info)) goto err; sort_info.got_error=0; mysql_mutex_lock(&sort_info.mutex); /* Initialize the I/O cache share for use with the read caches and, in case of non-quick repair, the write cache. When all threads join on the cache lock, the writer copies the write cache contents to the read caches. */ if (i > 1) { if (rep_quick) init_io_cache_share(&param->read_cache, &io_share, NULL, i); else init_io_cache_share(&new_data_cache, &io_share, &info->rec_cache, i); } else io_share.total_threads= 0; /* share not used */ (void) pthread_attr_init(&thr_attr); (void) pthread_attr_setdetachstate(&thr_attr,PTHREAD_CREATE_DETACHED); for (i=0 ; i < sort_info.total_keys ; i++) { /* Copy the properly initialized IO_CACHE structure so that every thread has its own copy. In quick mode param->read_cache is shared for use by all threads. In non-quick mode all threads but the first copy the shared new_data_cache, which is synchronized to the write cache of the first thread. The first thread copies param->read_cache, which is not shared. */ sort_param[i].read_cache= ((rep_quick || !i) ? param->read_cache : new_data_cache); DBUG_PRINT("io_cache_share", ("thread: %u read_cache: 0x%lx", i, (long) &sort_param[i].read_cache)); /* two approaches: the same amount of memory for each thread or the memory for the same number of keys for each thread... In the second one all the threads will fill their sort_buffers (and call write_keys) at the same time, putting more stress on i/o. */ sort_param[i].sortbuff_size= #ifndef USING_SECOND_APPROACH param->sort_buffer_length/sort_info.total_keys; #else param->sort_buffer_length*sort_param[i].key_length/total_key_length; #endif if ((error= mysql_thread_create(mi_key_thread_find_all_keys, &sort_param[i].thr, &thr_attr, thr_find_all_keys, (void *) (sort_param+i)))) { mi_check_print_error(param,"Cannot start a repair thread (errno= %d)", error); /* Cleanup: Detach from the share. Avoid others to be blocked. */ if (io_share.total_threads) remove_io_thread(&sort_param[i].read_cache); DBUG_PRINT("error", ("Cannot start a repair thread")); sort_info.got_error=1; } else sort_info.threads_running++; } (void) pthread_attr_destroy(&thr_attr); /* waiting for all threads to finish */ while (sort_info.threads_running) mysql_cond_wait(&sort_info.cond, &sort_info.mutex); mysql_mutex_unlock(&sort_info.mutex); if ((got_error= thr_write_keys(sort_param))) { param->retry_repair=1; goto err; } got_error=1; /* Assume the following may go wrong */ if (sort_param[0].fix_datafile) { /* Append some nuls to the end of a memory mapped file. Destroy the write cache. The master thread did already detach from the share by remove_io_thread() in sort.c:thr_find_all_keys(). */ if (write_data_suffix(&sort_info,1) || end_io_cache(&info->rec_cache)) goto err; if (param->testflag & T_SAFE_REPAIR) { /* Don't repair if we loosed more than one row */ if (info->state->records+1 < start_records) { info->state->records=start_records; goto err; } } share->state.state.data_file_length= info->state->data_file_length= sort_param->filepos; /* Only whole records */ share->state.version=(ulong) time((time_t*) 0); /* Exchange the data file descriptor of the table, so that we use the new file from now on. 
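The temporary file replaces the original data file later, in the common cleanup path, via change_to_newfile() followed by mi_open_datafile().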
*/ mysql_file_close(info->dfile, MYF(0)); info->dfile=new_file; share->data_file_type=sort_info.new_data_file_type; share->pack.header_length=(ulong) new_header_length; } else info->state->data_file_length=sort_param->max_pos; if (rep_quick && del+sort_info.dupp != info->state->del) { mi_check_print_error(param,"Couldn't fix table with quick recovery: Found wrong number of deleted records"); mi_check_print_error(param,"Run recovery again without -q"); param->retry_repair=1; param->testflag|=T_RETRY_WITHOUT_QUICK; goto err; } if (rep_quick & T_FORCE_UNIQUENESS) { my_off_t skr=info->state->data_file_length+ (share->options & HA_OPTION_COMPRESS_RECORD ? MEMMAP_EXTRA_MARGIN : 0); #ifdef USE_RELOC if (share->data_file_type == STATIC_RECORD && skr < share->base.reloc*share->base.min_pack_length) skr=share->base.reloc*share->base.min_pack_length; #endif if (skr != sort_info.filelength) if (mysql_file_chsize(info->dfile, skr, 0, MYF(0))) mi_check_print_warning(param, "Can't change size of datafile, error: %d", my_errno); } if (param->testflag & T_CALC_CHECKSUM) info->state->checksum=param->glob_crc; if (mysql_file_chsize(share->kfile, info->state->key_file_length, 0, MYF(0))) mi_check_print_warning(param, "Can't change size of indexfile, error: %d", my_errno); if (!(param->testflag & T_SILENT)) { if (start_records != info->state->records) printf("Data records: %s\n", llstr(info->state->records,llbuff)); if (sort_info.dupp) mi_check_print_warning(param, "%s records have been removed", llstr(sort_info.dupp,llbuff)); } got_error=0; if (&share->state.state != info->state) memcpy(&share->state.state, info->state, sizeof(*info->state)); err: got_error|= flush_blocks(param, share->key_cache, share->kfile); /* Destroy the write cache. The master thread did already detach from the share by remove_io_thread() or it was not yet started (if the error happend before creating the thread). */ (void) end_io_cache(&info->rec_cache); /* Destroy the new data cache in case of non-quick repair. All slave threads did either detach from the share by remove_io_thread() already or they were not yet started (if the error happend before creating the threads). */ if (!rep_quick) (void) end_io_cache(&new_data_cache); if (!got_error) { /* Replace the actual file with the temporary file */ if (new_file >= 0) { mysql_file_close(new_file, MYF(0)); info->dfile=new_file= -1; if (change_to_newfile(share->data_file_name, MI_NAME_DEXT, DATA_TMP_EXT, (param->testflag & T_BACKUP_DATA ? MYF(MY_REDEL_MAKE_BACKUP): MYF(0))) || mi_open_datafile(info,share,name,-1)) got_error=1; } } if (got_error) { if (! 
param->error_printed) mi_check_print_error(param,"%d when fixing table",my_errno); if (new_file >= 0) { (void) mysql_file_close(new_file, MYF(0)); (void) mysql_file_delete(mi_key_file_datatmp, param->temp_filename, MYF(MY_WME)); if (info->dfile == new_file) /* Retry with key cache */ if (unlikely(mi_open_datafile(info, share, name, -1))) param->retry_repair= 0; /* Safety */ } mi_mark_crashed_on_repair(info); } else if (key_map == share->state.key_map) share->state.changed&= ~STATE_NOT_OPTIMIZED_KEYS; share->state.changed|=STATE_NOT_SORTED_PAGES; mysql_cond_destroy(&sort_info.cond); mysql_mutex_destroy(&sort_info.mutex); mysql_mutex_destroy(&param->print_msg_mutex); param->need_print_msg_lock= 0; my_free(sort_info.ft_buf); my_free(sort_info.key_block); my_free(sort_param); my_free(sort_info.buff); (void) end_io_cache(&param->read_cache); info->opt_flag&= ~(READ_CACHE_USED | WRITE_CACHE_USED); if (!got_error && (param->testflag & T_UNPACK)) { share->state.header.options[0]&= (uchar) ~HA_OPTION_COMPRESS_RECORD; share->pack.header_length=0; } DBUG_RETURN(got_error); } /* Read next record and return next key */ static int sort_key_read(MI_SORT_PARAM *sort_param, void *key) { int error; SORT_INFO *sort_info=sort_param->sort_info; MI_INFO *info=sort_info->info; DBUG_ENTER("sort_key_read"); if ((error=sort_get_next_record(sort_param))) DBUG_RETURN(error); if (info->state->records == sort_info->max_records) { mi_check_print_error(sort_info->param, "Key %d - Found too many records; Can't continue", sort_param->key+1); DBUG_RETURN(1); } sort_param->real_key_length= (info->s->rec_reflength+ _mi_make_key(info, sort_param->key, (uchar*) key, sort_param->record, sort_param->filepos)); #ifdef HAVE_purify bzero(key+sort_param->real_key_length, (sort_param->key_length-sort_param->real_key_length)); #endif DBUG_RETURN(sort_write_record(sort_param)); } /* sort_key_read */ static int sort_ft_key_read(MI_SORT_PARAM *sort_param, void *key) { int error; SORT_INFO *sort_info=sort_param->sort_info; MI_INFO *info=sort_info->info; FT_WORD *wptr=0; DBUG_ENTER("sort_ft_key_read"); if (!sort_param->wordlist) { for (;;) { free_root(&sort_param->wordroot, MYF(MY_MARK_BLOCKS_FREE)); if ((error=sort_get_next_record(sort_param))) DBUG_RETURN(error); if (!(wptr=_mi_ft_parserecord(info,sort_param->key,sort_param->record, &sort_param->wordroot))) DBUG_RETURN(1); if (wptr->pos) break; error=sort_write_record(sort_param); } sort_param->wordptr=sort_param->wordlist=wptr; } else { error=0; wptr=(FT_WORD*)(sort_param->wordptr); } sort_param->real_key_length=(info->s->rec_reflength+ _ft_make_key(info, sort_param->key, key, wptr++, sort_param->filepos)); #ifdef HAVE_purify if (sort_param->key_length > sort_param->real_key_length) bzero(key+sort_param->real_key_length, (sort_param->key_length-sort_param->real_key_length)); #endif if (!wptr->pos) { free_root(&sort_param->wordroot, MYF(MY_MARK_BLOCKS_FREE)); sort_param->wordlist=0; error=sort_write_record(sort_param); } else sort_param->wordptr=(void*)wptr; DBUG_RETURN(error); } /* sort_ft_key_read */ /* Read next record from file using parameters in sort_info. SYNOPSIS sort_get_next_record() sort_param Information about and for the sort process NOTE Dynamic Records With Non-Quick Parallel Repair For non-quick parallel repair we use a synchronized read/write cache. This means that one thread is the master who fixes the data file by reading each record from the old data file and writing it to the new data file. By doing this the records in the new data file are written contiguously. 
Whenever the write buffer is full, it is copied to the read buffer. The slaves read from the read buffer, which is not associated with a file. Thus read_cache.file is -1. When using _mi_read_cache(), the slaves must always set flag to READING_NEXT so that the function never tries to read from file. This is safe because the records are contiguous. There is no need to read outside the cache. This condition is evaluated in the variable 'parallel_flag' for quick reference. read_cache.file must be >= 0 in every other case. RETURN -1 end of file 0 ok > 0 error */ static int sort_get_next_record(MI_SORT_PARAM *sort_param) { int searching; int parallel_flag; uint found_record,b_type,left_length; my_off_t pos; uchar *UNINIT_VAR(to); MI_BLOCK_INFO block_info; SORT_INFO *sort_info=sort_param->sort_info; MI_CHECK *param=sort_info->param; MI_INFO *info=sort_info->info; MYISAM_SHARE *share=info->s; char llbuff[22],llbuff2[22]; DBUG_ENTER("sort_get_next_record"); if (*killed_ptr(param)) DBUG_RETURN(1); switch (share->data_file_type) { case STATIC_RECORD: for (;;) { if (my_b_read(&sort_param->read_cache,sort_param->record, share->base.pack_reclength)) { if (sort_param->read_cache.error) param->out_flag |= O_DATA_LOST; param->retry_repair=1; param->testflag|=T_RETRY_WITHOUT_QUICK; DBUG_RETURN(-1); } sort_param->start_recpos=sort_param->pos; if (!sort_param->fix_datafile) { sort_param->filepos=sort_param->pos; if (sort_param->master) share->state.split++; } sort_param->max_pos=(sort_param->pos+=share->base.pack_reclength); if (*sort_param->record) { if (sort_param->calc_checksum) param->glob_crc+= (info->checksum= mi_static_checksum(info,sort_param->record)); DBUG_RETURN(0); } if (!sort_param->fix_datafile && sort_param->master) { info->state->del++; info->state->empty+=share->base.pack_reclength; } } case DYNAMIC_RECORD: LINT_INIT(to); pos=sort_param->pos; searching=(sort_param->fix_datafile && (param->testflag & T_EXTEND)); parallel_flag= (sort_param->read_cache.file < 0) ? READING_NEXT : 0; for (;;) { found_record=block_info.second_read= 0; left_length=1; if (searching) { pos=MY_ALIGN(pos,MI_DYN_ALIGN_SIZE); param->testflag|=T_RETRY_WITHOUT_QUICK; sort_param->start_recpos=pos; } do { if (pos > sort_param->max_pos) sort_param->max_pos=pos; if (pos & (MI_DYN_ALIGN_SIZE-1)) { if ((param->testflag & T_VERBOSE) || searching == 0) mi_check_print_info(param,"Wrong aligned block at %s", llstr(pos,llbuff)); if (searching) goto try_next; } if (found_record && pos == param->search_after_block) mi_check_print_info(param,"Block: %s used by record at %s", llstr(param->search_after_block,llbuff), llstr(sort_param->start_recpos,llbuff2)); if (_mi_read_cache(&sort_param->read_cache, (uchar*) block_info.header,pos, MI_BLOCK_INFO_HEADER_LENGTH, (! found_record ? READING_NEXT : 0) | parallel_flag | READING_HEADER)) { if (found_record) { mi_check_print_info(param, "Can't read whole record at %s (errno: %d)", llstr(sort_param->start_recpos,llbuff),errno); goto try_next; } DBUG_RETURN(-1); } if (searching && ! 
sort_param->fix_datafile) { param->error_printed=1; param->retry_repair=1; param->testflag|=T_RETRY_WITHOUT_QUICK; DBUG_RETURN(1); /* Something wrong with data */ } b_type=_mi_get_block_info(&block_info,-1,pos); if ((b_type & (BLOCK_ERROR | BLOCK_FATAL_ERROR)) || ((b_type & BLOCK_FIRST) && (block_info.rec_len < (uint) share->base.min_pack_length || block_info.rec_len > (uint) share->base.max_pack_length))) { uint i; if (param->testflag & T_VERBOSE || searching == 0) mi_check_print_info(param, "Wrong bytesec: %3d-%3d-%3d at %10s; Skipped", block_info.header[0],block_info.header[1], block_info.header[2],llstr(pos,llbuff)); if (found_record) goto try_next; block_info.second_read=0; searching=1; /* Search after block in read header string */ for (i=MI_DYN_ALIGN_SIZE ; i < MI_BLOCK_INFO_HEADER_LENGTH ; i+= MI_DYN_ALIGN_SIZE) if (block_info.header[i] >= 1 && block_info.header[i] <= MI_MAX_DYN_HEADER_BYTE) break; pos+=(ulong) i; sort_param->start_recpos=pos; continue; } if (b_type & BLOCK_DELETED) { my_bool error=0; if (block_info.block_len+ (uint) (block_info.filepos-pos) < share->base.min_block_length) { if (!searching) mi_check_print_info(param, "Deleted block with impossible length %u at %s", block_info.block_len,llstr(pos,llbuff)); error=1; } else { if ((block_info.next_filepos != HA_OFFSET_ERROR && block_info.next_filepos >= info->state->data_file_length) || (block_info.prev_filepos != HA_OFFSET_ERROR && block_info.prev_filepos >= info->state->data_file_length)) { if (!searching) mi_check_print_info(param, "Delete link points outside datafile at %s", llstr(pos,llbuff)); error=1; } } if (error) { if (found_record) goto try_next; searching=1; pos+= MI_DYN_ALIGN_SIZE; sort_param->start_recpos=pos; block_info.second_read=0; continue; } } else { if (block_info.block_len+ (uint) (block_info.filepos-pos) < share->base.min_block_length || block_info.block_len > (uint) share->base.max_pack_length+ MI_SPLIT_LENGTH) { if (!searching) mi_check_print_info(param, "Found block with impossible length %u at %s; Skipped", block_info.block_len+ (uint) (block_info.filepos-pos), llstr(pos,llbuff)); if (found_record) goto try_next; searching=1; pos+= MI_DYN_ALIGN_SIZE; sort_param->start_recpos=pos; block_info.second_read=0; continue; } } if (b_type & (BLOCK_DELETED | BLOCK_SYNC_ERROR)) { if (!sort_param->fix_datafile && sort_param->master && (b_type & BLOCK_DELETED)) { info->state->empty+=block_info.block_len; info->state->del++; share->state.split++; } if (found_record) goto try_next; if (searching) { pos+=MI_DYN_ALIGN_SIZE; sort_param->start_recpos=pos; } else pos=block_info.filepos+block_info.block_len; block_info.second_read=0; continue; } if (!sort_param->fix_datafile && sort_param->master) share->state.split++; if (! 
found_record++) { sort_param->find_length=left_length=block_info.rec_len; sort_param->start_recpos=pos; if (!sort_param->fix_datafile) sort_param->filepos=sort_param->start_recpos; if (sort_param->fix_datafile && (param->testflag & T_EXTEND)) sort_param->pos=block_info.filepos+1; else sort_param->pos=block_info.filepos+block_info.block_len; if (share->base.blobs) { if (!(to=mi_alloc_rec_buff(info,block_info.rec_len, &(sort_param->rec_buff)))) { if (param->max_record_length >= block_info.rec_len) { mi_check_print_error(param,"Not enough memory for blob at %s (need %lu)", llstr(sort_param->start_recpos,llbuff), (ulong) block_info.rec_len); DBUG_RETURN(1); } else { mi_check_print_info(param,"Not enough memory for blob at %s (need %lu); Row skipped", llstr(sort_param->start_recpos,llbuff), (ulong) block_info.rec_len); goto try_next; } } } else to= sort_param->rec_buff; } if (left_length < block_info.data_len || ! block_info.data_len) { mi_check_print_info(param, "Found block with too small length at %s; Skipped", llstr(sort_param->start_recpos,llbuff)); goto try_next; } if (block_info.filepos + block_info.data_len > sort_param->read_cache.end_of_file) { mi_check_print_info(param, "Found block that points outside data file at %s", llstr(sort_param->start_recpos,llbuff)); goto try_next; } /* Copy information that is already read. Avoid accessing data below the cache start. This could happen if the header streched over the end of the previous buffer contents. */ { uint header_len= (uint) (block_info.filepos - pos); uint prefetch_len= (MI_BLOCK_INFO_HEADER_LENGTH - header_len); if (prefetch_len > block_info.data_len) prefetch_len= block_info.data_len; if (prefetch_len) { memcpy(to, block_info.header + header_len, prefetch_len); block_info.filepos+= prefetch_len; block_info.data_len-= prefetch_len; left_length-= prefetch_len; to+= prefetch_len; } } if (block_info.data_len && _mi_read_cache(&sort_param->read_cache,to,block_info.filepos, block_info.data_len, (found_record == 1 ? 
READING_NEXT : 0) | parallel_flag)) { mi_check_print_info(param, "Read error for block at: %s (error: %d); Skipped", llstr(block_info.filepos,llbuff),my_errno); goto try_next; } left_length-=block_info.data_len; to+=block_info.data_len; pos=block_info.next_filepos; if (pos == HA_OFFSET_ERROR && left_length) { mi_check_print_info(param,"Wrong block with wrong total length starting at %s", llstr(sort_param->start_recpos,llbuff)); goto try_next; } if (pos + MI_BLOCK_INFO_HEADER_LENGTH > sort_param->read_cache.end_of_file) { mi_check_print_info(param,"Found link that points at %s (outside data file) at %s", llstr(pos,llbuff2), llstr(sort_param->start_recpos,llbuff)); goto try_next; } } while (left_length); if (_mi_rec_unpack(info,sort_param->record,sort_param->rec_buff, sort_param->find_length) != MY_FILE_ERROR) { if (sort_param->read_cache.error < 0) DBUG_RETURN(1); if (sort_param->calc_checksum) info->checksum= mi_checksum(info, sort_param->record); if ((param->testflag & (T_EXTEND | T_REP)) || searching) { if (_mi_rec_check(info, sort_param->record, sort_param->rec_buff, sort_param->find_length, (param->testflag & T_QUICK) && sort_param->calc_checksum && test(info->s->calc_checksum))) { mi_check_print_info(param,"Found wrong packed record at %s", llstr(sort_param->start_recpos,llbuff)); goto try_next; } } if (sort_param->calc_checksum) param->glob_crc+= info->checksum; DBUG_RETURN(0); } if (!searching) mi_check_print_info(param,"Key %d - Found wrong stored record at %s", sort_param->key+1, llstr(sort_param->start_recpos,llbuff)); try_next: pos=(sort_param->start_recpos+=MI_DYN_ALIGN_SIZE); searching=1; } case COMPRESSED_RECORD: for (searching=0 ;; searching=1, sort_param->pos++) { if (_mi_read_cache(&sort_param->read_cache,(uchar*) block_info.header, sort_param->pos, share->pack.ref_length,READING_NEXT)) DBUG_RETURN(-1); if (searching && ! sort_param->fix_datafile) { param->error_printed=1; param->retry_repair=1; param->testflag|=T_RETRY_WITHOUT_QUICK; DBUG_RETURN(1); /* Something wrong with data */ } sort_param->start_recpos=sort_param->pos; if (_mi_pack_get_block_info(info, &sort_param->bit_buff, &block_info, &sort_param->rec_buff, -1, sort_param->pos)) DBUG_RETURN(-1); if (!block_info.rec_len && sort_param->pos + MEMMAP_EXTRA_MARGIN == sort_param->read_cache.end_of_file) DBUG_RETURN(-1); if (block_info.rec_len < (uint) share->min_pack_length || block_info.rec_len > (uint) share->max_pack_length) { if (! searching) mi_check_print_info(param,"Found block with wrong recordlength: %d at %s\n", block_info.rec_len, llstr(sort_param->pos,llbuff)); continue; } if (_mi_read_cache(&sort_param->read_cache,(uchar*) sort_param->rec_buff, block_info.filepos, block_info.rec_len, READING_NEXT)) { if (! searching) mi_check_print_info(param,"Couldn't read whole record from %s", llstr(sort_param->pos,llbuff)); continue; } if (_mi_pack_rec_unpack(info, &sort_param->bit_buff, sort_param->record, sort_param->rec_buff, block_info.rec_len)) { if (! 
searching) mi_check_print_info(param,"Found wrong record at %s", llstr(sort_param->pos,llbuff)); continue; } if (!sort_param->fix_datafile) { sort_param->filepos=sort_param->pos; if (sort_param->master) share->state.split++; } sort_param->max_pos=(sort_param->pos=block_info.filepos+ block_info.rec_len); info->packed_length=block_info.rec_len; if (sort_param->calc_checksum) param->glob_crc+= (info->checksum= mi_checksum(info, sort_param->record)); DBUG_RETURN(0); } case BLOCK_RECORD: assert(0); /* Impossible */ } DBUG_RETURN(1); /* Impossible */ } /* Write record to new file. SYNOPSIS sort_write_record() sort_param Sort parameters. NOTE This is only called by a master thread if parallel repair is used. RETURN 0 OK 1 Error */ int sort_write_record(MI_SORT_PARAM *sort_param) { int flag; uint length; ulong block_length,reclength; uchar *from; uchar block_buff[8]; SORT_INFO *sort_info=sort_param->sort_info; MI_CHECK *param=sort_info->param; MI_INFO *info=sort_info->info; MYISAM_SHARE *share=info->s; DBUG_ENTER("sort_write_record"); if (sort_param->fix_datafile) { switch (sort_info->new_data_file_type) { case STATIC_RECORD: if (my_b_write(&info->rec_cache,sort_param->record, share->base.pack_reclength)) { mi_check_print_error(param,"%d when writing to datafile",my_errno); DBUG_RETURN(1); } sort_param->filepos+=share->base.pack_reclength; info->s->state.split++; /* sort_info->param->glob_crc+=mi_static_checksum(info, sort_param->record); */ break; case DYNAMIC_RECORD: if (! info->blobs) from=sort_param->rec_buff; else { /* must be sure that local buffer is big enough */ reclength=info->s->base.pack_reclength+ _my_calc_total_blob_length(info,sort_param->record)+ ALIGN_SIZE(MI_MAX_DYN_BLOCK_HEADER)+MI_SPLIT_LENGTH+ MI_DYN_DELETE_BLOCK_HEADER; if (sort_info->buff_length < reclength) { if (!(sort_info->buff=my_realloc(sort_info->buff, (uint) reclength, MYF(MY_FREE_ON_ERROR | MY_ALLOW_ZERO_PTR)))) DBUG_RETURN(1); sort_info->buff_length=reclength; } from= sort_info->buff+ALIGN_SIZE(MI_MAX_DYN_BLOCK_HEADER); } /* We can use info->checksum here as only one thread calls this. 
*/ info->checksum=mi_checksum(info,sort_param->record); reclength=_mi_rec_pack(info,from,sort_param->record); flag=0; /* sort_info->param->glob_crc+=info->checksum; */ do { block_length=reclength+ 3 + test(reclength >= (65520-3)); if (block_length < share->base.min_block_length) block_length=share->base.min_block_length; info->update|=HA_STATE_WRITE_AT_END; block_length=MY_ALIGN(block_length,MI_DYN_ALIGN_SIZE); if (block_length > MI_MAX_BLOCK_LENGTH) block_length=MI_MAX_BLOCK_LENGTH; if (_mi_write_part_record(info,0L,block_length, sort_param->filepos+block_length, &from,&reclength,&flag)) { mi_check_print_error(param,"%d when writing to datafile",my_errno); DBUG_RETURN(1); } sort_param->filepos+=block_length; info->s->state.split++; } while (reclength); /* sort_info->param->glob_crc+=info->checksum; */ break; case COMPRESSED_RECORD: reclength=info->packed_length; length= save_pack_length((uint) share->pack.version, block_buff, reclength); if (info->s->base.blobs) length+= save_pack_length((uint) share->pack.version, block_buff + length, info->blob_length); if (my_b_write(&info->rec_cache,block_buff,length) || my_b_write(&info->rec_cache,(uchar*) sort_param->rec_buff,reclength)) { mi_check_print_error(param,"%d when writing to datafile",my_errno); DBUG_RETURN(1); } /* sort_info->param->glob_crc+=info->checksum; */ sort_param->filepos+=reclength+length; info->s->state.split++; break; case BLOCK_RECORD: assert(0); /* Impossible */ } } if (sort_param->master) { info->state->records++; if ((param->testflag & T_WRITE_LOOP) && (info->state->records % WRITE_COUNT) == 0) { char llbuff[22]; printf("%s\r", llstr(info->state->records,llbuff)); (void) fflush(stdout); } } DBUG_RETURN(0); } /* sort_write_record */ /* Compare two keys from _create_index_by_sort */ static int sort_key_cmp(MI_SORT_PARAM *sort_param, const void *a, const void *b) { uint not_used[2]; return (ha_key_cmp(sort_param->seg, *((uchar**) a), *((uchar**) b), USE_WHOLE_KEY, SEARCH_SAME, not_used)); } /* sort_key_cmp */ static int sort_key_write(MI_SORT_PARAM *sort_param, const void *a) { uint diff_pos[2]; char llbuff[22],llbuff2[22]; SORT_INFO *sort_info=sort_param->sort_info; MI_CHECK *param= sort_info->param; int cmp; if (sort_info->key_block->inited) { cmp=ha_key_cmp(sort_param->seg,sort_info->key_block->lastkey, (uchar*) a, USE_WHOLE_KEY,SEARCH_FIND | SEARCH_UPDATE, diff_pos); if (param->stats_method == MI_STATS_METHOD_NULLS_NOT_EQUAL) ha_key_cmp(sort_param->seg,sort_info->key_block->lastkey, (uchar*) a, USE_WHOLE_KEY, SEARCH_FIND | SEARCH_NULL_ARE_NOT_EQUAL, diff_pos); else if (param->stats_method == MI_STATS_METHOD_IGNORE_NULLS) { diff_pos[0]= mi_collect_stats_nonulls_next(sort_param->seg, sort_param->notnull, sort_info->key_block->lastkey, (uchar*)a); } sort_param->unique[diff_pos[0]-1]++; } else { cmp= -1; if (param->stats_method == MI_STATS_METHOD_IGNORE_NULLS) mi_collect_stats_nonulls_first(sort_param->seg, sort_param->notnull, (uchar*)a); } if ((sort_param->keyinfo->flag & HA_NOSAME) && cmp == 0) { sort_info->dupp++; sort_info->info->lastpos=get_record_for_key(sort_info->info, sort_param->keyinfo, (uchar*) a); mi_check_print_warning(param, "Duplicate key for record at %10s against record at %10s", llstr(sort_info->info->lastpos,llbuff), llstr(get_record_for_key(sort_info->info, sort_param->keyinfo, sort_info->key_block-> lastkey), llbuff2)); param->testflag|=T_RETRY_WITHOUT_QUICK; if (sort_info->param->testflag & T_VERBOSE) _mi_print_key(stdout,sort_param->seg,(uchar*) a, USE_WHOLE_KEY); return 
(sort_delete_record(sort_param)); } #ifndef DBUG_OFF if (cmp > 0) { mi_check_print_error(param, "Internal error: Keys are not in order from sort"); return(1); } #endif return (sort_insert_key(sort_param,sort_info->key_block, (uchar*) a, HA_OFFSET_ERROR)); } /* sort_key_write */ int sort_ft_buf_flush(MI_SORT_PARAM *sort_param) { SORT_INFO *sort_info=sort_param->sort_info; SORT_KEY_BLOCKS *key_block=sort_info->key_block; MYISAM_SHARE *share=sort_info->info->s; uint val_off, val_len; int error; SORT_FT_BUF *ft_buf=sort_info->ft_buf; uchar *from, *to; val_len=share->ft2_keyinfo.keylength; get_key_full_length_rdonly(val_off, ft_buf->lastkey); to=ft_buf->lastkey+val_off; if (ft_buf->buf) { /* flushing first-level tree */ error=sort_insert_key(sort_param,key_block,ft_buf->lastkey, HA_OFFSET_ERROR); for (from=to+val_len; !error && from < ft_buf->buf; from+= val_len) { memcpy(to, from, val_len); error=sort_insert_key(sort_param,key_block,ft_buf->lastkey, HA_OFFSET_ERROR); } return error; } /* flushing second-level tree keyblocks */ error=flush_pending_blocks(sort_param); /* updating lastkey with second-level tree info */ ft_intXstore(ft_buf->lastkey+val_off, -ft_buf->count); _mi_dpointer(sort_info->info, ft_buf->lastkey+val_off+HA_FT_WLEN, share->state.key_root[sort_param->key]); /* restoring first level tree data in sort_info/sort_param */ sort_info->key_block=sort_info->key_block_end- sort_info->param->sort_key_blocks; sort_param->keyinfo=share->keyinfo+sort_param->key; share->state.key_root[sort_param->key]=HA_OFFSET_ERROR; /* writing lastkey in first-level tree */ return error ? error : sort_insert_key(sort_param,sort_info->key_block, ft_buf->lastkey,HA_OFFSET_ERROR); } static int sort_ft_key_write(MI_SORT_PARAM *sort_param, const void *a) { uint a_len, val_off, val_len, error; uchar *p; SORT_INFO *sort_info=sort_param->sort_info; SORT_FT_BUF *ft_buf=sort_info->ft_buf; SORT_KEY_BLOCKS *key_block=sort_info->key_block; val_len= HA_FT_WLEN + sort_info->info->s->rec_reflength; get_key_full_length_rdonly(a_len, (uchar *)a); if (!ft_buf) { /* use two-level tree only if key_reflength fits in rec_reflength place and row format is NOT static - for _mi_dpointer not to garble offsets */ if ((sort_info->info->s->base.key_reflength <= sort_info->info->s->rec_reflength) && (sort_info->info->s->options & (HA_OPTION_PACK_RECORD | HA_OPTION_COMPRESS_RECORD))) ft_buf=(SORT_FT_BUF *)my_malloc(sort_param->keyinfo->block_length + sizeof(SORT_FT_BUF), MYF(MY_WME)); if (!ft_buf) { sort_param->key_write=sort_key_write; return sort_key_write(sort_param, a); } sort_info->ft_buf=ft_buf; goto word_init_ft_buf; /* no need to duplicate the code */ } get_key_full_length_rdonly(val_off, ft_buf->lastkey); if (ha_compare_text(sort_param->seg->charset, ((uchar *)a)+1,a_len-1, ft_buf->lastkey+1,val_off-1, 0, 0)==0) { if (!ft_buf->buf) /* store in second-level tree */ { ft_buf->count++; return sort_insert_key(sort_param,key_block, ((uchar *)a)+a_len, HA_OFFSET_ERROR); } /* storing the key in the buffer. 
*/ memcpy (ft_buf->buf, (char *)a+a_len, val_len); ft_buf->buf+=val_len; if (ft_buf->buf < ft_buf->end) return 0; /* converting to two-level tree */ p=ft_buf->lastkey+val_off; while (key_block->inited) key_block++; sort_info->key_block=key_block; sort_param->keyinfo=& sort_info->info->s->ft2_keyinfo; ft_buf->count=(uint) (ft_buf->buf - p)/val_len; /* flushing buffer to second-level tree */ for (error=0; !error && p < ft_buf->buf; p+= val_len) error=sort_insert_key(sort_param,key_block,p,HA_OFFSET_ERROR); ft_buf->buf=0; return error; } /* flushing buffer */ if ((error=sort_ft_buf_flush(sort_param))) return error; word_init_ft_buf: a_len+=val_len; memcpy(ft_buf->lastkey, a, a_len); ft_buf->buf=ft_buf->lastkey+a_len; /* 32 is just a safety margin here (at least max(val_len, sizeof(nod_flag)) should be there). May be better performance could be achieved if we'd put (sort_info->keyinfo->block_length-32)/XXX instead. TODO: benchmark the best value for XXX. */ ft_buf->end=ft_buf->lastkey+ (sort_param->keyinfo->block_length-32); return 0; } /* sort_ft_key_write */ /* get pointer to record from a key */ static my_off_t get_record_for_key(MI_INFO *info, MI_KEYDEF *keyinfo, uchar *key) { return _mi_dpos(info,0,key+_mi_keylength(keyinfo,key)); } /* get_record_for_key */ /* Insert a key in sort-key-blocks */ static int sort_insert_key(MI_SORT_PARAM *sort_param, register SORT_KEY_BLOCKS *key_block, uchar *key, my_off_t prev_block) { uint a_length,t_length,nod_flag; my_off_t filepos,key_file_length; uchar *anc_buff,*lastkey; MI_KEY_PARAM s_temp; MI_INFO *info; MI_KEYDEF *keyinfo=sort_param->keyinfo; SORT_INFO *sort_info= sort_param->sort_info; MI_CHECK *param=sort_info->param; DBUG_ENTER("sort_insert_key"); anc_buff=key_block->buff; info=sort_info->info; lastkey=key_block->lastkey; nod_flag= (key_block == sort_info->key_block ? 
0 : info->s->base.key_reflength); if (!key_block->inited) { key_block->inited=1; if (key_block == sort_info->key_block_end) { mi_check_print_error(param,"To many key-block-levels; Try increasing sort_key_blocks"); DBUG_RETURN(1); } a_length=2+nod_flag; key_block->end_pos=anc_buff+2; lastkey=0; /* No previous key in block */ } else a_length=mi_getint(anc_buff); /* Save pointer to previous block */ if (nod_flag) _mi_kpointer(info,key_block->end_pos,prev_block); t_length=(*keyinfo->pack_key)(keyinfo,nod_flag, (uchar*) 0,lastkey,lastkey,key, &s_temp); (*keyinfo->store_key)(keyinfo, key_block->end_pos+nod_flag,&s_temp); a_length+=t_length; mi_putint(anc_buff,a_length,nod_flag); key_block->end_pos+=t_length; if (a_length <= keyinfo->block_length) { (void) _mi_move_key(keyinfo,key_block->lastkey,key); key_block->last_length=a_length-t_length; DBUG_RETURN(0); } /* Fill block with end-zero and write filled block */ mi_putint(anc_buff,key_block->last_length,nod_flag); bzero((uchar*) anc_buff+key_block->last_length, keyinfo->block_length- key_block->last_length); key_file_length=info->state->key_file_length; if ((filepos=_mi_new(info,keyinfo,DFLT_INIT_HITS)) == HA_OFFSET_ERROR) DBUG_RETURN(1); /* If we read the page from the key cache, we have to write it back to it */ if (key_file_length == info->state->key_file_length) { if (_mi_write_keypage(info, keyinfo, filepos, DFLT_INIT_HITS, anc_buff)) DBUG_RETURN(1); } else if (mysql_file_pwrite(info->s->kfile, (uchar*) anc_buff, (uint) keyinfo->block_length, filepos, param->myf_rw)) DBUG_RETURN(1); DBUG_DUMP("buff",(uchar*) anc_buff,mi_getint(anc_buff)); /* Write separator-key to block in next level */ if (sort_insert_key(sort_param,key_block+1,key_block->lastkey,filepos)) DBUG_RETURN(1); /* clear old block and write new key in it */ key_block->inited=0; DBUG_RETURN(sort_insert_key(sort_param, key_block,key,prev_block)); } /* sort_insert_key */ /* Delete record when we found a duplicated key */ static int sort_delete_record(MI_SORT_PARAM *sort_param) { uint i; int old_file,error; uchar *key; SORT_INFO *sort_info=sort_param->sort_info; MI_CHECK *param=sort_info->param; MI_INFO *info=sort_info->info; DBUG_ENTER("sort_delete_record"); if ((param->testflag & (T_FORCE_UNIQUENESS|T_QUICK)) == T_QUICK) { mi_check_print_error(param, "Quick-recover aborted; Run recovery without switch -q or with switch -qq"); DBUG_RETURN(1); } if (info->s->options & HA_OPTION_COMPRESS_RECORD) { mi_check_print_error(param, "Recover aborted; Can't run standard recovery on compressed tables with errors in data-file. 
Use switch 'myisamchk --safe-recover' to fix it\n",stderr);; DBUG_RETURN(1); } old_file=info->dfile; info->dfile=info->rec_cache.file; if (sort_info->current_key) { key=info->lastkey+info->s->base.max_key_length; if ((error=(*info->s->read_rnd)(info,sort_param->record,info->lastpos,0)) && error != HA_ERR_RECORD_DELETED) { mi_check_print_error(param,"Can't read record to be removed"); info->dfile=old_file; DBUG_RETURN(1); } for (i=0 ; i < sort_info->current_key ; i++) { uint key_length=_mi_make_key(info,i,key,sort_param->record,info->lastpos); if (_mi_ck_delete(info,i,key,key_length)) { mi_check_print_error(param,"Can't delete key %d from record to be removed",i+1); info->dfile=old_file; DBUG_RETURN(1); } } if (sort_param->calc_checksum) param->glob_crc-=(*info->s->calc_checksum)(info, sort_param->record); } error=flush_io_cache(&info->rec_cache) || (*info->s->delete_record)(info); info->dfile=old_file; /* restore actual value */ info->state->records--; DBUG_RETURN(error); } /* sort_delete_record */ /* Fix all pending blocks and flush everything to disk */ int flush_pending_blocks(MI_SORT_PARAM *sort_param) { uint nod_flag,length; my_off_t filepos,key_file_length; SORT_KEY_BLOCKS *key_block; SORT_INFO *sort_info= sort_param->sort_info; myf myf_rw=sort_info->param->myf_rw; MI_INFO *info=sort_info->info; MI_KEYDEF *keyinfo=sort_param->keyinfo; DBUG_ENTER("flush_pending_blocks"); filepos= HA_OFFSET_ERROR; /* if empty file */ nod_flag=0; for (key_block=sort_info->key_block ; key_block->inited ; key_block++) { key_block->inited=0; length=mi_getint(key_block->buff); if (nod_flag) _mi_kpointer(info,key_block->end_pos,filepos); key_file_length=info->state->key_file_length; bzero((uchar*) key_block->buff+length, keyinfo->block_length-length); if ((filepos=_mi_new(info,keyinfo,DFLT_INIT_HITS)) == HA_OFFSET_ERROR) DBUG_RETURN(1); /* If we read the page from the key cache, we have to write it back */ if (key_file_length == info->state->key_file_length) { if (_mi_write_keypage(info, keyinfo, filepos, DFLT_INIT_HITS, key_block->buff)) DBUG_RETURN(1); } else if (mysql_file_pwrite(info->s->kfile, (uchar*) key_block->buff, (uint) keyinfo->block_length, filepos, myf_rw)) DBUG_RETURN(1); DBUG_DUMP("buff",(uchar*) key_block->buff,length); nod_flag=1; } info->s->state.key_root[sort_param->key]=filepos; /* Last is root for tree */ DBUG_RETURN(0); } /* flush_pending_blocks */ /* alloc space and pointers for key_blocks */ static SORT_KEY_BLOCKS *alloc_key_blocks(MI_CHECK *param, uint blocks, uint buffer_length) { reg1 uint i; SORT_KEY_BLOCKS *block; DBUG_ENTER("alloc_key_blocks"); if (!(block=(SORT_KEY_BLOCKS*) my_malloc((sizeof(SORT_KEY_BLOCKS)+ buffer_length+IO_SIZE)*blocks, MYF(0)))) { mi_check_print_error(param,"Not enough memory for sort-key-blocks"); return(0); } for (i=0 ; i < blocks ; i++) { block[i].inited=0; block[i].buff=(uchar*) (block+blocks)+(buffer_length+IO_SIZE)*i; } DBUG_RETURN(block); } /* alloc_key_blocks */ /* Check if file is almost full */ int test_if_almost_full(MI_INFO *info) { if (info->s->options & HA_OPTION_COMPRESS_RECORD) return 0; return mysql_file_seek(info->s->kfile, 0L, MY_SEEK_END, MYF(MY_THREADSAFE)) / 10 * 9 > (my_off_t) info->s->base.max_key_file_length || mysql_file_seek(info->dfile, 0L, MY_SEEK_END, MYF(0)) / 10 * 9 > (my_off_t) info->s->base.max_data_file_length; } /* Recreate table with bigger more alloced record-data */ int recreate_table(MI_CHECK *param, MI_INFO **org_info, char *filename) { int error; MI_INFO info; MYISAM_SHARE share; MI_KEYDEF *keyinfo,*key,*key_end; 
HA_KEYSEG *keysegs,*keyseg; MI_COLUMNDEF *recdef,*rec,*end; MI_UNIQUEDEF *uniquedef,*u_ptr,*u_end; MI_STATUS_INFO status_info; uint unpack,key_parts; ha_rows max_records; ulonglong file_length,tmp_length; MI_CREATE_INFO create_info; DBUG_ENTER("recreate_table"); error=1; /* Default error */ info= **org_info; status_info= (*org_info)->state[0]; info.state= &status_info; share= *(*org_info)->s; unpack= (share.options & HA_OPTION_COMPRESS_RECORD) && (param->testflag & T_UNPACK); if (!(keyinfo=(MI_KEYDEF*) my_alloca(sizeof(MI_KEYDEF)*share.base.keys))) DBUG_RETURN(0); memcpy((uchar*) keyinfo,(uchar*) share.keyinfo, (size_t) (sizeof(MI_KEYDEF)*share.base.keys)); key_parts= share.base.all_key_parts; if (!(keysegs=(HA_KEYSEG*) my_alloca(sizeof(HA_KEYSEG)* (key_parts+share.base.keys)))) { my_afree((uchar*) keyinfo); DBUG_RETURN(1); } if (!(recdef=(MI_COLUMNDEF*) my_alloca(sizeof(MI_COLUMNDEF)*(share.base.fields+1)))) { my_afree((uchar*) keyinfo); my_afree((uchar*) keysegs); DBUG_RETURN(1); } if (!(uniquedef=(MI_UNIQUEDEF*) my_alloca(sizeof(MI_UNIQUEDEF)*(share.state.header.uniques+1)))) { my_afree((uchar*) recdef); my_afree((uchar*) keyinfo); my_afree((uchar*) keysegs); DBUG_RETURN(1); } /* Copy the column definitions */ memcpy((uchar*) recdef,(uchar*) share.rec, (size_t) (sizeof(MI_COLUMNDEF)*(share.base.fields+1))); for (rec=recdef,end=recdef+share.base.fields; rec != end ; rec++) { if (unpack && !(share.options & HA_OPTION_PACK_RECORD) && rec->type != FIELD_BLOB && rec->type != FIELD_VARCHAR && rec->type != FIELD_CHECK) rec->type=(int) FIELD_NORMAL; } /* Change the new key to point at the saved key segments */ memcpy((uchar*) keysegs,(uchar*) share.keyparts, (size_t) (sizeof(HA_KEYSEG)*(key_parts+share.base.keys+ share.state.header.uniques))); keyseg=keysegs; for (key=keyinfo,key_end=keyinfo+share.base.keys; key != key_end ; key++) { key->seg=keyseg; for (; keyseg->type ; keyseg++) { if (param->language) keyseg->language=param->language; /* change language */ } keyseg++; /* Skip end pointer */ } /* Copy the unique definitions and change them to point at the new key segments*/ memcpy((uchar*) uniquedef,(uchar*) share.uniqueinfo, (size_t) (sizeof(MI_UNIQUEDEF)*(share.state.header.uniques))); for (u_ptr=uniquedef,u_end=uniquedef+share.state.header.uniques; u_ptr != u_end ; u_ptr++) { u_ptr->seg=keyseg; keyseg+=u_ptr->keysegs+1; } unpack= (share.options & HA_OPTION_COMPRESS_RECORD) && (param->testflag & T_UNPACK); share.options&= ~HA_OPTION_TEMP_COMPRESS_RECORD; file_length=(ulonglong) mysql_file_seek(info.dfile, 0L, MY_SEEK_END, MYF(0)); tmp_length= file_length+file_length/10; set_if_bigger(file_length,param->max_data_file_length); set_if_bigger(file_length,tmp_length); set_if_bigger(file_length,(ulonglong) share.base.max_data_file_length); if (share.options & HA_OPTION_COMPRESS_RECORD) share.base.records= max_records= info.state->records; else if (!(share.options & HA_OPTION_PACK_RECORD)) max_records= (ha_rows) (file_length / share.base.pack_reclength); else max_records= 0; (void) mi_close(*org_info); bzero((char*) &create_info,sizeof(create_info)); create_info.max_rows= max_records; create_info.reloc_rows=share.base.reloc; create_info.old_options=(share.options | (unpack ? HA_OPTION_TEMP_COMPRESS_RECORD : 0)); create_info.data_file_length=file_length; create_info.auto_increment=share.state.auto_increment; create_info.language = (param->language ? param->language : share.state.header.language); create_info.key_file_length= status_info.key_file_length; /* Allow for creating an auto_increment key. 
This has an effect only if an auto_increment key exists in the original table. */ create_info.with_auto_increment= TRUE; /* We don't have to handle symlinks here because we are using HA_DONT_TOUCH_DATA */ if (mi_create(filename, share.base.keys - share.state.header.uniques, keyinfo, share.base.fields, recdef, share.state.header.uniques, uniquedef, &create_info, HA_DONT_TOUCH_DATA)) { mi_check_print_error(param,"Got error %d when trying to recreate indexfile",my_errno); goto end; } *org_info=mi_open(filename,O_RDWR, (param->testflag & T_WAIT_FOREVER) ? HA_OPEN_WAIT_IF_LOCKED : (param->testflag & T_DESCRIPT) ? HA_OPEN_IGNORE_IF_LOCKED : HA_OPEN_ABORT_IF_LOCKED); if (!*org_info) { mi_check_print_error(param,"Got error %d when trying to open re-created indexfile", my_errno); goto end; } /* We are modifing */ (*org_info)->s->options&= ~HA_OPTION_READ_ONLY_DATA; (void) _mi_readinfo(*org_info,F_WRLCK,0); (*org_info)->state->records=info.state->records; if (share.state.create_time) (*org_info)->s->state.create_time=share.state.create_time; (*org_info)->s->state.unique=(*org_info)->this_unique= share.state.unique; (*org_info)->state->checksum=info.state->checksum; (*org_info)->state->del=info.state->del; (*org_info)->s->state.dellink=share.state.dellink; (*org_info)->state->empty=info.state->empty; (*org_info)->state->data_file_length=info.state->data_file_length; if (update_state_info(param,*org_info,UPDATE_TIME | UPDATE_STAT | UPDATE_OPEN_COUNT)) goto end; error=0; end: my_afree((uchar*) uniquedef); my_afree((uchar*) keyinfo); my_afree((uchar*) recdef); my_afree((uchar*) keysegs); DBUG_RETURN(error); } /* write suffix to data file if neaded */ int write_data_suffix(SORT_INFO *sort_info, my_bool fix_datafile) { MI_INFO *info=sort_info->info; if (info->s->options & HA_OPTION_COMPRESS_RECORD && fix_datafile) { uchar buff[MEMMAP_EXTRA_MARGIN]; bzero(buff,sizeof(buff)); if (my_b_write(&info->rec_cache,buff,sizeof(buff))) { mi_check_print_error(sort_info->param, "%d when writing to datafile",my_errno); return 1; } sort_info->param->read_cache.end_of_file+=sizeof(buff); } return 0; } /* Update state and myisamchk_time of indexfile */ int update_state_info(MI_CHECK *param, MI_INFO *info,uint update) { MYISAM_SHARE *share=info->s; if (update & UPDATE_OPEN_COUNT) { share->state.open_count=0; share->global_changed=0; } if (update & UPDATE_STAT) { uint i, key_parts= mi_uint2korr(share->state.header.key_parts); share->state.rec_per_key_rows=info->state->records; share->state.changed&= ~STATE_NOT_ANALYZED; if (info->state->records) { for (i=0; i<key_parts; i++) { if (!(share->state.rec_per_key_part[i]=param->rec_per_key_part[i])) share->state.changed|= STATE_NOT_ANALYZED; } } } if (update & (UPDATE_STAT | UPDATE_SORT | UPDATE_TIME | UPDATE_AUTO_INC)) { if (update & UPDATE_TIME) { share->state.check_time= (long) time((time_t*) 0); if (!share->state.create_time) share->state.create_time=share->state.check_time; } /* When tables are locked we haven't synched the share state and the real state for a while so we better do it here before synching the share state to disk. Only when table is write locked is it necessary to perform this synch. 
*/ if (info->lock_type == F_WRLCK) share->state.state= *info->state; if (mi_state_info_write(share->kfile,&share->state,1+2)) goto err; share->changed=0; } { /* Force update of status */ int error; uint r_locks=share->r_locks,w_locks=share->w_locks; share->r_locks= share->w_locks= share->tot_locks= 0; error=_mi_writeinfo(info,WRITEINFO_NO_UNLOCK); share->r_locks=r_locks; share->w_locks=w_locks; share->tot_locks=r_locks+w_locks; if (!error) return 0; } err: mi_check_print_error(param,"%d when updating keyfile",my_errno); return 1; } /* Update auto increment value for a table When setting the 'repair_only' flag we only want to change the old auto_increment value if its wrong (smaller than some given key). The reason is that we shouldn't change the auto_increment value for a table without good reason when only doing a repair; If the user have inserted and deleted rows, the auto_increment value may be bigger than the biggest current row and this is ok. If repair_only is not set, we will update the flag to the value in param->auto_increment is bigger than the biggest key. */ void update_auto_increment_key(MI_CHECK *param, MI_INFO *info, my_bool repair_only) { uchar *record= 0; DBUG_ENTER("update_auto_increment_key"); if (!info->s->base.auto_key || ! mi_is_key_active(info->s->state.key_map, info->s->base.auto_key - 1)) { if (!(param->testflag & T_VERY_SILENT)) mi_check_print_info(param, "Table: %s doesn't have an auto increment key\n", param->isam_file_name); DBUG_VOID_RETURN; } if (!(param->testflag & T_SILENT) && !(param->testflag & T_REP)) printf("Updating MyISAM file: %s\n", param->isam_file_name); /* We have to use an allocated buffer instead of info->rec_buff as _mi_put_key_in_record() may use info->rec_buff */ if (!mi_alloc_rec_buff(info, -1, &record)) { mi_check_print_error(param,"Not enough memory for extra record"); DBUG_VOID_RETURN; } mi_extra(info,HA_EXTRA_KEYREAD,0); if (mi_rlast(info, record, info->s->base.auto_key-1)) { if (my_errno != HA_ERR_END_OF_FILE) { mi_extra(info,HA_EXTRA_NO_KEYREAD,0); my_free(mi_get_rec_buff_ptr(info, record)); mi_check_print_error(param,"%d when reading last record",my_errno); DBUG_VOID_RETURN; } if (!repair_only) info->s->state.auto_increment=param->auto_increment_value; } else { ulonglong auto_increment= retrieve_auto_increment(info, record); set_if_bigger(info->s->state.auto_increment,auto_increment); if (!repair_only) set_if_bigger(info->s->state.auto_increment, param->auto_increment_value); } mi_extra(info,HA_EXTRA_NO_KEYREAD,0); my_free(mi_get_rec_buff_ptr(info, record)); update_state_info(param, info, UPDATE_AUTO_INC); DBUG_VOID_RETURN; } /* Update statistics for each part of an index SYNOPSIS update_key_parts() keyinfo IN Index information (only key->keysegs used) rec_per_key_part OUT Store statistics here unique IN Array of (#distinct tuples) notnull_tuples IN Array of (#tuples), or NULL records Number of records in the table DESCRIPTION This function is called produce index statistics values from unique and notnull_tuples arrays after these arrays were produced with sequential index scan (the scan is done in two places: chk_index() and sort_key_write()). This function handles all 3 index statistics collection methods. Unique is an array: unique[0]= (#different values of {keypart1}) - 1 unique[1]= (#different values of {keypart1,keypart2} tuple)-unique[0]-1 ... 
For MI_STATS_METHOD_IGNORE_NULLS method, notnull_tuples is an array too: notnull_tuples[0]= (#of {keypart1} tuples such that keypart1 is not NULL) notnull_tuples[1]= (#of {keypart1,keypart2} tuples such that all keypart{i} are not NULL) ... For all other statistics collection methods notnull_tuples==NULL. Output is an array: rec_per_key_part[k] = = E(#records in the table such that keypart_1=c_1 AND ... AND keypart_k=c_k for arbitrary constants c_1 ... c_k) = {assuming that values have uniform distribution and index contains all tuples from the domain (or that {c_1, ..., c_k} tuple is choosen from index tuples} = #tuples-in-the-index / #distinct-tuples-in-the-index. The #tuples-in-the-index and #distinct-tuples-in-the-index have different meaning depending on which statistics collection method is used: MI_STATS_METHOD_* how are nulls compared? which tuples are counted? NULLS_EQUAL NULL == NULL all tuples in table NULLS_NOT_EQUAL NULL != NULL all tuples in table IGNORE_NULLS n/a tuples that don't have NULLs */ void update_key_parts(MI_KEYDEF *keyinfo, ulong *rec_per_key_part, ulonglong *unique, ulonglong *notnull, ulonglong records) { ulonglong count=0,tmp, unique_tuples; ulonglong tuples= records; uint parts; for (parts=0 ; parts < keyinfo->keysegs ; parts++) { count+=unique[parts]; unique_tuples= count + 1; if (notnull) { tuples= notnull[parts]; /* #(unique_tuples not counting tuples with NULLs) = #(unique_tuples counting tuples with NULLs as different) - #(tuples with NULLs) */ unique_tuples -= (records - notnull[parts]); } if (unique_tuples == 0) tmp= 1; else if (count == 0) tmp= tuples; /* 1 unique tuple */ else tmp= (tuples + unique_tuples/2) / unique_tuples; /* for some weird keys (e.g. FULLTEXT) tmp can be <1 here. let's ensure it is not */ set_if_bigger(tmp,1); if (tmp >= (ulonglong) ~(ulong) 0) tmp=(ulonglong) ~(ulong) 0; *rec_per_key_part=(ulong) tmp; rec_per_key_part++; } } static ha_checksum mi_byte_checksum(const uchar *buf, uint length) { ha_checksum crc; const uchar *end=buf+length; for (crc=0; buf != end; buf++) crc=((crc << 1) + *((uchar*) buf)) + test(crc & (((ha_checksum) 1) << (8*sizeof(ha_checksum)-1))); return crc; } static my_bool mi_too_big_key_for_sort(MI_KEYDEF *key, ha_rows rows) { uint key_maxlength=key->maxlength; if (key->flag & HA_FULLTEXT) { uint ft_max_word_len_for_sort=FT_MAX_WORD_LEN_FOR_SORT* key->seg->charset->mbmaxlen; key_maxlength+=ft_max_word_len_for_sort-HA_FT_MAXBYTELEN; } return (key->flag & HA_SPATIAL) || (key->flag & (HA_BINARY_PACK_KEY | HA_VAR_LENGTH_KEY | HA_FULLTEXT) && ((ulonglong) rows * key_maxlength > myisam_max_temp_length)); } /* Deactivate all not unique index that can be recreated fast These include packed keys on which sorting will use more temporary space than the max allowed file length or for which the unpacked keys will take much more space than packed keys. Note that 'rows' may be zero for the case when we don't know how many rows we will put into the file. */ void mi_disable_non_unique_index(MI_INFO *info, ha_rows rows) { MYISAM_SHARE *share=info->s; MI_KEYDEF *key=share->keyinfo; uint i; DBUG_ASSERT(info->state->records == 0 && (!rows || rows >= MI_MIN_ROWS_TO_DISABLE_INDEXES)); for (i=0 ; i < share->base.keys ; i++,key++) { if (!(key->flag & (HA_NOSAME | HA_SPATIAL | HA_AUTO_KEY)) && ! 
mi_too_big_key_for_sort(key,rows) && info->s->base.auto_key != i+1) { mi_clear_key_active(share->state.key_map, i); info->update|= HA_STATE_CHANGED; } } } /* Return TRUE if we can use repair by sorting One can set the force argument to force to use sorting even if the temporary file would be quite big! */ my_bool mi_test_if_sort_rep(MI_INFO *info, ha_rows rows, ulonglong key_map, my_bool force) { MYISAM_SHARE *share=info->s; MI_KEYDEF *key=share->keyinfo; uint i; /* mi_repair_by_sort only works if we have at least one key. If we don't have any keys, we should use the normal repair. */ if (! mi_is_any_key_active(key_map)) return FALSE; /* Can't use sort */ for (i=0 ; i < share->base.keys ; i++,key++) { if (!force && mi_too_big_key_for_sort(key,rows)) return FALSE; } return TRUE; } static void set_data_file_type(SORT_INFO *sort_info, MYISAM_SHARE *share) { if ((sort_info->new_data_file_type=share->data_file_type) == COMPRESSED_RECORD && sort_info->param->testflag & T_UNPACK) { MYISAM_SHARE tmp; if (share->options & HA_OPTION_PACK_RECORD) sort_info->new_data_file_type = DYNAMIC_RECORD; else sort_info->new_data_file_type = STATIC_RECORD; /* Set delete_function for sort_delete_record() */ memcpy((char*) &tmp, share, sizeof(*share)); tmp.options= ~HA_OPTION_COMPRESS_RECORD; mi_setup_functions(&tmp); share->delete_record=tmp.delete_record; } } /* Find the first NULL value in index-suffix values tuple SYNOPSIS ha_find_null() keyseg Array of keyparts for key suffix a Key suffix value tuple DESCRIPTION Find the first NULL value in index-suffix values tuple. TODO Consider optimizing this function or its use so we don't search for NULL values in completely NOT NULL index suffixes. RETURN First key part that has NULL as value in values tuple, or the last key part (with keyseg->type==HA_TYPE_END) if values tuple doesn't contain NULLs. */ static HA_KEYSEG *ha_find_null(HA_KEYSEG *keyseg, uchar *a) { for (; (enum ha_base_keytype) keyseg->type != HA_KEYTYPE_END; keyseg++) { uchar *end; if (keyseg->null_bit) { if (!*a++) return keyseg; } end= a+ keyseg->length; switch ((enum ha_base_keytype) keyseg->type) { case HA_KEYTYPE_TEXT: case HA_KEYTYPE_BINARY: case HA_KEYTYPE_BIT: if (keyseg->flag & HA_SPACE_PACK) { int a_length; get_key_length(a_length, a); a += a_length; break; } else a= end; break; case HA_KEYTYPE_VARTEXT1: case HA_KEYTYPE_VARTEXT2: case HA_KEYTYPE_VARBINARY1: case HA_KEYTYPE_VARBINARY2: { int a_length; get_key_length(a_length, a); a+= a_length; break; } case HA_KEYTYPE_NUM: if (keyseg->flag & HA_SPACE_PACK) { int alength= *a++; end= a+alength; } a= end; break; case HA_KEYTYPE_INT8: case HA_KEYTYPE_SHORT_INT: case HA_KEYTYPE_USHORT_INT: case HA_KEYTYPE_LONG_INT: case HA_KEYTYPE_ULONG_INT: case HA_KEYTYPE_INT24: case HA_KEYTYPE_UINT24: #ifdef HAVE_LONG_LONG case HA_KEYTYPE_LONGLONG: case HA_KEYTYPE_ULONGLONG: #endif case HA_KEYTYPE_FLOAT: case HA_KEYTYPE_DOUBLE: a= end; break; case HA_KEYTYPE_END: /* purecov: inspected */ /* keep compiler happy */ DBUG_ASSERT(0); break; } } return keyseg; }
./CrossVul/dataset_final_sorted/CWE-362/c/bad_5222_4
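The update_key_parts() comment in the MyISAM file above describes how per-keypart index statistics are derived: rec_per_key_part[k] is the rounded ratio of tuples in the index to distinct key prefixes of length k+1, never reported as less than 1. The standalone sketch below mirrors that arithmetic for the NULLS_EQUAL method only (no notnull[] array); it is not part of the MyISAM sources, uses plain C integer types instead of ulonglong/ulong, illustrative names, and omits the overflow clamp of the original.

#include <stdio.h>

/*
 * Simplified rec_per_key_part computation as described in the
 * update_key_parts() comment above.  unique[] follows the same convention:
 * the running sum of unique[0..part] plus one equals the number of distinct
 * key prefixes of length part+1.
 */
static void demo_update_key_parts(unsigned long long records,
                                  const unsigned long long *unique,
                                  unsigned int keysegs,
                                  unsigned long *rec_per_key_part)
{
  unsigned long long count = 0;

  for (unsigned int part = 0; part < keysegs; part++)
  {
    unsigned long long tmp, unique_tuples;

    count += unique[part];          /* running (#distinct prefixes) - 1      */
    unique_tuples = count + 1;      /* #distinct prefixes of length part+1   */

    if (count == 0)
      tmp = records;                /* only one distinct value so far        */
    else
      tmp = (records + unique_tuples / 2) / unique_tuples;  /* rounded ratio */

    if (tmp < 1)
      tmp = 1;                      /* never report fewer than 1 row per key */
    rec_per_key_part[part] = (unsigned long) tmp;
  }
}

int main(void)
{
  /* 1000 rows; 10 distinct keypart1 values, 100 distinct (kp1,kp2) pairs:
     unique[0] = 10 - 1 = 9, unique[1] = 100 - 9 - 1 = 90 */
  unsigned long long unique[2] = { 9, 90 };
  unsigned long rec_per_key_part[2];

  demo_update_key_parts(1000ULL, unique, 2, rec_per_key_part);
  printf("%lu %lu\n", rec_per_key_part[0], rec_per_key_part[1]);  /* 100 10 */
  return 0;
}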
crossvul-cpp_data_bad_1757_0
/* * linux/ipc/msg.c * Copyright (C) 1992 Krishna Balasubramanian * * Removed all the remaining kerneld mess * Catch the -EFAULT stuff properly * Use GFP_KERNEL for messages as in 1.2 * Fixed up the unchecked user space derefs * Copyright (C) 1998 Alan Cox & Andi Kleen * * /proc/sysvipc/msg support (c) 1999 Dragos Acostachioaie <dragos@iname.com> * * mostly rewritten, threaded and wake-one semantics added * MSGMAX limit removed, sysctl's added * (c) 1999 Manfred Spraul <manfred@colorfullife.com> * * support for audit of ipc object properties and permission changes * Dustin Kirkland <dustin.kirkland@us.ibm.com> * * namespaces support * OpenVZ, SWsoft Inc. * Pavel Emelianov <xemul@openvz.org> */ #include <linux/capability.h> #include <linux/msg.h> #include <linux/spinlock.h> #include <linux/init.h> #include <linux/mm.h> #include <linux/proc_fs.h> #include <linux/list.h> #include <linux/security.h> #include <linux/sched.h> #include <linux/syscalls.h> #include <linux/audit.h> #include <linux/seq_file.h> #include <linux/rwsem.h> #include <linux/nsproxy.h> #include <linux/ipc_namespace.h> #include <asm/current.h> #include <linux/uaccess.h> #include "util.h" /* one msg_receiver structure for each sleeping receiver */ struct msg_receiver { struct list_head r_list; struct task_struct *r_tsk; int r_mode; long r_msgtype; long r_maxsize; /* * Mark r_msg volatile so that the compiler * does not try to get smart and optimize * it. We rely on this for the lockless * receive algorithm. */ struct msg_msg *volatile r_msg; }; /* one msg_sender for each sleeping sender */ struct msg_sender { struct list_head list; struct task_struct *tsk; }; #define SEARCH_ANY 1 #define SEARCH_EQUAL 2 #define SEARCH_NOTEQUAL 3 #define SEARCH_LESSEQUAL 4 #define SEARCH_NUMBER 5 #define msg_ids(ns) ((ns)->ids[IPC_MSG_IDS]) static inline struct msg_queue *msq_obtain_object(struct ipc_namespace *ns, int id) { struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&msg_ids(ns), id); if (IS_ERR(ipcp)) return ERR_CAST(ipcp); return container_of(ipcp, struct msg_queue, q_perm); } static inline struct msg_queue *msq_obtain_object_check(struct ipc_namespace *ns, int id) { struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&msg_ids(ns), id); if (IS_ERR(ipcp)) return ERR_CAST(ipcp); return container_of(ipcp, struct msg_queue, q_perm); } static inline void msg_rmid(struct ipc_namespace *ns, struct msg_queue *s) { ipc_rmid(&msg_ids(ns), &s->q_perm); } static void msg_rcu_free(struct rcu_head *head) { struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu); struct msg_queue *msq = ipc_rcu_to_struct(p); security_msg_queue_free(msq); ipc_rcu_free(head); } /** * newque - Create a new msg queue * @ns: namespace * @params: ptr to the structure that contains the key and msgflg * * Called with msg_ids.rwsem held (writer) */ static int newque(struct ipc_namespace *ns, struct ipc_params *params) { struct msg_queue *msq; int id, retval; key_t key = params->key; int msgflg = params->flg; msq = ipc_rcu_alloc(sizeof(*msq)); if (!msq) return -ENOMEM; msq->q_perm.mode = msgflg & S_IRWXUGO; msq->q_perm.key = key; msq->q_perm.security = NULL; retval = security_msg_queue_alloc(msq); if (retval) { ipc_rcu_putref(msq, ipc_rcu_free); return retval; } /* ipc_addid() locks msq upon success. 
*/ id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni); if (id < 0) { ipc_rcu_putref(msq, msg_rcu_free); return id; } msq->q_stime = msq->q_rtime = 0; msq->q_ctime = get_seconds(); msq->q_cbytes = msq->q_qnum = 0; msq->q_qbytes = ns->msg_ctlmnb; msq->q_lspid = msq->q_lrpid = 0; INIT_LIST_HEAD(&msq->q_messages); INIT_LIST_HEAD(&msq->q_receivers); INIT_LIST_HEAD(&msq->q_senders); ipc_unlock_object(&msq->q_perm); rcu_read_unlock(); return msq->q_perm.id; } static inline void ss_add(struct msg_queue *msq, struct msg_sender *mss) { mss->tsk = current; __set_current_state(TASK_INTERRUPTIBLE); list_add_tail(&mss->list, &msq->q_senders); } static inline void ss_del(struct msg_sender *mss) { if (mss->list.next != NULL) list_del(&mss->list); } static void ss_wakeup(struct list_head *h, int kill) { struct msg_sender *mss, *t; list_for_each_entry_safe(mss, t, h, list) { if (kill) mss->list.next = NULL; wake_up_process(mss->tsk); } } static void expunge_all(struct msg_queue *msq, int res) { struct msg_receiver *msr, *t; list_for_each_entry_safe(msr, t, &msq->q_receivers, r_list) { msr->r_msg = NULL; /* initialize expunge ordering */ wake_up_process(msr->r_tsk); /* * Ensure that the wakeup is visible before setting r_msg as * the receiving end depends on it: either spinning on a nil, * or dealing with -EAGAIN cases. See lockless receive part 1 * and 2 in do_msgrcv(). */ smp_wmb(); /* barrier (B) */ msr->r_msg = ERR_PTR(res); } } /* * freeque() wakes up waiters on the sender and receiver waiting queue, * removes the message queue from message queue ID IDR, and cleans up all the * messages associated with this queue. * * msg_ids.rwsem (writer) and the spinlock for this message queue are held * before freeque() is called. msg_ids.rwsem remains locked on exit. */ static void freeque(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp) { struct msg_msg *msg, *t; struct msg_queue *msq = container_of(ipcp, struct msg_queue, q_perm); expunge_all(msq, -EIDRM); ss_wakeup(&msq->q_senders, 1); msg_rmid(ns, msq); ipc_unlock_object(&msq->q_perm); rcu_read_unlock(); list_for_each_entry_safe(msg, t, &msq->q_messages, m_list) { atomic_dec(&ns->msg_hdrs); free_msg(msg); } atomic_sub(msq->q_cbytes, &ns->msg_bytes); ipc_rcu_putref(msq, msg_rcu_free); } /* * Called with msg_ids.rwsem and ipcp locked. 
*/ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg) { struct msg_queue *msq = container_of(ipcp, struct msg_queue, q_perm); return security_msg_queue_associate(msq, msgflg); } SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg) { struct ipc_namespace *ns; static const struct ipc_ops msg_ops = { .getnew = newque, .associate = msg_security, }; struct ipc_params msg_params; ns = current->nsproxy->ipc_ns; msg_params.key = key; msg_params.flg = msgflg; return ipcget(ns, &msg_ids(ns), &msg_ops, &msg_params); } static inline unsigned long copy_msqid_to_user(void __user *buf, struct msqid64_ds *in, int version) { switch (version) { case IPC_64: return copy_to_user(buf, in, sizeof(*in)); case IPC_OLD: { struct msqid_ds out; memset(&out, 0, sizeof(out)); ipc64_perm_to_ipc_perm(&in->msg_perm, &out.msg_perm); out.msg_stime = in->msg_stime; out.msg_rtime = in->msg_rtime; out.msg_ctime = in->msg_ctime; if (in->msg_cbytes > USHRT_MAX) out.msg_cbytes = USHRT_MAX; else out.msg_cbytes = in->msg_cbytes; out.msg_lcbytes = in->msg_cbytes; if (in->msg_qnum > USHRT_MAX) out.msg_qnum = USHRT_MAX; else out.msg_qnum = in->msg_qnum; if (in->msg_qbytes > USHRT_MAX) out.msg_qbytes = USHRT_MAX; else out.msg_qbytes = in->msg_qbytes; out.msg_lqbytes = in->msg_qbytes; out.msg_lspid = in->msg_lspid; out.msg_lrpid = in->msg_lrpid; return copy_to_user(buf, &out, sizeof(out)); } default: return -EINVAL; } } static inline unsigned long copy_msqid_from_user(struct msqid64_ds *out, void __user *buf, int version) { switch (version) { case IPC_64: if (copy_from_user(out, buf, sizeof(*out))) return -EFAULT; return 0; case IPC_OLD: { struct msqid_ds tbuf_old; if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old))) return -EFAULT; out->msg_perm.uid = tbuf_old.msg_perm.uid; out->msg_perm.gid = tbuf_old.msg_perm.gid; out->msg_perm.mode = tbuf_old.msg_perm.mode; if (tbuf_old.msg_qbytes == 0) out->msg_qbytes = tbuf_old.msg_lqbytes; else out->msg_qbytes = tbuf_old.msg_qbytes; return 0; } default: return -EINVAL; } } /* * This function handles some msgctl commands which require the rwsem * to be held in write mode. * NOTE: no locks must be held, the rwsem is taken inside this function. */ static int msgctl_down(struct ipc_namespace *ns, int msqid, int cmd, struct msqid_ds __user *buf, int version) { struct kern_ipc_perm *ipcp; struct msqid64_ds uninitialized_var(msqid64); struct msg_queue *msq; int err; if (cmd == IPC_SET) { if (copy_msqid_from_user(&msqid64, buf, version)) return -EFAULT; } down_write(&msg_ids(ns).rwsem); rcu_read_lock(); ipcp = ipcctl_pre_down_nolock(ns, &msg_ids(ns), msqid, cmd, &msqid64.msg_perm, msqid64.msg_qbytes); if (IS_ERR(ipcp)) { err = PTR_ERR(ipcp); goto out_unlock1; } msq = container_of(ipcp, struct msg_queue, q_perm); err = security_msg_queue_msgctl(msq, cmd); if (err) goto out_unlock1; switch (cmd) { case IPC_RMID: ipc_lock_object(&msq->q_perm); /* freeque unlocks the ipc object and rcu */ freeque(ns, ipcp); goto out_up; case IPC_SET: if (msqid64.msg_qbytes > ns->msg_ctlmnb && !capable(CAP_SYS_RESOURCE)) { err = -EPERM; goto out_unlock1; } ipc_lock_object(&msq->q_perm); err = ipc_update_perm(&msqid64.msg_perm, ipcp); if (err) goto out_unlock0; msq->q_qbytes = msqid64.msg_qbytes; msq->q_ctime = get_seconds(); /* sleeping receivers might be excluded by * stricter permissions. */ expunge_all(msq, -EAGAIN); /* sleeping senders might be able to send * due to a larger queue size. 
*/ ss_wakeup(&msq->q_senders, 0); break; default: err = -EINVAL; goto out_unlock1; } out_unlock0: ipc_unlock_object(&msq->q_perm); out_unlock1: rcu_read_unlock(); out_up: up_write(&msg_ids(ns).rwsem); return err; } static int msgctl_nolock(struct ipc_namespace *ns, int msqid, int cmd, int version, void __user *buf) { int err; struct msg_queue *msq; switch (cmd) { case IPC_INFO: case MSG_INFO: { struct msginfo msginfo; int max_id; if (!buf) return -EFAULT; /* * We must not return kernel stack data. * due to padding, it's not enough * to set all member fields. */ err = security_msg_queue_msgctl(NULL, cmd); if (err) return err; memset(&msginfo, 0, sizeof(msginfo)); msginfo.msgmni = ns->msg_ctlmni; msginfo.msgmax = ns->msg_ctlmax; msginfo.msgmnb = ns->msg_ctlmnb; msginfo.msgssz = MSGSSZ; msginfo.msgseg = MSGSEG; down_read(&msg_ids(ns).rwsem); if (cmd == MSG_INFO) { msginfo.msgpool = msg_ids(ns).in_use; msginfo.msgmap = atomic_read(&ns->msg_hdrs); msginfo.msgtql = atomic_read(&ns->msg_bytes); } else { msginfo.msgmap = MSGMAP; msginfo.msgpool = MSGPOOL; msginfo.msgtql = MSGTQL; } max_id = ipc_get_maxid(&msg_ids(ns)); up_read(&msg_ids(ns).rwsem); if (copy_to_user(buf, &msginfo, sizeof(struct msginfo))) return -EFAULT; return (max_id < 0) ? 0 : max_id; } case MSG_STAT: case IPC_STAT: { struct msqid64_ds tbuf; int success_return; if (!buf) return -EFAULT; memset(&tbuf, 0, sizeof(tbuf)); rcu_read_lock(); if (cmd == MSG_STAT) { msq = msq_obtain_object(ns, msqid); if (IS_ERR(msq)) { err = PTR_ERR(msq); goto out_unlock; } success_return = msq->q_perm.id; } else { msq = msq_obtain_object_check(ns, msqid); if (IS_ERR(msq)) { err = PTR_ERR(msq); goto out_unlock; } success_return = 0; } err = -EACCES; if (ipcperms(ns, &msq->q_perm, S_IRUGO)) goto out_unlock; err = security_msg_queue_msgctl(msq, cmd); if (err) goto out_unlock; kernel_to_ipc64_perm(&msq->q_perm, &tbuf.msg_perm); tbuf.msg_stime = msq->q_stime; tbuf.msg_rtime = msq->q_rtime; tbuf.msg_ctime = msq->q_ctime; tbuf.msg_cbytes = msq->q_cbytes; tbuf.msg_qnum = msq->q_qnum; tbuf.msg_qbytes = msq->q_qbytes; tbuf.msg_lspid = msq->q_lspid; tbuf.msg_lrpid = msq->q_lrpid; rcu_read_unlock(); if (copy_msqid_to_user(buf, &tbuf, version)) return -EFAULT; return success_return; } default: return -EINVAL; } return err; out_unlock: rcu_read_unlock(); return err; } SYSCALL_DEFINE3(msgctl, int, msqid, int, cmd, struct msqid_ds __user *, buf) { int version; struct ipc_namespace *ns; if (msqid < 0 || cmd < 0) return -EINVAL; version = ipc_parse_version(&cmd); ns = current->nsproxy->ipc_ns; switch (cmd) { case IPC_INFO: case MSG_INFO: case MSG_STAT: /* msqid is an index rather than a msg queue id */ case IPC_STAT: return msgctl_nolock(ns, msqid, cmd, version, buf); case IPC_SET: case IPC_RMID: return msgctl_down(ns, msqid, cmd, buf, version); default: return -EINVAL; } } static int testmsg(struct msg_msg *msg, long type, int mode) { switch (mode) { case SEARCH_ANY: case SEARCH_NUMBER: return 1; case SEARCH_LESSEQUAL: if (msg->m_type <= type) return 1; break; case SEARCH_EQUAL: if (msg->m_type == type) return 1; break; case SEARCH_NOTEQUAL: if (msg->m_type != type) return 1; break; } return 0; } static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg) { struct msg_receiver *msr, *t; list_for_each_entry_safe(msr, t, &msq->q_receivers, r_list) { if (testmsg(msg, msr->r_msgtype, msr->r_mode) && !security_msg_queue_msgrcv(msq, msg, msr->r_tsk, msr->r_msgtype, msr->r_mode)) { list_del(&msr->r_list); if (msr->r_maxsize < msg->m_ts) { /* initialize 
pipelined send ordering */ msr->r_msg = NULL; wake_up_process(msr->r_tsk); /* barrier (B) see barrier comment below */ smp_wmb(); msr->r_msg = ERR_PTR(-E2BIG); } else { msr->r_msg = NULL; msq->q_lrpid = task_pid_vnr(msr->r_tsk); msq->q_rtime = get_seconds(); wake_up_process(msr->r_tsk); /* * Ensure that the wakeup is visible before * setting r_msg, as the receiving can otherwise * exit - once r_msg is set, the receiver can * continue. See lockless receive part 1 and 2 * in do_msgrcv(). Barrier (B). */ smp_wmb(); msr->r_msg = msg; return 1; } } } return 0; } long do_msgsnd(int msqid, long mtype, void __user *mtext, size_t msgsz, int msgflg) { struct msg_queue *msq; struct msg_msg *msg; int err; struct ipc_namespace *ns; ns = current->nsproxy->ipc_ns; if (msgsz > ns->msg_ctlmax || (long) msgsz < 0 || msqid < 0) return -EINVAL; if (mtype < 1) return -EINVAL; msg = load_msg(mtext, msgsz); if (IS_ERR(msg)) return PTR_ERR(msg); msg->m_type = mtype; msg->m_ts = msgsz; rcu_read_lock(); msq = msq_obtain_object_check(ns, msqid); if (IS_ERR(msq)) { err = PTR_ERR(msq); goto out_unlock1; } ipc_lock_object(&msq->q_perm); for (;;) { struct msg_sender s; err = -EACCES; if (ipcperms(ns, &msq->q_perm, S_IWUGO)) goto out_unlock0; /* raced with RMID? */ if (!ipc_valid_object(&msq->q_perm)) { err = -EIDRM; goto out_unlock0; } err = security_msg_queue_msgsnd(msq, msg, msgflg); if (err) goto out_unlock0; if (msgsz + msq->q_cbytes <= msq->q_qbytes && 1 + msq->q_qnum <= msq->q_qbytes) { break; } /* queue full, wait: */ if (msgflg & IPC_NOWAIT) { err = -EAGAIN; goto out_unlock0; } /* enqueue the sender and prepare to block */ ss_add(msq, &s); if (!ipc_rcu_getref(msq)) { err = -EIDRM; goto out_unlock0; } ipc_unlock_object(&msq->q_perm); rcu_read_unlock(); schedule(); rcu_read_lock(); ipc_lock_object(&msq->q_perm); ipc_rcu_putref(msq, ipc_rcu_free); /* raced with RMID? */ if (!ipc_valid_object(&msq->q_perm)) { err = -EIDRM; goto out_unlock0; } ss_del(&s); if (signal_pending(current)) { err = -ERESTARTNOHAND; goto out_unlock0; } } msq->q_lspid = task_tgid_vnr(current); msq->q_stime = get_seconds(); if (!pipelined_send(msq, msg)) { /* no one is waiting for this message, enqueue it */ list_add_tail(&msg->m_list, &msq->q_messages); msq->q_cbytes += msgsz; msq->q_qnum++; atomic_add(msgsz, &ns->msg_bytes); atomic_inc(&ns->msg_hdrs); } err = 0; msg = NULL; out_unlock0: ipc_unlock_object(&msq->q_perm); out_unlock1: rcu_read_unlock(); if (msg != NULL) free_msg(msg); return err; } SYSCALL_DEFINE4(msgsnd, int, msqid, struct msgbuf __user *, msgp, size_t, msgsz, int, msgflg) { long mtype; if (get_user(mtype, &msgp->mtype)) return -EFAULT; return do_msgsnd(msqid, mtype, msgp->mtext, msgsz, msgflg); } static inline int convert_mode(long *msgtyp, int msgflg) { if (msgflg & MSG_COPY) return SEARCH_NUMBER; /* * find message of correct type. * msgtyp = 0 => get first. * msgtyp > 0 => get first message of matching type. * msgtyp < 0 => get message with least type must be < abs(msgtype). */ if (*msgtyp == 0) return SEARCH_ANY; if (*msgtyp < 0) { *msgtyp = -*msgtyp; return SEARCH_LESSEQUAL; } if (msgflg & MSG_EXCEPT) return SEARCH_NOTEQUAL; return SEARCH_EQUAL; } static long do_msg_fill(void __user *dest, struct msg_msg *msg, size_t bufsz) { struct msgbuf __user *msgp = dest; size_t msgsz; if (put_user(msg->m_type, &msgp->mtype)) return -EFAULT; msgsz = (bufsz > msg->m_ts) ? 
msg->m_ts : bufsz; if (store_msg(msgp->mtext, msg, msgsz)) return -EFAULT; return msgsz; } #ifdef CONFIG_CHECKPOINT_RESTORE /* * This function creates new kernel message structure, large enough to store * bufsz message bytes. */ static inline struct msg_msg *prepare_copy(void __user *buf, size_t bufsz) { struct msg_msg *copy; /* * Create dummy message to copy real message to. */ copy = load_msg(buf, bufsz); if (!IS_ERR(copy)) copy->m_ts = bufsz; return copy; } static inline void free_copy(struct msg_msg *copy) { if (copy) free_msg(copy); } #else static inline struct msg_msg *prepare_copy(void __user *buf, size_t bufsz) { return ERR_PTR(-ENOSYS); } static inline void free_copy(struct msg_msg *copy) { } #endif static struct msg_msg *find_msg(struct msg_queue *msq, long *msgtyp, int mode) { struct msg_msg *msg, *found = NULL; long count = 0; list_for_each_entry(msg, &msq->q_messages, m_list) { if (testmsg(msg, *msgtyp, mode) && !security_msg_queue_msgrcv(msq, msg, current, *msgtyp, mode)) { if (mode == SEARCH_LESSEQUAL && msg->m_type != 1) { *msgtyp = msg->m_type - 1; found = msg; } else if (mode == SEARCH_NUMBER) { if (*msgtyp == count) return msg; } else return msg; count++; } } return found ?: ERR_PTR(-EAGAIN); } long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, int msgflg, long (*msg_handler)(void __user *, struct msg_msg *, size_t)) { int mode; struct msg_queue *msq; struct ipc_namespace *ns; struct msg_msg *msg, *copy = NULL; ns = current->nsproxy->ipc_ns; if (msqid < 0 || (long) bufsz < 0) return -EINVAL; if (msgflg & MSG_COPY) { if ((msgflg & MSG_EXCEPT) || !(msgflg & IPC_NOWAIT)) return -EINVAL; copy = prepare_copy(buf, min_t(size_t, bufsz, ns->msg_ctlmax)); if (IS_ERR(copy)) return PTR_ERR(copy); } mode = convert_mode(&msgtyp, msgflg); rcu_read_lock(); msq = msq_obtain_object_check(ns, msqid); if (IS_ERR(msq)) { rcu_read_unlock(); free_copy(copy); return PTR_ERR(msq); } for (;;) { struct msg_receiver msr_d; msg = ERR_PTR(-EACCES); if (ipcperms(ns, &msq->q_perm, S_IRUGO)) goto out_unlock1; ipc_lock_object(&msq->q_perm); /* raced with RMID? */ if (!ipc_valid_object(&msq->q_perm)) { msg = ERR_PTR(-EIDRM); goto out_unlock0; } msg = find_msg(msq, &msgtyp, mode); if (!IS_ERR(msg)) { /* * Found a suitable message. * Unlink it from the queue. */ if ((bufsz < msg->m_ts) && !(msgflg & MSG_NOERROR)) { msg = ERR_PTR(-E2BIG); goto out_unlock0; } /* * If we are copying, then do not unlink message and do * not update queue parameters. */ if (msgflg & MSG_COPY) { msg = copy_msg(msg, copy); goto out_unlock0; } list_del(&msg->m_list); msq->q_qnum--; msq->q_rtime = get_seconds(); msq->q_lrpid = task_tgid_vnr(current); msq->q_cbytes -= msg->m_ts; atomic_sub(msg->m_ts, &ns->msg_bytes); atomic_dec(&ns->msg_hdrs); ss_wakeup(&msq->q_senders, 0); goto out_unlock0; } /* No message waiting. Wait for a message */ if (msgflg & IPC_NOWAIT) { msg = ERR_PTR(-ENOMSG); goto out_unlock0; } list_add_tail(&msr_d.r_list, &msq->q_receivers); msr_d.r_tsk = current; msr_d.r_msgtype = msgtyp; msr_d.r_mode = mode; if (msgflg & MSG_NOERROR) msr_d.r_maxsize = INT_MAX; else msr_d.r_maxsize = bufsz; msr_d.r_msg = ERR_PTR(-EAGAIN); __set_current_state(TASK_INTERRUPTIBLE); ipc_unlock_object(&msq->q_perm); rcu_read_unlock(); schedule(); /* Lockless receive, part 1: * Disable preemption. 
We don't hold a reference to the queue * and getting a reference would defeat the idea of a lockless * operation, thus the code relies on rcu to guarantee the * existence of msq: * Prior to destruction, expunge_all(-EIRDM) changes r_msg. * Thus if r_msg is -EAGAIN, then the queue not yet destroyed. * rcu_read_lock() prevents preemption between reading r_msg * and acquiring the q_perm.lock in ipc_lock_object(). */ rcu_read_lock(); /* Lockless receive, part 2: * Wait until pipelined_send or expunge_all are outside of * wake_up_process(). There is a race with exit(), see * ipc/mqueue.c for the details. The correct serialization * ensures that a receiver cannot continue without the wakeup * being visibible _before_ setting r_msg: * * CPU 0 CPU 1 * <loop receiver> * smp_rmb(); (A) <-- pair -. <waker thread> * <load ->r_msg> | msr->r_msg = NULL; * | wake_up_process(); * <continue> `------> smp_wmb(); (B) * msr->r_msg = msg; * * Where (A) orders the message value read and where (B) orders * the write to the r_msg -- done in both pipelined_send and * expunge_all. */ for (;;) { /* * Pairs with writer barrier in pipelined_send * or expunge_all. */ smp_rmb(); /* barrier (A) */ msg = (struct msg_msg *)msr_d.r_msg; if (msg) break; /* * The cpu_relax() call is a compiler barrier * which forces everything in this loop to be * re-loaded. */ cpu_relax(); } /* Lockless receive, part 3: * If there is a message or an error then accept it without * locking. */ if (msg != ERR_PTR(-EAGAIN)) goto out_unlock1; /* Lockless receive, part 3: * Acquire the queue spinlock. */ ipc_lock_object(&msq->q_perm); /* Lockless receive, part 4: * Repeat test after acquiring the spinlock. */ msg = (struct msg_msg *)msr_d.r_msg; if (msg != ERR_PTR(-EAGAIN)) goto out_unlock0; list_del(&msr_d.r_list); if (signal_pending(current)) { msg = ERR_PTR(-ERESTARTNOHAND); goto out_unlock0; } ipc_unlock_object(&msq->q_perm); } out_unlock0: ipc_unlock_object(&msq->q_perm); out_unlock1: rcu_read_unlock(); if (IS_ERR(msg)) { free_copy(copy); return PTR_ERR(msg); } bufsz = msg_handler(buf, msg, bufsz); free_msg(msg); return bufsz; } SYSCALL_DEFINE5(msgrcv, int, msqid, struct msgbuf __user *, msgp, size_t, msgsz, long, msgtyp, int, msgflg) { return do_msgrcv(msqid, msgp, msgsz, msgtyp, msgflg, do_msg_fill); } void msg_init_ns(struct ipc_namespace *ns) { ns->msg_ctlmax = MSGMAX; ns->msg_ctlmnb = MSGMNB; ns->msg_ctlmni = MSGMNI; atomic_set(&ns->msg_bytes, 0); atomic_set(&ns->msg_hdrs, 0); ipc_init_ids(&ns->ids[IPC_MSG_IDS]); } #ifdef CONFIG_IPC_NS void msg_exit_ns(struct ipc_namespace *ns) { free_ipcs(ns, &msg_ids(ns), freeque); idr_destroy(&ns->ids[IPC_MSG_IDS].ipcs_idr); } #endif #ifdef CONFIG_PROC_FS static int sysvipc_msg_proc_show(struct seq_file *s, void *it) { struct user_namespace *user_ns = seq_user_ns(s); struct msg_queue *msq = it; seq_printf(s, "%10d %10d %4o %10lu %10lu %5u %5u %5u %5u %5u %5u %10lu %10lu %10lu\n", msq->q_perm.key, msq->q_perm.id, msq->q_perm.mode, msq->q_cbytes, msq->q_qnum, msq->q_lspid, msq->q_lrpid, from_kuid_munged(user_ns, msq->q_perm.uid), from_kgid_munged(user_ns, msq->q_perm.gid), from_kuid_munged(user_ns, msq->q_perm.cuid), from_kgid_munged(user_ns, msq->q_perm.cgid), msq->q_stime, msq->q_rtime, msq->q_ctime); return 0; } #endif void __init msg_init(void) { msg_init_ns(&init_ipc_ns); ipc_init_proc_interface("sysvipc/msg", " key msqid perms cbytes qnum lspid lrpid uid gid cuid cgid stime rtime ctime\n", IPC_MSG_IDS, sysvipc_msg_proc_show); }
./CrossVul/dataset_final_sorted/CWE-362/c/bad_1757_0
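The ipc/msg.c entry above implements the kernel side of the System V message-queue calls. The fragment below is a minimal, illustrative user-space sketch (not taken from the dataset) of the API whose kernel side — do_msgsnd(), do_msgrcv(), convert_mode() — appears in that entry; the queue key, permission bits, and message layout are arbitrary assumptions for the example.

/*
 * Illustrative only: exercises the msgget/msgsnd/msgrcv path whose
 * kernel implementation appears in the entry above.
 */
#include <stdio.h>
#include <string.h>
#include <sys/ipc.h>
#include <sys/msg.h>

struct example_msg {
	long mtype;		/* must be > 0, same role as msgbuf.mtype */
	char mtext[64];		/* payload copied by load_msg()/store_msg() */
};

int main(void)
{
	struct example_msg m = { .mtype = 1 };
	int id = msgget(IPC_PRIVATE, IPC_CREAT | 0600);

	if (id < 0)
		return 1;

	strcpy(m.mtext, "hello");
	/* enters do_msgsnd(); may block on a full queue unless IPC_NOWAIT is set */
	if (msgsnd(id, &m, sizeof(m.mtext), 0) < 0)
		return 1;

	/* enters do_msgrcv(); msgtyp == 1 without MSG_EXCEPT means SEARCH_EQUAL */
	if (msgrcv(id, &m, sizeof(m.mtext), 1, 0) < 0)
		return 1;

	printf("%s\n", m.mtext);
	/* queue removal; the kernel-side teardown (freeque) wakes any blocked readers */
	msgctl(id, IPC_RMID, NULL);
	return 0;
}

Passing IPC_NOWAIT to msgrcv() would turn the blocking wait in do_msgrcv() into an immediate -ENOMSG, and MSG_NOERROR would allow a message larger than the supplied buffer to be truncated instead of failing with -E2BIG.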
crossvul-cpp_data_bad_3059_0
/* su for Linux. Run a shell with substitute user and group IDs. Copyright (C) 1992-2006 Free Software Foundation, Inc. Copyright (C) 2012 SUSE Linux Products GmbH, Nuernberg This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ /* Run a shell with the real and effective UID and GID and groups of USER, default `root'. The shell run is taken from USER's password entry, /bin/sh if none is specified there. If the account has a password, su prompts for a password unless run by a user with real UID 0. Does not change the current directory. Sets `HOME' and `SHELL' from the password entry for USER, and if USER is not root, sets `USER' and `LOGNAME' to USER. The subshell is not a login shell. If one or more ARGs are given, they are passed as additional arguments to the subshell. Does not handle /bin/sh or other shells specially (setting argv[0] to "-su", passing -c only to certain shells, etc.). I don't see the point in doing that, and it's ugly. Based on an implementation by David MacKenzie <djm@gnu.ai.mit.edu>. */ enum { EXIT_CANNOT_INVOKE = 126, EXIT_ENOENT = 127 }; #include <config.h> #include <stdio.h> #include <getopt.h> #include <sys/types.h> #include <pwd.h> #include <grp.h> #include <security/pam_appl.h> #ifdef HAVE_SECURITY_PAM_MISC_H # include <security/pam_misc.h> #elif defined(HAVE_SECURITY_OPENPAM_H) # include <security/openpam.h> #endif #include <signal.h> #include <sys/wait.h> #include <syslog.h> #include <utmpx.h> #include "err.h" #include <stdbool.h> #include "c.h" #include "xalloc.h" #include "nls.h" #include "pathnames.h" #include "env.h" #include "closestream.h" #include "strutils.h" #include "ttyutils.h" /* name of the pam configuration files. separate configs for su and su - */ #define PAM_SRVNAME_SU "su" #define PAM_SRVNAME_SU_L "su-l" #define PAM_SRVNAME_RUNUSER "runuser" #define PAM_SRVNAME_RUNUSER_L "runuser-l" #define _PATH_LOGINDEFS_SU "/etc/default/su" #define _PATH_LOGINDEFS_RUNUSER "/etc/default/runuser" #define is_pam_failure(_rc) ((_rc) != PAM_SUCCESS) #include "logindefs.h" #include "su-common.h" /* The shell to run if none is given in the user's passwd entry. */ #define DEFAULT_SHELL "/bin/sh" /* The user to become if none is specified. */ #define DEFAULT_USER "root" #ifndef HAVE_ENVIRON_DECL extern char **environ; #endif static void run_shell (char const *, char const *, char **, size_t) __attribute__ ((__noreturn__)); /* If true, pass the `-f' option to the subshell. */ static bool fast_startup; /* If true, simulate a login instead of just starting a shell. */ static bool simulate_login; /* If true, change some environment vars to indicate the user su'd to. */ static bool change_environment; /* If true, then don't call setsid() with a command. */ static int same_session = 0; /* SU_MODE_{RUNUSER,SU} */ static int su_mode; /* Don't print PAM info messages (Last login, etc.). 
*/ static int suppress_pam_info; static bool _pam_session_opened; static bool _pam_cred_established; static sig_atomic_t volatile caught_signal = false; static pam_handle_t *pamh = NULL; static int restricted = 1; /* zero for root user */ static struct passwd * current_getpwuid(void) { uid_t ruid; /* GNU Hurd implementation has an extension where a process can exist in a * non-conforming environment, and thus be outside the realms of POSIX * process identifiers; on this platform, getuid() fails with a status of * (uid_t)(-1) and sets errno if a program is run from a non-conforming * environment. * * http://austingroupbugs.net/view.php?id=511 */ errno = 0; ruid = getuid (); return errno == 0 ? getpwuid (ruid) : NULL; } /* Log the fact that someone has run su to the user given by PW; if SUCCESSFUL is true, they gave the correct password, etc. */ static void log_syslog(struct passwd const *pw, bool successful) { const char *new_user, *old_user, *tty; new_user = pw->pw_name; /* The utmp entry (via getlogin) is probably the best way to identify the user, especially if someone su's from a su-shell. */ old_user = getlogin (); if (!old_user) { /* getlogin can fail -- usually due to lack of utmp entry. Resort to getpwuid. */ struct passwd *pwd = current_getpwuid(); old_user = pwd ? pwd->pw_name : ""; } if (get_terminal_name(NULL, &tty, NULL) != 0 || !tty) tty = "none"; openlog (program_invocation_short_name, 0 , LOG_AUTH); syslog (LOG_NOTICE, "%s(to %s) %s on %s", successful ? "" : su_mode == RUNUSER_MODE ? "FAILED RUNUSER " : "FAILED SU ", new_user, old_user, tty); closelog (); } /* * Log failed login attempts in _PATH_BTMP if that exists. */ static void log_btmp(struct passwd const *pw) { struct utmpx ut; struct timeval tv; const char *tty_name, *tty_num; memset(&ut, 0, sizeof(ut)); strncpy(ut.ut_user, pw && pw->pw_name ? pw->pw_name : "(unknown)", sizeof(ut.ut_user)); get_terminal_name(NULL, &tty_name, &tty_num); if (tty_num) xstrncpy(ut.ut_id, tty_num, sizeof(ut.ut_id)); if (tty_name) xstrncpy(ut.ut_line, tty_name, sizeof(ut.ut_line)); gettimeofday(&tv, NULL); ut.ut_tv.tv_sec = tv.tv_sec; ut.ut_tv.tv_usec = tv.tv_usec; ut.ut_type = LOGIN_PROCESS; /* XXX doesn't matter */ ut.ut_pid = getpid(); updwtmpx(_PATH_BTMP, &ut); } static int su_pam_conv(int num_msg, const struct pam_message **msg, struct pam_response **resp, void *appdata_ptr) { if (suppress_pam_info && num_msg == 1 && msg && msg[0]->msg_style == PAM_TEXT_INFO) return PAM_SUCCESS; #ifdef HAVE_SECURITY_PAM_MISC_H return misc_conv(num_msg, msg, resp, appdata_ptr); #elif defined(HAVE_SECURITY_OPENPAM_H) return openpam_ttyconv(num_msg, msg, resp, appdata_ptr); #endif } static struct pam_conv conv = { su_pam_conv, NULL }; static void cleanup_pam (int retcode) { int saved_errno = errno; if (_pam_session_opened) pam_close_session (pamh, 0); if (_pam_cred_established) pam_setcred (pamh, PAM_DELETE_CRED | PAM_SILENT); pam_end(pamh, retcode); errno = saved_errno; } /* Signal handler for parent process. */ static void su_catch_sig (int sig) { caught_signal = sig; } /* Export env variables declared by PAM modules. */ static void export_pamenv (void) { char **env; /* This is a copy but don't care to free as we exec later anyways. 
*/ env = pam_getenvlist (pamh); while (env && *env) { if (putenv (*env) != 0) err (EXIT_FAILURE, NULL); env++; } } static void create_watching_parent (void) { pid_t child; sigset_t ourset; struct sigaction oldact[3]; int status = 0; int retval; retval = pam_open_session (pamh, 0); if (is_pam_failure(retval)) { cleanup_pam (retval); errx (EXIT_FAILURE, _("cannot open session: %s"), pam_strerror (pamh, retval)); } else _pam_session_opened = 1; memset(oldact, 0, sizeof(oldact)); child = fork (); if (child == (pid_t) -1) { cleanup_pam (PAM_ABORT); err (EXIT_FAILURE, _("cannot create child process")); } /* the child proceeds to run the shell */ if (child == 0) return; /* In the parent watch the child. */ /* su without pam support does not have a helper that keeps sitting on any directory so let's go to /. */ if (chdir ("/") != 0) warn (_("cannot change directory to %s"), "/"); sigfillset (&ourset); if (sigprocmask (SIG_BLOCK, &ourset, NULL)) { warn (_("cannot block signals")); caught_signal = true; } if (!caught_signal) { struct sigaction action; action.sa_handler = su_catch_sig; sigemptyset (&action.sa_mask); action.sa_flags = 0; sigemptyset (&ourset); if (!same_session) { if (sigaddset(&ourset, SIGINT) || sigaddset(&ourset, SIGQUIT)) { warn (_("cannot set signal handler")); caught_signal = true; } } if (!caught_signal && (sigaddset(&ourset, SIGTERM) || sigaddset(&ourset, SIGALRM) || sigaction(SIGTERM, &action, &oldact[0]) || sigprocmask(SIG_UNBLOCK, &ourset, NULL))) { warn (_("cannot set signal handler")); caught_signal = true; } if (!caught_signal && !same_session && (sigaction(SIGINT, &action, &oldact[1]) || sigaction(SIGQUIT, &action, &oldact[2]))) { warn (_("cannot set signal handler")); caught_signal = true; } } if (!caught_signal) { pid_t pid; for (;;) { pid = waitpid (child, &status, WUNTRACED); if (pid != (pid_t)-1 && WIFSTOPPED (status)) { kill (getpid (), SIGSTOP); /* once we get here, we must have resumed */ kill (pid, SIGCONT); } else break; } if (pid != (pid_t)-1) { if (WIFSIGNALED (status)) { fprintf (stderr, "%s%s\n", strsignal (WTERMSIG (status)), WCOREDUMP (status) ? _(" (core dumped)") : ""); status = WTERMSIG (status) + 128; } else status = WEXITSTATUS (status); } else if (caught_signal) status = caught_signal + 128; else status = 1; } else status = 1; if (caught_signal) { fprintf (stderr, _("\nSession terminated, killing shell...")); kill (child, SIGTERM); } cleanup_pam (PAM_SUCCESS); if (caught_signal) { sleep (2); kill (child, SIGKILL); fprintf (stderr, _(" ...killed.\n")); /* Let's terminate itself with the received signal. * * It seems that shells use WIFSIGNALED() rather than our exit status * value to detect situations when is necessary to cleanup (reset) * terminal settings (kzak -- Jun 2013). */ switch (caught_signal) { case SIGTERM: sigaction(SIGTERM, &oldact[0], NULL); break; case SIGINT: sigaction(SIGINT, &oldact[1], NULL); break; case SIGQUIT: sigaction(SIGQUIT, &oldact[2], NULL); break; default: /* just in case that signal stuff initialization failed and * caught_signal = true */ caught_signal = SIGKILL; break; } kill(getpid(), caught_signal); } exit (status); } static void authenticate (const struct passwd *pw) { const struct passwd *lpw = NULL; const char *cp, *srvname = NULL; int retval; switch (su_mode) { case SU_MODE: srvname = simulate_login ? PAM_SRVNAME_SU_L : PAM_SRVNAME_SU; break; case RUNUSER_MODE: srvname = simulate_login ? 
PAM_SRVNAME_RUNUSER_L : PAM_SRVNAME_RUNUSER; break; default: abort(); break; } retval = pam_start (srvname, pw->pw_name, &conv, &pamh); if (is_pam_failure(retval)) goto done; if (isatty (0) && (cp = ttyname (0)) != NULL) { const char *tty; if (strncmp (cp, "/dev/", 5) == 0) tty = cp + 5; else tty = cp; retval = pam_set_item (pamh, PAM_TTY, tty); if (is_pam_failure(retval)) goto done; } lpw = current_getpwuid (); if (lpw && lpw->pw_name) { retval = pam_set_item (pamh, PAM_RUSER, (const void *) lpw->pw_name); if (is_pam_failure(retval)) goto done; } if (su_mode == RUNUSER_MODE) { /* * This is the only difference between runuser(1) and su(1). The command * runuser(1) does not required authentication, because user is root. */ if (restricted) errx(EXIT_FAILURE, _("may not be used by non-root users")); return; } retval = pam_authenticate (pamh, 0); if (is_pam_failure(retval)) goto done; retval = pam_acct_mgmt (pamh, 0); if (retval == PAM_NEW_AUTHTOK_REQD) { /* Password has expired. Offer option to change it. */ retval = pam_chauthtok (pamh, PAM_CHANGE_EXPIRED_AUTHTOK); } done: log_syslog(pw, !is_pam_failure(retval)); if (is_pam_failure(retval)) { const char *msg; log_btmp(pw); msg = pam_strerror(pamh, retval); pam_end(pamh, retval); sleep (getlogindefs_num ("FAIL_DELAY", 1)); errx (EXIT_FAILURE, "%s", msg?msg:_("incorrect password")); } } static void set_path(const struct passwd* pw) { int r; if (pw->pw_uid) r = logindefs_setenv("PATH", "ENV_PATH", _PATH_DEFPATH); else if ((r = logindefs_setenv("PATH", "ENV_ROOTPATH", NULL)) != 0) r = logindefs_setenv("PATH", "ENV_SUPATH", _PATH_DEFPATH_ROOT); if (r != 0) err (EXIT_FAILURE, _("failed to set the %s environment variable"), "PATH"); } /* Update `environ' for the new shell based on PW, with SHELL being the value for the SHELL environment variable. */ static void modify_environment (const struct passwd *pw, const char *shell) { if (simulate_login) { /* Leave TERM unchanged. Set HOME, SHELL, USER, LOGNAME, PATH. Unset all other environment variables. */ char *term = getenv ("TERM"); if (term) term = xstrdup (term); environ = xmalloc ((6 + !!term) * sizeof (char *)); environ[0] = NULL; if (term) { xsetenv ("TERM", term, 1); free(term); } xsetenv ("HOME", pw->pw_dir, 1); if (shell) xsetenv ("SHELL", shell, 1); xsetenv ("USER", pw->pw_name, 1); xsetenv ("LOGNAME", pw->pw_name, 1); set_path(pw); } else { /* Set HOME, SHELL, and (if not becoming a superuser) USER and LOGNAME. */ if (change_environment) { xsetenv ("HOME", pw->pw_dir, 1); if (shell) xsetenv ("SHELL", shell, 1); if (getlogindefs_bool ("ALWAYS_SET_PATH", 0)) set_path(pw); if (pw->pw_uid) { xsetenv ("USER", pw->pw_name, 1); xsetenv ("LOGNAME", pw->pw_name, 1); } } } export_pamenv (); } /* Become the user and group(s) specified by PW. */ static void init_groups (const struct passwd *pw, gid_t *groups, size_t num_groups) { int retval; errno = 0; if (num_groups) retval = setgroups (num_groups, groups); else retval = initgroups (pw->pw_name, pw->pw_gid); if (retval == -1) { cleanup_pam (PAM_ABORT); err (EXIT_FAILURE, _("cannot set groups")); } endgrent (); retval = pam_setcred (pamh, PAM_ESTABLISH_CRED); if (is_pam_failure(retval)) errx (EXIT_FAILURE, "%s", pam_strerror (pamh, retval)); else _pam_cred_established = 1; } static void change_identity (const struct passwd *pw) { if (setgid (pw->pw_gid)) err (EXIT_FAILURE, _("cannot set group id")); if (setuid (pw->pw_uid)) err (EXIT_FAILURE, _("cannot set user id")); } /* Run SHELL, or DEFAULT_SHELL if SHELL is empty. 
If COMMAND is nonzero, pass it to the shell with the -c option. Pass ADDITIONAL_ARGS to the shell as more arguments; there are N_ADDITIONAL_ARGS extra arguments. */ static void run_shell (char const *shell, char const *command, char **additional_args, size_t n_additional_args) { size_t n_args = 1 + fast_startup + 2 * !!command + n_additional_args + 1; char const **args = xcalloc (n_args, sizeof *args); size_t argno = 1; if (simulate_login) { char *arg0; char *shell_basename; shell_basename = basename (shell); arg0 = xmalloc (strlen (shell_basename) + 2); arg0[0] = '-'; strcpy (arg0 + 1, shell_basename); args[0] = arg0; } else args[0] = basename (shell); if (fast_startup) args[argno++] = "-f"; if (command) { args[argno++] = "-c"; args[argno++] = command; } memcpy (args + argno, additional_args, n_additional_args * sizeof *args); args[argno + n_additional_args] = NULL; execv (shell, (char **) args); { int exit_status = (errno == ENOENT ? EXIT_ENOENT : EXIT_CANNOT_INVOKE); warn (_("failed to execute %s"), shell); exit (exit_status); } } /* Return true if SHELL is a restricted shell (one not returned by getusershell), else false, meaning it is a standard shell. */ static bool restricted_shell (const char *shell) { char *line; setusershell (); while ((line = getusershell ()) != NULL) { if (*line != '#' && !strcmp (line, shell)) { endusershell (); return false; } } endusershell (); return true; } static void __attribute__((__noreturn__)) usage (int status) { if (su_mode == RUNUSER_MODE) { fputs(USAGE_HEADER, stdout); printf (_(" %s [options] -u <user> [[--] <command>]\n"), program_invocation_short_name); printf (_(" %s [options] [-] [<user> [<argument>...]]\n"), program_invocation_short_name); fputs (_("\n" "Run <command> with the effective user ID and group ID of <user>. If -u is\n" "not given, fall back to su(1)-compatible semantics and execute standard shell.\n" "The options -c, -f, -l, and -s are mutually exclusive with -u.\n"), stdout); fputs(USAGE_OPTIONS, stdout); fputs (_(" -u, --user <user> username\n"), stdout); } else { fputs(USAGE_HEADER, stdout); printf (_(" %s [options] [-] [<user> [<argument>...]]\n"), program_invocation_short_name); fputs (_("\n" "Change the effective user ID and group ID to that of <user>.\n" "A mere - implies -l. If <user> is not given, root is assumed.\n"), stdout); fputs(USAGE_OPTIONS, stdout); } fputs (_(" -m, -p, --preserve-environment do not reset environment variables\n"), stdout); fputs (_(" -g, --group <group> specify the primary group\n"), stdout); fputs (_(" -G, --supp-group <group> specify a supplemental group\n\n"), stdout); fputs (_(" -, -l, --login make the shell a login shell\n"), stdout); fputs (_(" -c, --command <command> pass a single command to the shell with -c\n"), stdout); fputs (_(" --session-command <command> pass a single command to the shell with -c\n" " and do not create a new session\n"), stdout); fputs (_(" -f, --fast pass -f to the shell (for csh or tcsh)\n"), stdout); fputs (_(" -s, --shell <shell> run <shell> if /etc/shells allows it\n"), stdout); fputs(USAGE_SEPARATOR, stdout); fputs(USAGE_HELP, stdout); fputs(USAGE_VERSION, stdout); printf(USAGE_MAN_TAIL(su_mode == SU_MODE ? 
"su(1)" : "runuser(1)")); exit (status); } static void load_config(void) { switch (su_mode) { case SU_MODE: logindefs_load_file(_PATH_LOGINDEFS_SU); break; case RUNUSER_MODE: logindefs_load_file(_PATH_LOGINDEFS_RUNUSER); break; } logindefs_load_file(_PATH_LOGINDEFS); } /* * Returns 1 if the current user is not root */ static int evaluate_uid(void) { uid_t ruid = getuid(); uid_t euid = geteuid(); /* if we're really root and aren't running setuid */ return (uid_t) 0 == ruid && ruid == euid ? 0 : 1; } static gid_t add_supp_group(const char *name, gid_t **groups, size_t *ngroups) { struct group *gr; if (*ngroups >= NGROUPS_MAX) errx(EXIT_FAILURE, P_("specifying more than %d supplemental group is not possible", "specifying more than %d supplemental groups is not possible", NGROUPS_MAX - 1), NGROUPS_MAX - 1); gr = getgrnam(name); if (!gr) errx(EXIT_FAILURE, _("group %s does not exist"), name); *groups = xrealloc(*groups, sizeof(gid_t) * (*ngroups + 1)); (*groups)[*ngroups] = gr->gr_gid; (*ngroups)++; return gr->gr_gid; } int su_main (int argc, char **argv, int mode) { int optc; const char *new_user = DEFAULT_USER, *runuser_user = NULL; char *command = NULL; int request_same_session = 0; char *shell = NULL; struct passwd *pw; struct passwd pw_copy; gid_t *groups = NULL; size_t ngroups = 0; bool use_supp = false; bool use_gid = false; gid_t gid = 0; static const struct option longopts[] = { {"command", required_argument, NULL, 'c'}, {"session-command", required_argument, NULL, 'C'}, {"fast", no_argument, NULL, 'f'}, {"login", no_argument, NULL, 'l'}, {"preserve-environment", no_argument, NULL, 'p'}, {"shell", required_argument, NULL, 's'}, {"group", required_argument, NULL, 'g'}, {"supp-group", required_argument, NULL, 'G'}, {"user", required_argument, NULL, 'u'}, /* runuser only */ {"help", no_argument, 0, 'h'}, {"version", no_argument, 0, 'V'}, {NULL, 0, NULL, 0} }; setlocale (LC_ALL, ""); bindtextdomain (PACKAGE, LOCALEDIR); textdomain (PACKAGE); atexit(close_stdout); su_mode = mode; fast_startup = false; simulate_login = false; change_environment = true; while ((optc = getopt_long (argc, argv, "c:fg:G:lmps:u:hV", longopts, NULL)) != -1) { switch (optc) { case 'c': command = optarg; break; case 'C': command = optarg; request_same_session = 1; break; case 'f': fast_startup = true; break; case 'g': use_gid = true; gid = add_supp_group(optarg, &groups, &ngroups); break; case 'G': use_supp = true; add_supp_group(optarg, &groups, &ngroups); break; case 'l': simulate_login = true; break; case 'm': case 'p': change_environment = false; break; case 's': shell = optarg; break; case 'u': if (su_mode != RUNUSER_MODE) usage (EXIT_FAILURE); runuser_user = optarg; break; case 'h': usage(0); case 'V': printf(UTIL_LINUX_VERSION); exit(EXIT_SUCCESS); default: errtryhelp(EXIT_FAILURE); } } restricted = evaluate_uid (); if (optind < argc && !strcmp (argv[optind], "-")) { simulate_login = true; ++optind; } if (simulate_login && !change_environment) { warnx(_("ignoring --preserve-environment, it's mutually exclusive with --login")); change_environment = true; } switch (su_mode) { case RUNUSER_MODE: if (runuser_user) { /* runuser -u <user> <command> */ new_user = runuser_user; if (shell || fast_startup || command || simulate_login) { errx(EXIT_FAILURE, _("options --{shell,fast,command,session-command,login} and " "--user are mutually exclusive")); } if (optind == argc) errx(EXIT_FAILURE, _("no command was specified")); break; } /* fallthrough if -u <user> is not specified, then follow * traditional su(1) behavior */ 
case SU_MODE: if (optind < argc) new_user = argv[optind++]; break; } if ((use_supp || use_gid) && restricted) errx(EXIT_FAILURE, _("only root can specify alternative groups")); logindefs_load_defaults = load_config; pw = getpwnam (new_user); if (! (pw && pw->pw_name && pw->pw_name[0] && pw->pw_dir && pw->pw_dir[0] && pw->pw_passwd)) errx (EXIT_FAILURE, _("user %s does not exist"), new_user); /* Make a copy of the password information and point pw at the local copy instead. Otherwise, some systems (e.g. Linux) would clobber the static data through the getlogin call from log_su. Also, make sure pw->pw_shell is a nonempty string. It may be NULL when NEW_USER is a username that is retrieved via NIS (YP), but that doesn't have a default shell listed. */ pw_copy = *pw; pw = &pw_copy; pw->pw_name = xstrdup (pw->pw_name); pw->pw_passwd = xstrdup (pw->pw_passwd); pw->pw_dir = xstrdup (pw->pw_dir); pw->pw_shell = xstrdup (pw->pw_shell && pw->pw_shell[0] ? pw->pw_shell : DEFAULT_SHELL); endpwent (); if (use_supp && !use_gid) pw->pw_gid = groups[0]; else if (use_gid) pw->pw_gid = gid; authenticate (pw); if (request_same_session || !command || !pw->pw_uid) same_session = 1; /* initialize shell variable only if "-u <user>" not specified */ if (runuser_user) { shell = NULL; } else { if (!shell && !change_environment) shell = getenv ("SHELL"); if (shell && getuid () != 0 && restricted_shell (pw->pw_shell)) { /* The user being su'd to has a nonstandard shell, and so is probably a uucp account or has restricted access. Don't compromise the account by allowing access with a standard shell. */ warnx (_("using restricted shell %s"), pw->pw_shell); shell = NULL; } shell = xstrdup (shell ? shell : pw->pw_shell); } init_groups (pw, groups, ngroups); if (!simulate_login || command) suppress_pam_info = 1; /* don't print PAM info messages */ create_watching_parent (); /* Now we're in the child. */ change_identity (pw); if (!same_session) setsid (); /* Set environment after pam_open_session, which may put KRB5CCNAME into the pam_env, etc. */ modify_environment (pw, shell); if (simulate_login && chdir (pw->pw_dir) != 0) warn (_("warning: cannot change directory to %s"), pw->pw_dir); if (shell) run_shell (shell, command, argv + optind, max (0, argc - optind)); else { execvp(argv[optind], &argv[optind]); err(EXIT_FAILURE, _("failed to execute %s"), argv[optind]); } } // vim: sw=2 cinoptions=>4,n-2,{2,^-2,\:2,=2,g0,h2,p5,t0,+2,(0,u0,w1,m1
./CrossVul/dataset_final_sorted/CWE-362/c/bad_3059_0
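The su-common.c entry above drives authentication through PAM. Below is a minimal sketch, assuming Linux-PAM with libpam_misc, of the same pam_start() / pam_authenticate() / pam_acct_mgmt() / pam_chauthtok() / pam_end() sequence that authenticate() performs; the helper name check_password() and the reduced error handling are illustrative assumptions, not the util-linux code itself.

/*
 * Minimal sketch of the PAM handshake used by authenticate() above.
 * The "su" service name mirrors PAM_SRVNAME_SU.
 */
#include <security/pam_appl.h>
#include <security/pam_misc.h>

static struct pam_conv conv = { misc_conv, NULL };

int check_password(const char *user)
{
	pam_handle_t *pamh = NULL;
	int rc = pam_start("su", user, &conv, &pamh);

	if (rc != PAM_SUCCESS)
		return -1;

	rc = pam_authenticate(pamh, 0);		/* prompts via misc_conv */
	if (rc == PAM_SUCCESS)
		rc = pam_acct_mgmt(pamh, 0);	/* account and expiry checks */
	if (rc == PAM_NEW_AUTHTOK_REQD)		/* expired password: offer a change */
		rc = pam_chauthtok(pamh, PAM_CHANGE_EXPIRED_AUTHTOK);

	pam_end(pamh, rc);
	return rc == PAM_SUCCESS ? 0 : -1;
}

Link with -lpam -lpam_misc; on an OpenPAM system the conversation callback would be openpam_ttyconv() instead, matching the HAVE_SECURITY_OPENPAM_H branch in the entry above.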
crossvul-cpp_data_good_829_4
/* * mm/mmap.c * * Written by obz. * * Address space accounting code <alan@lxorguk.ukuu.org.uk> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/slab.h> #include <linux/backing-dev.h> #include <linux/mm.h> #include <linux/vmacache.h> #include <linux/shm.h> #include <linux/mman.h> #include <linux/pagemap.h> #include <linux/swap.h> #include <linux/syscalls.h> #include <linux/capability.h> #include <linux/init.h> #include <linux/file.h> #include <linux/fs.h> #include <linux/personality.h> #include <linux/security.h> #include <linux/hugetlb.h> #include <linux/shmem_fs.h> #include <linux/profile.h> #include <linux/export.h> #include <linux/mount.h> #include <linux/mempolicy.h> #include <linux/rmap.h> #include <linux/mmu_notifier.h> #include <linux/mmdebug.h> #include <linux/perf_event.h> #include <linux/audit.h> #include <linux/khugepaged.h> #include <linux/uprobes.h> #include <linux/rbtree_augmented.h> #include <linux/notifier.h> #include <linux/memory.h> #include <linux/printk.h> #include <linux/userfaultfd_k.h> #include <linux/moduleparam.h> #include <linux/pkeys.h> #include <linux/oom.h> #include <linux/sched/mm.h> #include <linux/uaccess.h> #include <asm/cacheflush.h> #include <asm/tlb.h> #include <asm/mmu_context.h> #include "internal.h" #ifndef arch_mmap_check #define arch_mmap_check(addr, len, flags) (0) #endif #ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS const int mmap_rnd_bits_min = CONFIG_ARCH_MMAP_RND_BITS_MIN; const int mmap_rnd_bits_max = CONFIG_ARCH_MMAP_RND_BITS_MAX; int mmap_rnd_bits __read_mostly = CONFIG_ARCH_MMAP_RND_BITS; #endif #ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS const int mmap_rnd_compat_bits_min = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN; const int mmap_rnd_compat_bits_max = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX; int mmap_rnd_compat_bits __read_mostly = CONFIG_ARCH_MMAP_RND_COMPAT_BITS; #endif static bool ignore_rlimit_data; core_param(ignore_rlimit_data, ignore_rlimit_data, bool, 0644); static void unmap_region(struct mm_struct *mm, struct vm_area_struct *vma, struct vm_area_struct *prev, unsigned long start, unsigned long end); /* description of effects of mapping type and prot in current implementation. * this is due to the limited x86 page protection hardware. 
The expected * behavior is in parens: * * map_type prot * PROT_NONE PROT_READ PROT_WRITE PROT_EXEC * MAP_SHARED r: (no) no r: (yes) yes r: (no) yes r: (no) yes * w: (no) no w: (no) no w: (yes) yes w: (no) no * x: (no) no x: (no) yes x: (no) yes x: (yes) yes * * MAP_PRIVATE r: (no) no r: (yes) yes r: (no) yes r: (no) yes * w: (no) no w: (no) no w: (copy) copy w: (no) no * x: (no) no x: (no) yes x: (no) yes x: (yes) yes * * On arm64, PROT_EXEC has the following behaviour for both MAP_SHARED and * MAP_PRIVATE: * r: (no) no * w: (no) no * x: (yes) yes */ pgprot_t protection_map[16] __ro_after_init = { __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111, __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111 }; #ifndef CONFIG_ARCH_HAS_FILTER_PGPROT static inline pgprot_t arch_filter_pgprot(pgprot_t prot) { return prot; } #endif pgprot_t vm_get_page_prot(unsigned long vm_flags) { pgprot_t ret = __pgprot(pgprot_val(protection_map[vm_flags & (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) | pgprot_val(arch_vm_get_page_prot(vm_flags))); return arch_filter_pgprot(ret); } EXPORT_SYMBOL(vm_get_page_prot); static pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags) { return pgprot_modify(oldprot, vm_get_page_prot(vm_flags)); } /* Update vma->vm_page_prot to reflect vma->vm_flags. */ void vma_set_page_prot(struct vm_area_struct *vma) { unsigned long vm_flags = vma->vm_flags; pgprot_t vm_page_prot; vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags); if (vma_wants_writenotify(vma, vm_page_prot)) { vm_flags &= ~VM_SHARED; vm_page_prot = vm_pgprot_modify(vm_page_prot, vm_flags); } /* remove_protection_ptes reads vma->vm_page_prot without mmap_sem */ WRITE_ONCE(vma->vm_page_prot, vm_page_prot); } /* * Requires inode->i_mapping->i_mmap_rwsem */ static void __remove_shared_vm_struct(struct vm_area_struct *vma, struct file *file, struct address_space *mapping) { if (vma->vm_flags & VM_DENYWRITE) atomic_inc(&file_inode(file)->i_writecount); if (vma->vm_flags & VM_SHARED) mapping_unmap_writable(mapping); flush_dcache_mmap_lock(mapping); vma_interval_tree_remove(vma, &mapping->i_mmap); flush_dcache_mmap_unlock(mapping); } /* * Unlink a file-based vm structure from its interval tree, to hide * vma from rmap and vmtruncate before freeing its page tables. */ void unlink_file_vma(struct vm_area_struct *vma) { struct file *file = vma->vm_file; if (file) { struct address_space *mapping = file->f_mapping; i_mmap_lock_write(mapping); __remove_shared_vm_struct(vma, file, mapping); i_mmap_unlock_write(mapping); } } /* * Close a vm structure and free it, returning the next. 
*/ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma) { struct vm_area_struct *next = vma->vm_next; might_sleep(); if (vma->vm_ops && vma->vm_ops->close) vma->vm_ops->close(vma); if (vma->vm_file) fput(vma->vm_file); mpol_put(vma_policy(vma)); vm_area_free(vma); return next; } static int do_brk_flags(unsigned long addr, unsigned long request, unsigned long flags, struct list_head *uf); SYSCALL_DEFINE1(brk, unsigned long, brk) { unsigned long retval; unsigned long newbrk, oldbrk, origbrk; struct mm_struct *mm = current->mm; struct vm_area_struct *next; unsigned long min_brk; bool populate; bool downgraded = false; LIST_HEAD(uf); if (down_write_killable(&mm->mmap_sem)) return -EINTR; origbrk = mm->brk; #ifdef CONFIG_COMPAT_BRK /* * CONFIG_COMPAT_BRK can still be overridden by setting * randomize_va_space to 2, which will still cause mm->start_brk * to be arbitrarily shifted */ if (current->brk_randomized) min_brk = mm->start_brk; else min_brk = mm->end_data; #else min_brk = mm->start_brk; #endif if (brk < min_brk) goto out; /* * Check against rlimit here. If this check is done later after the test * of oldbrk with newbrk then it can escape the test and let the data * segment grow beyond its set limit the in case where the limit is * not page aligned -Ram Gupta */ if (check_data_rlimit(rlimit(RLIMIT_DATA), brk, mm->start_brk, mm->end_data, mm->start_data)) goto out; newbrk = PAGE_ALIGN(brk); oldbrk = PAGE_ALIGN(mm->brk); if (oldbrk == newbrk) { mm->brk = brk; goto success; } /* * Always allow shrinking brk. * __do_munmap() may downgrade mmap_sem to read. */ if (brk <= mm->brk) { int ret; /* * mm->brk must to be protected by write mmap_sem so update it * before downgrading mmap_sem. When __do_munmap() fails, * mm->brk will be restored from origbrk. */ mm->brk = brk; ret = __do_munmap(mm, newbrk, oldbrk-newbrk, &uf, true); if (ret < 0) { mm->brk = origbrk; goto out; } else if (ret == 1) { downgraded = true; } goto success; } /* Check against existing mmap mappings. */ next = find_vma(mm, oldbrk); if (next && newbrk + PAGE_SIZE > vm_start_gap(next)) goto out; /* Ok, looks good - let it rip. */ if (do_brk_flags(oldbrk, newbrk-oldbrk, 0, &uf) < 0) goto out; mm->brk = brk; success: populate = newbrk > oldbrk && (mm->def_flags & VM_LOCKED) != 0; if (downgraded) up_read(&mm->mmap_sem); else up_write(&mm->mmap_sem); userfaultfd_unmap_complete(mm, &uf); if (populate) mm_populate(oldbrk, newbrk - oldbrk); return brk; out: retval = origbrk; up_write(&mm->mmap_sem); return retval; } static long vma_compute_subtree_gap(struct vm_area_struct *vma) { unsigned long max, prev_end, subtree_gap; /* * Note: in the rare case of a VM_GROWSDOWN above a VM_GROWSUP, we * allow two stack_guard_gaps between them here, and when choosing * an unmapped area; whereas when expanding we only require one. * That's a little inconsistent, but keeps the code here simpler. 
*/ max = vm_start_gap(vma); if (vma->vm_prev) { prev_end = vm_end_gap(vma->vm_prev); if (max > prev_end) max -= prev_end; else max = 0; } if (vma->vm_rb.rb_left) { subtree_gap = rb_entry(vma->vm_rb.rb_left, struct vm_area_struct, vm_rb)->rb_subtree_gap; if (subtree_gap > max) max = subtree_gap; } if (vma->vm_rb.rb_right) { subtree_gap = rb_entry(vma->vm_rb.rb_right, struct vm_area_struct, vm_rb)->rb_subtree_gap; if (subtree_gap > max) max = subtree_gap; } return max; } #ifdef CONFIG_DEBUG_VM_RB static int browse_rb(struct mm_struct *mm) { struct rb_root *root = &mm->mm_rb; int i = 0, j, bug = 0; struct rb_node *nd, *pn = NULL; unsigned long prev = 0, pend = 0; for (nd = rb_first(root); nd; nd = rb_next(nd)) { struct vm_area_struct *vma; vma = rb_entry(nd, struct vm_area_struct, vm_rb); if (vma->vm_start < prev) { pr_emerg("vm_start %lx < prev %lx\n", vma->vm_start, prev); bug = 1; } if (vma->vm_start < pend) { pr_emerg("vm_start %lx < pend %lx\n", vma->vm_start, pend); bug = 1; } if (vma->vm_start > vma->vm_end) { pr_emerg("vm_start %lx > vm_end %lx\n", vma->vm_start, vma->vm_end); bug = 1; } spin_lock(&mm->page_table_lock); if (vma->rb_subtree_gap != vma_compute_subtree_gap(vma)) { pr_emerg("free gap %lx, correct %lx\n", vma->rb_subtree_gap, vma_compute_subtree_gap(vma)); bug = 1; } spin_unlock(&mm->page_table_lock); i++; pn = nd; prev = vma->vm_start; pend = vma->vm_end; } j = 0; for (nd = pn; nd; nd = rb_prev(nd)) j++; if (i != j) { pr_emerg("backwards %d, forwards %d\n", j, i); bug = 1; } return bug ? -1 : i; } static void validate_mm_rb(struct rb_root *root, struct vm_area_struct *ignore) { struct rb_node *nd; for (nd = rb_first(root); nd; nd = rb_next(nd)) { struct vm_area_struct *vma; vma = rb_entry(nd, struct vm_area_struct, vm_rb); VM_BUG_ON_VMA(vma != ignore && vma->rb_subtree_gap != vma_compute_subtree_gap(vma), vma); } } static void validate_mm(struct mm_struct *mm) { int bug = 0; int i = 0; unsigned long highest_address = 0; struct vm_area_struct *vma = mm->mmap; while (vma) { struct anon_vma *anon_vma = vma->anon_vma; struct anon_vma_chain *avc; if (anon_vma) { anon_vma_lock_read(anon_vma); list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) anon_vma_interval_tree_verify(avc); anon_vma_unlock_read(anon_vma); } highest_address = vm_end_gap(vma); vma = vma->vm_next; i++; } if (i != mm->map_count) { pr_emerg("map_count %d vm_next %d\n", mm->map_count, i); bug = 1; } if (highest_address != mm->highest_vm_end) { pr_emerg("mm->highest_vm_end %lx, found %lx\n", mm->highest_vm_end, highest_address); bug = 1; } i = browse_rb(mm); if (i != mm->map_count) { if (i != -1) pr_emerg("map_count %d rb %d\n", mm->map_count, i); bug = 1; } VM_BUG_ON_MM(bug, mm); } #else #define validate_mm_rb(root, ignore) do { } while (0) #define validate_mm(mm) do { } while (0) #endif RB_DECLARE_CALLBACKS(static, vma_gap_callbacks, struct vm_area_struct, vm_rb, unsigned long, rb_subtree_gap, vma_compute_subtree_gap) /* * Update augmented rbtree rb_subtree_gap values after vma->vm_start or * vma->vm_prev->vm_end values changed, without modifying the vma's position * in the rbtree. */ static void vma_gap_update(struct vm_area_struct *vma) { /* * As it turns out, RB_DECLARE_CALLBACKS() already created a callback * function that does exactly what we want. 
*/ vma_gap_callbacks_propagate(&vma->vm_rb, NULL); } static inline void vma_rb_insert(struct vm_area_struct *vma, struct rb_root *root) { /* All rb_subtree_gap values must be consistent prior to insertion */ validate_mm_rb(root, NULL); rb_insert_augmented(&vma->vm_rb, root, &vma_gap_callbacks); } static void __vma_rb_erase(struct vm_area_struct *vma, struct rb_root *root) { /* * Note rb_erase_augmented is a fairly large inline function, * so make sure we instantiate it only once with our desired * augmented rbtree callbacks. */ rb_erase_augmented(&vma->vm_rb, root, &vma_gap_callbacks); } static __always_inline void vma_rb_erase_ignore(struct vm_area_struct *vma, struct rb_root *root, struct vm_area_struct *ignore) { /* * All rb_subtree_gap values must be consistent prior to erase, * with the possible exception of the "next" vma being erased if * next->vm_start was reduced. */ validate_mm_rb(root, ignore); __vma_rb_erase(vma, root); } static __always_inline void vma_rb_erase(struct vm_area_struct *vma, struct rb_root *root) { /* * All rb_subtree_gap values must be consistent prior to erase, * with the possible exception of the vma being erased. */ validate_mm_rb(root, vma); __vma_rb_erase(vma, root); } /* * vma has some anon_vma assigned, and is already inserted on that * anon_vma's interval trees. * * Before updating the vma's vm_start / vm_end / vm_pgoff fields, the * vma must be removed from the anon_vma's interval trees using * anon_vma_interval_tree_pre_update_vma(). * * After the update, the vma will be reinserted using * anon_vma_interval_tree_post_update_vma(). * * The entire update must be protected by exclusive mmap_sem and by * the root anon_vma's mutex. */ static inline void anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma) { struct anon_vma_chain *avc; list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) anon_vma_interval_tree_remove(avc, &avc->anon_vma->rb_root); } static inline void anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma) { struct anon_vma_chain *avc; list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) anon_vma_interval_tree_insert(avc, &avc->anon_vma->rb_root); } static int find_vma_links(struct mm_struct *mm, unsigned long addr, unsigned long end, struct vm_area_struct **pprev, struct rb_node ***rb_link, struct rb_node **rb_parent) { struct rb_node **__rb_link, *__rb_parent, *rb_prev; __rb_link = &mm->mm_rb.rb_node; rb_prev = __rb_parent = NULL; while (*__rb_link) { struct vm_area_struct *vma_tmp; __rb_parent = *__rb_link; vma_tmp = rb_entry(__rb_parent, struct vm_area_struct, vm_rb); if (vma_tmp->vm_end > addr) { /* Fail if an existing vma overlaps the area */ if (vma_tmp->vm_start < end) return -ENOMEM; __rb_link = &__rb_parent->rb_left; } else { rb_prev = __rb_parent; __rb_link = &__rb_parent->rb_right; } } *pprev = NULL; if (rb_prev) *pprev = rb_entry(rb_prev, struct vm_area_struct, vm_rb); *rb_link = __rb_link; *rb_parent = __rb_parent; return 0; } static unsigned long count_vma_pages_range(struct mm_struct *mm, unsigned long addr, unsigned long end) { unsigned long nr_pages = 0; struct vm_area_struct *vma; /* Find first overlaping mapping */ vma = find_vma_intersection(mm, addr, end); if (!vma) return 0; nr_pages = (min(end, vma->vm_end) - max(addr, vma->vm_start)) >> PAGE_SHIFT; /* Iterate over the rest of the overlaps */ for (vma = vma->vm_next; vma; vma = vma->vm_next) { unsigned long overlap_len; if (vma->vm_start > end) break; overlap_len = min(end, vma->vm_end) - vma->vm_start; nr_pages += overlap_len >> 
PAGE_SHIFT; } return nr_pages; } void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma, struct rb_node **rb_link, struct rb_node *rb_parent) { /* Update tracking information for the gap following the new vma. */ if (vma->vm_next) vma_gap_update(vma->vm_next); else mm->highest_vm_end = vm_end_gap(vma); /* * vma->vm_prev wasn't known when we followed the rbtree to find the * correct insertion point for that vma. As a result, we could not * update the vma vm_rb parents rb_subtree_gap values on the way down. * So, we first insert the vma with a zero rb_subtree_gap value * (to be consistent with what we did on the way down), and then * immediately update the gap to the correct value. Finally we * rebalance the rbtree after all augmented values have been set. */ rb_link_node(&vma->vm_rb, rb_parent, rb_link); vma->rb_subtree_gap = 0; vma_gap_update(vma); vma_rb_insert(vma, &mm->mm_rb); } static void __vma_link_file(struct vm_area_struct *vma) { struct file *file; file = vma->vm_file; if (file) { struct address_space *mapping = file->f_mapping; if (vma->vm_flags & VM_DENYWRITE) atomic_dec(&file_inode(file)->i_writecount); if (vma->vm_flags & VM_SHARED) atomic_inc(&mapping->i_mmap_writable); flush_dcache_mmap_lock(mapping); vma_interval_tree_insert(vma, &mapping->i_mmap); flush_dcache_mmap_unlock(mapping); } } static void __vma_link(struct mm_struct *mm, struct vm_area_struct *vma, struct vm_area_struct *prev, struct rb_node **rb_link, struct rb_node *rb_parent) { __vma_link_list(mm, vma, prev, rb_parent); __vma_link_rb(mm, vma, rb_link, rb_parent); } static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma, struct vm_area_struct *prev, struct rb_node **rb_link, struct rb_node *rb_parent) { struct address_space *mapping = NULL; if (vma->vm_file) { mapping = vma->vm_file->f_mapping; i_mmap_lock_write(mapping); } __vma_link(mm, vma, prev, rb_link, rb_parent); __vma_link_file(vma); if (mapping) i_mmap_unlock_write(mapping); mm->map_count++; validate_mm(mm); } /* * Helper for vma_adjust() in the split_vma insert case: insert a vma into the * mm's list and rbtree. It has already been inserted into the interval tree. */ static void __insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma) { struct vm_area_struct *prev; struct rb_node **rb_link, *rb_parent; if (find_vma_links(mm, vma->vm_start, vma->vm_end, &prev, &rb_link, &rb_parent)) BUG(); __vma_link(mm, vma, prev, rb_link, rb_parent); mm->map_count++; } static __always_inline void __vma_unlink_common(struct mm_struct *mm, struct vm_area_struct *vma, struct vm_area_struct *prev, bool has_prev, struct vm_area_struct *ignore) { struct vm_area_struct *next; vma_rb_erase_ignore(vma, &mm->mm_rb, ignore); next = vma->vm_next; if (has_prev) prev->vm_next = next; else { prev = vma->vm_prev; if (prev) prev->vm_next = next; else mm->mmap = next; } if (next) next->vm_prev = prev; /* Kill the cache */ vmacache_invalidate(mm); } static inline void __vma_unlink_prev(struct mm_struct *mm, struct vm_area_struct *vma, struct vm_area_struct *prev) { __vma_unlink_common(mm, vma, prev, true, vma); } /* * We cannot adjust vm_start, vm_end, vm_pgoff fields of a vma that * is already present in an i_mmap tree without adjusting the tree. * The following helper function should be used when such adjustments * are necessary. The "insert" vma (if any) is to be inserted * before we drop the necessary locks. 
*/ int __vma_adjust(struct vm_area_struct *vma, unsigned long start, unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert, struct vm_area_struct *expand) { struct mm_struct *mm = vma->vm_mm; struct vm_area_struct *next = vma->vm_next, *orig_vma = vma; struct address_space *mapping = NULL; struct rb_root_cached *root = NULL; struct anon_vma *anon_vma = NULL; struct file *file = vma->vm_file; bool start_changed = false, end_changed = false; long adjust_next = 0; int remove_next = 0; if (next && !insert) { struct vm_area_struct *exporter = NULL, *importer = NULL; if (end >= next->vm_end) { /* * vma expands, overlapping all the next, and * perhaps the one after too (mprotect case 6). * The only other cases that gets here are * case 1, case 7 and case 8. */ if (next == expand) { /* * The only case where we don't expand "vma" * and we expand "next" instead is case 8. */ VM_WARN_ON(end != next->vm_end); /* * remove_next == 3 means we're * removing "vma" and that to do so we * swapped "vma" and "next". */ remove_next = 3; VM_WARN_ON(file != next->vm_file); swap(vma, next); } else { VM_WARN_ON(expand != vma); /* * case 1, 6, 7, remove_next == 2 is case 6, * remove_next == 1 is case 1 or 7. */ remove_next = 1 + (end > next->vm_end); VM_WARN_ON(remove_next == 2 && end != next->vm_next->vm_end); VM_WARN_ON(remove_next == 1 && end != next->vm_end); /* trim end to next, for case 6 first pass */ end = next->vm_end; } exporter = next; importer = vma; /* * If next doesn't have anon_vma, import from vma after * next, if the vma overlaps with it. */ if (remove_next == 2 && !next->anon_vma) exporter = next->vm_next; } else if (end > next->vm_start) { /* * vma expands, overlapping part of the next: * mprotect case 5 shifting the boundary up. */ adjust_next = (end - next->vm_start) >> PAGE_SHIFT; exporter = next; importer = vma; VM_WARN_ON(expand != importer); } else if (end < vma->vm_end) { /* * vma shrinks, and !insert tells it's not * split_vma inserting another: so it must be * mprotect case 4 shifting the boundary down. */ adjust_next = -((vma->vm_end - end) >> PAGE_SHIFT); exporter = vma; importer = next; VM_WARN_ON(expand != importer); } /* * Easily overlooked: when mprotect shifts the boundary, * make sure the expanding vma has anon_vma set if the * shrinking vma had, to cover any anon pages imported. */ if (exporter && exporter->anon_vma && !importer->anon_vma) { int error; importer->anon_vma = exporter->anon_vma; error = anon_vma_clone(importer, exporter); if (error) return error; } } again: vma_adjust_trans_huge(orig_vma, start, end, adjust_next); if (file) { mapping = file->f_mapping; root = &mapping->i_mmap; uprobe_munmap(vma, vma->vm_start, vma->vm_end); if (adjust_next) uprobe_munmap(next, next->vm_start, next->vm_end); i_mmap_lock_write(mapping); if (insert) { /* * Put into interval tree now, so instantiated pages * are visible to arm/parisc __flush_dcache_page * throughout; but we cannot insert into address * space until vma start or end is updated. 
*/ __vma_link_file(insert); } } anon_vma = vma->anon_vma; if (!anon_vma && adjust_next) anon_vma = next->anon_vma; if (anon_vma) { VM_WARN_ON(adjust_next && next->anon_vma && anon_vma != next->anon_vma); anon_vma_lock_write(anon_vma); anon_vma_interval_tree_pre_update_vma(vma); if (adjust_next) anon_vma_interval_tree_pre_update_vma(next); } if (root) { flush_dcache_mmap_lock(mapping); vma_interval_tree_remove(vma, root); if (adjust_next) vma_interval_tree_remove(next, root); } if (start != vma->vm_start) { vma->vm_start = start; start_changed = true; } if (end != vma->vm_end) { vma->vm_end = end; end_changed = true; } vma->vm_pgoff = pgoff; if (adjust_next) { next->vm_start += adjust_next << PAGE_SHIFT; next->vm_pgoff += adjust_next; } if (root) { if (adjust_next) vma_interval_tree_insert(next, root); vma_interval_tree_insert(vma, root); flush_dcache_mmap_unlock(mapping); } if (remove_next) { /* * vma_merge has merged next into vma, and needs * us to remove next before dropping the locks. */ if (remove_next != 3) __vma_unlink_prev(mm, next, vma); else /* * vma is not before next if they've been * swapped. * * pre-swap() next->vm_start was reduced so * tell validate_mm_rb to ignore pre-swap() * "next" (which is stored in post-swap() * "vma"). */ __vma_unlink_common(mm, next, NULL, false, vma); if (file) __remove_shared_vm_struct(next, file, mapping); } else if (insert) { /* * split_vma has split insert from vma, and needs * us to insert it before dropping the locks * (it may either follow vma or precede it). */ __insert_vm_struct(mm, insert); } else { if (start_changed) vma_gap_update(vma); if (end_changed) { if (!next) mm->highest_vm_end = vm_end_gap(vma); else if (!adjust_next) vma_gap_update(next); } } if (anon_vma) { anon_vma_interval_tree_post_update_vma(vma); if (adjust_next) anon_vma_interval_tree_post_update_vma(next); anon_vma_unlock_write(anon_vma); } if (mapping) i_mmap_unlock_write(mapping); if (root) { uprobe_mmap(vma); if (adjust_next) uprobe_mmap(next); } if (remove_next) { if (file) { uprobe_munmap(next, next->vm_start, next->vm_end); fput(file); } if (next->anon_vma) anon_vma_merge(vma, next); mm->map_count--; mpol_put(vma_policy(next)); vm_area_free(next); /* * In mprotect's case 6 (see comments on vma_merge), * we must remove another next too. It would clutter * up the code too much to do both in one go. */ if (remove_next != 3) { /* * If "next" was removed and vma->vm_end was * expanded (up) over it, in turn * "next->vm_prev->vm_end" changed and the * "vma->vm_next" gap must be updated. */ next = vma->vm_next; } else { /* * For the scope of the comment "next" and * "vma" considered pre-swap(): if "vma" was * removed, next->vm_start was expanded (down) * over it and the "next" gap must be updated. * Because of the swap() the post-swap() "vma" * actually points to pre-swap() "next" * (post-swap() "next" as opposed is now a * dangling pointer). */ next = vma; } if (remove_next == 2) { remove_next = 1; end = next->vm_end; goto again; } else if (next) vma_gap_update(next); else { /* * If remove_next == 2 we obviously can't * reach this path. * * If remove_next == 3 we can't reach this * path because pre-swap() next is always not * NULL. pre-swap() "next" is not being * removed and its next->vm_end is not altered * (and furthermore "end" already matches * next->vm_end in remove_next == 3). * * We reach this only in the remove_next == 1 * case if the "next" vma that was removed was * the highest vma of the mm. 
However in such * case next->vm_end == "end" and the extended * "vma" has vma->vm_end == next->vm_end so * mm->highest_vm_end doesn't need any update * in remove_next == 1 case. */ VM_WARN_ON(mm->highest_vm_end != vm_end_gap(vma)); } } if (insert && file) uprobe_mmap(insert); validate_mm(mm); return 0; } /* * If the vma has a ->close operation then the driver probably needs to release * per-vma resources, so we don't attempt to merge those. */ static inline int is_mergeable_vma(struct vm_area_struct *vma, struct file *file, unsigned long vm_flags, struct vm_userfaultfd_ctx vm_userfaultfd_ctx) { /* * VM_SOFTDIRTY should not prevent from VMA merging, if we * match the flags but dirty bit -- the caller should mark * merged VMA as dirty. If dirty bit won't be excluded from * comparison, we increase pressure on the memory system forcing * the kernel to generate new VMAs when old one could be * extended instead. */ if ((vma->vm_flags ^ vm_flags) & ~VM_SOFTDIRTY) return 0; if (vma->vm_file != file) return 0; if (vma->vm_ops && vma->vm_ops->close) return 0; if (!is_mergeable_vm_userfaultfd_ctx(vma, vm_userfaultfd_ctx)) return 0; return 1; } static inline int is_mergeable_anon_vma(struct anon_vma *anon_vma1, struct anon_vma *anon_vma2, struct vm_area_struct *vma) { /* * The list_is_singular() test is to avoid merging VMA cloned from * parents. This can improve scalability caused by anon_vma lock. */ if ((!anon_vma1 || !anon_vma2) && (!vma || list_is_singular(&vma->anon_vma_chain))) return 1; return anon_vma1 == anon_vma2; } /* * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff) * in front of (at a lower virtual address and file offset than) the vma. * * We cannot merge two vmas if they have differently assigned (non-NULL) * anon_vmas, nor if same anon_vma is assigned but offsets incompatible. * * We don't check here for the merged mmap wrapping around the end of pagecache * indices (16TB on ia32) because do_mmap_pgoff() does not permit mmap's which * wrap, nor mmaps which cover the final page at index -1UL. */ static int can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags, struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff, struct vm_userfaultfd_ctx vm_userfaultfd_ctx) { if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx) && is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) { if (vma->vm_pgoff == vm_pgoff) return 1; } return 0; } /* * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff) * beyond (at a higher virtual address and file offset than) the vma. * * We cannot merge two vmas if they have differently assigned (non-NULL) * anon_vmas, nor if same anon_vma is assigned but offsets incompatible. */ static int can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags, struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff, struct vm_userfaultfd_ctx vm_userfaultfd_ctx) { if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx) && is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) { pgoff_t vm_pglen; vm_pglen = vma_pages(vma); if (vma->vm_pgoff + vm_pglen == vm_pgoff) return 1; } return 0; } /* * Given a mapping request (addr,end,vm_flags,file,pgoff), figure out * whether that can be merged with its predecessor or its successor. * Or both (it neatly fills a hole). 
* * In most cases - when called for mmap, brk or mremap - [addr,end) is * certain not to be mapped by the time vma_merge is called; but when * called for mprotect, it is certain to be already mapped (either at * an offset within prev, or at the start of next), and the flags of * this area are about to be changed to vm_flags - and the no-change * case has already been eliminated. * * The following mprotect cases have to be considered, where AAAA is * the area passed down from mprotect_fixup, never extending beyond one * vma, PPPPPP is the prev vma specified, and NNNNNN the next vma after: * * AAAA AAAA AAAA AAAA * PPPPPPNNNNNN PPPPPPNNNNNN PPPPPPNNNNNN PPPPNNNNXXXX * cannot merge might become might become might become * PPNNNNNNNNNN PPPPPPPPPPNN PPPPPPPPPPPP 6 or * mmap, brk or case 4 below case 5 below PPPPPPPPXXXX 7 or * mremap move: PPPPXXXXXXXX 8 * AAAA * PPPP NNNN PPPPPPPPPPPP PPPPPPPPNNNN PPPPNNNNNNNN * might become case 1 below case 2 below case 3 below * * It is important for case 8 that the vma NNNN overlapping the * region AAAA is never going to extended over XXXX. Instead XXXX must * be extended in region AAAA and NNNN must be removed. This way in * all cases where vma_merge succeeds, the moment vma_adjust drops the * rmap_locks, the properties of the merged vma will be already * correct for the whole merged range. Some of those properties like * vm_page_prot/vm_flags may be accessed by rmap_walks and they must * be correct for the whole merged range immediately after the * rmap_locks are released. Otherwise if XXXX would be removed and * NNNN would be extended over the XXXX range, remove_migration_ptes * or other rmap walkers (if working on addresses beyond the "end" * parameter) may establish ptes with the wrong permissions of NNNN * instead of the right permissions of XXXX. */ struct vm_area_struct *vma_merge(struct mm_struct *mm, struct vm_area_struct *prev, unsigned long addr, unsigned long end, unsigned long vm_flags, struct anon_vma *anon_vma, struct file *file, pgoff_t pgoff, struct mempolicy *policy, struct vm_userfaultfd_ctx vm_userfaultfd_ctx) { pgoff_t pglen = (end - addr) >> PAGE_SHIFT; struct vm_area_struct *area, *next; int err; /* * We later require that vma->vm_flags == vm_flags, * so this tests vma->vm_flags & VM_SPECIAL, too. */ if (vm_flags & VM_SPECIAL) return NULL; if (prev) next = prev->vm_next; else next = mm->mmap; area = next; if (area && area->vm_end == end) /* cases 6, 7, 8 */ next = next->vm_next; /* verify some invariant that must be enforced by the caller */ VM_WARN_ON(prev && addr <= prev->vm_start); VM_WARN_ON(area && end > area->vm_end); VM_WARN_ON(addr >= end); /* * Can it merge with the predecessor? */ if (prev && prev->vm_end == addr && mpol_equal(vma_policy(prev), policy) && can_vma_merge_after(prev, vm_flags, anon_vma, file, pgoff, vm_userfaultfd_ctx)) { /* * OK, it can. Can we now merge in the successor as well? */ if (next && end == next->vm_start && mpol_equal(policy, vma_policy(next)) && can_vma_merge_before(next, vm_flags, anon_vma, file, pgoff+pglen, vm_userfaultfd_ctx) && is_mergeable_anon_vma(prev->anon_vma, next->anon_vma, NULL)) { /* cases 1, 6 */ err = __vma_adjust(prev, prev->vm_start, next->vm_end, prev->vm_pgoff, NULL, prev); } else /* cases 2, 5, 7 */ err = __vma_adjust(prev, prev->vm_start, end, prev->vm_pgoff, NULL, prev); if (err) return NULL; khugepaged_enter_vma_merge(prev, vm_flags); return prev; } /* * Can this new request be merged in front of next? 
*/ if (next && end == next->vm_start && mpol_equal(policy, vma_policy(next)) && can_vma_merge_before(next, vm_flags, anon_vma, file, pgoff+pglen, vm_userfaultfd_ctx)) { if (prev && addr < prev->vm_end) /* case 4 */ err = __vma_adjust(prev, prev->vm_start, addr, prev->vm_pgoff, NULL, next); else { /* cases 3, 8 */ err = __vma_adjust(area, addr, next->vm_end, next->vm_pgoff - pglen, NULL, next); /* * In case 3 area is already equal to next and * this is a noop, but in case 8 "area" has * been removed and next was expanded over it. */ area = next; } if (err) return NULL; khugepaged_enter_vma_merge(area, vm_flags); return area; } return NULL; } /* * Rough compatibility check to quickly see if it's even worth looking * at sharing an anon_vma. * * They need to have the same vm_file, and the flags can only differ * in things that mprotect may change. * * NOTE! The fact that we share an anon_vma doesn't _have_ to mean that * we can merge the two vma's. For example, we refuse to merge a vma if * there is a vm_ops->close() function, because that indicates that the * driver is doing some kind of reference counting. But that doesn't * really matter for the anon_vma sharing case. */ static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *b) { return a->vm_end == b->vm_start && mpol_equal(vma_policy(a), vma_policy(b)) && a->vm_file == b->vm_file && !((a->vm_flags ^ b->vm_flags) & ~(VM_READ|VM_WRITE|VM_EXEC|VM_SOFTDIRTY)) && b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT); } /* * Do some basic sanity checking to see if we can re-use the anon_vma * from 'old'. The 'a'/'b' vma's are in VM order - one of them will be * the same as 'old', the other will be the new one that is trying * to share the anon_vma. * * NOTE! This runs with mmap_sem held for reading, so it is possible that * the anon_vma of 'old' is concurrently in the process of being set up * by another page fault trying to merge _that_. But that's ok: if it * is being set up, that automatically means that it will be a singleton * acceptable for merging, so we can do all of this optimistically. But * we do that READ_ONCE() to make sure that we never re-load the pointer. * * IOW: that the "list_is_singular()" test on the anon_vma_chain only * matters for the 'stable anon_vma' case (ie the thing we want to avoid * is to return an anon_vma that is "complex" due to having gone through * a fork). * * We also make sure that the two vma's are compatible (adjacent, * and with the same memory policies). That's all stable, even with just * a read lock on the mmap_sem. */ static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old, struct vm_area_struct *a, struct vm_area_struct *b) { if (anon_vma_compatible(a, b)) { struct anon_vma *anon_vma = READ_ONCE(old->anon_vma); if (anon_vma && list_is_singular(&old->anon_vma_chain)) return anon_vma; } return NULL; } /* * find_mergeable_anon_vma is used by anon_vma_prepare, to check * neighbouring vmas for a suitable anon_vma, before it goes off * to allocate a new anon_vma. It checks because a repetitive * sequence of mprotects and faults may otherwise lead to distinct * anon_vmas being allocated, preventing vma merge in subsequent * mprotect. 
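 *
 * Illustrative example (not a literal trace): a task that repeatedly
 * mprotect()s and faults subranges of one large anonymous mapping would
 * otherwise leave each split vma with its own anon_vma; by reusing a
 * neighbour's anon_vma here, a later mprotect() back to matching flags
 * can merge the pieces again.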
*/ struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma) { struct anon_vma *anon_vma; struct vm_area_struct *near; near = vma->vm_next; if (!near) goto try_prev; anon_vma = reusable_anon_vma(near, vma, near); if (anon_vma) return anon_vma; try_prev: near = vma->vm_prev; if (!near) goto none; anon_vma = reusable_anon_vma(near, near, vma); if (anon_vma) return anon_vma; none: /* * There's no absolute need to look only at touching neighbours: * we could search further afield for "compatible" anon_vmas. * But it would probably just be a waste of time searching, * or lead to too many vmas hanging off the same anon_vma. * We're trying to allow mprotect remerging later on, * not trying to minimize memory used for anon_vmas. */ return NULL; } /* * If a hint addr is less than mmap_min_addr change hint to be as * low as possible but still greater than mmap_min_addr */ static inline unsigned long round_hint_to_min(unsigned long hint) { hint &= PAGE_MASK; if (((void *)hint != NULL) && (hint < mmap_min_addr)) return PAGE_ALIGN(mmap_min_addr); return hint; } static inline int mlock_future_check(struct mm_struct *mm, unsigned long flags, unsigned long len) { unsigned long locked, lock_limit; /* mlock MCL_FUTURE? */ if (flags & VM_LOCKED) { locked = len >> PAGE_SHIFT; locked += mm->locked_vm; lock_limit = rlimit(RLIMIT_MEMLOCK); lock_limit >>= PAGE_SHIFT; if (locked > lock_limit && !capable(CAP_IPC_LOCK)) return -EAGAIN; } return 0; } static inline u64 file_mmap_size_max(struct file *file, struct inode *inode) { if (S_ISREG(inode->i_mode)) return MAX_LFS_FILESIZE; if (S_ISBLK(inode->i_mode)) return MAX_LFS_FILESIZE; /* Special "we do even unsigned file positions" case */ if (file->f_mode & FMODE_UNSIGNED_OFFSET) return 0; /* Yes, random drivers might want more. But I'm tired of buggy drivers */ return ULONG_MAX; } static inline bool file_mmap_ok(struct file *file, struct inode *inode, unsigned long pgoff, unsigned long len) { u64 maxsize = file_mmap_size_max(file, inode); if (maxsize && len > maxsize) return false; maxsize -= len; if (pgoff > maxsize >> PAGE_SHIFT) return false; return true; } /* * The caller must hold down_write(&current->mm->mmap_sem). */ unsigned long do_mmap(struct file *file, unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate, struct list_head *uf) { struct mm_struct *mm = current->mm; int pkey = 0; *populate = 0; if (!len) return -EINVAL; /* * Does the application expect PROT_READ to imply PROT_EXEC? * * (the exception is when the underlying filesystem is noexec * mounted, in which case we dont add PROT_EXEC.) */ if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC)) if (!(file && path_noexec(&file->f_path))) prot |= PROT_EXEC; /* force arch specific MAP_FIXED handling in get_unmapped_area */ if (flags & MAP_FIXED_NOREPLACE) flags |= MAP_FIXED; if (!(flags & MAP_FIXED)) addr = round_hint_to_min(addr); /* Careful about overflows.. */ len = PAGE_ALIGN(len); if (!len) return -ENOMEM; /* offset overflow? */ if ((pgoff + (len >> PAGE_SHIFT)) < pgoff) return -EOVERFLOW; /* Too many mappings? */ if (mm->map_count > sysctl_max_map_count) return -ENOMEM; /* Obtain the address to map to. we verify (or select) it and ensure * that it represents a valid section of the address space. 
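 *
 * get_unmapped_area() follows the convention documented above its
 * definition further down in this file: a page-aligned return value is
 * a usable address, while a value with the low bits set (such as
 * -ENOMEM) is an error code, hence the offset_in_page() test below.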
*/ addr = get_unmapped_area(file, addr, len, pgoff, flags); if (offset_in_page(addr)) return addr; if (flags & MAP_FIXED_NOREPLACE) { struct vm_area_struct *vma = find_vma(mm, addr); if (vma && vma->vm_start < addr + len) return -EEXIST; } if (prot == PROT_EXEC) { pkey = execute_only_pkey(mm); if (pkey < 0) pkey = 0; } /* Do simple checking here so the lower-level routines won't have * to. we assume access permissions have been handled by the open * of the memory object, so we don't do any here. */ vm_flags |= calc_vm_prot_bits(prot, pkey) | calc_vm_flag_bits(flags) | mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC; if (flags & MAP_LOCKED) if (!can_do_mlock()) return -EPERM; if (mlock_future_check(mm, vm_flags, len)) return -EAGAIN; if (file) { struct inode *inode = file_inode(file); unsigned long flags_mask; if (!file_mmap_ok(file, inode, pgoff, len)) return -EOVERFLOW; flags_mask = LEGACY_MAP_MASK | file->f_op->mmap_supported_flags; switch (flags & MAP_TYPE) { case MAP_SHARED: /* * Force use of MAP_SHARED_VALIDATE with non-legacy * flags. E.g. MAP_SYNC is dangerous to use with * MAP_SHARED as you don't know which consistency model * you will get. We silently ignore unsupported flags * with MAP_SHARED to preserve backward compatibility. */ flags &= LEGACY_MAP_MASK; /* fall through */ case MAP_SHARED_VALIDATE: if (flags & ~flags_mask) return -EOPNOTSUPP; if ((prot&PROT_WRITE) && !(file->f_mode&FMODE_WRITE)) return -EACCES; /* * Make sure we don't allow writing to an append-only * file.. */ if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE)) return -EACCES; /* * Make sure there are no mandatory locks on the file. */ if (locks_verify_locked(file)) return -EAGAIN; vm_flags |= VM_SHARED | VM_MAYSHARE; if (!(file->f_mode & FMODE_WRITE)) vm_flags &= ~(VM_MAYWRITE | VM_SHARED); /* fall through */ case MAP_PRIVATE: if (!(file->f_mode & FMODE_READ)) return -EACCES; if (path_noexec(&file->f_path)) { if (vm_flags & VM_EXEC) return -EPERM; vm_flags &= ~VM_MAYEXEC; } if (!file->f_op->mmap) return -ENODEV; if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP)) return -EINVAL; break; default: return -EINVAL; } } else { switch (flags & MAP_TYPE) { case MAP_SHARED: if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP)) return -EINVAL; /* * Ignore pgoff. */ pgoff = 0; vm_flags |= VM_SHARED | VM_MAYSHARE; break; case MAP_PRIVATE: /* * Set pgoff according to addr for anon_vma. */ pgoff = addr >> PAGE_SHIFT; break; default: return -EINVAL; } } /* * Set 'VM_NORESERVE' if we should not account for the * memory use of this mapping. 
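 * (VM_NORESERVE makes accountable_mapping() return false later on, so
 * no overcommit charge is taken for the mapping in mmap_region().)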
*/ if (flags & MAP_NORESERVE) { /* We honor MAP_NORESERVE if allowed to overcommit */ if (sysctl_overcommit_memory != OVERCOMMIT_NEVER) vm_flags |= VM_NORESERVE; /* hugetlb applies strict overcommit unless MAP_NORESERVE */ if (file && is_file_hugepages(file)) vm_flags |= VM_NORESERVE; } addr = mmap_region(file, addr, len, vm_flags, pgoff, uf); if (!IS_ERR_VALUE(addr) && ((vm_flags & VM_LOCKED) || (flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE)) *populate = len; return addr; } unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long fd, unsigned long pgoff) { struct file *file = NULL; unsigned long retval; if (!(flags & MAP_ANONYMOUS)) { audit_mmap_fd(fd, flags); file = fget(fd); if (!file) return -EBADF; if (is_file_hugepages(file)) len = ALIGN(len, huge_page_size(hstate_file(file))); retval = -EINVAL; if (unlikely(flags & MAP_HUGETLB && !is_file_hugepages(file))) goto out_fput; } else if (flags & MAP_HUGETLB) { struct user_struct *user = NULL; struct hstate *hs; hs = hstate_sizelog((flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK); if (!hs) return -EINVAL; len = ALIGN(len, huge_page_size(hs)); /* * VM_NORESERVE is used because the reservations will be * taken when vm_ops->mmap() is called * A dummy user value is used because we are not locking * memory so no accounting is necessary */ file = hugetlb_file_setup(HUGETLB_ANON_FILE, len, VM_NORESERVE, &user, HUGETLB_ANONHUGE_INODE, (flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK); if (IS_ERR(file)) return PTR_ERR(file); } flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff); out_fput: if (file) fput(file); return retval; } SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len, unsigned long, prot, unsigned long, flags, unsigned long, fd, unsigned long, pgoff) { return ksys_mmap_pgoff(addr, len, prot, flags, fd, pgoff); } #ifdef __ARCH_WANT_SYS_OLD_MMAP struct mmap_arg_struct { unsigned long addr; unsigned long len; unsigned long prot; unsigned long flags; unsigned long fd; unsigned long offset; }; SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg) { struct mmap_arg_struct a; if (copy_from_user(&a, arg, sizeof(a))) return -EFAULT; if (offset_in_page(a.offset)) return -EINVAL; return ksys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT); } #endif /* __ARCH_WANT_SYS_OLD_MMAP */ /* * Some shared mappings will want the pages marked read-only * to track write events. If so, we'll downgrade vm_page_prot * to the private version (using protection_map[] without the * VM_SHARED bit). */ int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot) { vm_flags_t vm_flags = vma->vm_flags; const struct vm_operations_struct *vm_ops = vma->vm_ops; /* If it was private or non-writable, the write bit is already clear */ if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED))) return 0; /* The backer wishes to know when pages are first written to? */ if (vm_ops && (vm_ops->page_mkwrite || vm_ops->pfn_mkwrite)) return 1; /* The open routine did something to the protections that pgprot_modify * won't preserve? */ if (pgprot_val(vm_page_prot) != pgprot_val(vm_pgprot_modify(vm_page_prot, vm_flags))) return 0; /* Do we need to track softdirty? */ if (IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) && !(vm_flags & VM_SOFTDIRTY)) return 1; /* Specialty mapping? */ if (vm_flags & VM_PFNMAP) return 0; /* Can the mapping track the dirty pages? 
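 * (i.e. is this a file-backed mapping whose address_space does dirty
 * accounting?)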
*/ return vma->vm_file && vma->vm_file->f_mapping && mapping_cap_account_dirty(vma->vm_file->f_mapping); } /* * We account for memory if it's a private writeable mapping, * not hugepages and VM_NORESERVE wasn't set. */ static inline int accountable_mapping(struct file *file, vm_flags_t vm_flags) { /* * hugetlb has its own accounting separate from the core VM * VM_HUGETLB may not be set yet so we cannot check for that flag. */ if (file && is_file_hugepages(file)) return 0; return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE; } unsigned long mmap_region(struct file *file, unsigned long addr, unsigned long len, vm_flags_t vm_flags, unsigned long pgoff, struct list_head *uf) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma, *prev; int error; struct rb_node **rb_link, *rb_parent; unsigned long charged = 0; /* Check against address space limit. */ if (!may_expand_vm(mm, vm_flags, len >> PAGE_SHIFT)) { unsigned long nr_pages; /* * MAP_FIXED may remove pages of mappings that intersects with * requested mapping. Account for the pages it would unmap. */ nr_pages = count_vma_pages_range(mm, addr, addr + len); if (!may_expand_vm(mm, vm_flags, (len >> PAGE_SHIFT) - nr_pages)) return -ENOMEM; } /* Clear old maps */ while (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) { if (do_munmap(mm, addr, len, uf)) return -ENOMEM; } /* * Private writable mapping: check memory availability */ if (accountable_mapping(file, vm_flags)) { charged = len >> PAGE_SHIFT; if (security_vm_enough_memory_mm(mm, charged)) return -ENOMEM; vm_flags |= VM_ACCOUNT; } /* * Can we just expand an old mapping? */ vma = vma_merge(mm, prev, addr, addr + len, vm_flags, NULL, file, pgoff, NULL, NULL_VM_UFFD_CTX); if (vma) goto out; /* * Determine the object being mapped and call the appropriate * specific mapper. the address has already been validated, but * not unmapped, but the maps are removed from the list. */ vma = vm_area_alloc(mm); if (!vma) { error = -ENOMEM; goto unacct_error; } vma->vm_start = addr; vma->vm_end = addr + len; vma->vm_flags = vm_flags; vma->vm_page_prot = vm_get_page_prot(vm_flags); vma->vm_pgoff = pgoff; if (file) { if (vm_flags & VM_DENYWRITE) { error = deny_write_access(file); if (error) goto free_vma; } if (vm_flags & VM_SHARED) { error = mapping_map_writable(file->f_mapping); if (error) goto allow_write_and_free_vma; } /* ->mmap() can change vma->vm_file, but must guarantee that * vma_link() below can deny write-access if VM_DENYWRITE is set * and map writably if VM_SHARED is set. This usually means the * new file must not have been exposed to user-space, yet. */ vma->vm_file = get_file(file); error = call_mmap(file, vma); if (error) goto unmap_and_free_vma; /* Can addr have changed?? * * Answer: Yes, several device drivers can do it in their * f_op->mmap method. 
-DaveM * Bug: If addr is changed, prev, rb_link, rb_parent should * be updated for vma_link() */ WARN_ON_ONCE(addr != vma->vm_start); addr = vma->vm_start; vm_flags = vma->vm_flags; } else if (vm_flags & VM_SHARED) { error = shmem_zero_setup(vma); if (error) goto free_vma; } else { vma_set_anonymous(vma); } vma_link(mm, vma, prev, rb_link, rb_parent); /* Once vma denies write, undo our temporary denial count */ if (file) { if (vm_flags & VM_SHARED) mapping_unmap_writable(file->f_mapping); if (vm_flags & VM_DENYWRITE) allow_write_access(file); } file = vma->vm_file; out: perf_event_mmap(vma); vm_stat_account(mm, vm_flags, len >> PAGE_SHIFT); if (vm_flags & VM_LOCKED) { if ((vm_flags & VM_SPECIAL) || vma_is_dax(vma) || is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm)) vma->vm_flags &= VM_LOCKED_CLEAR_MASK; else mm->locked_vm += (len >> PAGE_SHIFT); } if (file) uprobe_mmap(vma); /* * New (or expanded) vma always get soft dirty status. * Otherwise user-space soft-dirty page tracker won't * be able to distinguish situation when vma area unmapped, * then new mapped in-place (which must be aimed as * a completely new data area). */ vma->vm_flags |= VM_SOFTDIRTY; vma_set_page_prot(vma); return addr; unmap_and_free_vma: vma->vm_file = NULL; fput(file); /* Undo any partial mapping done by a device driver. */ unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end); charged = 0; if (vm_flags & VM_SHARED) mapping_unmap_writable(file->f_mapping); allow_write_and_free_vma: if (vm_flags & VM_DENYWRITE) allow_write_access(file); free_vma: vm_area_free(vma); unacct_error: if (charged) vm_unacct_memory(charged); return error; } unsigned long unmapped_area(struct vm_unmapped_area_info *info) { /* * We implement the search by looking for an rbtree node that * immediately follows a suitable gap. That is, * - gap_start = vma->vm_prev->vm_end <= info->high_limit - length; * - gap_end = vma->vm_start >= info->low_limit + length; * - gap_end - gap_start >= length */ struct mm_struct *mm = current->mm; struct vm_area_struct *vma; unsigned long length, low_limit, high_limit, gap_start, gap_end; /* Adjust search length to account for worst case alignment overhead */ length = info->length + info->align_mask; if (length < info->length) return -ENOMEM; /* Adjust search limits by the desired length */ if (info->high_limit < length) return -ENOMEM; high_limit = info->high_limit - length; if (info->low_limit > high_limit) return -ENOMEM; low_limit = info->low_limit + length; /* Check if rbtree root looks promising */ if (RB_EMPTY_ROOT(&mm->mm_rb)) goto check_highest; vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb); if (vma->rb_subtree_gap < length) goto check_highest; while (true) { /* Visit left subtree if it looks promising */ gap_end = vm_start_gap(vma); if (gap_end >= low_limit && vma->vm_rb.rb_left) { struct vm_area_struct *left = rb_entry(vma->vm_rb.rb_left, struct vm_area_struct, vm_rb); if (left->rb_subtree_gap >= length) { vma = left; continue; } } gap_start = vma->vm_prev ? 
vm_end_gap(vma->vm_prev) : 0; check_current: /* Check if current node has a suitable gap */ if (gap_start > high_limit) return -ENOMEM; if (gap_end >= low_limit && gap_end > gap_start && gap_end - gap_start >= length) goto found; /* Visit right subtree if it looks promising */ if (vma->vm_rb.rb_right) { struct vm_area_struct *right = rb_entry(vma->vm_rb.rb_right, struct vm_area_struct, vm_rb); if (right->rb_subtree_gap >= length) { vma = right; continue; } } /* Go back up the rbtree to find next candidate node */ while (true) { struct rb_node *prev = &vma->vm_rb; if (!rb_parent(prev)) goto check_highest; vma = rb_entry(rb_parent(prev), struct vm_area_struct, vm_rb); if (prev == vma->vm_rb.rb_left) { gap_start = vm_end_gap(vma->vm_prev); gap_end = vm_start_gap(vma); goto check_current; } } } check_highest: /* Check highest gap, which does not precede any rbtree node */ gap_start = mm->highest_vm_end; gap_end = ULONG_MAX; /* Only for VM_BUG_ON below */ if (gap_start > high_limit) return -ENOMEM; found: /* We found a suitable gap. Clip it with the original low_limit. */ if (gap_start < info->low_limit) gap_start = info->low_limit; /* Adjust gap address to the desired alignment */ gap_start += (info->align_offset - gap_start) & info->align_mask; VM_BUG_ON(gap_start + info->length > info->high_limit); VM_BUG_ON(gap_start + info->length > gap_end); return gap_start; } unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma; unsigned long length, low_limit, high_limit, gap_start, gap_end; /* Adjust search length to account for worst case alignment overhead */ length = info->length + info->align_mask; if (length < info->length) return -ENOMEM; /* * Adjust search limits by the desired length. * See implementation comment at top of unmapped_area(). */ gap_end = info->high_limit; if (gap_end < length) return -ENOMEM; high_limit = gap_end - length; if (info->low_limit > high_limit) return -ENOMEM; low_limit = info->low_limit + length; /* Check highest gap, which does not precede any rbtree node */ gap_start = mm->highest_vm_end; if (gap_start <= high_limit) goto found_highest; /* Check if rbtree root looks promising */ if (RB_EMPTY_ROOT(&mm->mm_rb)) return -ENOMEM; vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb); if (vma->rb_subtree_gap < length) return -ENOMEM; while (true) { /* Visit right subtree if it looks promising */ gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0; if (gap_start <= high_limit && vma->vm_rb.rb_right) { struct vm_area_struct *right = rb_entry(vma->vm_rb.rb_right, struct vm_area_struct, vm_rb); if (right->rb_subtree_gap >= length) { vma = right; continue; } } check_current: /* Check if current node has a suitable gap */ gap_end = vm_start_gap(vma); if (gap_end < low_limit) return -ENOMEM; if (gap_start <= high_limit && gap_end > gap_start && gap_end - gap_start >= length) goto found; /* Visit left subtree if it looks promising */ if (vma->vm_rb.rb_left) { struct vm_area_struct *left = rb_entry(vma->vm_rb.rb_left, struct vm_area_struct, vm_rb); if (left->rb_subtree_gap >= length) { vma = left; continue; } } /* Go back up the rbtree to find next candidate node */ while (true) { struct rb_node *prev = &vma->vm_rb; if (!rb_parent(prev)) return -ENOMEM; vma = rb_entry(rb_parent(prev), struct vm_area_struct, vm_rb); if (prev == vma->vm_rb.rb_right) { gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0; goto check_current; } } } found: /* We found a suitable gap. 
Clip it with the original high_limit. */ if (gap_end > info->high_limit) gap_end = info->high_limit; found_highest: /* Compute highest gap address at the desired alignment */ gap_end -= info->length; gap_end -= (gap_end - info->align_offset) & info->align_mask; VM_BUG_ON(gap_end < info->low_limit); VM_BUG_ON(gap_end < gap_start); return gap_end; } #ifndef arch_get_mmap_end #define arch_get_mmap_end(addr) (TASK_SIZE) #endif #ifndef arch_get_mmap_base #define arch_get_mmap_base(addr, base) (base) #endif /* Get an address range which is currently unmapped. * For shmat() with addr=0. * * Ugly calling convention alert: * Return value with the low bits set means error value, * ie * if (ret & ~PAGE_MASK) * error = ret; * * This function "knows" that -ENOMEM has the bits set. */ #ifndef HAVE_ARCH_UNMAPPED_AREA unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma, *prev; struct vm_unmapped_area_info info; const unsigned long mmap_end = arch_get_mmap_end(addr); if (len > mmap_end - mmap_min_addr) return -ENOMEM; if (flags & MAP_FIXED) return addr; if (addr) { addr = PAGE_ALIGN(addr); vma = find_vma_prev(mm, addr, &prev); if (mmap_end - len >= addr && addr >= mmap_min_addr && (!vma || addr + len <= vm_start_gap(vma)) && (!prev || addr >= vm_end_gap(prev))) return addr; } info.flags = 0; info.length = len; info.low_limit = mm->mmap_base; info.high_limit = mmap_end; info.align_mask = 0; return vm_unmapped_area(&info); } #endif /* * This mmap-allocator allocates new areas top-down from below the * stack's low limit (the base): */ #ifndef HAVE_ARCH_UNMAPPED_AREA_TOPDOWN unsigned long arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { struct vm_area_struct *vma, *prev; struct mm_struct *mm = current->mm; struct vm_unmapped_area_info info; const unsigned long mmap_end = arch_get_mmap_end(addr); /* requested length too big for entire address space */ if (len > mmap_end - mmap_min_addr) return -ENOMEM; if (flags & MAP_FIXED) return addr; /* requesting a specific address */ if (addr) { addr = PAGE_ALIGN(addr); vma = find_vma_prev(mm, addr, &prev); if (mmap_end - len >= addr && addr >= mmap_min_addr && (!vma || addr + len <= vm_start_gap(vma)) && (!prev || addr >= vm_end_gap(prev))) return addr; } info.flags = VM_UNMAPPED_AREA_TOPDOWN; info.length = len; info.low_limit = max(PAGE_SIZE, mmap_min_addr); info.high_limit = arch_get_mmap_base(addr, mm->mmap_base); info.align_mask = 0; addr = vm_unmapped_area(&info); /* * A failed mmap() very likely causes application failure, * so fall back to the bottom-up function here. This scenario * can happen with large stack limits and large mmap() * allocations. */ if (offset_in_page(addr)) { VM_BUG_ON(addr != -ENOMEM); info.flags = 0; info.low_limit = TASK_UNMAPPED_BASE; info.high_limit = mmap_end; addr = vm_unmapped_area(&info); } return addr; } #endif unsigned long get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { unsigned long (*get_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); unsigned long error = arch_mmap_check(addr, len, flags); if (error) return error; /* Careful about overflows.. 
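 * (len is checked against TASK_SIZE up front so that the
 * "addr > TASK_SIZE - len" test further down cannot underflow)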
*/ if (len > TASK_SIZE) return -ENOMEM; get_area = current->mm->get_unmapped_area; if (file) { if (file->f_op->get_unmapped_area) get_area = file->f_op->get_unmapped_area; } else if (flags & MAP_SHARED) { /* * mmap_region() will call shmem_zero_setup() to create a file, * so use shmem's get_unmapped_area in case it can be huge. * do_mmap_pgoff() will clear pgoff, so match alignment. */ pgoff = 0; get_area = shmem_get_unmapped_area; } addr = get_area(file, addr, len, pgoff, flags); if (IS_ERR_VALUE(addr)) return addr; if (addr > TASK_SIZE - len) return -ENOMEM; if (offset_in_page(addr)) return -EINVAL; error = security_mmap_addr(addr); return error ? error : addr; } EXPORT_SYMBOL(get_unmapped_area); /* Look up the first VMA which satisfies addr < vm_end, NULL if none. */ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr) { struct rb_node *rb_node; struct vm_area_struct *vma; /* Check the cache first. */ vma = vmacache_find(mm, addr); if (likely(vma)) return vma; rb_node = mm->mm_rb.rb_node; while (rb_node) { struct vm_area_struct *tmp; tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb); if (tmp->vm_end > addr) { vma = tmp; if (tmp->vm_start <= addr) break; rb_node = rb_node->rb_left; } else rb_node = rb_node->rb_right; } if (vma) vmacache_update(addr, vma); return vma; } EXPORT_SYMBOL(find_vma); /* * Same as find_vma, but also return a pointer to the previous VMA in *pprev. */ struct vm_area_struct * find_vma_prev(struct mm_struct *mm, unsigned long addr, struct vm_area_struct **pprev) { struct vm_area_struct *vma; vma = find_vma(mm, addr); if (vma) { *pprev = vma->vm_prev; } else { struct rb_node *rb_node = mm->mm_rb.rb_node; *pprev = NULL; while (rb_node) { *pprev = rb_entry(rb_node, struct vm_area_struct, vm_rb); rb_node = rb_node->rb_right; } } return vma; } /* * Verify that the stack growth is acceptable and * update accounting. This is shared with both the * grow-up and grow-down cases. */ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, unsigned long grow) { struct mm_struct *mm = vma->vm_mm; unsigned long new_start; /* address space limit tests */ if (!may_expand_vm(mm, vma->vm_flags, grow)) return -ENOMEM; /* Stack limit test */ if (size > rlimit(RLIMIT_STACK)) return -ENOMEM; /* mlock limit tests */ if (vma->vm_flags & VM_LOCKED) { unsigned long locked; unsigned long limit; locked = mm->locked_vm + grow; limit = rlimit(RLIMIT_MEMLOCK); limit >>= PAGE_SHIFT; if (locked > limit && !capable(CAP_IPC_LOCK)) return -ENOMEM; } /* Check to ensure the stack will not grow into a hugetlb-only region */ new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start : vma->vm_end - size; if (is_hugepage_only_range(vma->vm_mm, new_start, size)) return -EFAULT; /* * Overcommit.. This must be the final test, as it will * update security statistics. */ if (security_vm_enough_memory_mm(mm, grow)) return -ENOMEM; return 0; } #if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64) /* * PA-RISC uses this for its stack; IA64 for its Register Backing Store. * vma is the last one with address > vma->vm_end. Have to extend vma. */ int expand_upwards(struct vm_area_struct *vma, unsigned long address) { struct mm_struct *mm = vma->vm_mm; struct vm_area_struct *next; unsigned long gap_addr; int error = 0; if (!(vma->vm_flags & VM_GROWSUP)) return -EFAULT; /* Guard against exceeding limits of the address space. 
*/ address &= PAGE_MASK; if (address >= (TASK_SIZE & PAGE_MASK)) return -ENOMEM; address += PAGE_SIZE; /* Enforce stack_guard_gap */ gap_addr = address + stack_guard_gap; /* Guard against overflow */ if (gap_addr < address || gap_addr > TASK_SIZE) gap_addr = TASK_SIZE; next = vma->vm_next; if (next && next->vm_start < gap_addr && (next->vm_flags & (VM_WRITE|VM_READ|VM_EXEC))) { if (!(next->vm_flags & VM_GROWSUP)) return -ENOMEM; /* Check that both stack segments have the same anon_vma? */ } /* We must make sure the anon_vma is allocated. */ if (unlikely(anon_vma_prepare(vma))) return -ENOMEM; /* * vma->vm_start/vm_end cannot change under us because the caller * is required to hold the mmap_sem in read mode. We need the * anon_vma lock to serialize against concurrent expand_stacks. */ anon_vma_lock_write(vma->anon_vma); /* Somebody else might have raced and expanded it already */ if (address > vma->vm_end) { unsigned long size, grow; size = address - vma->vm_start; grow = (address - vma->vm_end) >> PAGE_SHIFT; error = -ENOMEM; if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) { error = acct_stack_growth(vma, size, grow); if (!error) { /* * vma_gap_update() doesn't support concurrent * updates, but we only hold a shared mmap_sem * lock here, so we need to protect against * concurrent vma expansions. * anon_vma_lock_write() doesn't help here, as * we don't guarantee that all growable vmas * in a mm share the same root anon vma. * So, we reuse mm->page_table_lock to guard * against concurrent vma expansions. */ spin_lock(&mm->page_table_lock); if (vma->vm_flags & VM_LOCKED) mm->locked_vm += grow; vm_stat_account(mm, vma->vm_flags, grow); anon_vma_interval_tree_pre_update_vma(vma); vma->vm_end = address; anon_vma_interval_tree_post_update_vma(vma); if (vma->vm_next) vma_gap_update(vma->vm_next); else mm->highest_vm_end = vm_end_gap(vma); spin_unlock(&mm->page_table_lock); perf_event_mmap(vma); } } } anon_vma_unlock_write(vma->anon_vma); khugepaged_enter_vma_merge(vma, vma->vm_flags); validate_mm(mm); return error; } #endif /* CONFIG_STACK_GROWSUP || CONFIG_IA64 */ /* * vma is the first one with address < vma->vm_start. Have to extend vma. */ int expand_downwards(struct vm_area_struct *vma, unsigned long address) { struct mm_struct *mm = vma->vm_mm; struct vm_area_struct *prev; int error = 0; address &= PAGE_MASK; if (address < mmap_min_addr) return -EPERM; /* Enforce stack_guard_gap */ prev = vma->vm_prev; /* Check that both stack segments have the same anon_vma? */ if (prev && !(prev->vm_flags & VM_GROWSDOWN) && (prev->vm_flags & (VM_WRITE|VM_READ|VM_EXEC))) { if (address - prev->vm_end < stack_guard_gap) return -ENOMEM; } /* We must make sure the anon_vma is allocated. */ if (unlikely(anon_vma_prepare(vma))) return -ENOMEM; /* * vma->vm_start/vm_end cannot change under us because the caller * is required to hold the mmap_sem in read mode. We need the * anon_vma lock to serialize against concurrent expand_stacks. */ anon_vma_lock_write(vma->anon_vma); /* Somebody else might have raced and expanded it already */ if (address < vma->vm_start) { unsigned long size, grow; size = vma->vm_end - address; grow = (vma->vm_start - address) >> PAGE_SHIFT; error = -ENOMEM; if (grow <= vma->vm_pgoff) { error = acct_stack_growth(vma, size, grow); if (!error) { /* * vma_gap_update() doesn't support concurrent * updates, but we only hold a shared mmap_sem * lock here, so we need to protect against * concurrent vma expansions. 
* anon_vma_lock_write() doesn't help here, as * we don't guarantee that all growable vmas * in a mm share the same root anon vma. * So, we reuse mm->page_table_lock to guard * against concurrent vma expansions. */ spin_lock(&mm->page_table_lock); if (vma->vm_flags & VM_LOCKED) mm->locked_vm += grow; vm_stat_account(mm, vma->vm_flags, grow); anon_vma_interval_tree_pre_update_vma(vma); vma->vm_start = address; vma->vm_pgoff -= grow; anon_vma_interval_tree_post_update_vma(vma); vma_gap_update(vma); spin_unlock(&mm->page_table_lock); perf_event_mmap(vma); } } } anon_vma_unlock_write(vma->anon_vma); khugepaged_enter_vma_merge(vma, vma->vm_flags); validate_mm(mm); return error; } /* enforced gap between the expanding stack and other mappings. */ unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT; static int __init cmdline_parse_stack_guard_gap(char *p) { unsigned long val; char *endptr; val = simple_strtoul(p, &endptr, 10); if (!*endptr) stack_guard_gap = val << PAGE_SHIFT; return 0; } __setup("stack_guard_gap=", cmdline_parse_stack_guard_gap); #ifdef CONFIG_STACK_GROWSUP int expand_stack(struct vm_area_struct *vma, unsigned long address) { return expand_upwards(vma, address); } struct vm_area_struct * find_extend_vma(struct mm_struct *mm, unsigned long addr) { struct vm_area_struct *vma, *prev; addr &= PAGE_MASK; vma = find_vma_prev(mm, addr, &prev); if (vma && (vma->vm_start <= addr)) return vma; /* don't alter vm_end if the coredump is running */ if (!prev || !mmget_still_valid(mm) || expand_stack(prev, addr)) return NULL; if (prev->vm_flags & VM_LOCKED) populate_vma_page_range(prev, addr, prev->vm_end, NULL); return prev; } #else int expand_stack(struct vm_area_struct *vma, unsigned long address) { return expand_downwards(vma, address); } struct vm_area_struct * find_extend_vma(struct mm_struct *mm, unsigned long addr) { struct vm_area_struct *vma; unsigned long start; addr &= PAGE_MASK; vma = find_vma(mm, addr); if (!vma) return NULL; if (vma->vm_start <= addr) return vma; if (!(vma->vm_flags & VM_GROWSDOWN)) return NULL; /* don't alter vm_start if the coredump is running */ if (!mmget_still_valid(mm)) return NULL; start = vma->vm_start; if (expand_stack(vma, addr)) return NULL; if (vma->vm_flags & VM_LOCKED) populate_vma_page_range(vma, addr, start, NULL); return vma; } #endif EXPORT_SYMBOL_GPL(find_extend_vma); /* * Ok - we have the memory areas we should free on the vma list, * so release them, and do the vma updates. * * Called with the mm semaphore held. */ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma) { unsigned long nr_accounted = 0; /* Update high watermark before we lower total_vm */ update_hiwater_vm(mm); do { long nrpages = vma_pages(vma); if (vma->vm_flags & VM_ACCOUNT) nr_accounted += nrpages; vm_stat_account(mm, vma->vm_flags, -nrpages); vma = remove_vma(vma); } while (vma); vm_unacct_memory(nr_accounted); validate_mm(mm); } /* * Get rid of page table information in the indicated region. * * Called with the mm semaphore held. */ static void unmap_region(struct mm_struct *mm, struct vm_area_struct *vma, struct vm_area_struct *prev, unsigned long start, unsigned long end) { struct vm_area_struct *next = prev ? prev->vm_next : mm->mmap; struct mmu_gather tlb; lru_add_drain(); tlb_gather_mmu(&tlb, mm, start, end); update_hiwater_rss(mm); unmap_vmas(&tlb, vma, start, end); free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS, next ? 
next->vm_start : USER_PGTABLES_CEILING); tlb_finish_mmu(&tlb, start, end); } /* * Create a list of vma's touched by the unmap, removing them from the mm's * vma list as we go.. */ static void detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma, struct vm_area_struct *prev, unsigned long end) { struct vm_area_struct **insertion_point; struct vm_area_struct *tail_vma = NULL; insertion_point = (prev ? &prev->vm_next : &mm->mmap); vma->vm_prev = NULL; do { vma_rb_erase(vma, &mm->mm_rb); mm->map_count--; tail_vma = vma; vma = vma->vm_next; } while (vma && vma->vm_start < end); *insertion_point = vma; if (vma) { vma->vm_prev = prev; vma_gap_update(vma); } else mm->highest_vm_end = prev ? vm_end_gap(prev) : 0; tail_vma->vm_next = NULL; /* Kill the cache */ vmacache_invalidate(mm); } /* * __split_vma() bypasses sysctl_max_map_count checking. We use this where it * has already been checked or doesn't make sense to fail. */ int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, int new_below) { struct vm_area_struct *new; int err; if (vma->vm_ops && vma->vm_ops->split) { err = vma->vm_ops->split(vma, addr); if (err) return err; } new = vm_area_dup(vma); if (!new) return -ENOMEM; if (new_below) new->vm_end = addr; else { new->vm_start = addr; new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT); } err = vma_dup_policy(vma, new); if (err) goto out_free_vma; err = anon_vma_clone(new, vma); if (err) goto out_free_mpol; if (new->vm_file) get_file(new->vm_file); if (new->vm_ops && new->vm_ops->open) new->vm_ops->open(new); if (new_below) err = vma_adjust(vma, addr, vma->vm_end, vma->vm_pgoff + ((addr - new->vm_start) >> PAGE_SHIFT), new); else err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new); /* Success. */ if (!err) return 0; /* Clean everything up if vma_adjust failed. */ if (new->vm_ops && new->vm_ops->close) new->vm_ops->close(new); if (new->vm_file) fput(new->vm_file); unlink_anon_vmas(new); out_free_mpol: mpol_put(vma_policy(new)); out_free_vma: vm_area_free(new); return err; } /* * Split a vma into two pieces at address 'addr', a new vma is allocated * either for the first part or the tail. */ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, int new_below) { if (mm->map_count >= sysctl_max_map_count) return -ENOMEM; return __split_vma(mm, vma, addr, new_below); } /* Munmap is split into 2 main parts -- this part which finds * what needs doing, and the areas themselves, which do the * work. This now handles partial unmappings. * Jeremy Fitzhardinge <jeremy@goop.org> */ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len, struct list_head *uf, bool downgrade) { unsigned long end; struct vm_area_struct *vma, *prev, *last; if ((offset_in_page(start)) || start > TASK_SIZE || len > TASK_SIZE-start) return -EINVAL; len = PAGE_ALIGN(len); if (len == 0) return -EINVAL; /* Find the first overlapping VMA */ vma = find_vma(mm, start); if (!vma) return 0; prev = vma->vm_prev; /* we have start < vma->vm_end */ /* if it doesn't overlap, we have nothing.. */ end = start + len; if (vma->vm_start >= end) return 0; /* * If we need to split any vma, do it now to save pain later. * * Note: mremap's move_vma VM_ACCOUNT handling assumes a partially * unmapped vm_area_struct will remain in use: so lower split_vma * places tmp vma above, and higher split_vma places tmp vma below. 
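 *
 * Worked example (illustrative): unmapping the middle of one large vma
 * first splits at 'start' (keeping the lower part in place), then splits
 * at 'end' (keeping the upper part), so only the vmas fully inside
 * [start, end) are detached and freed below.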
*/ if (start > vma->vm_start) { int error; /* * Make sure that map_count on return from munmap() will * not exceed its limit; but let map_count go just above * its limit temporarily, to help free resources as expected. */ if (end < vma->vm_end && mm->map_count >= sysctl_max_map_count) return -ENOMEM; error = __split_vma(mm, vma, start, 0); if (error) return error; prev = vma; } /* Does it split the last one? */ last = find_vma(mm, end); if (last && end > last->vm_start) { int error = __split_vma(mm, last, end, 1); if (error) return error; } vma = prev ? prev->vm_next : mm->mmap; if (unlikely(uf)) { /* * If userfaultfd_unmap_prep returns an error the vmas * will remain split, but userland will get a * highly unexpected error anyway. This is no * different than the case where the first of the two * __split_vma fails, but we don't undo the first * split, even though we could. This is an unlikely enough * failure that it's not worth optimizing for. */ int error = userfaultfd_unmap_prep(vma, start, end, uf); if (error) return error; } /* * unlock any mlock()ed ranges before detaching vmas */ if (mm->locked_vm) { struct vm_area_struct *tmp = vma; while (tmp && tmp->vm_start < end) { if (tmp->vm_flags & VM_LOCKED) { mm->locked_vm -= vma_pages(tmp); munlock_vma_pages_all(tmp); } tmp = tmp->vm_next; } } /* Detach vmas from rbtree */ detach_vmas_to_be_unmapped(mm, vma, prev, end); /* * mpx unmap needs to be called with mmap_sem held for write. * It is safe to call it before unmap_region(). */ arch_unmap(mm, vma, start, end); if (downgrade) downgrade_write(&mm->mmap_sem); unmap_region(mm, vma, prev, start, end); /* Fix up all other VM information */ remove_vma_list(mm, vma); return downgrade ? 1 : 0; } int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, struct list_head *uf) { return __do_munmap(mm, start, len, uf, false); } static int __vm_munmap(unsigned long start, size_t len, bool downgrade) { int ret; struct mm_struct *mm = current->mm; LIST_HEAD(uf); if (down_write_killable(&mm->mmap_sem)) return -EINTR; ret = __do_munmap(mm, start, len, &uf, downgrade); /* * Returning 1 indicates mmap_sem is downgraded. * But 1 is not a legal return value of vm_munmap() and munmap(), so reset * it to 0 before return. */ if (ret == 1) { up_read(&mm->mmap_sem); ret = 0; } else up_write(&mm->mmap_sem); userfaultfd_unmap_complete(mm, &uf); return ret; } int vm_munmap(unsigned long start, size_t len) { return __vm_munmap(start, len, false); } EXPORT_SYMBOL(vm_munmap); SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len) { profile_munmap(addr); return __vm_munmap(addr, len, true); } /* * Emulation of deprecated remap_file_pages() syscall. */ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size, unsigned long, prot, unsigned long, pgoff, unsigned long, flags) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma; unsigned long populate = 0; unsigned long ret = -EINVAL; struct file *file; pr_warn_once("%s (%d) uses deprecated remap_file_pages() syscall. See Documentation/vm/remap_file_pages.rst.\n", current->comm, current->pid); if (prot) return ret; start = start & PAGE_MASK; size = size & PAGE_MASK; if (start + size <= start) return ret; /* Does pgoff wrap? 
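 * (unsigned addition wraps around, so a sum that is smaller than one of
 * its operands means the combined file offset would overflow)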
*/ if (pgoff + (size >> PAGE_SHIFT) < pgoff) return ret; if (down_write_killable(&mm->mmap_sem)) return -EINTR; vma = find_vma(mm, start); if (!vma || !(vma->vm_flags & VM_SHARED)) goto out; if (start < vma->vm_start) goto out; if (start + size > vma->vm_end) { struct vm_area_struct *next; for (next = vma->vm_next; next; next = next->vm_next) { /* hole between vmas ? */ if (next->vm_start != next->vm_prev->vm_end) goto out; if (next->vm_file != vma->vm_file) goto out; if (next->vm_flags != vma->vm_flags) goto out; if (start + size <= next->vm_end) break; } if (!next) goto out; } prot |= vma->vm_flags & VM_READ ? PROT_READ : 0; prot |= vma->vm_flags & VM_WRITE ? PROT_WRITE : 0; prot |= vma->vm_flags & VM_EXEC ? PROT_EXEC : 0; flags &= MAP_NONBLOCK; flags |= MAP_SHARED | MAP_FIXED | MAP_POPULATE; if (vma->vm_flags & VM_LOCKED) { struct vm_area_struct *tmp; flags |= MAP_LOCKED; /* drop PG_Mlocked flag for over-mapped range */ for (tmp = vma; tmp->vm_start >= start + size; tmp = tmp->vm_next) { /* * Split pmd and munlock page on the border * of the range. */ vma_adjust_trans_huge(tmp, start, start + size, 0); munlock_vma_pages_range(tmp, max(tmp->vm_start, start), min(tmp->vm_end, start + size)); } } file = get_file(vma->vm_file); ret = do_mmap_pgoff(vma->vm_file, start, size, prot, flags, pgoff, &populate, NULL); fput(file); out: up_write(&mm->mmap_sem); if (populate) mm_populate(ret, populate); if (!IS_ERR_VALUE(ret)) ret = 0; return ret; } /* * this is really a simplified "do_mmap". it only handles * anonymous maps. eventually we may be able to do some * brk-specific accounting here. */ static int do_brk_flags(unsigned long addr, unsigned long len, unsigned long flags, struct list_head *uf) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma, *prev; struct rb_node **rb_link, *rb_parent; pgoff_t pgoff = addr >> PAGE_SHIFT; int error; /* Until we need other flags, refuse anything except VM_EXEC. */ if ((flags & (~VM_EXEC)) != 0) return -EINVAL; flags |= VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags; error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED); if (offset_in_page(error)) return error; error = mlock_future_check(mm, mm->def_flags, len); if (error) return error; /* * Clear old maps. this also does some error checking for us */ while (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) { if (do_munmap(mm, addr, len, uf)) return -ENOMEM; } /* Check against address space limits *after* clearing old maps... */ if (!may_expand_vm(mm, flags, len >> PAGE_SHIFT)) return -ENOMEM; if (mm->map_count > sysctl_max_map_count) return -ENOMEM; if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT)) return -ENOMEM; /* Can we just expand an old private anonymous mapping? 
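 * (brk passes NULL file, anon_vma and policy here, so merging mostly
 * comes down to the adjacent vma having matching vm_flags)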
*/ vma = vma_merge(mm, prev, addr, addr + len, flags, NULL, NULL, pgoff, NULL, NULL_VM_UFFD_CTX); if (vma) goto out; /* * create a vma struct for an anonymous mapping */ vma = vm_area_alloc(mm); if (!vma) { vm_unacct_memory(len >> PAGE_SHIFT); return -ENOMEM; } vma_set_anonymous(vma); vma->vm_start = addr; vma->vm_end = addr + len; vma->vm_pgoff = pgoff; vma->vm_flags = flags; vma->vm_page_prot = vm_get_page_prot(flags); vma_link(mm, vma, prev, rb_link, rb_parent); out: perf_event_mmap(vma); mm->total_vm += len >> PAGE_SHIFT; mm->data_vm += len >> PAGE_SHIFT; if (flags & VM_LOCKED) mm->locked_vm += (len >> PAGE_SHIFT); vma->vm_flags |= VM_SOFTDIRTY; return 0; } int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags) { struct mm_struct *mm = current->mm; unsigned long len; int ret; bool populate; LIST_HEAD(uf); len = PAGE_ALIGN(request); if (len < request) return -ENOMEM; if (!len) return 0; if (down_write_killable(&mm->mmap_sem)) return -EINTR; ret = do_brk_flags(addr, len, flags, &uf); populate = ((mm->def_flags & VM_LOCKED) != 0); up_write(&mm->mmap_sem); userfaultfd_unmap_complete(mm, &uf); if (populate && !ret) mm_populate(addr, len); return ret; } EXPORT_SYMBOL(vm_brk_flags); int vm_brk(unsigned long addr, unsigned long len) { return vm_brk_flags(addr, len, 0); } EXPORT_SYMBOL(vm_brk); /* Release all mmaps. */ void exit_mmap(struct mm_struct *mm) { struct mmu_gather tlb; struct vm_area_struct *vma; unsigned long nr_accounted = 0; /* mm's last user has gone, and its about to be pulled down */ mmu_notifier_release(mm); if (unlikely(mm_is_oom_victim(mm))) { /* * Manually reap the mm to free as much memory as possible. * Then, as the oom reaper does, set MMF_OOM_SKIP to disregard * this mm from further consideration. Taking mm->mmap_sem for * write after setting MMF_OOM_SKIP will guarantee that the oom * reaper will not run on this mm again after mmap_sem is * dropped. * * Nothing can be holding mm->mmap_sem here and the above call * to mmu_notifier_release(mm) ensures mmu notifier callbacks in * __oom_reap_task_mm() will not block. * * This needs to be done before calling munlock_vma_pages_all(), * which clears VM_LOCKED, otherwise the oom reaper cannot * reliably test it. */ (void)__oom_reap_task_mm(mm); set_bit(MMF_OOM_SKIP, &mm->flags); down_write(&mm->mmap_sem); up_write(&mm->mmap_sem); } if (mm->locked_vm) { vma = mm->mmap; while (vma) { if (vma->vm_flags & VM_LOCKED) munlock_vma_pages_all(vma); vma = vma->vm_next; } } arch_exit_mmap(mm); vma = mm->mmap; if (!vma) /* Can happen if dup_mmap() received an OOM */ return; lru_add_drain(); flush_cache_mm(mm); tlb_gather_mmu(&tlb, mm, 0, -1); /* update_hiwater_rss(mm) here? but nobody should be looking */ /* Use -1 here to ensure all VMAs in the mm are unmapped */ unmap_vmas(&tlb, vma, 0, -1); free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING); tlb_finish_mmu(&tlb, 0, -1); /* * Walk the list again, actually closing and freeing it, * with preemption enabled, without holding any MM locks. */ while (vma) { if (vma->vm_flags & VM_ACCOUNT) nr_accounted += vma_pages(vma); vma = remove_vma(vma); } vm_unacct_memory(nr_accounted); } /* Insert vm structure into process list sorted by address * and into the inode's i_mmap tree. If vm_file is non-NULL * then i_mmap_rwsem is taken here. 
*/ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma) { struct vm_area_struct *prev; struct rb_node **rb_link, *rb_parent; if (find_vma_links(mm, vma->vm_start, vma->vm_end, &prev, &rb_link, &rb_parent)) return -ENOMEM; if ((vma->vm_flags & VM_ACCOUNT) && security_vm_enough_memory_mm(mm, vma_pages(vma))) return -ENOMEM; /* * The vm_pgoff of a purely anonymous vma should be irrelevant * until its first write fault, when page's anon_vma and index * are set. But now set the vm_pgoff it will almost certainly * end up with (unless mremap moves it elsewhere before that * first wfault), so /proc/pid/maps tells a consistent story. * * By setting it to reflect the virtual start address of the * vma, merges and splits can happen in a seamless way, just * using the existing file pgoff checks and manipulations. * Similarly in do_mmap_pgoff and in do_brk. */ if (vma_is_anonymous(vma)) { BUG_ON(vma->anon_vma); vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT; } vma_link(mm, vma, prev, rb_link, rb_parent); return 0; } /* * Copy the vma structure to a new location in the same mm, * prior to moving page table entries, to effect an mremap move. */ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, unsigned long addr, unsigned long len, pgoff_t pgoff, bool *need_rmap_locks) { struct vm_area_struct *vma = *vmap; unsigned long vma_start = vma->vm_start; struct mm_struct *mm = vma->vm_mm; struct vm_area_struct *new_vma, *prev; struct rb_node **rb_link, *rb_parent; bool faulted_in_anon_vma = true; /* * If anonymous vma has not yet been faulted, update new pgoff * to match new location, to increase its chance of merging. */ if (unlikely(vma_is_anonymous(vma) && !vma->anon_vma)) { pgoff = addr >> PAGE_SHIFT; faulted_in_anon_vma = false; } if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) return NULL; /* should never get here */ new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags, vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma), vma->vm_userfaultfd_ctx); if (new_vma) { /* * Source vma may have been merged into new_vma */ if (unlikely(vma_start >= new_vma->vm_start && vma_start < new_vma->vm_end)) { /* * The only way we can get a vma_merge with * self during an mremap is if the vma hasn't * been faulted in yet and we were allowed to * reset the dst vma->vm_pgoff to the * destination address of the mremap to allow * the merge to happen. mremap must change the * vm_pgoff linearity between src and dst vmas * (in turn preventing a vma_merge) to be * safe. It is only safe to keep the vm_pgoff * linear if there are no pages mapped yet. 
*/ VM_BUG_ON_VMA(faulted_in_anon_vma, new_vma); *vmap = vma = new_vma; } *need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff); } else { new_vma = vm_area_dup(vma); if (!new_vma) goto out; new_vma->vm_start = addr; new_vma->vm_end = addr + len; new_vma->vm_pgoff = pgoff; if (vma_dup_policy(vma, new_vma)) goto out_free_vma; if (anon_vma_clone(new_vma, vma)) goto out_free_mempol; if (new_vma->vm_file) get_file(new_vma->vm_file); if (new_vma->vm_ops && new_vma->vm_ops->open) new_vma->vm_ops->open(new_vma); vma_link(mm, new_vma, prev, rb_link, rb_parent); *need_rmap_locks = false; } return new_vma; out_free_mempol: mpol_put(vma_policy(new_vma)); out_free_vma: vm_area_free(new_vma); out: return NULL; } /* * Return true if the calling process may expand its vm space by the passed * number of pages */ bool may_expand_vm(struct mm_struct *mm, vm_flags_t flags, unsigned long npages) { if (mm->total_vm + npages > rlimit(RLIMIT_AS) >> PAGE_SHIFT) return false; if (is_data_mapping(flags) && mm->data_vm + npages > rlimit(RLIMIT_DATA) >> PAGE_SHIFT) { /* Workaround for Valgrind */ if (rlimit(RLIMIT_DATA) == 0 && mm->data_vm + npages <= rlimit_max(RLIMIT_DATA) >> PAGE_SHIFT) return true; pr_warn_once("%s (%d): VmData %lu exceed data ulimit %lu. Update limits%s.\n", current->comm, current->pid, (mm->data_vm + npages) << PAGE_SHIFT, rlimit(RLIMIT_DATA), ignore_rlimit_data ? "" : " or use boot option ignore_rlimit_data"); if (!ignore_rlimit_data) return false; } return true; } void vm_stat_account(struct mm_struct *mm, vm_flags_t flags, long npages) { mm->total_vm += npages; if (is_exec_mapping(flags)) mm->exec_vm += npages; else if (is_stack_mapping(flags)) mm->stack_vm += npages; else if (is_data_mapping(flags)) mm->data_vm += npages; } static vm_fault_t special_mapping_fault(struct vm_fault *vmf); /* * Having a close hook prevents vma merging regardless of flags. 
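 * (see the vm_ops->close check in is_mergeable_vma() above)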
*/ static void special_mapping_close(struct vm_area_struct *vma) { } static const char *special_mapping_name(struct vm_area_struct *vma) { return ((struct vm_special_mapping *)vma->vm_private_data)->name; } static int special_mapping_mremap(struct vm_area_struct *new_vma) { struct vm_special_mapping *sm = new_vma->vm_private_data; if (WARN_ON_ONCE(current->mm != new_vma->vm_mm)) return -EFAULT; if (sm->mremap) return sm->mremap(sm, new_vma); return 0; } static const struct vm_operations_struct special_mapping_vmops = { .close = special_mapping_close, .fault = special_mapping_fault, .mremap = special_mapping_mremap, .name = special_mapping_name, }; static const struct vm_operations_struct legacy_special_mapping_vmops = { .close = special_mapping_close, .fault = special_mapping_fault, }; static vm_fault_t special_mapping_fault(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; pgoff_t pgoff; struct page **pages; if (vma->vm_ops == &legacy_special_mapping_vmops) { pages = vma->vm_private_data; } else { struct vm_special_mapping *sm = vma->vm_private_data; if (sm->fault) return sm->fault(sm, vmf->vma, vmf); pages = sm->pages; } for (pgoff = vmf->pgoff; pgoff && *pages; ++pages) pgoff--; if (*pages) { struct page *page = *pages; get_page(page); vmf->page = page; return 0; } return VM_FAULT_SIGBUS; } static struct vm_area_struct *__install_special_mapping( struct mm_struct *mm, unsigned long addr, unsigned long len, unsigned long vm_flags, void *priv, const struct vm_operations_struct *ops) { int ret; struct vm_area_struct *vma; vma = vm_area_alloc(mm); if (unlikely(vma == NULL)) return ERR_PTR(-ENOMEM); vma->vm_start = addr; vma->vm_end = addr + len; vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND | VM_SOFTDIRTY; vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); vma->vm_ops = ops; vma->vm_private_data = priv; ret = insert_vm_struct(mm, vma); if (ret) goto out; vm_stat_account(mm, vma->vm_flags, len >> PAGE_SHIFT); perf_event_mmap(vma); return vma; out: vm_area_free(vma); return ERR_PTR(ret); } bool vma_is_special_mapping(const struct vm_area_struct *vma, const struct vm_special_mapping *sm) { return vma->vm_private_data == sm && (vma->vm_ops == &special_mapping_vmops || vma->vm_ops == &legacy_special_mapping_vmops); } /* * Called with mm->mmap_sem held for writing. * Insert a new vma covering the given region, with the given flags. * Its pages are supplied by the given array of struct page *. * The array can be shorter than len >> PAGE_SHIFT if it's null-terminated. * The region past the last page supplied will always produce SIGBUS. * The array pointer and the pages it points to are assumed to stay alive * for as long as this mapping might exist. 
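 *
 * Sketch of a typical caller (illustrative only; the names below are
 * assumptions, not code from this file) -- e.g. architecture code
 * installing a vDSO-like mapping:
 *
 *	static struct page *my_pages[] = { ..., NULL };
 *	static const struct vm_special_mapping my_mapping = {
 *		.name	= "[my_special]",
 *		.pages	= my_pages,
 *	};
 *
 *	vma = _install_special_mapping(mm, addr, len,
 *				VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC,
 *				&my_mapping);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);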
*/ struct vm_area_struct *_install_special_mapping( struct mm_struct *mm, unsigned long addr, unsigned long len, unsigned long vm_flags, const struct vm_special_mapping *spec) { return __install_special_mapping(mm, addr, len, vm_flags, (void *)spec, &special_mapping_vmops); } int install_special_mapping(struct mm_struct *mm, unsigned long addr, unsigned long len, unsigned long vm_flags, struct page **pages) { struct vm_area_struct *vma = __install_special_mapping( mm, addr, len, vm_flags, (void *)pages, &legacy_special_mapping_vmops); return PTR_ERR_OR_ZERO(vma); } static DEFINE_MUTEX(mm_all_locks_mutex); static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma) { if (!test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) { /* * The LSB of head.next can't change from under us * because we hold the mm_all_locks_mutex. */ down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_sem); /* * We can safely modify head.next after taking the * anon_vma->root->rwsem. If some other vma in this mm shares * the same anon_vma we won't take it again. * * No need of atomic instructions here, head.next * can't change from under us thanks to the * anon_vma->root->rwsem. */ if (__test_and_set_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) BUG(); } } static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping) { if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) { /* * AS_MM_ALL_LOCKS can't change from under us because * we hold the mm_all_locks_mutex. * * Operations on ->flags have to be atomic because * even if AS_MM_ALL_LOCKS is stable thanks to the * mm_all_locks_mutex, there may be other cpus * changing other bitflags in parallel to us. */ if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags)) BUG(); down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_sem); } } /* * This operation locks against the VM for all pte/vma/mm related * operations that could ever happen on a certain mm. This includes * vmtruncate, try_to_unmap, and all page faults. * * The caller must take the mmap_sem in write mode before calling * mm_take_all_locks(). The caller isn't allowed to release the * mmap_sem until mm_drop_all_locks() returns. * * mmap_sem in write mode is required in order to block all operations * that could modify pagetables and free pages without need of * altering the vma layout. It's also needed in write mode to prevent new * anon_vmas from being associated with existing vmas. * * A single task can't take more than one mm_take_all_locks() in a row * or it would deadlock. * * The LSB in anon_vma->rb_root.rb_node and the AS_MM_ALL_LOCKS bitflag in * mapping->flags avoid taking the same lock twice, if more than one * vma in this mm is backed by the same anon_vma or address_space. * * We take locks in the following order, according to the comment at the * beginning of mm/rmap.c: * - all hugetlbfs_i_mmap_rwsem_key locks (aka mapping->i_mmap_rwsem for * hugetlb mapping); * - all i_mmap_rwsem locks; * - all anon_vma->rwsem locks. * * We can take all locks within these types randomly because the VM code * doesn't nest them and we are protected from parallel mm_take_all_locks() by * mm_all_locks_mutex. * * mm_take_all_locks() and mm_drop_all_locks() are expensive operations * that may have to take thousands of locks. * * mm_take_all_locks() can fail if it's interrupted by signals. 
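 *
 * Expected calling pattern (a sketch based on the rules above, not code
 * taken from this file):
 *
 *	down_write(&mm->mmap_sem);
 *	ret = mm_take_all_locks(mm);
 *	if (!ret) {
 *		... operate with every rmap/fault path excluded ...
 *		mm_drop_all_locks(mm);
 *	}
 *	up_write(&mm->mmap_sem);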
*/ int mm_take_all_locks(struct mm_struct *mm) { struct vm_area_struct *vma; struct anon_vma_chain *avc; BUG_ON(down_read_trylock(&mm->mmap_sem)); mutex_lock(&mm_all_locks_mutex); for (vma = mm->mmap; vma; vma = vma->vm_next) { if (signal_pending(current)) goto out_unlock; if (vma->vm_file && vma->vm_file->f_mapping && is_vm_hugetlb_page(vma)) vm_lock_mapping(mm, vma->vm_file->f_mapping); } for (vma = mm->mmap; vma; vma = vma->vm_next) { if (signal_pending(current)) goto out_unlock; if (vma->vm_file && vma->vm_file->f_mapping && !is_vm_hugetlb_page(vma)) vm_lock_mapping(mm, vma->vm_file->f_mapping); } for (vma = mm->mmap; vma; vma = vma->vm_next) { if (signal_pending(current)) goto out_unlock; if (vma->anon_vma) list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) vm_lock_anon_vma(mm, avc->anon_vma); } return 0; out_unlock: mm_drop_all_locks(mm); return -EINTR; } static void vm_unlock_anon_vma(struct anon_vma *anon_vma) { if (test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) { /* * The LSB of head.next can't change to 0 from under * us because we hold the mm_all_locks_mutex. * * We must however clear the bitflag before unlocking * the vma so the users using the anon_vma->rb_root will * never see our bitflag. * * No need of atomic instructions here, head.next * can't change from under us until we release the * anon_vma->root->rwsem. */ if (!__test_and_clear_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) BUG(); anon_vma_unlock_write(anon_vma); } } static void vm_unlock_mapping(struct address_space *mapping) { if (test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) { /* * AS_MM_ALL_LOCKS can't change to 0 from under us * because we hold the mm_all_locks_mutex. */ i_mmap_unlock_write(mapping); if (!test_and_clear_bit(AS_MM_ALL_LOCKS, &mapping->flags)) BUG(); } } /* * The mmap_sem cannot be released by the caller until * mm_drop_all_locks() returns. */ void mm_drop_all_locks(struct mm_struct *mm) { struct vm_area_struct *vma; struct anon_vma_chain *avc; BUG_ON(down_read_trylock(&mm->mmap_sem)); BUG_ON(!mutex_is_locked(&mm_all_locks_mutex)); for (vma = mm->mmap; vma; vma = vma->vm_next) { if (vma->anon_vma) list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) vm_unlock_anon_vma(avc->anon_vma); if (vma->vm_file && vma->vm_file->f_mapping) vm_unlock_mapping(vma->vm_file->f_mapping); } mutex_unlock(&mm_all_locks_mutex); } /* * initialise the percpu counter for VM */ void __init mmap_init(void) { int ret; ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL); VM_BUG_ON(ret); } /* * Initialise sysctl_user_reserve_kbytes. * * This is intended to prevent a user from starting a single memory hogging * process, such that they cannot recover (kill the hog) in OVERCOMMIT_NEVER * mode. * * The default value is min(3% of free memory, 128MB) * 128MB is enough to recover with sshd/login, bash, and top/kill. */ static int init_user_reserve(void) { unsigned long free_kbytes; free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10); sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17); return 0; } subsys_initcall(init_user_reserve); /* * Initialise sysctl_admin_reserve_kbytes. * * The purpose of sysctl_admin_reserve_kbytes is to allow the sys admin * to log in and kill a memory hogging process. * * Systems with more than 256MB will reserve 8MB, enough to recover * with sshd, bash, and top in OVERCOMMIT_GUESS. Smaller systems will * only reserve 3% of free pages by default. 
*/ static int init_admin_reserve(void) { unsigned long free_kbytes; free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10); sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13); return 0; } subsys_initcall(init_admin_reserve); /* * Reinititalise user and admin reserves if memory is added or removed. * * The default user reserve max is 128MB, and the default max for the * admin reserve is 8MB. These are usually, but not always, enough to * enable recovery from a memory hogging process using login/sshd, a shell, * and tools like top. It may make sense to increase or even disable the * reserve depending on the existence of swap or variations in the recovery * tools. So, the admin may have changed them. * * If memory is added and the reserves have been eliminated or increased above * the default max, then we'll trust the admin. * * If memory is removed and there isn't enough free memory, then we * need to reset the reserves. * * Otherwise keep the reserve set by the admin. */ static int reserve_mem_notifier(struct notifier_block *nb, unsigned long action, void *data) { unsigned long tmp, free_kbytes; switch (action) { case MEM_ONLINE: /* Default max is 128MB. Leave alone if modified by operator. */ tmp = sysctl_user_reserve_kbytes; if (0 < tmp && tmp < (1UL << 17)) init_user_reserve(); /* Default max is 8MB. Leave alone if modified by operator. */ tmp = sysctl_admin_reserve_kbytes; if (0 < tmp && tmp < (1UL << 13)) init_admin_reserve(); break; case MEM_OFFLINE: free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10); if (sysctl_user_reserve_kbytes > free_kbytes) { init_user_reserve(); pr_info("vm.user_reserve_kbytes reset to %lu\n", sysctl_user_reserve_kbytes); } if (sysctl_admin_reserve_kbytes > free_kbytes) { init_admin_reserve(); pr_info("vm.admin_reserve_kbytes reset to %lu\n", sysctl_admin_reserve_kbytes); } break; default: break; } return NOTIFY_OK; } static struct notifier_block reserve_mem_nb = { .notifier_call = reserve_mem_notifier, }; static int __meminit init_reserve_notifier(void) { if (register_hotmemory_notifier(&reserve_mem_nb)) pr_err("Failed registering memory add/remove notifier for admin reserve\n"); return 0; } subsys_initcall(init_reserve_notifier);
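/*
 * Editorial note, not part of the original mm/mmap.c content above: a minimal
 * sketch of how a caller (for example an arch's vDSO setup code) might use the
 * _install_special_mapping() and vma_is_special_mapping() helpers defined
 * earlier in this file. The names example_*, the flags, the address and the
 * single-page layout are illustrative assumptions. The caller is assumed to
 * hold mm->mmap_sem for writing (as the comment above install_special_mapping
 * requires) and to have filled example_pages[0] with a real page, leaving the
 * last slot NULL as the terminator described for the legacy page-array form.
 */
#include <linux/err.h>
#include <linux/mm.h>

static struct page *example_pages[2];   /* [0] = backing page, [1] = NULL terminator */

static const struct vm_special_mapping example_mapping = {
    .name   = "[example]",      /* reported by the ->name op, e.g. in /proc/<pid>/maps */
    .pages  = example_pages,    /* special_mapping_fault() serves faults from this array */
};

static int example_map(struct mm_struct *mm, unsigned long addr)
{
    struct vm_area_struct *vma;

    vma = _install_special_mapping(mm, addr, PAGE_SIZE,
                                   VM_READ | VM_MAYREAD,
                                   &example_mapping);
    if (IS_ERR(vma))
        return PTR_ERR(vma);

    /* Later code can recognise this VMA without guessing from its name. */
    if (!vma_is_special_mapping(vma, &example_mapping))
        return -EINVAL;

    return 0;
}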
./CrossVul/dataset_final_sorted/CWE-362/c/good_829_4
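The record above also defines the mm_take_all_locks()/mm_drop_all_locks() pair. The sketch below only restates the calling protocol spelled out in their comments: mmap_sem must be held for writing across the whole sequence, mm_take_all_locks() returns -EINTR if a signal arrives while it is acquiring locks (and has already dropped whatever it took), and mm_drop_all_locks() must run before mmap_sem is released. The function name example_freeze_mm and its surrounding context are assumptions made for the example, not code from the record.

#include <linux/mm.h>
#include <linux/rwsem.h>

static int example_freeze_mm(struct mm_struct *mm)
{
    int ret;

    /* mmap_sem must be held for writing before mm_take_all_locks(). */
    down_write(&mm->mmap_sem);

    ret = mm_take_all_locks(mm);
    if (ret)        /* -EINTR: interrupted by a signal; locks already dropped */
        goto out;

    /*
     * Every i_mmap_rwsem and anon_vma rwsem of this mm is now held, so no
     * page fault or VMA layout change can run concurrently with this block.
     */

    mm_drop_all_locks(mm);
out:
    up_write(&mm->mmap_sem);
    return ret;
}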
crossvul-cpp_data_good_5518_1
#define _GNU_SOURCE #include <endian.h> #include <errno.h> #include <fcntl.h> #include <grp.h> #include <sched.h> #include <setjmp.h> #include <signal.h> #include <stdarg.h> #include <stdbool.h> #include <stdint.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <unistd.h> #include <sys/ioctl.h> #include <sys/prctl.h> #include <sys/socket.h> #include <sys/types.h> #include <linux/limits.h> #include <linux/netlink.h> #include <linux/types.h> #define SYNC_VAL 0x42 #define JUMP_VAL 0x43 /* Assume the stack grows down, so arguments should be above it. */ struct clone_arg { /* * Reserve some space for clone() to locate arguments * and retcode in this place */ char stack[4096] __attribute__ ((aligned(16))); char stack_ptr[0]; jmp_buf *env; }; struct nlconfig_t { char *data; uint32_t cloneflags; char *uidmap; int uidmap_len; char *gidmap; int gidmap_len; uint8_t is_setgroup; int consolefd; }; /* * List of netlink message types sent to us as part of bootstrapping the init. * These constants are defined in libcontainer/message_linux.go. */ #define INIT_MSG 62000 #define CLONE_FLAGS_ATTR 27281 #define CONSOLE_PATH_ATTR 27282 #define NS_PATHS_ATTR 27283 #define UIDMAP_ATTR 27284 #define GIDMAP_ATTR 27285 #define SETGROUP_ATTR 27286 /* * Use the raw syscall for versions of glibc which don't include a function for * it, namely (glibc 2.12). */ #if __GLIBC__ == 2 && __GLIBC_MINOR__ < 14 # define _GNU_SOURCE # include "syscall.h" # if !defined(SYS_setns) && defined(__NR_setns) # define SYS_setns __NR_setns # endif #ifndef SYS_setns # error "setns(2) syscall not supported by glibc version" #endif int setns(int fd, int nstype) { return syscall(SYS_setns, fd, nstype); } #endif /* TODO(cyphar): Fix this so it correctly deals with syncT. */ #define bail(fmt, ...) \ do { \ fprintf(stderr, "nsenter: " fmt ": %m\n", ##__VA_ARGS__); \ exit(__COUNTER__ + 1); \ } while(0) static int child_func(void *arg) { struct clone_arg *ca = (struct clone_arg *)arg; longjmp(*ca->env, JUMP_VAL); } static int clone_parent(jmp_buf *env, int flags) __attribute__ ((noinline)); static int clone_parent(jmp_buf *env, int flags) { int child; struct clone_arg ca = { .env = env, }; child = clone(child_func, ca.stack_ptr, CLONE_PARENT | SIGCHLD | flags, &ca); /* * On old kernels, CLONE_PARENT didn't work with CLONE_NEWPID, so we have * to unshare(2) before clone(2) in order to do this. This was fixed in * upstream commit 1f7f4dde5c945f41a7abc2285be43d918029ecc5, and was * introduced by 40a0d32d1eaffe6aac7324ca92604b6b3977eb0e. * * As far as we're aware, the last mainline kernel which had this bug was * Linux 3.12. However, we cannot comment on which kernels the broken patch * was backported to. */ if (errno == EINVAL) { if (unshare(flags) < 0) bail("unable to unshare namespaces"); child = clone(child_func, ca.stack_ptr, SIGCHLD | CLONE_PARENT, &ca); } return child; } /* * Gets the init pipe fd from the environment, which is used to read the * bootstrap data and tell the parent what the new pid is after we finish * setting up the environment. 
*/ static int initpipe(void) { int pipenum; char *initpipe, *endptr; initpipe = getenv("_LIBCONTAINER_INITPIPE"); if (initpipe == NULL || *initpipe == '\0') return -1; errno = 0; pipenum = strtol(initpipe, &endptr, 10); if (errno != 0 || *endptr != '\0') bail("unable to parse _LIBCONTAINER_INITPIPE"); return pipenum; } static uint32_t readint32(char *buf) { return *(uint32_t *) buf; } static uint8_t readint8(char *buf) { return *(uint8_t *) buf; } static int write_file(char *data, size_t data_len, char *pathfmt, ...) { int fd, len, ret = 0; char path[PATH_MAX]; va_list ap; va_start(ap, pathfmt); len = vsnprintf(path, PATH_MAX, pathfmt, ap); va_end(ap); if (len < 0) return -1; fd = open(path, O_RDWR); if (fd < 0) { ret = -1; goto out; } len = write(fd, data, data_len); if (len != data_len) { ret = -1; goto out; } out: close(fd); return ret; } #define SETGROUPS_ALLOW "allow" #define SETGROUPS_DENY "deny" /* This *must* be called before we touch gid_map. */ static void update_setgroups(int pid, bool setgroup) { char *policy; if (setgroup) policy = SETGROUPS_ALLOW; else policy = SETGROUPS_DENY; if (write_file(policy, strlen(policy), "/proc/%d/setgroups", pid) < 0) { /* * If the kernel is too old to support /proc/pid/setgroups, * open(2) or write(2) will return ENOENT. This is fine. */ if (errno != ENOENT) bail("failed to write '%s' to /proc/%d/setgroups", policy, pid); } } static void update_uidmap(int pid, char *map, int map_len) { if (map == NULL || map_len <= 0) return; if (write_file(map, map_len, "/proc/%d/uid_map", pid) < 0) bail("failed to update /proc/%d/uid_map", pid); } static void update_gidmap(int pid, char *map, int map_len) { if (map == NULL || map_len <= 0) return; if (write_file(map, map_len, "/proc/%d/gid_map", pid) < 0) bail("failed to update /proc/%d/gid_map", pid); } #define JSON_MAX 4096 static void start_child(int pipenum, jmp_buf *env, int syncpipe[2], struct nlconfig_t *config) { int len, childpid; char buf[JSON_MAX]; uint8_t syncval; /* * We must fork to actually enter the PID namespace, and use * CLONE_PARENT so that the child init can have the right parent * (the bootstrap process). Also so we don't need to forward the * child's exit code or resend its death signal. */ childpid = clone_parent(env, config->cloneflags); if (childpid < 0) bail("unable to fork"); /* Update setgroups, uid_map and gid_map for the process if provided. */ if (config->is_setgroup) update_setgroups(childpid, true); update_uidmap(childpid, config->uidmap, config->uidmap_len); update_gidmap(childpid, config->gidmap, config->gidmap_len); /* Send the sync signal to the child. */ close(syncpipe[0]); syncval = SYNC_VAL; if (write(syncpipe[1], &syncval, sizeof(syncval)) != sizeof(syncval)) bail("failed to write sync byte to child"); /* Send the child pid back to our parent */ len = snprintf(buf, JSON_MAX, "{\"pid\": %d}\n", childpid); if (len < 0 || write(pipenum, buf, len) != len) { kill(childpid, SIGKILL); bail("unable to send a child pid"); } exit(0); } static void nl_parse(int fd, struct nlconfig_t *config) { size_t len, size; struct nlmsghdr hdr; char *data, *current; /* Retrieve the netlink header. */ len = read(fd, &hdr, NLMSG_HDRLEN); if (len != NLMSG_HDRLEN) bail("invalid netlink header length %lu", len); if (hdr.nlmsg_type == NLMSG_ERROR) bail("failed to read netlink message"); if (hdr.nlmsg_type != INIT_MSG) bail("unexpected msg type %d", hdr.nlmsg_type); /* Retrieve data. 
*/ size = NLMSG_PAYLOAD(&hdr, 0); current = data = malloc(size); if (!data) bail("failed to allocate %zu bytes of memory for nl_payload", size); len = read(fd, data, size); if (len != size) bail("failed to read netlink payload, %lu != %lu", len, size); /* Parse the netlink payload. */ config->data = data; config->consolefd = -1; while (current < data + size) { struct nlattr *nlattr = (struct nlattr *)current; size_t payload_len = nlattr->nla_len - NLA_HDRLEN; /* Advance to payload. */ current += NLA_HDRLEN; /* Handle payload. */ switch (nlattr->nla_type) { case CLONE_FLAGS_ATTR: config->cloneflags = readint32(current); break; case CONSOLE_PATH_ATTR: /* * The context in which this is done (before or after we * join the other namespaces) will affect how the path * resolution of the console works. This order is not * decided here, but rather in container_linux.go. We just * follow the order given by the netlink message. */ config->consolefd = open(current, O_RDWR); if (config->consolefd < 0) bail("failed to open console %s", current); break; case NS_PATHS_ATTR:{ /* * Open each namespace path and setns it in the * order provided to us. We currently don't have * any context for what kind of namespace we're * joining, so just blindly do it. */ char *saveptr = NULL; char *ns = strtok_r(current, ",", &saveptr); int *fds = NULL, num = 0, i; char **paths = NULL; if (!ns || !strlen(current)) bail("ns paths are empty"); /* * We have to open the file descriptors first, since after * we join the mnt namespace we might no longer be able to * access the paths. */ do { int fd; /* Resize fds. */ num++; fds = realloc(fds, num * sizeof(int)); paths = realloc(paths, num * sizeof(char *)); fd = open(ns, O_RDONLY); if (fd < 0) bail("failed to open %s", ns); fds[num - 1] = fd; paths[num - 1] = ns; } while ((ns = strtok_r(NULL, ",", &saveptr)) != NULL); for (i = 0; i < num; i++) { int fd = fds[i]; char *path = paths[i]; if (setns(fd, 0) < 0) bail("failed to setns to %s", path); close(fd); } free(fds); free(paths); break; } case UIDMAP_ATTR: config->uidmap = current; config->uidmap_len = payload_len; break; case GIDMAP_ATTR: config->gidmap = current; config->gidmap_len = payload_len; break; case SETGROUP_ATTR: config->is_setgroup = readint8(current); break; default: bail("unknown netlink message type %d", nlattr->nla_type); } current += NLA_ALIGN(payload_len); } } void nl_free(struct nlconfig_t *config) { free(config->data); } void nsexec(void) { int pipenum; jmp_buf env; int syncpipe[2]; struct nlconfig_t config = {0}; /* * If we don't have an init pipe, just return to the go routine. * We'll only get an init pipe for start or exec. */ pipenum = initpipe(); if (pipenum == -1) return; /* make the process non-dumpable */ if (prctl(PR_SET_DUMPABLE, 0, 0, 0, 0) != 0) { bail("failed to set process as non-dumpable"); } /* Parse all of the netlink configuration. */ nl_parse(pipenum, &config); /* clone(2) flags are mandatory. */ if (config.cloneflags == -1) bail("missing clone_flags"); /* Pipe so we can tell the child when we've finished setting up. */ if (pipe(syncpipe) < 0) bail("failed to setup sync pipe between parent and child"); /* Set up the jump point. */ if (setjmp(env) == JUMP_VAL) { /* * We're inside the child now, having jumped from the * start_child() code after forking in the parent. */ uint8_t s = 0; int consolefd = config.consolefd; /* Close the writing side of pipe. */ close(syncpipe[1]); /* Sync with parent. 
*/ if (read(syncpipe[0], &s, sizeof(s)) != sizeof(s) || s != SYNC_VAL) bail("failed to read sync byte from parent"); if (setsid() < 0) bail("setsid failed"); if (setuid(0) < 0) bail("setuid failed"); if (setgid(0) < 0) bail("setgid failed"); if (setgroups(0, NULL) < 0) bail("setgroups failed"); if (consolefd != -1) { if (ioctl(consolefd, TIOCSCTTY, 0) < 0) bail("ioctl TIOCSCTTY failed"); if (dup3(consolefd, STDIN_FILENO, 0) != STDIN_FILENO) bail("failed to dup stdin"); if (dup3(consolefd, STDOUT_FILENO, 0) != STDOUT_FILENO) bail("failed to dup stdout"); if (dup3(consolefd, STDERR_FILENO, 0) != STDERR_FILENO) bail("failed to dup stderr"); } /* Free netlink data. */ nl_free(&config); /* Finish executing, let the Go runtime take over. */ return; } /* Run the parent code. */ start_child(pipenum, &env, syncpipe, &config); /* Should never be reached. */ bail("should never be reached"); }
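/*
 * Editorial note, not part of the original nsexec.c above: a minimal sketch of
 * the id-map strings that update_uidmap()/update_gidmap() write verbatim to
 * /proc/<pid>/uid_map and /proc/<pid>/gid_map. Each line is assumed to follow
 * the usual "<id inside the new user namespace> <id outside> <count>" format
 * of those proc files. The helper name example_map_root and the choice of
 * host id 1000 are illustrative; in the real flow the map text arrives via
 * the UIDMAP_ATTR/GIDMAP_ATTR netlink attributes parsed in nl_parse() and the
 * setgroups policy comes from SETGROUP_ATTR.
 */
static void example_map_root(int childpid)
{
    /* Map uid/gid 0 inside the namespace to 1000 on the host, one id wide. */
    static char idmap[] = "0 1000 1\n";

    /* The setgroups policy must be settled before gid_map is written. */
    update_setgroups(childpid, false);
    update_uidmap(childpid, idmap, sizeof(idmap) - 1);
    update_gidmap(childpid, idmap, sizeof(idmap) - 1);
}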
./CrossVul/dataset_final_sorted/CWE-362/c/good_5518_1
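For context on the record above: the bootstrap message consumed by nl_parse() is produced by the Go side of runc (libcontainer/message_linux.go, per the comment in the file). The C sketch below is only an illustration of the wire format that parser expects, a plain nlmsghdr of type INIT_MSG followed by packed nlattr entries read from the init pipe; the helper names put_attr and send_bootstrap, the buffer size, and the choice to send only CLONE_FLAGS_ATTR are assumptions made for the example.

#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <linux/netlink.h>

#define INIT_MSG         62000  /* must match the constants in nsexec.c */
#define CLONE_FLAGS_ATTR 27281

/* Append one attribute (header + payload, padded to NLA_ALIGN) to buf.
 * No bounds checking: the example payload is tiny. */
static void put_attr(char *buf, size_t *off, uint16_t type,
                     const void *payload, uint16_t payload_len)
{
    struct nlattr nla = {
        .nla_len  = NLA_HDRLEN + payload_len,
        .nla_type = type,
    };

    memcpy(buf + *off, &nla, sizeof(nla));
    memcpy(buf + *off + NLA_HDRLEN, payload, payload_len);
    *off += NLA_HDRLEN + NLA_ALIGN(payload_len);
}

/* Send a bootstrap message that carries only the clone flags. */
static int send_bootstrap(int fd, uint32_t cloneflags)
{
    char buf[64];
    size_t off = 0;
    struct nlmsghdr hdr;

    put_attr(buf, &off, CLONE_FLAGS_ATTR, &cloneflags, sizeof(cloneflags));

    memset(&hdr, 0, sizeof(hdr));
    hdr.nlmsg_type = INIT_MSG;
    hdr.nlmsg_len  = NLMSG_LENGTH(off);  /* header length + payload length */

    /* nl_parse() reads NLMSG_HDRLEN bytes, then NLMSG_PAYLOAD(&hdr, 0) bytes. */
    if (write(fd, &hdr, NLMSG_HDRLEN) != NLMSG_HDRLEN)
        return -1;
    if (write(fd, buf, off) != (ssize_t)off)
        return -1;
    return 0;
}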
crossvul-cpp_data_good_5187_0
/* auditsc.c -- System-call auditing support * Handles all system-call specific auditing features. * * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina. * Copyright 2005 Hewlett-Packard Development Company, L.P. * Copyright (C) 2005, 2006 IBM Corporation * All Rights Reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Written by Rickard E. (Rik) Faith <faith@redhat.com> * * Many of the ideas implemented here are from Stephen C. Tweedie, * especially the idea of avoiding a copy by using getname. * * The method for actual interception of syscall entry and exit (not in * this file -- see entry.S) is based on a GPL'd patch written by * okir@suse.de and Copyright 2003 SuSE Linux AG. * * POSIX message queue support added by George Wilson <ltcgcw@us.ibm.com>, * 2006. * * The support of additional filter rules compares (>, <, >=, <=) was * added by Dustin Kirkland <dustin.kirkland@us.ibm.com>, 2005. * * Modified by Amy Griffis <amy.griffis@hp.com> to collect additional * filesystem information. * * Subject and object context labeling support added by <danjones@us.ibm.com> * and <dustin.kirkland@us.ibm.com> for LSPP certification compliance. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/init.h> #include <asm/types.h> #include <linux/atomic.h> #include <linux/fs.h> #include <linux/namei.h> #include <linux/mm.h> #include <linux/export.h> #include <linux/slab.h> #include <linux/mount.h> #include <linux/socket.h> #include <linux/mqueue.h> #include <linux/audit.h> #include <linux/personality.h> #include <linux/time.h> #include <linux/netlink.h> #include <linux/compiler.h> #include <asm/unistd.h> #include <linux/security.h> #include <linux/list.h> #include <linux/tty.h> #include <linux/binfmts.h> #include <linux/highmem.h> #include <linux/syscalls.h> #include <asm/syscall.h> #include <linux/capability.h> #include <linux/fs_struct.h> #include <linux/compat.h> #include <linux/ctype.h> #include <linux/string.h> #include <linux/uaccess.h> #include <uapi/linux/limits.h> #include "audit.h" /* flags stating the success for a syscall */ #define AUDITSC_INVALID 0 #define AUDITSC_SUCCESS 1 #define AUDITSC_FAILURE 2 /* no execve audit message should be longer than this (userspace limits), * see the note near the top of audit_log_execve_info() about this value */ #define MAX_EXECVE_AUDIT_LEN 7500 /* max length to print of cmdline/proctitle value during audit */ #define MAX_PROCTITLE_AUDIT_LEN 128 /* number of audit rules */ int audit_n_rules; /* determines whether we collect data for signals sent */ int audit_signals; struct audit_aux_data { struct audit_aux_data *next; int type; }; #define AUDIT_AUX_IPCPERM 0 /* Number of target pids per aux struct. 
*/ #define AUDIT_AUX_PIDS 16 struct audit_aux_data_pids { struct audit_aux_data d; pid_t target_pid[AUDIT_AUX_PIDS]; kuid_t target_auid[AUDIT_AUX_PIDS]; kuid_t target_uid[AUDIT_AUX_PIDS]; unsigned int target_sessionid[AUDIT_AUX_PIDS]; u32 target_sid[AUDIT_AUX_PIDS]; char target_comm[AUDIT_AUX_PIDS][TASK_COMM_LEN]; int pid_count; }; struct audit_aux_data_bprm_fcaps { struct audit_aux_data d; struct audit_cap_data fcap; unsigned int fcap_ver; struct audit_cap_data old_pcap; struct audit_cap_data new_pcap; }; struct audit_tree_refs { struct audit_tree_refs *next; struct audit_chunk *c[31]; }; static int audit_match_perm(struct audit_context *ctx, int mask) { unsigned n; if (unlikely(!ctx)) return 0; n = ctx->major; switch (audit_classify_syscall(ctx->arch, n)) { case 0: /* native */ if ((mask & AUDIT_PERM_WRITE) && audit_match_class(AUDIT_CLASS_WRITE, n)) return 1; if ((mask & AUDIT_PERM_READ) && audit_match_class(AUDIT_CLASS_READ, n)) return 1; if ((mask & AUDIT_PERM_ATTR) && audit_match_class(AUDIT_CLASS_CHATTR, n)) return 1; return 0; case 1: /* 32bit on biarch */ if ((mask & AUDIT_PERM_WRITE) && audit_match_class(AUDIT_CLASS_WRITE_32, n)) return 1; if ((mask & AUDIT_PERM_READ) && audit_match_class(AUDIT_CLASS_READ_32, n)) return 1; if ((mask & AUDIT_PERM_ATTR) && audit_match_class(AUDIT_CLASS_CHATTR_32, n)) return 1; return 0; case 2: /* open */ return mask & ACC_MODE(ctx->argv[1]); case 3: /* openat */ return mask & ACC_MODE(ctx->argv[2]); case 4: /* socketcall */ return ((mask & AUDIT_PERM_WRITE) && ctx->argv[0] == SYS_BIND); case 5: /* execve */ return mask & AUDIT_PERM_EXEC; default: return 0; } } static int audit_match_filetype(struct audit_context *ctx, int val) { struct audit_names *n; umode_t mode = (umode_t)val; if (unlikely(!ctx)) return 0; list_for_each_entry(n, &ctx->names_list, list) { if ((n->ino != AUDIT_INO_UNSET) && ((n->mode & S_IFMT) == mode)) return 1; } return 0; } /* * We keep a linked list of fixed-sized (31 pointer) arrays of audit_chunk *; * ->first_trees points to its beginning, ->trees - to the current end of data. * ->tree_count is the number of free entries in array pointed to by ->trees. * Original condition is (NULL, NULL, 0); as soon as it grows we never revert to NULL, * "empty" becomes (p, p, 31) afterwards. We don't shrink the list (and seriously, * it's going to remain 1-element for almost any setup) until we free context itself. * References in it _are_ dropped - at the same time we free/drop aux stuff. 
*/ #ifdef CONFIG_AUDIT_TREE static void audit_set_auditable(struct audit_context *ctx) { if (!ctx->prio) { ctx->prio = 1; ctx->current_state = AUDIT_RECORD_CONTEXT; } } static int put_tree_ref(struct audit_context *ctx, struct audit_chunk *chunk) { struct audit_tree_refs *p = ctx->trees; int left = ctx->tree_count; if (likely(left)) { p->c[--left] = chunk; ctx->tree_count = left; return 1; } if (!p) return 0; p = p->next; if (p) { p->c[30] = chunk; ctx->trees = p; ctx->tree_count = 30; return 1; } return 0; } static int grow_tree_refs(struct audit_context *ctx) { struct audit_tree_refs *p = ctx->trees; ctx->trees = kzalloc(sizeof(struct audit_tree_refs), GFP_KERNEL); if (!ctx->trees) { ctx->trees = p; return 0; } if (p) p->next = ctx->trees; else ctx->first_trees = ctx->trees; ctx->tree_count = 31; return 1; } #endif static void unroll_tree_refs(struct audit_context *ctx, struct audit_tree_refs *p, int count) { #ifdef CONFIG_AUDIT_TREE struct audit_tree_refs *q; int n; if (!p) { /* we started with empty chain */ p = ctx->first_trees; count = 31; /* if the very first allocation has failed, nothing to do */ if (!p) return; } n = count; for (q = p; q != ctx->trees; q = q->next, n = 31) { while (n--) { audit_put_chunk(q->c[n]); q->c[n] = NULL; } } while (n-- > ctx->tree_count) { audit_put_chunk(q->c[n]); q->c[n] = NULL; } ctx->trees = p; ctx->tree_count = count; #endif } static void free_tree_refs(struct audit_context *ctx) { struct audit_tree_refs *p, *q; for (p = ctx->first_trees; p; p = q) { q = p->next; kfree(p); } } static int match_tree_refs(struct audit_context *ctx, struct audit_tree *tree) { #ifdef CONFIG_AUDIT_TREE struct audit_tree_refs *p; int n; if (!tree) return 0; /* full ones */ for (p = ctx->first_trees; p != ctx->trees; p = p->next) { for (n = 0; n < 31; n++) if (audit_tree_match(p->c[n], tree)) return 1; } /* partial */ if (p) { for (n = ctx->tree_count; n < 31; n++) if (audit_tree_match(p->c[n], tree)) return 1; } #endif return 0; } static int audit_compare_uid(kuid_t uid, struct audit_names *name, struct audit_field *f, struct audit_context *ctx) { struct audit_names *n; int rc; if (name) { rc = audit_uid_comparator(uid, f->op, name->uid); if (rc) return rc; } if (ctx) { list_for_each_entry(n, &ctx->names_list, list) { rc = audit_uid_comparator(uid, f->op, n->uid); if (rc) return rc; } } return 0; } static int audit_compare_gid(kgid_t gid, struct audit_names *name, struct audit_field *f, struct audit_context *ctx) { struct audit_names *n; int rc; if (name) { rc = audit_gid_comparator(gid, f->op, name->gid); if (rc) return rc; } if (ctx) { list_for_each_entry(n, &ctx->names_list, list) { rc = audit_gid_comparator(gid, f->op, n->gid); if (rc) return rc; } } return 0; } static int audit_field_compare(struct task_struct *tsk, const struct cred *cred, struct audit_field *f, struct audit_context *ctx, struct audit_names *name) { switch (f->val) { /* process to file object comparisons */ case AUDIT_COMPARE_UID_TO_OBJ_UID: return audit_compare_uid(cred->uid, name, f, ctx); case AUDIT_COMPARE_GID_TO_OBJ_GID: return audit_compare_gid(cred->gid, name, f, ctx); case AUDIT_COMPARE_EUID_TO_OBJ_UID: return audit_compare_uid(cred->euid, name, f, ctx); case AUDIT_COMPARE_EGID_TO_OBJ_GID: return audit_compare_gid(cred->egid, name, f, ctx); case AUDIT_COMPARE_AUID_TO_OBJ_UID: return audit_compare_uid(tsk->loginuid, name, f, ctx); case AUDIT_COMPARE_SUID_TO_OBJ_UID: return audit_compare_uid(cred->suid, name, f, ctx); case AUDIT_COMPARE_SGID_TO_OBJ_GID: return audit_compare_gid(cred->sgid, 
name, f, ctx); case AUDIT_COMPARE_FSUID_TO_OBJ_UID: return audit_compare_uid(cred->fsuid, name, f, ctx); case AUDIT_COMPARE_FSGID_TO_OBJ_GID: return audit_compare_gid(cred->fsgid, name, f, ctx); /* uid comparisons */ case AUDIT_COMPARE_UID_TO_AUID: return audit_uid_comparator(cred->uid, f->op, tsk->loginuid); case AUDIT_COMPARE_UID_TO_EUID: return audit_uid_comparator(cred->uid, f->op, cred->euid); case AUDIT_COMPARE_UID_TO_SUID: return audit_uid_comparator(cred->uid, f->op, cred->suid); case AUDIT_COMPARE_UID_TO_FSUID: return audit_uid_comparator(cred->uid, f->op, cred->fsuid); /* auid comparisons */ case AUDIT_COMPARE_AUID_TO_EUID: return audit_uid_comparator(tsk->loginuid, f->op, cred->euid); case AUDIT_COMPARE_AUID_TO_SUID: return audit_uid_comparator(tsk->loginuid, f->op, cred->suid); case AUDIT_COMPARE_AUID_TO_FSUID: return audit_uid_comparator(tsk->loginuid, f->op, cred->fsuid); /* euid comparisons */ case AUDIT_COMPARE_EUID_TO_SUID: return audit_uid_comparator(cred->euid, f->op, cred->suid); case AUDIT_COMPARE_EUID_TO_FSUID: return audit_uid_comparator(cred->euid, f->op, cred->fsuid); /* suid comparisons */ case AUDIT_COMPARE_SUID_TO_FSUID: return audit_uid_comparator(cred->suid, f->op, cred->fsuid); /* gid comparisons */ case AUDIT_COMPARE_GID_TO_EGID: return audit_gid_comparator(cred->gid, f->op, cred->egid); case AUDIT_COMPARE_GID_TO_SGID: return audit_gid_comparator(cred->gid, f->op, cred->sgid); case AUDIT_COMPARE_GID_TO_FSGID: return audit_gid_comparator(cred->gid, f->op, cred->fsgid); /* egid comparisons */ case AUDIT_COMPARE_EGID_TO_SGID: return audit_gid_comparator(cred->egid, f->op, cred->sgid); case AUDIT_COMPARE_EGID_TO_FSGID: return audit_gid_comparator(cred->egid, f->op, cred->fsgid); /* sgid comparison */ case AUDIT_COMPARE_SGID_TO_FSGID: return audit_gid_comparator(cred->sgid, f->op, cred->fsgid); default: WARN(1, "Missing AUDIT_COMPARE define. Report as a bug\n"); return 0; } return 0; } /* Determine if any context name data matches a rule's watch data */ /* Compare a task_struct with an audit_rule. Return 1 on match, 0 * otherwise. * * If task_creation is true, this is an explicit indication that we are * filtering a task rule at task creation time. This and tsk == current are * the only situations where tsk->cred may be accessed without an rcu read lock. 
*/ static int audit_filter_rules(struct task_struct *tsk, struct audit_krule *rule, struct audit_context *ctx, struct audit_names *name, enum audit_state *state, bool task_creation) { const struct cred *cred; int i, need_sid = 1; u32 sid; cred = rcu_dereference_check(tsk->cred, tsk == current || task_creation); for (i = 0; i < rule->field_count; i++) { struct audit_field *f = &rule->fields[i]; struct audit_names *n; int result = 0; pid_t pid; switch (f->type) { case AUDIT_PID: pid = task_pid_nr(tsk); result = audit_comparator(pid, f->op, f->val); break; case AUDIT_PPID: if (ctx) { if (!ctx->ppid) ctx->ppid = task_ppid_nr(tsk); result = audit_comparator(ctx->ppid, f->op, f->val); } break; case AUDIT_EXE: result = audit_exe_compare(tsk, rule->exe); break; case AUDIT_UID: result = audit_uid_comparator(cred->uid, f->op, f->uid); break; case AUDIT_EUID: result = audit_uid_comparator(cred->euid, f->op, f->uid); break; case AUDIT_SUID: result = audit_uid_comparator(cred->suid, f->op, f->uid); break; case AUDIT_FSUID: result = audit_uid_comparator(cred->fsuid, f->op, f->uid); break; case AUDIT_GID: result = audit_gid_comparator(cred->gid, f->op, f->gid); if (f->op == Audit_equal) { if (!result) result = in_group_p(f->gid); } else if (f->op == Audit_not_equal) { if (result) result = !in_group_p(f->gid); } break; case AUDIT_EGID: result = audit_gid_comparator(cred->egid, f->op, f->gid); if (f->op == Audit_equal) { if (!result) result = in_egroup_p(f->gid); } else if (f->op == Audit_not_equal) { if (result) result = !in_egroup_p(f->gid); } break; case AUDIT_SGID: result = audit_gid_comparator(cred->sgid, f->op, f->gid); break; case AUDIT_FSGID: result = audit_gid_comparator(cred->fsgid, f->op, f->gid); break; case AUDIT_PERS: result = audit_comparator(tsk->personality, f->op, f->val); break; case AUDIT_ARCH: if (ctx) result = audit_comparator(ctx->arch, f->op, f->val); break; case AUDIT_EXIT: if (ctx && ctx->return_valid) result = audit_comparator(ctx->return_code, f->op, f->val); break; case AUDIT_SUCCESS: if (ctx && ctx->return_valid) { if (f->val) result = audit_comparator(ctx->return_valid, f->op, AUDITSC_SUCCESS); else result = audit_comparator(ctx->return_valid, f->op, AUDITSC_FAILURE); } break; case AUDIT_DEVMAJOR: if (name) { if (audit_comparator(MAJOR(name->dev), f->op, f->val) || audit_comparator(MAJOR(name->rdev), f->op, f->val)) ++result; } else if (ctx) { list_for_each_entry(n, &ctx->names_list, list) { if (audit_comparator(MAJOR(n->dev), f->op, f->val) || audit_comparator(MAJOR(n->rdev), f->op, f->val)) { ++result; break; } } } break; case AUDIT_DEVMINOR: if (name) { if (audit_comparator(MINOR(name->dev), f->op, f->val) || audit_comparator(MINOR(name->rdev), f->op, f->val)) ++result; } else if (ctx) { list_for_each_entry(n, &ctx->names_list, list) { if (audit_comparator(MINOR(n->dev), f->op, f->val) || audit_comparator(MINOR(n->rdev), f->op, f->val)) { ++result; break; } } } break; case AUDIT_INODE: if (name) result = audit_comparator(name->ino, f->op, f->val); else if (ctx) { list_for_each_entry(n, &ctx->names_list, list) { if (audit_comparator(n->ino, f->op, f->val)) { ++result; break; } } } break; case AUDIT_OBJ_UID: if (name) { result = audit_uid_comparator(name->uid, f->op, f->uid); } else if (ctx) { list_for_each_entry(n, &ctx->names_list, list) { if (audit_uid_comparator(n->uid, f->op, f->uid)) { ++result; break; } } } break; case AUDIT_OBJ_GID: if (name) { result = audit_gid_comparator(name->gid, f->op, f->gid); } else if (ctx) { list_for_each_entry(n, &ctx->names_list, list) { 
if (audit_gid_comparator(n->gid, f->op, f->gid)) { ++result; break; } } } break; case AUDIT_WATCH: if (name) result = audit_watch_compare(rule->watch, name->ino, name->dev); break; case AUDIT_DIR: if (ctx) result = match_tree_refs(ctx, rule->tree); break; case AUDIT_LOGINUID: result = audit_uid_comparator(tsk->loginuid, f->op, f->uid); break; case AUDIT_LOGINUID_SET: result = audit_comparator(audit_loginuid_set(tsk), f->op, f->val); break; case AUDIT_SUBJ_USER: case AUDIT_SUBJ_ROLE: case AUDIT_SUBJ_TYPE: case AUDIT_SUBJ_SEN: case AUDIT_SUBJ_CLR: /* NOTE: this may return negative values indicating a temporary error. We simply treat this as a match for now to avoid losing information that may be wanted. An error message will also be logged upon error */ if (f->lsm_rule) { if (need_sid) { security_task_getsecid(tsk, &sid); need_sid = 0; } result = security_audit_rule_match(sid, f->type, f->op, f->lsm_rule, ctx); } break; case AUDIT_OBJ_USER: case AUDIT_OBJ_ROLE: case AUDIT_OBJ_TYPE: case AUDIT_OBJ_LEV_LOW: case AUDIT_OBJ_LEV_HIGH: /* The above note for AUDIT_SUBJ_USER...AUDIT_SUBJ_CLR also applies here */ if (f->lsm_rule) { /* Find files that match */ if (name) { result = security_audit_rule_match( name->osid, f->type, f->op, f->lsm_rule, ctx); } else if (ctx) { list_for_each_entry(n, &ctx->names_list, list) { if (security_audit_rule_match(n->osid, f->type, f->op, f->lsm_rule, ctx)) { ++result; break; } } } /* Find ipc objects that match */ if (!ctx || ctx->type != AUDIT_IPC) break; if (security_audit_rule_match(ctx->ipc.osid, f->type, f->op, f->lsm_rule, ctx)) ++result; } break; case AUDIT_ARG0: case AUDIT_ARG1: case AUDIT_ARG2: case AUDIT_ARG3: if (ctx) result = audit_comparator(ctx->argv[f->type-AUDIT_ARG0], f->op, f->val); break; case AUDIT_FILTERKEY: /* ignore this field for filtering */ result = 1; break; case AUDIT_PERM: result = audit_match_perm(ctx, f->val); break; case AUDIT_FILETYPE: result = audit_match_filetype(ctx, f->val); break; case AUDIT_FIELD_COMPARE: result = audit_field_compare(tsk, cred, f, ctx, name); break; } if (!result) return 0; } if (ctx) { if (rule->prio <= ctx->prio) return 0; if (rule->filterkey) { kfree(ctx->filterkey); ctx->filterkey = kstrdup(rule->filterkey, GFP_ATOMIC); } ctx->prio = rule->prio; } switch (rule->action) { case AUDIT_NEVER: *state = AUDIT_DISABLED; break; case AUDIT_ALWAYS: *state = AUDIT_RECORD_CONTEXT; break; } return 1; } /* At process creation time, we can determine if system-call auditing is * completely disabled for this task. Since we only have the task * structure at this point, we can only check uid and gid. 
*/ static enum audit_state audit_filter_task(struct task_struct *tsk, char **key) { struct audit_entry *e; enum audit_state state; rcu_read_lock(); list_for_each_entry_rcu(e, &audit_filter_list[AUDIT_FILTER_TASK], list) { if (audit_filter_rules(tsk, &e->rule, NULL, NULL, &state, true)) { if (state == AUDIT_RECORD_CONTEXT) *key = kstrdup(e->rule.filterkey, GFP_ATOMIC); rcu_read_unlock(); return state; } } rcu_read_unlock(); return AUDIT_BUILD_CONTEXT; } static int audit_in_mask(const struct audit_krule *rule, unsigned long val) { int word, bit; if (val > 0xffffffff) return false; word = AUDIT_WORD(val); if (word >= AUDIT_BITMASK_SIZE) return false; bit = AUDIT_BIT(val); return rule->mask[word] & bit; } /* At syscall entry and exit time, this filter is called if the * audit_state is not low enough that auditing cannot take place, but is * also not high enough that we already know we have to write an audit * record (i.e., the state is AUDIT_SETUP_CONTEXT or AUDIT_BUILD_CONTEXT). */ static enum audit_state audit_filter_syscall(struct task_struct *tsk, struct audit_context *ctx, struct list_head *list) { struct audit_entry *e; enum audit_state state; if (audit_pid && tsk->tgid == audit_pid) return AUDIT_DISABLED; rcu_read_lock(); if (!list_empty(list)) { list_for_each_entry_rcu(e, list, list) { if (audit_in_mask(&e->rule, ctx->major) && audit_filter_rules(tsk, &e->rule, ctx, NULL, &state, false)) { rcu_read_unlock(); ctx->current_state = state; return state; } } } rcu_read_unlock(); return AUDIT_BUILD_CONTEXT; } /* * Given an audit_name check the inode hash table to see if they match. * Called holding the rcu read lock to protect the use of audit_inode_hash */ static int audit_filter_inode_name(struct task_struct *tsk, struct audit_names *n, struct audit_context *ctx) { int h = audit_hash_ino((u32)n->ino); struct list_head *list = &audit_inode_hash[h]; struct audit_entry *e; enum audit_state state; if (list_empty(list)) return 0; list_for_each_entry_rcu(e, list, list) { if (audit_in_mask(&e->rule, ctx->major) && audit_filter_rules(tsk, &e->rule, ctx, n, &state, false)) { ctx->current_state = state; return 1; } } return 0; } /* At syscall exit time, this filter is called if any audit_names have been * collected during syscall processing. We only check rules in sublists at hash * buckets applicable to the inode numbers in audit_names. * Regarding audit_state, same rules apply as for audit_filter_syscall(). 
*/ void audit_filter_inodes(struct task_struct *tsk, struct audit_context *ctx) { struct audit_names *n; if (audit_pid && tsk->tgid == audit_pid) return; rcu_read_lock(); list_for_each_entry(n, &ctx->names_list, list) { if (audit_filter_inode_name(tsk, n, ctx)) break; } rcu_read_unlock(); } /* Transfer the audit context pointer to the caller, clearing it in the tsk's struct */ static inline struct audit_context *audit_take_context(struct task_struct *tsk, int return_valid, long return_code) { struct audit_context *context = tsk->audit_context; if (!context) return NULL; context->return_valid = return_valid; /* * we need to fix up the return code in the audit logs if the actual * return codes are later going to be fixed up by the arch specific * signal handlers * * This is actually a test for: * (rc == ERESTARTSYS ) || (rc == ERESTARTNOINTR) || * (rc == ERESTARTNOHAND) || (rc == ERESTART_RESTARTBLOCK) * * but is faster than a bunch of || */ if (unlikely(return_code <= -ERESTARTSYS) && (return_code >= -ERESTART_RESTARTBLOCK) && (return_code != -ENOIOCTLCMD)) context->return_code = -EINTR; else context->return_code = return_code; if (context->in_syscall && !context->dummy) { audit_filter_syscall(tsk, context, &audit_filter_list[AUDIT_FILTER_EXIT]); audit_filter_inodes(tsk, context); } tsk->audit_context = NULL; return context; } static inline void audit_proctitle_free(struct audit_context *context) { kfree(context->proctitle.value); context->proctitle.value = NULL; context->proctitle.len = 0; } static inline void audit_free_names(struct audit_context *context) { struct audit_names *n, *next; list_for_each_entry_safe(n, next, &context->names_list, list) { list_del(&n->list); if (n->name) putname(n->name); if (n->should_free) kfree(n); } context->name_count = 0; path_put(&context->pwd); context->pwd.dentry = NULL; context->pwd.mnt = NULL; } static inline void audit_free_aux(struct audit_context *context) { struct audit_aux_data *aux; while ((aux = context->aux)) { context->aux = aux->next; kfree(aux); } while ((aux = context->aux_pids)) { context->aux_pids = aux->next; kfree(aux); } } static inline struct audit_context *audit_alloc_context(enum audit_state state) { struct audit_context *context; context = kzalloc(sizeof(*context), GFP_KERNEL); if (!context) return NULL; context->state = state; context->prio = state == AUDIT_RECORD_CONTEXT ? ~0ULL : 0; INIT_LIST_HEAD(&context->killed_trees); INIT_LIST_HEAD(&context->names_list); return context; } /** * audit_alloc - allocate an audit context block for a task * @tsk: task * * Filter on the task information and allocate a per-task audit context * if necessary. Doing so turns on system call auditing for the * specified task. This is called from copy_process, so no lock is * needed. */ int audit_alloc(struct task_struct *tsk) { struct audit_context *context; enum audit_state state; char *key = NULL; if (likely(!audit_ever_enabled)) return 0; /* Return if not auditing. 
*/ state = audit_filter_task(tsk, &key); if (state == AUDIT_DISABLED) { clear_tsk_thread_flag(tsk, TIF_SYSCALL_AUDIT); return 0; } if (!(context = audit_alloc_context(state))) { kfree(key); audit_log_lost("out of memory in audit_alloc"); return -ENOMEM; } context->filterkey = key; tsk->audit_context = context; set_tsk_thread_flag(tsk, TIF_SYSCALL_AUDIT); return 0; } static inline void audit_free_context(struct audit_context *context) { audit_free_names(context); unroll_tree_refs(context, NULL, 0); free_tree_refs(context); audit_free_aux(context); kfree(context->filterkey); kfree(context->sockaddr); audit_proctitle_free(context); kfree(context); } static int audit_log_pid_context(struct audit_context *context, pid_t pid, kuid_t auid, kuid_t uid, unsigned int sessionid, u32 sid, char *comm) { struct audit_buffer *ab; char *ctx = NULL; u32 len; int rc = 0; ab = audit_log_start(context, GFP_KERNEL, AUDIT_OBJ_PID); if (!ab) return rc; audit_log_format(ab, "opid=%d oauid=%d ouid=%d oses=%d", pid, from_kuid(&init_user_ns, auid), from_kuid(&init_user_ns, uid), sessionid); if (sid) { if (security_secid_to_secctx(sid, &ctx, &len)) { audit_log_format(ab, " obj=(none)"); rc = 1; } else { audit_log_format(ab, " obj=%s", ctx); security_release_secctx(ctx, len); } } audit_log_format(ab, " ocomm="); audit_log_untrustedstring(ab, comm); audit_log_end(ab); return rc; } static void audit_log_execve_info(struct audit_context *context, struct audit_buffer **ab) { long len_max; long len_rem; long len_full; long len_buf; long len_abuf; long len_tmp; bool require_data; bool encode; unsigned int iter; unsigned int arg; char *buf_head; char *buf; const char __user *p = (const char __user *)current->mm->arg_start; /* NOTE: this buffer needs to be large enough to hold all the non-arg * data we put in the audit record for this argument (see the * code below) ... at this point in time 96 is plenty */ char abuf[96]; /* NOTE: we set MAX_EXECVE_AUDIT_LEN to a rather arbitrary limit, the * current value of 7500 is not as important as the fact that it * is less than 8k, a setting of 7500 gives us plenty of wiggle * room if we go over a little bit in the logging below */ WARN_ON_ONCE(MAX_EXECVE_AUDIT_LEN > 7500); len_max = MAX_EXECVE_AUDIT_LEN; /* scratch buffer to hold the userspace args */ buf_head = kmalloc(MAX_EXECVE_AUDIT_LEN + 1, GFP_KERNEL); if (!buf_head) { audit_panic("out of memory for argv string"); return; } buf = buf_head; audit_log_format(*ab, "argc=%d", context->execve.argc); len_rem = len_max; len_buf = 0; len_full = 0; require_data = true; encode = false; iter = 0; arg = 0; do { /* NOTE: we don't ever want to trust this value for anything * serious, but the audit record format insists we * provide an argument length for really long arguments, * e.g. > MAX_EXECVE_AUDIT_LEN, so we have no choice but * to use strncpy_from_user() to obtain this value for * recording in the log, although we don't use it * anywhere here to avoid a double-fetch problem */ if (len_full == 0) len_full = strnlen_user(p, MAX_ARG_STRLEN) - 1; /* read more data from userspace */ if (require_data) { /* can we make more room in the buffer? 
*/ if (buf != buf_head) { memmove(buf_head, buf, len_buf); buf = buf_head; } /* fetch as much as we can of the argument */ len_tmp = strncpy_from_user(&buf_head[len_buf], p, len_max - len_buf); if (len_tmp == -EFAULT) { /* unable to copy from userspace */ send_sig(SIGKILL, current, 0); goto out; } else if (len_tmp == (len_max - len_buf)) { /* buffer is not large enough */ require_data = true; /* NOTE: if we are going to span multiple * buffers force the encoding so we stand * a chance at a sane len_full value and * consistent record encoding */ encode = true; len_full = len_full * 2; p += len_tmp; } else { require_data = false; if (!encode) encode = audit_string_contains_control( buf, len_tmp); /* try to use a trusted value for len_full */ if (len_full < len_max) len_full = (encode ? len_tmp * 2 : len_tmp); p += len_tmp + 1; } len_buf += len_tmp; buf_head[len_buf] = '\0'; /* length of the buffer in the audit record? */ len_abuf = (encode ? len_buf * 2 : len_buf + 2); } /* write as much as we can to the audit log */ if (len_buf > 0) { /* NOTE: some magic numbers here - basically if we * can't fit a reasonable amount of data into the * existing audit buffer, flush it and start with * a new buffer */ if ((sizeof(abuf) + 8) > len_rem) { len_rem = len_max; audit_log_end(*ab); *ab = audit_log_start(context, GFP_KERNEL, AUDIT_EXECVE); if (!*ab) goto out; } /* create the non-arg portion of the arg record */ len_tmp = 0; if (require_data || (iter > 0) || ((len_abuf + sizeof(abuf)) > len_rem)) { if (iter == 0) { len_tmp += snprintf(&abuf[len_tmp], sizeof(abuf) - len_tmp, " a%d_len=%lu", arg, len_full); } len_tmp += snprintf(&abuf[len_tmp], sizeof(abuf) - len_tmp, " a%d[%d]=", arg, iter++); } else len_tmp += snprintf(&abuf[len_tmp], sizeof(abuf) - len_tmp, " a%d=", arg); WARN_ON(len_tmp >= sizeof(abuf)); abuf[sizeof(abuf) - 1] = '\0'; /* log the arg in the audit record */ audit_log_format(*ab, "%s", abuf); len_rem -= len_tmp; len_tmp = len_buf; if (encode) { if (len_abuf > len_rem) len_tmp = len_rem / 2; /* encoding */ audit_log_n_hex(*ab, buf, len_tmp); len_rem -= len_tmp * 2; len_abuf -= len_tmp * 2; } else { if (len_abuf > len_rem) len_tmp = len_rem - 2; /* quotes */ audit_log_n_string(*ab, buf, len_tmp); len_rem -= len_tmp + 2; /* don't subtract the "2" because we still need * to add quotes to the remaining string */ len_abuf -= len_tmp; } len_buf -= len_tmp; buf += len_tmp; } /* ready to move to the next argument? 
*/ if ((len_buf == 0) && !require_data) { arg++; iter = 0; len_full = 0; require_data = true; encode = false; } } while (arg < context->execve.argc); /* NOTE: the caller handles the final audit_log_end() call */ out: kfree(buf_head); } static void show_special(struct audit_context *context, int *call_panic) { struct audit_buffer *ab; int i; ab = audit_log_start(context, GFP_KERNEL, context->type); if (!ab) return; switch (context->type) { case AUDIT_SOCKETCALL: { int nargs = context->socketcall.nargs; audit_log_format(ab, "nargs=%d", nargs); for (i = 0; i < nargs; i++) audit_log_format(ab, " a%d=%lx", i, context->socketcall.args[i]); break; } case AUDIT_IPC: { u32 osid = context->ipc.osid; audit_log_format(ab, "ouid=%u ogid=%u mode=%#ho", from_kuid(&init_user_ns, context->ipc.uid), from_kgid(&init_user_ns, context->ipc.gid), context->ipc.mode); if (osid) { char *ctx = NULL; u32 len; if (security_secid_to_secctx(osid, &ctx, &len)) { audit_log_format(ab, " osid=%u", osid); *call_panic = 1; } else { audit_log_format(ab, " obj=%s", ctx); security_release_secctx(ctx, len); } } if (context->ipc.has_perm) { audit_log_end(ab); ab = audit_log_start(context, GFP_KERNEL, AUDIT_IPC_SET_PERM); if (unlikely(!ab)) return; audit_log_format(ab, "qbytes=%lx ouid=%u ogid=%u mode=%#ho", context->ipc.qbytes, context->ipc.perm_uid, context->ipc.perm_gid, context->ipc.perm_mode); } break; } case AUDIT_MQ_OPEN: { audit_log_format(ab, "oflag=0x%x mode=%#ho mq_flags=0x%lx mq_maxmsg=%ld " "mq_msgsize=%ld mq_curmsgs=%ld", context->mq_open.oflag, context->mq_open.mode, context->mq_open.attr.mq_flags, context->mq_open.attr.mq_maxmsg, context->mq_open.attr.mq_msgsize, context->mq_open.attr.mq_curmsgs); break; } case AUDIT_MQ_SENDRECV: { audit_log_format(ab, "mqdes=%d msg_len=%zd msg_prio=%u " "abs_timeout_sec=%ld abs_timeout_nsec=%ld", context->mq_sendrecv.mqdes, context->mq_sendrecv.msg_len, context->mq_sendrecv.msg_prio, context->mq_sendrecv.abs_timeout.tv_sec, context->mq_sendrecv.abs_timeout.tv_nsec); break; } case AUDIT_MQ_NOTIFY: { audit_log_format(ab, "mqdes=%d sigev_signo=%d", context->mq_notify.mqdes, context->mq_notify.sigev_signo); break; } case AUDIT_MQ_GETSETATTR: { struct mq_attr *attr = &context->mq_getsetattr.mqstat; audit_log_format(ab, "mqdes=%d mq_flags=0x%lx mq_maxmsg=%ld mq_msgsize=%ld " "mq_curmsgs=%ld ", context->mq_getsetattr.mqdes, attr->mq_flags, attr->mq_maxmsg, attr->mq_msgsize, attr->mq_curmsgs); break; } case AUDIT_CAPSET: { audit_log_format(ab, "pid=%d", context->capset.pid); audit_log_cap(ab, "cap_pi", &context->capset.cap.inheritable); audit_log_cap(ab, "cap_pp", &context->capset.cap.permitted); audit_log_cap(ab, "cap_pe", &context->capset.cap.effective); break; } case AUDIT_MMAP: { audit_log_format(ab, "fd=%d flags=0x%x", context->mmap.fd, context->mmap.flags); break; } case AUDIT_EXECVE: { audit_log_execve_info(context, &ab); break; } } audit_log_end(ab); } static inline int audit_proctitle_rtrim(char *proctitle, int len) { char *end = proctitle + len - 1; while (end > proctitle && !isprint(*end)) end--; /* catch the case where proctitle is only 1 non-print character */ len = end - proctitle + 1; len -= isprint(proctitle[len-1]) == 0; return len; } static void audit_log_proctitle(struct task_struct *tsk, struct audit_context *context) { int res; char *buf; char *msg = "(null)"; int len = strlen(msg); struct audit_buffer *ab; ab = audit_log_start(context, GFP_KERNEL, AUDIT_PROCTITLE); if (!ab) return; /* audit_panic or being filtered */ audit_log_format(ab, "proctitle="); /* Not cached 
*/ if (!context->proctitle.value) { buf = kmalloc(MAX_PROCTITLE_AUDIT_LEN, GFP_KERNEL); if (!buf) goto out; /* Historically called this from procfs naming */ res = get_cmdline(tsk, buf, MAX_PROCTITLE_AUDIT_LEN); if (res == 0) { kfree(buf); goto out; } res = audit_proctitle_rtrim(buf, res); if (res == 0) { kfree(buf); goto out; } context->proctitle.value = buf; context->proctitle.len = res; } msg = context->proctitle.value; len = context->proctitle.len; out: audit_log_n_untrustedstring(ab, msg, len); audit_log_end(ab); } static void audit_log_exit(struct audit_context *context, struct task_struct *tsk) { int i, call_panic = 0; struct audit_buffer *ab; struct audit_aux_data *aux; struct audit_names *n; /* tsk == current */ context->personality = tsk->personality; ab = audit_log_start(context, GFP_KERNEL, AUDIT_SYSCALL); if (!ab) return; /* audit_panic has been called */ audit_log_format(ab, "arch=%x syscall=%d", context->arch, context->major); if (context->personality != PER_LINUX) audit_log_format(ab, " per=%lx", context->personality); if (context->return_valid) audit_log_format(ab, " success=%s exit=%ld", (context->return_valid==AUDITSC_SUCCESS)?"yes":"no", context->return_code); audit_log_format(ab, " a0=%lx a1=%lx a2=%lx a3=%lx items=%d", context->argv[0], context->argv[1], context->argv[2], context->argv[3], context->name_count); audit_log_task_info(ab, tsk); audit_log_key(ab, context->filterkey); audit_log_end(ab); for (aux = context->aux; aux; aux = aux->next) { ab = audit_log_start(context, GFP_KERNEL, aux->type); if (!ab) continue; /* audit_panic has been called */ switch (aux->type) { case AUDIT_BPRM_FCAPS: { struct audit_aux_data_bprm_fcaps *axs = (void *)aux; audit_log_format(ab, "fver=%x", axs->fcap_ver); audit_log_cap(ab, "fp", &axs->fcap.permitted); audit_log_cap(ab, "fi", &axs->fcap.inheritable); audit_log_format(ab, " fe=%d", axs->fcap.fE); audit_log_cap(ab, "old_pp", &axs->old_pcap.permitted); audit_log_cap(ab, "old_pi", &axs->old_pcap.inheritable); audit_log_cap(ab, "old_pe", &axs->old_pcap.effective); audit_log_cap(ab, "new_pp", &axs->new_pcap.permitted); audit_log_cap(ab, "new_pi", &axs->new_pcap.inheritable); audit_log_cap(ab, "new_pe", &axs->new_pcap.effective); break; } } audit_log_end(ab); } if (context->type) show_special(context, &call_panic); if (context->fds[0] >= 0) { ab = audit_log_start(context, GFP_KERNEL, AUDIT_FD_PAIR); if (ab) { audit_log_format(ab, "fd0=%d fd1=%d", context->fds[0], context->fds[1]); audit_log_end(ab); } } if (context->sockaddr_len) { ab = audit_log_start(context, GFP_KERNEL, AUDIT_SOCKADDR); if (ab) { audit_log_format(ab, "saddr="); audit_log_n_hex(ab, (void *)context->sockaddr, context->sockaddr_len); audit_log_end(ab); } } for (aux = context->aux_pids; aux; aux = aux->next) { struct audit_aux_data_pids *axs = (void *)aux; for (i = 0; i < axs->pid_count; i++) if (audit_log_pid_context(context, axs->target_pid[i], axs->target_auid[i], axs->target_uid[i], axs->target_sessionid[i], axs->target_sid[i], axs->target_comm[i])) call_panic = 1; } if (context->target_pid && audit_log_pid_context(context, context->target_pid, context->target_auid, context->target_uid, context->target_sessionid, context->target_sid, context->target_comm)) call_panic = 1; if (context->pwd.dentry && context->pwd.mnt) { ab = audit_log_start(context, GFP_KERNEL, AUDIT_CWD); if (ab) { audit_log_d_path(ab, "cwd=", &context->pwd); audit_log_end(ab); } } i = 0; list_for_each_entry(n, &context->names_list, list) { if (n->hidden) continue; audit_log_name(context, n, NULL, 
i++, &call_panic); } audit_log_proctitle(tsk, context); /* Send end of event record to help user space know we are finished */ ab = audit_log_start(context, GFP_KERNEL, AUDIT_EOE); if (ab) audit_log_end(ab); if (call_panic) audit_panic("error converting sid to string"); } /** * audit_free - free a per-task audit context * @tsk: task whose audit context block to free * * Called from copy_process and do_exit */ void __audit_free(struct task_struct *tsk) { struct audit_context *context; context = audit_take_context(tsk, 0, 0); if (!context) return; /* Check for system calls that do not go through the exit * function (e.g., exit_group), then free context block. * We use GFP_ATOMIC here because we might be doing this * in the context of the idle thread */ /* that can happen only if we are called from do_exit() */ if (context->in_syscall && context->current_state == AUDIT_RECORD_CONTEXT) audit_log_exit(context, tsk); if (!list_empty(&context->killed_trees)) audit_kill_trees(&context->killed_trees); audit_free_context(context); } /** * audit_syscall_entry - fill in an audit record at syscall entry * @major: major syscall type (function) * @a1: additional syscall register 1 * @a2: additional syscall register 2 * @a3: additional syscall register 3 * @a4: additional syscall register 4 * * Fill in audit context at syscall entry. This only happens if the * audit context was created when the task was created and the state or * filters demand the audit context be built. If the state from the * per-task filter or from the per-syscall filter is AUDIT_RECORD_CONTEXT, * then the record will be written at syscall exit time (otherwise, it * will only be written if another part of the kernel requests that it * be written). */ void __audit_syscall_entry(int major, unsigned long a1, unsigned long a2, unsigned long a3, unsigned long a4) { struct task_struct *tsk = current; struct audit_context *context = tsk->audit_context; enum audit_state state; if (!context) return; BUG_ON(context->in_syscall || context->name_count); if (!audit_enabled) return; context->arch = syscall_get_arch(); context->major = major; context->argv[0] = a1; context->argv[1] = a2; context->argv[2] = a3; context->argv[3] = a4; state = context->state; context->dummy = !audit_n_rules; if (!context->dummy && state == AUDIT_BUILD_CONTEXT) { context->prio = 0; state = audit_filter_syscall(tsk, context, &audit_filter_list[AUDIT_FILTER_ENTRY]); } if (state == AUDIT_DISABLED) return; context->serial = 0; context->ctime = CURRENT_TIME; context->in_syscall = 1; context->current_state = state; context->ppid = 0; } /** * audit_syscall_exit - deallocate audit context after a system call * @success: success value of the syscall * @return_code: return value of the syscall * * Tear down after system call. If the audit context has been marked as * auditable (either because of the AUDIT_RECORD_CONTEXT state from * filtering, or because some other part of the kernel wrote an audit * message), then write out the syscall information. In call cases, * free the names stored from getname(). */ void __audit_syscall_exit(int success, long return_code) { struct task_struct *tsk = current; struct audit_context *context; if (success) success = AUDITSC_SUCCESS; else success = AUDITSC_FAILURE; context = audit_take_context(tsk, success, return_code); if (!context) return; if (context->in_syscall && context->current_state == AUDIT_RECORD_CONTEXT) audit_log_exit(context, tsk); context->in_syscall = 0; context->prio = context->state == AUDIT_RECORD_CONTEXT ? 
~0ULL : 0; if (!list_empty(&context->killed_trees)) audit_kill_trees(&context->killed_trees); audit_free_names(context); unroll_tree_refs(context, NULL, 0); audit_free_aux(context); context->aux = NULL; context->aux_pids = NULL; context->target_pid = 0; context->target_sid = 0; context->sockaddr_len = 0; context->type = 0; context->fds[0] = -1; if (context->state != AUDIT_RECORD_CONTEXT) { kfree(context->filterkey); context->filterkey = NULL; } tsk->audit_context = context; } static inline void handle_one(const struct inode *inode) { #ifdef CONFIG_AUDIT_TREE struct audit_context *context; struct audit_tree_refs *p; struct audit_chunk *chunk; int count; if (likely(hlist_empty(&inode->i_fsnotify_marks))) return; context = current->audit_context; p = context->trees; count = context->tree_count; rcu_read_lock(); chunk = audit_tree_lookup(inode); rcu_read_unlock(); if (!chunk) return; if (likely(put_tree_ref(context, chunk))) return; if (unlikely(!grow_tree_refs(context))) { pr_warn("out of memory, audit has lost a tree reference\n"); audit_set_auditable(context); audit_put_chunk(chunk); unroll_tree_refs(context, p, count); return; } put_tree_ref(context, chunk); #endif } static void handle_path(const struct dentry *dentry) { #ifdef CONFIG_AUDIT_TREE struct audit_context *context; struct audit_tree_refs *p; const struct dentry *d, *parent; struct audit_chunk *drop; unsigned long seq; int count; context = current->audit_context; p = context->trees; count = context->tree_count; retry: drop = NULL; d = dentry; rcu_read_lock(); seq = read_seqbegin(&rename_lock); for(;;) { struct inode *inode = d_backing_inode(d); if (inode && unlikely(!hlist_empty(&inode->i_fsnotify_marks))) { struct audit_chunk *chunk; chunk = audit_tree_lookup(inode); if (chunk) { if (unlikely(!put_tree_ref(context, chunk))) { drop = chunk; break; } } } parent = d->d_parent; if (parent == d) break; d = parent; } if (unlikely(read_seqretry(&rename_lock, seq) || drop)) { /* in this order */ rcu_read_unlock(); if (!drop) { /* just a race with rename */ unroll_tree_refs(context, p, count); goto retry; } audit_put_chunk(drop); if (grow_tree_refs(context)) { /* OK, got more space */ unroll_tree_refs(context, p, count); goto retry; } /* too bad */ pr_warn("out of memory, audit has lost a tree reference\n"); unroll_tree_refs(context, p, count); audit_set_auditable(context); return; } rcu_read_unlock(); #endif } static struct audit_names *audit_alloc_name(struct audit_context *context, unsigned char type) { struct audit_names *aname; if (context->name_count < AUDIT_NAMES) { aname = &context->preallocated_names[context->name_count]; memset(aname, 0, sizeof(*aname)); } else { aname = kzalloc(sizeof(*aname), GFP_NOFS); if (!aname) return NULL; aname->should_free = true; } aname->ino = AUDIT_INO_UNSET; aname->type = type; list_add_tail(&aname->list, &context->names_list); context->name_count++; return aname; } /** * audit_reusename - fill out filename with info from existing entry * @uptr: userland ptr to pathname * * Search the audit_names list for the current audit context. If there is an * existing entry with a matching "uptr" then return the filename * associated with that audit_name. If not, return NULL. 
*/ struct filename * __audit_reusename(const __user char *uptr) { struct audit_context *context = current->audit_context; struct audit_names *n; list_for_each_entry(n, &context->names_list, list) { if (!n->name) continue; if (n->name->uptr == uptr) { n->name->refcnt++; return n->name; } } return NULL; } /** * audit_getname - add a name to the list * @name: name to add * * Add a name to the list of audit names for this context. * Called from fs/namei.c:getname(). */ void __audit_getname(struct filename *name) { struct audit_context *context = current->audit_context; struct audit_names *n; if (!context->in_syscall) return; n = audit_alloc_name(context, AUDIT_TYPE_UNKNOWN); if (!n) return; n->name = name; n->name_len = AUDIT_NAME_FULL; name->aname = n; name->refcnt++; if (!context->pwd.dentry) get_fs_pwd(current->fs, &context->pwd); } /** * __audit_inode - store the inode and device from a lookup * @name: name being audited * @dentry: dentry being audited * @flags: attributes for this particular entry */ void __audit_inode(struct filename *name, const struct dentry *dentry, unsigned int flags) { struct audit_context *context = current->audit_context; struct inode *inode = d_backing_inode(dentry); struct audit_names *n; bool parent = flags & AUDIT_INODE_PARENT; if (!context->in_syscall) return; if (!name) goto out_alloc; /* * If we have a pointer to an audit_names entry already, then we can * just use it directly if the type is correct. */ n = name->aname; if (n) { if (parent) { if (n->type == AUDIT_TYPE_PARENT || n->type == AUDIT_TYPE_UNKNOWN) goto out; } else { if (n->type != AUDIT_TYPE_PARENT) goto out; } } list_for_each_entry_reverse(n, &context->names_list, list) { if (n->ino) { /* valid inode number, use that for the comparison */ if (n->ino != inode->i_ino || n->dev != inode->i_sb->s_dev) continue; } else if (n->name) { /* inode number has not been set, check the name */ if (strcmp(n->name->name, name->name)) continue; } else /* no inode and no name (?!) ... this is odd ... */ continue; /* match the correct record type */ if (parent) { if (n->type == AUDIT_TYPE_PARENT || n->type == AUDIT_TYPE_UNKNOWN) goto out; } else { if (n->type != AUDIT_TYPE_PARENT) goto out; } } out_alloc: /* unable to find an entry with both a matching name and type */ n = audit_alloc_name(context, AUDIT_TYPE_UNKNOWN); if (!n) return; if (name) { n->name = name; name->refcnt++; } out: if (parent) { n->name_len = n->name ? parent_len(n->name->name) : AUDIT_NAME_FULL; n->type = AUDIT_TYPE_PARENT; if (flags & AUDIT_INODE_HIDDEN) n->hidden = true; } else { n->name_len = AUDIT_NAME_FULL; n->type = AUDIT_TYPE_NORMAL; } handle_path(dentry); audit_copy_inode(n, dentry, inode); } void __audit_file(const struct file *file) { __audit_inode(NULL, file->f_path.dentry, 0); } /** * __audit_inode_child - collect inode info for created/removed objects * @parent: inode of dentry parent * @dentry: dentry being audited * @type: AUDIT_TYPE_* value that we're looking for * * For syscalls that create or remove filesystem objects, audit_inode * can only collect information for the filesystem object's parent. * This call updates the audit context with the child's information. * Syscalls that create a new filesystem object must be hooked after * the object is created. Syscalls that remove a filesystem object * must be hooked prior, in order to capture the target inode during * unsuccessful attempts. 
*/ void __audit_inode_child(struct inode *parent, const struct dentry *dentry, const unsigned char type) { struct audit_context *context = current->audit_context; struct inode *inode = d_backing_inode(dentry); const char *dname = dentry->d_name.name; struct audit_names *n, *found_parent = NULL, *found_child = NULL; if (!context->in_syscall) return; if (inode) handle_one(inode); /* look for a parent entry first */ list_for_each_entry(n, &context->names_list, list) { if (!n->name || (n->type != AUDIT_TYPE_PARENT && n->type != AUDIT_TYPE_UNKNOWN)) continue; if (n->ino == parent->i_ino && n->dev == parent->i_sb->s_dev && !audit_compare_dname_path(dname, n->name->name, n->name_len)) { if (n->type == AUDIT_TYPE_UNKNOWN) n->type = AUDIT_TYPE_PARENT; found_parent = n; break; } } /* is there a matching child entry? */ list_for_each_entry(n, &context->names_list, list) { /* can only match entries that have a name */ if (!n->name || (n->type != type && n->type != AUDIT_TYPE_UNKNOWN)) continue; if (!strcmp(dname, n->name->name) || !audit_compare_dname_path(dname, n->name->name, found_parent ? found_parent->name_len : AUDIT_NAME_FULL)) { if (n->type == AUDIT_TYPE_UNKNOWN) n->type = type; found_child = n; break; } } if (!found_parent) { /* create a new, "anonymous" parent record */ n = audit_alloc_name(context, AUDIT_TYPE_PARENT); if (!n) return; audit_copy_inode(n, NULL, parent); } if (!found_child) { found_child = audit_alloc_name(context, type); if (!found_child) return; /* Re-use the name belonging to the slot for a matching parent * directory. All names for this context are relinquished in * audit_free_names() */ if (found_parent) { found_child->name = found_parent->name; found_child->name_len = AUDIT_NAME_FULL; found_child->name->refcnt++; } } if (inode) audit_copy_inode(found_child, dentry, inode); else found_child->ino = AUDIT_INO_UNSET; } EXPORT_SYMBOL_GPL(__audit_inode_child); /** * auditsc_get_stamp - get local copies of audit_context values * @ctx: audit_context for the task * @t: timespec to store time recorded in the audit_context * @serial: serial value that is recorded in the audit_context * * Also sets the context as auditable. 
*/ int auditsc_get_stamp(struct audit_context *ctx, struct timespec *t, unsigned int *serial) { if (!ctx->in_syscall) return 0; if (!ctx->serial) ctx->serial = audit_serial(); t->tv_sec = ctx->ctime.tv_sec; t->tv_nsec = ctx->ctime.tv_nsec; *serial = ctx->serial; if (!ctx->prio) { ctx->prio = 1; ctx->current_state = AUDIT_RECORD_CONTEXT; } return 1; } /* global counter which is incremented every time something logs in */ static atomic_t session_id = ATOMIC_INIT(0); static int audit_set_loginuid_perm(kuid_t loginuid) { /* if we are unset, we don't need privs */ if (!audit_loginuid_set(current)) return 0; /* if AUDIT_FEATURE_LOGINUID_IMMUTABLE means never ever allow a change*/ if (is_audit_feature_set(AUDIT_FEATURE_LOGINUID_IMMUTABLE)) return -EPERM; /* it is set, you need permission */ if (!capable(CAP_AUDIT_CONTROL)) return -EPERM; /* reject if this is not an unset and we don't allow that */ if (is_audit_feature_set(AUDIT_FEATURE_ONLY_UNSET_LOGINUID) && uid_valid(loginuid)) return -EPERM; return 0; } static void audit_log_set_loginuid(kuid_t koldloginuid, kuid_t kloginuid, unsigned int oldsessionid, unsigned int sessionid, int rc) { struct audit_buffer *ab; uid_t uid, oldloginuid, loginuid; if (!audit_enabled) return; uid = from_kuid(&init_user_ns, task_uid(current)); oldloginuid = from_kuid(&init_user_ns, koldloginuid); loginuid = from_kuid(&init_user_ns, kloginuid), ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_LOGIN); if (!ab) return; audit_log_format(ab, "pid=%d uid=%u", task_pid_nr(current), uid); audit_log_task_context(ab); audit_log_format(ab, " old-auid=%u auid=%u old-ses=%u ses=%u res=%d", oldloginuid, loginuid, oldsessionid, sessionid, !rc); audit_log_end(ab); } /** * audit_set_loginuid - set current task's audit_context loginuid * @loginuid: loginuid value * * Returns 0. * * Called (set) from fs/proc/base.c::proc_loginuid_write(). */ int audit_set_loginuid(kuid_t loginuid) { struct task_struct *task = current; unsigned int oldsessionid, sessionid = (unsigned int)-1; kuid_t oldloginuid; int rc; oldloginuid = audit_get_loginuid(current); oldsessionid = audit_get_sessionid(current); rc = audit_set_loginuid_perm(loginuid); if (rc) goto out; /* are we setting or clearing? 
*/ if (uid_valid(loginuid)) sessionid = (unsigned int)atomic_inc_return(&session_id); task->sessionid = sessionid; task->loginuid = loginuid; out: audit_log_set_loginuid(oldloginuid, loginuid, oldsessionid, sessionid, rc); return rc; } /** * __audit_mq_open - record audit data for a POSIX MQ open * @oflag: open flag * @mode: mode bits * @attr: queue attributes * */ void __audit_mq_open(int oflag, umode_t mode, struct mq_attr *attr) { struct audit_context *context = current->audit_context; if (attr) memcpy(&context->mq_open.attr, attr, sizeof(struct mq_attr)); else memset(&context->mq_open.attr, 0, sizeof(struct mq_attr)); context->mq_open.oflag = oflag; context->mq_open.mode = mode; context->type = AUDIT_MQ_OPEN; } /** * __audit_mq_sendrecv - record audit data for a POSIX MQ timed send/receive * @mqdes: MQ descriptor * @msg_len: Message length * @msg_prio: Message priority * @abs_timeout: Message timeout in absolute time * */ void __audit_mq_sendrecv(mqd_t mqdes, size_t msg_len, unsigned int msg_prio, const struct timespec *abs_timeout) { struct audit_context *context = current->audit_context; struct timespec *p = &context->mq_sendrecv.abs_timeout; if (abs_timeout) memcpy(p, abs_timeout, sizeof(struct timespec)); else memset(p, 0, sizeof(struct timespec)); context->mq_sendrecv.mqdes = mqdes; context->mq_sendrecv.msg_len = msg_len; context->mq_sendrecv.msg_prio = msg_prio; context->type = AUDIT_MQ_SENDRECV; } /** * __audit_mq_notify - record audit data for a POSIX MQ notify * @mqdes: MQ descriptor * @notification: Notification event * */ void __audit_mq_notify(mqd_t mqdes, const struct sigevent *notification) { struct audit_context *context = current->audit_context; if (notification) context->mq_notify.sigev_signo = notification->sigev_signo; else context->mq_notify.sigev_signo = 0; context->mq_notify.mqdes = mqdes; context->type = AUDIT_MQ_NOTIFY; } /** * __audit_mq_getsetattr - record audit data for a POSIX MQ get/set attribute * @mqdes: MQ descriptor * @mqstat: MQ flags * */ void __audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat) { struct audit_context *context = current->audit_context; context->mq_getsetattr.mqdes = mqdes; context->mq_getsetattr.mqstat = *mqstat; context->type = AUDIT_MQ_GETSETATTR; } /** * audit_ipc_obj - record audit data for ipc object * @ipcp: ipc permissions * */ void __audit_ipc_obj(struct kern_ipc_perm *ipcp) { struct audit_context *context = current->audit_context; context->ipc.uid = ipcp->uid; context->ipc.gid = ipcp->gid; context->ipc.mode = ipcp->mode; context->ipc.has_perm = 0; security_ipc_getsecid(ipcp, &context->ipc.osid); context->type = AUDIT_IPC; } /** * audit_ipc_set_perm - record audit data for new ipc permissions * @qbytes: msgq bytes * @uid: msgq user id * @gid: msgq group id * @mode: msgq mode (permissions) * * Called only after audit_ipc_obj(). */ void __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, umode_t mode) { struct audit_context *context = current->audit_context; context->ipc.qbytes = qbytes; context->ipc.perm_uid = uid; context->ipc.perm_gid = gid; context->ipc.perm_mode = mode; context->ipc.has_perm = 1; } void __audit_bprm(struct linux_binprm *bprm) { struct audit_context *context = current->audit_context; context->type = AUDIT_EXECVE; context->execve.argc = bprm->argc; } /** * audit_socketcall - record audit data for sys_socketcall * @nargs: number of args, which should not be more than AUDITSC_ARGS. 
* @args: args array * */ int __audit_socketcall(int nargs, unsigned long *args) { struct audit_context *context = current->audit_context; if (nargs <= 0 || nargs > AUDITSC_ARGS || !args) return -EINVAL; context->type = AUDIT_SOCKETCALL; context->socketcall.nargs = nargs; memcpy(context->socketcall.args, args, nargs * sizeof(unsigned long)); return 0; } /** * __audit_fd_pair - record audit data for pipe and socketpair * @fd1: the first file descriptor * @fd2: the second file descriptor * */ void __audit_fd_pair(int fd1, int fd2) { struct audit_context *context = current->audit_context; context->fds[0] = fd1; context->fds[1] = fd2; } /** * audit_sockaddr - record audit data for sys_bind, sys_connect, sys_sendto * @len: data length in user space * @a: data address in kernel space * * Returns 0 for success or NULL context or < 0 on error. */ int __audit_sockaddr(int len, void *a) { struct audit_context *context = current->audit_context; if (!context->sockaddr) { void *p = kmalloc(sizeof(struct sockaddr_storage), GFP_KERNEL); if (!p) return -ENOMEM; context->sockaddr = p; } context->sockaddr_len = len; memcpy(context->sockaddr, a, len); return 0; } void __audit_ptrace(struct task_struct *t) { struct audit_context *context = current->audit_context; context->target_pid = task_pid_nr(t); context->target_auid = audit_get_loginuid(t); context->target_uid = task_uid(t); context->target_sessionid = audit_get_sessionid(t); security_task_getsecid(t, &context->target_sid); memcpy(context->target_comm, t->comm, TASK_COMM_LEN); } /** * audit_signal_info - record signal info for shutting down audit subsystem * @sig: signal value * @t: task being signaled * * If the audit subsystem is being terminated, record the task (pid) * and uid that is doing that. */ int __audit_signal_info(int sig, struct task_struct *t) { struct audit_aux_data_pids *axp; struct task_struct *tsk = current; struct audit_context *ctx = tsk->audit_context; kuid_t uid = current_uid(), t_uid = task_uid(t); if (audit_pid && t->tgid == audit_pid) { if (sig == SIGTERM || sig == SIGHUP || sig == SIGUSR1 || sig == SIGUSR2) { audit_sig_pid = task_pid_nr(tsk); if (uid_valid(tsk->loginuid)) audit_sig_uid = tsk->loginuid; else audit_sig_uid = uid; security_task_getsecid(tsk, &audit_sig_sid); } if (!audit_signals || audit_dummy_context()) return 0; } /* optimize the common case by putting first signal recipient directly * in audit_context */ if (!ctx->target_pid) { ctx->target_pid = task_tgid_nr(t); ctx->target_auid = audit_get_loginuid(t); ctx->target_uid = t_uid; ctx->target_sessionid = audit_get_sessionid(t); security_task_getsecid(t, &ctx->target_sid); memcpy(ctx->target_comm, t->comm, TASK_COMM_LEN); return 0; } axp = (void *)ctx->aux_pids; if (!axp || axp->pid_count == AUDIT_AUX_PIDS) { axp = kzalloc(sizeof(*axp), GFP_ATOMIC); if (!axp) return -ENOMEM; axp->d.type = AUDIT_OBJ_PID; axp->d.next = ctx->aux_pids; ctx->aux_pids = (void *)axp; } BUG_ON(axp->pid_count >= AUDIT_AUX_PIDS); axp->target_pid[axp->pid_count] = task_tgid_nr(t); axp->target_auid[axp->pid_count] = audit_get_loginuid(t); axp->target_uid[axp->pid_count] = t_uid; axp->target_sessionid[axp->pid_count] = audit_get_sessionid(t); security_task_getsecid(t, &axp->target_sid[axp->pid_count]); memcpy(axp->target_comm[axp->pid_count], t->comm, TASK_COMM_LEN); axp->pid_count++; return 0; } /** * __audit_log_bprm_fcaps - store information about a loading bprm and relevant fcaps * @bprm: pointer to the bprm being processed * @new: the proposed new credentials * @old: the old credentials * * 
Simply check if the proc already has the caps given by the file and if not * store the priv escalation info for later auditing at the end of the syscall * * -Eric */ int __audit_log_bprm_fcaps(struct linux_binprm *bprm, const struct cred *new, const struct cred *old) { struct audit_aux_data_bprm_fcaps *ax; struct audit_context *context = current->audit_context; struct cpu_vfs_cap_data vcaps; ax = kmalloc(sizeof(*ax), GFP_KERNEL); if (!ax) return -ENOMEM; ax->d.type = AUDIT_BPRM_FCAPS; ax->d.next = context->aux; context->aux = (void *)ax; get_vfs_caps_from_disk(bprm->file->f_path.dentry, &vcaps); ax->fcap.permitted = vcaps.permitted; ax->fcap.inheritable = vcaps.inheritable; ax->fcap.fE = !!(vcaps.magic_etc & VFS_CAP_FLAGS_EFFECTIVE); ax->fcap_ver = (vcaps.magic_etc & VFS_CAP_REVISION_MASK) >> VFS_CAP_REVISION_SHIFT; ax->old_pcap.permitted = old->cap_permitted; ax->old_pcap.inheritable = old->cap_inheritable; ax->old_pcap.effective = old->cap_effective; ax->new_pcap.permitted = new->cap_permitted; ax->new_pcap.inheritable = new->cap_inheritable; ax->new_pcap.effective = new->cap_effective; return 0; } /** * __audit_log_capset - store information about the arguments to the capset syscall * @new: the new credentials * @old: the old (current) credentials * * Record the arguments userspace sent to sys_capset for later printing by the * audit system if applicable */ void __audit_log_capset(const struct cred *new, const struct cred *old) { struct audit_context *context = current->audit_context; context->capset.pid = task_pid_nr(current); context->capset.cap.effective = new->cap_effective; context->capset.cap.inheritable = new->cap_effective; context->capset.cap.permitted = new->cap_permitted; context->type = AUDIT_CAPSET; } void __audit_mmap_fd(int fd, int flags) { struct audit_context *context = current->audit_context; context->mmap.fd = fd; context->mmap.flags = flags; context->type = AUDIT_MMAP; } static void audit_log_task(struct audit_buffer *ab) { kuid_t auid, uid; kgid_t gid; unsigned int sessionid; char comm[sizeof(current->comm)]; auid = audit_get_loginuid(current); sessionid = audit_get_sessionid(current); current_uid_gid(&uid, &gid); audit_log_format(ab, "auid=%u uid=%u gid=%u ses=%u", from_kuid(&init_user_ns, auid), from_kuid(&init_user_ns, uid), from_kgid(&init_user_ns, gid), sessionid); audit_log_task_context(ab); audit_log_format(ab, " pid=%d comm=", task_pid_nr(current)); audit_log_untrustedstring(ab, get_task_comm(comm, current)); audit_log_d_path_exe(ab, current->mm); } /** * audit_core_dumps - record information about processes that end abnormally * @signr: signal value * * If a process ends with a core dump, something fishy is going on and we * should record the event for investigation. 
*/ void audit_core_dumps(long signr) { struct audit_buffer *ab; if (!audit_enabled) return; if (signr == SIGQUIT) /* don't care for those */ return; ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_ANOM_ABEND); if (unlikely(!ab)) return; audit_log_task(ab); audit_log_format(ab, " sig=%ld", signr); audit_log_end(ab); } void __audit_seccomp(unsigned long syscall, long signr, int code) { struct audit_buffer *ab; ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_SECCOMP); if (unlikely(!ab)) return; audit_log_task(ab); audit_log_format(ab, " sig=%ld arch=%x syscall=%ld compat=%d ip=0x%lx code=0x%x", signr, syscall_get_arch(), syscall, in_compat_syscall(), KSTK_EIP(current), code); audit_log_end(ab); } struct list_head *audit_killed_trees(void) { struct audit_context *ctx = current->audit_context; if (likely(!ctx || !ctx->in_syscall)) return NULL; return &ctx->killed_trees; }
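The audit hooks above are written as slow-path helpers (hence the leading double underscore); the intent is that callers first test tsk->audit_context cheaply and only then call in. The fragment below is a minimal caller-side sketch of that convention, not part of this file: the wrapper names and exact signatures are illustrative only, and it assumes a kernel build where task_struct carries an audit_context pointer.

/* Hypothetical caller-side guards (sketch only): skip the slow path
 * entirely when the current task was never given an audit context. */
static inline void audit_syscall_entry_sketch(int major, unsigned long a1,
					      unsigned long a2, unsigned long a3,
					      unsigned long a4)
{
	if (unlikely(current->audit_context))
		__audit_syscall_entry(major, a1, a2, a3, a4);
}

static inline void audit_syscall_exit_sketch(int success, long return_code)
{
	if (unlikely(current->audit_context))
		__audit_syscall_exit(success, return_code);
}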
./CrossVul/dataset_final_sorted/CWE-362/c/good_5187_0
crossvul-cpp_data_good_3536_0
/* SCTP kernel implementation * Copyright (c) 1999-2000 Cisco, Inc. * Copyright (c) 1999-2001 Motorola, Inc. * Copyright (c) 2001-2003 International Business Machines, Corp. * Copyright (c) 2001 Intel Corp. * Copyright (c) 2001 Nokia, Inc. * Copyright (c) 2001 La Monte H.P. Yarroll * * This file is part of the SCTP kernel implementation * * These functions handle all input from the IP layer into SCTP. * * This SCTP implementation is free software; * you can redistribute it and/or modify it under the terms of * the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This SCTP implementation is distributed in the hope that it * will be useful, but WITHOUT ANY WARRANTY; without even the implied * ************************ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * See the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with GNU CC; see the file COPYING. If not, write to * the Free Software Foundation, 59 Temple Place - Suite 330, * Boston, MA 02111-1307, USA. * * Please send any bug reports or fixes you make to the * email address(es): * lksctp developers <lksctp-developers@lists.sourceforge.net> * * Or submit a bug report through the following website: * http://www.sf.net/projects/lksctp * * Written or modified by: * La Monte H.P. Yarroll <piggy@acm.org> * Karl Knutson <karl@athena.chicago.il.us> * Xingang Guo <xingang.guo@intel.com> * Jon Grimm <jgrimm@us.ibm.com> * Hui Huang <hui.huang@nokia.com> * Daisy Chang <daisyc@us.ibm.com> * Sridhar Samudrala <sri@us.ibm.com> * Ardelle Fan <ardelle.fan@intel.com> * * Any bugs reported given to us we will try to fix... any fixes shared will * be incorporated into the next SCTP release. */ #include <linux/types.h> #include <linux/list.h> /* For struct list_head */ #include <linux/socket.h> #include <linux/ip.h> #include <linux/time.h> /* For struct timeval */ #include <net/ip.h> #include <net/icmp.h> #include <net/snmp.h> #include <net/sock.h> #include <net/xfrm.h> #include <net/sctp/sctp.h> #include <net/sctp/sm.h> #include <net/sctp/checksum.h> #include <net/net_namespace.h> /* Forward declarations for internal helpers. */ static int sctp_rcv_ootb(struct sk_buff *); static struct sctp_association *__sctp_rcv_lookup(struct sk_buff *skb, const union sctp_addr *laddr, const union sctp_addr *paddr, struct sctp_transport **transportp); static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(const union sctp_addr *laddr); static struct sctp_association *__sctp_lookup_association( const union sctp_addr *local, const union sctp_addr *peer, struct sctp_transport **pt); static void sctp_add_backlog(struct sock *sk, struct sk_buff *skb); /* Calculate the SCTP checksum of an SCTP packet. */ static inline int sctp_rcv_checksum(struct sk_buff *skb) { struct sk_buff *list = skb_shinfo(skb)->frag_list; struct sctphdr *sh = sctp_hdr(skb); __be32 cmp = sh->checksum; __be32 val = sctp_start_cksum((__u8 *)sh, skb_headlen(skb)); for (; list; list = list->next) val = sctp_update_cksum((__u8 *)list->data, skb_headlen(list), val); val = sctp_end_cksum(val); if (val != cmp) { /* CRC failure, dump it. 
*/ SCTP_INC_STATS_BH(SCTP_MIB_CHECKSUMERRORS); return -1; } return 0; } struct sctp_input_cb { union { struct inet_skb_parm h4; #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) struct inet6_skb_parm h6; #endif } header; struct sctp_chunk *chunk; }; #define SCTP_INPUT_CB(__skb) ((struct sctp_input_cb *)&((__skb)->cb[0])) /* * This is the routine which IP calls when receiving an SCTP packet. */ int sctp_rcv(struct sk_buff *skb) { struct sock *sk; struct sctp_association *asoc; struct sctp_endpoint *ep = NULL; struct sctp_ep_common *rcvr; struct sctp_transport *transport = NULL; struct sctp_chunk *chunk; struct sctphdr *sh; union sctp_addr src; union sctp_addr dest; int family; struct sctp_af *af; if (skb->pkt_type!=PACKET_HOST) goto discard_it; SCTP_INC_STATS_BH(SCTP_MIB_INSCTPPACKS); if (skb_linearize(skb)) goto discard_it; sh = sctp_hdr(skb); /* Pull up the IP and SCTP headers. */ __skb_pull(skb, skb_transport_offset(skb)); if (skb->len < sizeof(struct sctphdr)) goto discard_it; if (!skb_csum_unnecessary(skb) && sctp_rcv_checksum(skb) < 0) goto discard_it; skb_pull(skb, sizeof(struct sctphdr)); /* Make sure we at least have chunk headers worth of data left. */ if (skb->len < sizeof(struct sctp_chunkhdr)) goto discard_it; family = ipver2af(ip_hdr(skb)->version); af = sctp_get_af_specific(family); if (unlikely(!af)) goto discard_it; /* Initialize local addresses for lookups. */ af->from_skb(&src, skb, 1); af->from_skb(&dest, skb, 0); /* If the packet is to or from a non-unicast address, * silently discard the packet. * * This is not clearly defined in the RFC except in section * 8.4 - OOTB handling. However, based on the book "Stream Control * Transmission Protocol" 2.1, "It is important to note that the * IP address of an SCTP transport address must be a routable * unicast address. In other words, IP multicast addresses and * IP broadcast addresses cannot be used in an SCTP transport * address." */ if (!af->addr_valid(&src, NULL, skb) || !af->addr_valid(&dest, NULL, skb)) goto discard_it; asoc = __sctp_rcv_lookup(skb, &src, &dest, &transport); if (!asoc) ep = __sctp_rcv_lookup_endpoint(&dest); /* Retrieve the common input handling substructure. */ rcvr = asoc ? &asoc->base : &ep->base; sk = rcvr->sk; /* * If a frame arrives on an interface and the receiving socket is * bound to another interface, via SO_BINDTODEVICE, treat it as OOTB */ if (sk->sk_bound_dev_if && (sk->sk_bound_dev_if != af->skb_iif(skb))) { if (asoc) { sctp_association_put(asoc); asoc = NULL; } else { sctp_endpoint_put(ep); ep = NULL; } sk = sctp_get_ctl_sock(); ep = sctp_sk(sk)->ep; sctp_endpoint_hold(ep); rcvr = &ep->base; } /* * RFC 2960, 8.4 - Handle "Out of the blue" Packets. * An SCTP packet is called an "out of the blue" (OOTB) * packet if it is correctly formed, i.e., passed the * receiver's checksum check, but the receiver is not * able to identify the association to which this * packet belongs. */ if (!asoc) { if (sctp_rcv_ootb(skb)) { SCTP_INC_STATS_BH(SCTP_MIB_OUTOFBLUES); goto discard_release; } } if (!xfrm_policy_check(sk, XFRM_POLICY_IN, skb, family)) goto discard_release; nf_reset(skb); if (sk_filter(sk, skb)) goto discard_release; /* Create an SCTP packet structure. */ chunk = sctp_chunkify(skb, asoc, sk); if (!chunk) goto discard_release; SCTP_INPUT_CB(skb)->chunk = chunk; /* Remember what endpoint is to handle this packet. */ chunk->rcvr = rcvr; /* Remember the SCTP header. */ chunk->sctp_hdr = sh; /* Set the source and destination addresses of the incoming chunk. 
*/ sctp_init_addrs(chunk, &src, &dest); /* Remember where we came from. */ chunk->transport = transport; /* Acquire access to the sock lock. Note: We are safe from other * bottom halves on this lock, but a user may be in the lock too, * so check if it is busy. */ sctp_bh_lock_sock(sk); if (sk != rcvr->sk) { /* Our cached sk is different from the rcvr->sk. This is * because migrate()/accept() may have moved the association * to a new socket and released all the sockets. So now we * are holding a lock on the old socket while the user may * be doing something with the new socket. Switch our view * of the current sk. */ sctp_bh_unlock_sock(sk); sk = rcvr->sk; sctp_bh_lock_sock(sk); } if (sock_owned_by_user(sk)) { SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_BACKLOG); sctp_add_backlog(sk, skb); } else { SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_SOFTIRQ); sctp_inq_push(&chunk->rcvr->inqueue, chunk); } sctp_bh_unlock_sock(sk); /* Release the asoc/ep ref we took in the lookup calls. */ if (asoc) sctp_association_put(asoc); else sctp_endpoint_put(ep); return 0; discard_it: SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_DISCARDS); kfree_skb(skb); return 0; discard_release: /* Release the asoc/ep ref we took in the lookup calls. */ if (asoc) sctp_association_put(asoc); else sctp_endpoint_put(ep); goto discard_it; } /* Process the backlog queue of the socket. Every skb on * the backlog holds a ref on an association or endpoint. * We hold this ref throughout the state machine to make * sure that the structure we need is still around. */ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb) { struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk; struct sctp_inq *inqueue = &chunk->rcvr->inqueue; struct sctp_ep_common *rcvr = NULL; int backloged = 0; rcvr = chunk->rcvr; /* If the rcvr is dead then the association or endpoint * has been deleted and we can safely drop the chunk * and refs that we are holding. */ if (rcvr->dead) { sctp_chunk_free(chunk); goto done; } if (unlikely(rcvr->sk != sk)) { /* In this case, the association moved from one socket to * another. We are currently sitting on the backlog of the * old socket, so we need to move. * However, since we are here in the process context we * need to make sure that the user doesn't own * the new socket when we process the packet. * If the new socket is user-owned, queue the chunk to the * backlog of the new socket without dropping any refs. * Otherwise, we can safely push the chunk on the inqueue. */ sk = rcvr->sk; sctp_bh_lock_sock(sk); if (sock_owned_by_user(sk)) { sk_add_backlog(sk, skb); backloged = 1; } else sctp_inq_push(inqueue, chunk); sctp_bh_unlock_sock(sk); /* If the chunk was backloged again, don't drop refs */ if (backloged) return 0; } else { sctp_inq_push(inqueue, chunk); } done: /* Release the refs we took in sctp_add_backlog */ if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type) sctp_association_put(sctp_assoc(rcvr)); else if (SCTP_EP_TYPE_SOCKET == rcvr->type) sctp_endpoint_put(sctp_ep(rcvr)); else BUG(); return 0; } static void sctp_add_backlog(struct sock *sk, struct sk_buff *skb) { struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk; struct sctp_ep_common *rcvr = chunk->rcvr; /* Hold the assoc/ep while hanging on the backlog queue. * This way, we know structures we need will not disappear from us */ if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type) sctp_association_hold(sctp_assoc(rcvr)); else if (SCTP_EP_TYPE_SOCKET == rcvr->type) sctp_endpoint_hold(sctp_ep(rcvr)); else BUG(); sk_add_backlog(sk, skb); } /* Handle icmp frag needed error.
*/ void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc, struct sctp_transport *t, __u32 pmtu) { if (!t || (t->pathmtu <= pmtu)) return; if (sock_owned_by_user(sk)) { asoc->pmtu_pending = 1; t->pmtu_pending = 1; return; } if (t->param_flags & SPP_PMTUD_ENABLE) { /* Update transports view of the MTU */ sctp_transport_update_pmtu(t, pmtu); /* Update association pmtu. */ sctp_assoc_sync_pmtu(asoc); } /* Retransmit with the new pmtu setting. * Normally, if PMTU discovery is disabled, an ICMP Fragmentation * Needed will never be sent, but if a message was sent before * PMTU discovery was disabled that was larger than the PMTU, it * would not be fragmented, so it must be re-transmitted fragmented. */ sctp_retransmit(&asoc->outqueue, t, SCTP_RTXR_PMTUD); } /* * SCTP Implementer's Guide, 2.37 ICMP handling procedures * * ICMP8) If the ICMP code is a "Unrecognized next header type encountered" * or a "Protocol Unreachable" treat this message as an abort * with the T bit set. * * This function sends an event to the state machine, which will abort the * association. * */ void sctp_icmp_proto_unreachable(struct sock *sk, struct sctp_association *asoc, struct sctp_transport *t) { SCTP_DEBUG_PRINTK("%s\n", __func__); sctp_do_sm(SCTP_EVENT_T_OTHER, SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH), asoc->state, asoc->ep, asoc, t, GFP_ATOMIC); } /* Common lookup code for icmp/icmpv6 error handler. */ struct sock *sctp_err_lookup(int family, struct sk_buff *skb, struct sctphdr *sctphdr, struct sctp_association **app, struct sctp_transport **tpp) { union sctp_addr saddr; union sctp_addr daddr; struct sctp_af *af; struct sock *sk = NULL; struct sctp_association *asoc; struct sctp_transport *transport = NULL; struct sctp_init_chunk *chunkhdr; __u32 vtag = ntohl(sctphdr->vtag); int len = skb->len - ((void *)sctphdr - (void *)skb->data); *app = NULL; *tpp = NULL; af = sctp_get_af_specific(family); if (unlikely(!af)) { return NULL; } /* Initialize local addresses for lookups. */ af->from_skb(&saddr, skb, 1); af->from_skb(&daddr, skb, 0); /* Look for an association that matches the incoming ICMP error * packet. */ asoc = __sctp_lookup_association(&saddr, &daddr, &transport); if (!asoc) return NULL; sk = asoc->base.sk; /* RFC 4960, Appendix C. ICMP Handling * * ICMP6) An implementation MUST validate that the Verification Tag * contained in the ICMP message matches the Verification Tag of * the peer. If the Verification Tag is not 0 and does NOT * match, discard the ICMP message. If it is 0 and the ICMP * message contains enough bytes to verify that the chunk type is * an INIT chunk and that the Initiate Tag matches the tag of the * peer, continue with ICMP7. If the ICMP message is too short * or the chunk type or the Initiate Tag does not match, silently * discard the packet. */ if (vtag == 0) { chunkhdr = (struct sctp_init_chunk *)((void *)sctphdr + sizeof(struct sctphdr)); if (len < sizeof(struct sctphdr) + sizeof(sctp_chunkhdr_t) + sizeof(__be32) || chunkhdr->chunk_hdr.type != SCTP_CID_INIT || ntohl(chunkhdr->init_hdr.init_tag) != asoc->c.my_vtag) { goto out; } } else if (vtag != asoc->c.peer_vtag) { goto out; } sctp_bh_lock_sock(sk); /* If too many ICMPs get dropped on busy * servers this needs to be solved differently. */ if (sock_owned_by_user(sk)) NET_INC_STATS_BH(&init_net, LINUX_MIB_LOCKDROPPEDICMPS); *app = asoc; *tpp = transport; return sk; out: if (asoc) sctp_association_put(asoc); return NULL; } /* Common cleanup code for icmp/icmpv6 error handler. 
*/ void sctp_err_finish(struct sock *sk, struct sctp_association *asoc) { sctp_bh_unlock_sock(sk); if (asoc) sctp_association_put(asoc); } /* * This routine is called by the ICMP module when it gets some * sort of error condition. If err < 0 then the socket should * be closed and the error returned to the user. If err > 0 * it's just the icmp type << 8 | icmp code. After adjustment * header points to the first 8 bytes of the sctp header. We need * to find the appropriate port. * * The locking strategy used here is very "optimistic". When * someone else accesses the socket the ICMP is just dropped * and for some paths there is no check at all. * A more general error queue to queue errors for later handling * is probably better. * */ void sctp_v4_err(struct sk_buff *skb, __u32 info) { struct iphdr *iph = (struct iphdr *)skb->data; const int ihlen = iph->ihl * 4; const int type = icmp_hdr(skb)->type; const int code = icmp_hdr(skb)->code; struct sock *sk; struct sctp_association *asoc = NULL; struct sctp_transport *transport; struct inet_sock *inet; sk_buff_data_t saveip, savesctp; int err; if (skb->len < ihlen + 8) { ICMP_INC_STATS_BH(&init_net, ICMP_MIB_INERRORS); return; } /* Fix up skb to look at the embedded net header. */ saveip = skb->network_header; savesctp = skb->transport_header; skb_reset_network_header(skb); skb_set_transport_header(skb, ihlen); sk = sctp_err_lookup(AF_INET, skb, sctp_hdr(skb), &asoc, &transport); /* Put back, the original values. */ skb->network_header = saveip; skb->transport_header = savesctp; if (!sk) { ICMP_INC_STATS_BH(&init_net, ICMP_MIB_INERRORS); return; } /* Warning: The sock lock is held. Remember to call * sctp_err_finish! */ switch (type) { case ICMP_PARAMETERPROB: err = EPROTO; break; case ICMP_DEST_UNREACH: if (code > NR_ICMP_UNREACH) goto out_unlock; /* PMTU discovery (RFC1191) */ if (ICMP_FRAG_NEEDED == code) { sctp_icmp_frag_needed(sk, asoc, transport, info); goto out_unlock; } else { if (ICMP_PROT_UNREACH == code) { sctp_icmp_proto_unreachable(sk, asoc, transport); goto out_unlock; } } err = icmp_err_convert[code].errno; break; case ICMP_TIME_EXCEEDED: /* Ignore any time exceeded errors due to fragment reassembly * timeouts. */ if (ICMP_EXC_FRAGTIME == code) goto out_unlock; err = EHOSTUNREACH; break; default: goto out_unlock; } inet = inet_sk(sk); if (!sock_owned_by_user(sk) && inet->recverr) { sk->sk_err = err; sk->sk_error_report(sk); } else { /* Only an error on timeout */ sk->sk_err_soft = err; } out_unlock: sctp_err_finish(sk, asoc); } /* * RFC 2960, 8.4 - Handle "Out of the blue" Packets. * * This function scans all the chunks in the OOTB packet to determine if * the packet should be discarded right away. If a response might be needed * for this packet, or, if further processing is possible, the packet will * be queued to a proper inqueue for the next phase of handling. * * Output: * Return 0 - If further processing is needed. * Return 1 - If the packet can be discarded right away. */ static int sctp_rcv_ootb(struct sk_buff *skb) { sctp_chunkhdr_t *ch; __u8 *ch_end; sctp_errhdr_t *err; ch = (sctp_chunkhdr_t *) skb->data; /* Scan through all the chunks in the packet. */ do { /* Break out if chunk length is less then minimal. */ if (ntohs(ch->length) < sizeof(sctp_chunkhdr_t)) break; ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length)); if (ch_end > skb_tail_pointer(skb)) break; /* RFC 8.4, 2) If the OOTB packet contains an ABORT chunk, the * receiver MUST silently discard the OOTB packet and take no * further action. 
*/ if (SCTP_CID_ABORT == ch->type) goto discard; /* RFC 8.4, 6) If the packet contains a SHUTDOWN COMPLETE * chunk, the receiver should silently discard the packet * and take no further action. */ if (SCTP_CID_SHUTDOWN_COMPLETE == ch->type) goto discard; /* RFC 4460, 2.11.2 * This will discard packets with INIT chunk bundled as * subsequent chunks in the packet. When INIT is first, * the normal INIT processing will discard the chunk. */ if (SCTP_CID_INIT == ch->type && (void *)ch != skb->data) goto discard; /* RFC 8.4, 7) If the packet contains a "Stale cookie" ERROR * or a COOKIE ACK the SCTP Packet should be silently * discarded. */ if (SCTP_CID_COOKIE_ACK == ch->type) goto discard; if (SCTP_CID_ERROR == ch->type) { sctp_walk_errors(err, ch) { if (SCTP_ERROR_STALE_COOKIE == err->cause) goto discard; } } ch = (sctp_chunkhdr_t *) ch_end; } while (ch_end < skb_tail_pointer(skb)); return 0; discard: return 1; } /* Insert endpoint into the hash table. */ static void __sctp_hash_endpoint(struct sctp_endpoint *ep) { struct sctp_ep_common *epb; struct sctp_hashbucket *head; epb = &ep->base; epb->hashent = sctp_ep_hashfn(epb->bind_addr.port); head = &sctp_ep_hashtable[epb->hashent]; sctp_write_lock(&head->lock); hlist_add_head(&epb->node, &head->chain); sctp_write_unlock(&head->lock); } /* Add an endpoint to the hash. Local BH-safe. */ void sctp_hash_endpoint(struct sctp_endpoint *ep) { sctp_local_bh_disable(); __sctp_hash_endpoint(ep); sctp_local_bh_enable(); } /* Remove endpoint from the hash table. */ static void __sctp_unhash_endpoint(struct sctp_endpoint *ep) { struct sctp_hashbucket *head; struct sctp_ep_common *epb; epb = &ep->base; if (hlist_unhashed(&epb->node)) return; epb->hashent = sctp_ep_hashfn(epb->bind_addr.port); head = &sctp_ep_hashtable[epb->hashent]; sctp_write_lock(&head->lock); __hlist_del(&epb->node); sctp_write_unlock(&head->lock); } /* Remove endpoint from the hash. Local BH-safe. */ void sctp_unhash_endpoint(struct sctp_endpoint *ep) { sctp_local_bh_disable(); __sctp_unhash_endpoint(ep); sctp_local_bh_enable(); } /* Look up an endpoint. */ static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(const union sctp_addr *laddr) { struct sctp_hashbucket *head; struct sctp_ep_common *epb; struct sctp_endpoint *ep; struct hlist_node *node; int hash; hash = sctp_ep_hashfn(ntohs(laddr->v4.sin_port)); head = &sctp_ep_hashtable[hash]; read_lock(&head->lock); sctp_for_each_hentry(epb, node, &head->chain) { ep = sctp_ep(epb); if (sctp_endpoint_is_match(ep, laddr)) goto hit; } ep = sctp_sk((sctp_get_ctl_sock()))->ep; hit: sctp_endpoint_hold(ep); read_unlock(&head->lock); return ep; } /* Insert association into the hash table. */ static void __sctp_hash_established(struct sctp_association *asoc) { struct sctp_ep_common *epb; struct sctp_hashbucket *head; epb = &asoc->base; /* Calculate which chain this entry will belong to. */ epb->hashent = sctp_assoc_hashfn(epb->bind_addr.port, asoc->peer.port); head = &sctp_assoc_hashtable[epb->hashent]; sctp_write_lock(&head->lock); hlist_add_head(&epb->node, &head->chain); sctp_write_unlock(&head->lock); } /* Add an association to the hash. Local BH-safe. */ void sctp_hash_established(struct sctp_association *asoc) { if (asoc->temp) return; sctp_local_bh_disable(); __sctp_hash_established(asoc); sctp_local_bh_enable(); } /* Remove association from the hash table. 
*/ static void __sctp_unhash_established(struct sctp_association *asoc) { struct sctp_hashbucket *head; struct sctp_ep_common *epb; epb = &asoc->base; epb->hashent = sctp_assoc_hashfn(epb->bind_addr.port, asoc->peer.port); head = &sctp_assoc_hashtable[epb->hashent]; sctp_write_lock(&head->lock); __hlist_del(&epb->node); sctp_write_unlock(&head->lock); } /* Remove association from the hash table. Local BH-safe. */ void sctp_unhash_established(struct sctp_association *asoc) { if (asoc->temp) return; sctp_local_bh_disable(); __sctp_unhash_established(asoc); sctp_local_bh_enable(); } /* Look up an association. */ static struct sctp_association *__sctp_lookup_association( const union sctp_addr *local, const union sctp_addr *peer, struct sctp_transport **pt) { struct sctp_hashbucket *head; struct sctp_ep_common *epb; struct sctp_association *asoc; struct sctp_transport *transport; struct hlist_node *node; int hash; /* Optimize here for direct hit, only listening connections can * have wildcards anyways. */ hash = sctp_assoc_hashfn(ntohs(local->v4.sin_port), ntohs(peer->v4.sin_port)); head = &sctp_assoc_hashtable[hash]; read_lock(&head->lock); sctp_for_each_hentry(epb, node, &head->chain) { asoc = sctp_assoc(epb); transport = sctp_assoc_is_match(asoc, local, peer); if (transport) goto hit; } read_unlock(&head->lock); return NULL; hit: *pt = transport; sctp_association_hold(asoc); read_unlock(&head->lock); return asoc; } /* Look up an association. BH-safe. */ SCTP_STATIC struct sctp_association *sctp_lookup_association(const union sctp_addr *laddr, const union sctp_addr *paddr, struct sctp_transport **transportp) { struct sctp_association *asoc; sctp_local_bh_disable(); asoc = __sctp_lookup_association(laddr, paddr, transportp); sctp_local_bh_enable(); return asoc; } /* Is there an association matching the given local and peer addresses? */ int sctp_has_association(const union sctp_addr *laddr, const union sctp_addr *paddr) { struct sctp_association *asoc; struct sctp_transport *transport; if ((asoc = sctp_lookup_association(laddr, paddr, &transport))) { sctp_association_put(asoc); return 1; } return 0; } /* * SCTP Implementors Guide, 2.18 Handling of address * parameters within the INIT or INIT-ACK. * * D) When searching for a matching TCB upon reception of an INIT * or INIT-ACK chunk the receiver SHOULD use not only the * source address of the packet (containing the INIT or * INIT-ACK) but the receiver SHOULD also use all valid * address parameters contained within the chunk. * * 2.18.3 Solution description * * This new text clearly specifies to an implementor the need * to look within the INIT or INIT-ACK. Any implementation that * does not do this, may not be able to establish associations * in certain circumstances. * */ static struct sctp_association *__sctp_rcv_init_lookup(struct sk_buff *skb, const union sctp_addr *laddr, struct sctp_transport **transportp) { struct sctp_association *asoc; union sctp_addr addr; union sctp_addr *paddr = &addr; struct sctphdr *sh = sctp_hdr(skb); sctp_chunkhdr_t *ch; union sctp_params params; sctp_init_chunk_t *init; struct sctp_transport *transport; struct sctp_af *af; ch = (sctp_chunkhdr_t *) skb->data; /* * This code will NOT touch anything inside the chunk--it is * strictly READ-ONLY. * * RFC 2960 3 SCTP packet Format * * Multiple chunks can be bundled into one SCTP packet up to * the MTU size, except for the INIT, INIT ACK, and SHUTDOWN * COMPLETE chunks. These chunks MUST NOT be bundled with any * other chunk in a packet. 
See Section 6.10 for more details * on chunk bundling. */ /* Find the start of the TLVs and the end of the chunk. This is * the region we search for address parameters. */ init = (sctp_init_chunk_t *)skb->data; /* Walk the parameters looking for embedded addresses. */ sctp_walk_params(params, init, init_hdr.params) { /* Note: Ignoring hostname addresses. */ af = sctp_get_af_specific(param_type2af(params.p->type)); if (!af) continue; af->from_addr_param(paddr, params.addr, sh->source, 0); asoc = __sctp_lookup_association(laddr, paddr, &transport); if (asoc) return asoc; } return NULL; } /* ADD-IP, Section 5.2 * When an endpoint receives an ASCONF Chunk from the remote peer * special procedures may be needed to identify the association the * ASCONF Chunk is associated with. To properly find the association * the following procedures SHOULD be followed: * * D2) If the association is not found, use the address found in the * Address Parameter TLV combined with the port number found in the * SCTP common header. If found proceed to rule D4. * * D2-ext) If more than one ASCONF Chunks are packed together, use the * address found in the ASCONF Address Parameter TLV of each of the * subsequent ASCONF Chunks. If found, proceed to rule D4. */ static struct sctp_association *__sctp_rcv_asconf_lookup( sctp_chunkhdr_t *ch, const union sctp_addr *laddr, __be16 peer_port, struct sctp_transport **transportp) { sctp_addip_chunk_t *asconf = (struct sctp_addip_chunk *)ch; struct sctp_af *af; union sctp_addr_param *param; union sctp_addr paddr; /* Skip over the ADDIP header and find the Address parameter */ param = (union sctp_addr_param *)(asconf + 1); af = sctp_get_af_specific(param_type2af(param->v4.param_hdr.type)); if (unlikely(!af)) return NULL; af->from_addr_param(&paddr, param, peer_port, 0); return __sctp_lookup_association(laddr, &paddr, transportp); } /* SCTP-AUTH, Section 6.3: * If the receiver does not find a STCB for a packet containing an AUTH * chunk as the first chunk and not a COOKIE-ECHO chunk as the second * chunk, it MUST use the chunks after the AUTH chunk to look up an existing * association. * * This means that any chunks that can help us identify the association need * to be looked at to find this association. */ static struct sctp_association *__sctp_rcv_walk_lookup(struct sk_buff *skb, const union sctp_addr *laddr, struct sctp_transport **transportp) { struct sctp_association *asoc = NULL; sctp_chunkhdr_t *ch; int have_auth = 0; unsigned int chunk_num = 1; __u8 *ch_end; /* Walk through the chunks looking for AUTH or ASCONF chunks * to help us find the association. */ ch = (sctp_chunkhdr_t *) skb->data; do { /* Break out if chunk length is less then minimal. */ if (ntohs(ch->length) < sizeof(sctp_chunkhdr_t)) break; ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length)); if (ch_end > skb_tail_pointer(skb)) break; switch(ch->type) { case SCTP_CID_AUTH: have_auth = chunk_num; break; case SCTP_CID_COOKIE_ECHO: /* If a packet arrives containing an AUTH chunk as * a first chunk, a COOKIE-ECHO chunk as the second * chunk, and possibly more chunks after them, and * the receiver does not have an STCB for that * packet, then authentication is based on * the contents of the COOKIE-ECHO chunk.
*/ if (have_auth == 1 && chunk_num == 2) return NULL; break; case SCTP_CID_ASCONF: if (have_auth || sctp_addip_noauth) asoc = __sctp_rcv_asconf_lookup(ch, laddr, sctp_hdr(skb)->source, transportp); default: break; } if (asoc) break; ch = (sctp_chunkhdr_t *) ch_end; chunk_num++; } while (ch_end < skb_tail_pointer(skb)); return asoc; } /* * There are circumstances when we need to look inside the SCTP packet * for information to help us find the association. Examples * include looking inside of INIT/INIT-ACK chunks or after the AUTH * chunks. */ static struct sctp_association *__sctp_rcv_lookup_harder(struct sk_buff *skb, const union sctp_addr *laddr, struct sctp_transport **transportp) { sctp_chunkhdr_t *ch; ch = (sctp_chunkhdr_t *) skb->data; /* The code below will attempt to walk the chunk and extract * parameter information. Before we do that, we need to verify * that the chunk length doesn't cause overflow. Otherwise, we'll * walk off the end. */ if (WORD_ROUND(ntohs(ch->length)) > skb->len) return NULL; /* If this is INIT/INIT-ACK look inside the chunk too. */ switch (ch->type) { case SCTP_CID_INIT: case SCTP_CID_INIT_ACK: return __sctp_rcv_init_lookup(skb, laddr, transportp); break; default: return __sctp_rcv_walk_lookup(skb, laddr, transportp); break; } return NULL; } /* Lookup an association for an inbound skb. */ static struct sctp_association *__sctp_rcv_lookup(struct sk_buff *skb, const union sctp_addr *paddr, const union sctp_addr *laddr, struct sctp_transport **transportp) { struct sctp_association *asoc; asoc = __sctp_lookup_association(laddr, paddr, transportp); /* Further lookup for INIT/INIT-ACK packets. * SCTP Implementors Guide, 2.18 Handling of address * parameters within the INIT or INIT-ACK. */ if (!asoc) asoc = __sctp_rcv_lookup_harder(skb, laddr, transportp); return asoc; }
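sctp_rcv() and sctp_v4_err() are only entry points; they take effect once they are registered with the IPv4 stack as the handlers for IPPROTO_SCTP, and that wiring lives outside this file. Below is a rough sketch of what such a registration typically looks like, assuming the generic struct net_protocol / inet_add_protocol() interface of this kernel generation; the init-function and variable names are made up for illustration.

/* Sketch only: wire the receive and ICMP error handlers defined above
 * into the IPv4 protocol demultiplexer. */
static const struct net_protocol sctp_protocol_sketch = {
	.handler	= sctp_rcv,	/* inbound SCTP-over-IPv4 packets */
	.err_handler	= sctp_v4_err,	/* ICMP errors quoting an SCTP header */
	.no_policy	= 1,		/* xfrm policy is checked inside sctp_rcv() */
};

static int __init sctp_register_protocol_sketch(void)
{
	/* Returns a negative errno if IPPROTO_SCTP is already claimed. */
	return inet_add_protocol(&sctp_protocol_sketch, IPPROTO_SCTP);
}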
./CrossVul/dataset_final_sorted/CWE-362/c/good_3536_0
crossvul-cpp_data_good_5592_0
/* Manage a process's keyrings * * Copyright (C) 2004-2005, 2008 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/module.h> #include <linux/init.h> #include <linux/sched.h> #include <linux/keyctl.h> #include <linux/fs.h> #include <linux/err.h> #include <linux/mutex.h> #include <linux/security.h> #include <linux/user_namespace.h> #include <asm/uaccess.h> #include "internal.h" /* Session keyring create vs join semaphore */ static DEFINE_MUTEX(key_session_mutex); /* User keyring creation semaphore */ static DEFINE_MUTEX(key_user_keyring_mutex); /* The root user's tracking struct */ struct key_user root_key_user = { .usage = ATOMIC_INIT(3), .cons_lock = __MUTEX_INITIALIZER(root_key_user.cons_lock), .lock = __SPIN_LOCK_UNLOCKED(root_key_user.lock), .nkeys = ATOMIC_INIT(2), .nikeys = ATOMIC_INIT(2), .uid = GLOBAL_ROOT_UID, }; /* * Install the user and user session keyrings for the current process's UID. */ int install_user_keyrings(void) { struct user_struct *user; const struct cred *cred; struct key *uid_keyring, *session_keyring; key_perm_t user_keyring_perm; char buf[20]; int ret; uid_t uid; user_keyring_perm = (KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_ALL; cred = current_cred(); user = cred->user; uid = from_kuid(cred->user_ns, user->uid); kenter("%p{%u}", user, uid); if (user->uid_keyring && user->session_keyring) { kleave(" = 0 [exist]"); return 0; } mutex_lock(&key_user_keyring_mutex); ret = 0; if (!user->uid_keyring) { /* get the UID-specific keyring * - there may be one in existence already as it may have been * pinned by a session, but the user_struct pointing to it * may have been destroyed by setuid */ sprintf(buf, "_uid.%u", uid); uid_keyring = find_keyring_by_name(buf, true); if (IS_ERR(uid_keyring)) { uid_keyring = keyring_alloc(buf, user->uid, INVALID_GID, cred, user_keyring_perm, KEY_ALLOC_IN_QUOTA, NULL); if (IS_ERR(uid_keyring)) { ret = PTR_ERR(uid_keyring); goto error; } } /* get a default session keyring (which might also exist * already) */ sprintf(buf, "_uid_ses.%u", uid); session_keyring = find_keyring_by_name(buf, true); if (IS_ERR(session_keyring)) { session_keyring = keyring_alloc(buf, user->uid, INVALID_GID, cred, user_keyring_perm, KEY_ALLOC_IN_QUOTA, NULL); if (IS_ERR(session_keyring)) { ret = PTR_ERR(session_keyring); goto error_release; } /* we install a link from the user session keyring to * the user keyring */ ret = key_link(session_keyring, uid_keyring); if (ret < 0) goto error_release_both; } /* install the keyrings */ user->uid_keyring = uid_keyring; user->session_keyring = session_keyring; } mutex_unlock(&key_user_keyring_mutex); kleave(" = 0"); return 0; error_release_both: key_put(session_keyring); error_release: key_put(uid_keyring); error: mutex_unlock(&key_user_keyring_mutex); kleave(" = %d", ret); return ret; } /* * Install a fresh thread keyring directly to new credentials. This keyring is * allowed to overrun the quota. 
*/ int install_thread_keyring_to_cred(struct cred *new) { struct key *keyring; keyring = keyring_alloc("_tid", new->uid, new->gid, new, KEY_POS_ALL | KEY_USR_VIEW, KEY_ALLOC_QUOTA_OVERRUN, NULL); if (IS_ERR(keyring)) return PTR_ERR(keyring); new->thread_keyring = keyring; return 0; } /* * Install a fresh thread keyring, discarding the old one. */ static int install_thread_keyring(void) { struct cred *new; int ret; new = prepare_creds(); if (!new) return -ENOMEM; BUG_ON(new->thread_keyring); ret = install_thread_keyring_to_cred(new); if (ret < 0) { abort_creds(new); return ret; } return commit_creds(new); } /* * Install a process keyring directly to a credentials struct. * * Returns -EEXIST if there was already a process keyring, 0 if one installed, * and other value on any other error */ int install_process_keyring_to_cred(struct cred *new) { struct key *keyring; if (new->process_keyring) return -EEXIST; keyring = keyring_alloc("_pid", new->uid, new->gid, new, KEY_POS_ALL | KEY_USR_VIEW, KEY_ALLOC_QUOTA_OVERRUN, NULL); if (IS_ERR(keyring)) return PTR_ERR(keyring); new->process_keyring = keyring; return 0; } /* * Make sure a process keyring is installed for the current process. The * existing process keyring is not replaced. * * Returns 0 if there is a process keyring by the end of this function, some * error otherwise. */ static int install_process_keyring(void) { struct cred *new; int ret; new = prepare_creds(); if (!new) return -ENOMEM; ret = install_process_keyring_to_cred(new); if (ret < 0) { abort_creds(new); return ret != -EEXIST ? ret : 0; } return commit_creds(new); } /* * Install a session keyring directly to a credentials struct. */ int install_session_keyring_to_cred(struct cred *cred, struct key *keyring) { unsigned long flags; struct key *old; might_sleep(); /* create an empty session keyring */ if (!keyring) { flags = KEY_ALLOC_QUOTA_OVERRUN; if (cred->session_keyring) flags = KEY_ALLOC_IN_QUOTA; keyring = keyring_alloc("_ses", cred->uid, cred->gid, cred, KEY_POS_ALL | KEY_USR_VIEW | KEY_USR_READ, flags, NULL); if (IS_ERR(keyring)) return PTR_ERR(keyring); } else { atomic_inc(&keyring->usage); } /* install the keyring */ old = cred->session_keyring; rcu_assign_pointer(cred->session_keyring, keyring); if (old) key_put(old); return 0; } /* * Install a session keyring, discarding the old one. If a keyring is not * supplied, an empty one is invented. */ static int install_session_keyring(struct key *keyring) { struct cred *new; int ret; new = prepare_creds(); if (!new) return -ENOMEM; ret = install_session_keyring_to_cred(new, keyring); if (ret < 0) { abort_creds(new); return ret; } return commit_creds(new); } /* * Handle the fsuid changing. */ void key_fsuid_changed(struct task_struct *tsk) { /* update the ownership of the thread keyring */ BUG_ON(!tsk->cred); if (tsk->cred->thread_keyring) { down_write(&tsk->cred->thread_keyring->sem); tsk->cred->thread_keyring->uid = tsk->cred->fsuid; up_write(&tsk->cred->thread_keyring->sem); } } /* * Handle the fsgid changing. */ void key_fsgid_changed(struct task_struct *tsk) { /* update the ownership of the thread keyring */ BUG_ON(!tsk->cred); if (tsk->cred->thread_keyring) { down_write(&tsk->cred->thread_keyring->sem); tsk->cred->thread_keyring->gid = tsk->cred->fsgid; up_write(&tsk->cred->thread_keyring->sem); } } /* * Search the process keyrings attached to the supplied cred for the first * matching key. * * The search criteria are the type and the match function. 
The description is * given to the match function as a parameter, but doesn't otherwise influence * the search. Typically the match function will compare the description * parameter to the key's description. * * This can only search keyrings that grant Search permission to the supplied * credentials. Keyrings linked to searched keyrings will also be searched if * they grant Search permission too. Keys can only be found if they grant * Search permission to the credentials. * * Returns a pointer to the key with the key usage count incremented if * successful, -EAGAIN if we didn't find any matching key or -ENOKEY if we only * matched negative keys. * * In the case of a successful return, the possession attribute is set on the * returned key reference. */ key_ref_t search_my_process_keyrings(struct key_type *type, const void *description, key_match_func_t match, bool no_state_check, const struct cred *cred) { key_ref_t key_ref, ret, err; /* we want to return -EAGAIN or -ENOKEY if any of the keyrings were * searchable, but we failed to find a key or we found a negative key; * otherwise we want to return a sample error (probably -EACCES) if * none of the keyrings were searchable * * in terms of priority: success > -ENOKEY > -EAGAIN > other error */ key_ref = NULL; ret = NULL; err = ERR_PTR(-EAGAIN); /* search the thread keyring first */ if (cred->thread_keyring) { key_ref = keyring_search_aux( make_key_ref(cred->thread_keyring, 1), cred, type, description, match, no_state_check); if (!IS_ERR(key_ref)) goto found; switch (PTR_ERR(key_ref)) { case -EAGAIN: /* no key */ case -ENOKEY: /* negative key */ ret = key_ref; break; default: err = key_ref; break; } } /* search the process keyring second */ if (cred->process_keyring) { key_ref = keyring_search_aux( make_key_ref(cred->process_keyring, 1), cred, type, description, match, no_state_check); if (!IS_ERR(key_ref)) goto found; switch (PTR_ERR(key_ref)) { case -EAGAIN: /* no key */ if (ret) break; case -ENOKEY: /* negative key */ ret = key_ref; break; default: err = key_ref; break; } } /* search the session keyring */ if (cred->session_keyring) { rcu_read_lock(); key_ref = keyring_search_aux( make_key_ref(rcu_dereference(cred->session_keyring), 1), cred, type, description, match, no_state_check); rcu_read_unlock(); if (!IS_ERR(key_ref)) goto found; switch (PTR_ERR(key_ref)) { case -EAGAIN: /* no key */ if (ret) break; case -ENOKEY: /* negative key */ ret = key_ref; break; default: err = key_ref; break; } } /* or search the user-session keyring */ else if (cred->user->session_keyring) { key_ref = keyring_search_aux( make_key_ref(cred->user->session_keyring, 1), cred, type, description, match, no_state_check); if (!IS_ERR(key_ref)) goto found; switch (PTR_ERR(key_ref)) { case -EAGAIN: /* no key */ if (ret) break; case -ENOKEY: /* negative key */ ret = key_ref; break; default: err = key_ref; break; } } /* no key - decide on the error we're going to go for */ key_ref = ret ? ret : err; found: return key_ref; } /* * Search the process keyrings attached to the supplied cred for the first * matching key in the manner of search_my_process_keyrings(), but also search * the keys attached to the assumed authorisation key using its credentials if * one is available. * * Return same as search_my_process_keyrings(). 
*/ key_ref_t search_process_keyrings(struct key_type *type, const void *description, key_match_func_t match, const struct cred *cred) { struct request_key_auth *rka; key_ref_t key_ref, ret = ERR_PTR(-EACCES), err; might_sleep(); key_ref = search_my_process_keyrings(type, description, match, false, cred); if (!IS_ERR(key_ref)) goto found; err = key_ref; /* if this process has an instantiation authorisation key, then we also * search the keyrings of the process mentioned there * - we don't permit access to request_key auth keys via this method */ if (cred->request_key_auth && cred == current_cred() && type != &key_type_request_key_auth ) { /* defend against the auth key being revoked */ down_read(&cred->request_key_auth->sem); if (key_validate(cred->request_key_auth) == 0) { rka = cred->request_key_auth->payload.data; key_ref = search_process_keyrings(type, description, match, rka->cred); up_read(&cred->request_key_auth->sem); if (!IS_ERR(key_ref)) goto found; ret = key_ref; } else { up_read(&cred->request_key_auth->sem); } } /* no key - decide on the error we're going to go for */ if (err == ERR_PTR(-ENOKEY) || ret == ERR_PTR(-ENOKEY)) key_ref = ERR_PTR(-ENOKEY); else if (err == ERR_PTR(-EACCES)) key_ref = ret; else key_ref = err; found: return key_ref; } /* * See if the key we're looking at is the target key. */ int lookup_user_key_possessed(const struct key *key, const void *target) { return key == target; } /* * Look up a key ID given us by userspace with a given permissions mask to get * the key it refers to. * * Flags can be passed to request that special keyrings be created if referred * to directly, to permit partially constructed keys to be found and to skip * validity and permission checks on the found key. * * Returns a pointer to the key with an incremented usage count if successful; * -EINVAL if the key ID is invalid; -ENOKEY if the key ID does not correspond * to a key or the best found key was a negative key; -EKEYREVOKED or * -EKEYEXPIRED if the best found key was revoked or expired; -EACCES if the * found key doesn't grant the requested permit or the LSM denied access to it; * or -ENOMEM if a special keyring couldn't be created. * * In the case of a successful return, the possession attribute is set on the * returned key reference. 
*/ key_ref_t lookup_user_key(key_serial_t id, unsigned long lflags, key_perm_t perm) { struct request_key_auth *rka; const struct cred *cred; struct key *key; key_ref_t key_ref, skey_ref; int ret; try_again: cred = get_current_cred(); key_ref = ERR_PTR(-ENOKEY); switch (id) { case KEY_SPEC_THREAD_KEYRING: if (!cred->thread_keyring) { if (!(lflags & KEY_LOOKUP_CREATE)) goto error; ret = install_thread_keyring(); if (ret < 0) { key_ref = ERR_PTR(ret); goto error; } goto reget_creds; } key = cred->thread_keyring; atomic_inc(&key->usage); key_ref = make_key_ref(key, 1); break; case KEY_SPEC_PROCESS_KEYRING: if (!cred->process_keyring) { if (!(lflags & KEY_LOOKUP_CREATE)) goto error; ret = install_process_keyring(); if (ret < 0) { key_ref = ERR_PTR(ret); goto error; } goto reget_creds; } key = cred->process_keyring; atomic_inc(&key->usage); key_ref = make_key_ref(key, 1); break; case KEY_SPEC_SESSION_KEYRING: if (!cred->session_keyring) { /* always install a session keyring upon access if one * doesn't exist yet */ ret = install_user_keyrings(); if (ret < 0) goto error; if (lflags & KEY_LOOKUP_CREATE) ret = join_session_keyring(NULL); else ret = install_session_keyring( cred->user->session_keyring); if (ret < 0) goto error; goto reget_creds; } else if (cred->session_keyring == cred->user->session_keyring && lflags & KEY_LOOKUP_CREATE) { ret = join_session_keyring(NULL); if (ret < 0) goto error; goto reget_creds; } rcu_read_lock(); key = rcu_dereference(cred->session_keyring); atomic_inc(&key->usage); rcu_read_unlock(); key_ref = make_key_ref(key, 1); break; case KEY_SPEC_USER_KEYRING: if (!cred->user->uid_keyring) { ret = install_user_keyrings(); if (ret < 0) goto error; } key = cred->user->uid_keyring; atomic_inc(&key->usage); key_ref = make_key_ref(key, 1); break; case KEY_SPEC_USER_SESSION_KEYRING: if (!cred->user->session_keyring) { ret = install_user_keyrings(); if (ret < 0) goto error; } key = cred->user->session_keyring; atomic_inc(&key->usage); key_ref = make_key_ref(key, 1); break; case KEY_SPEC_GROUP_KEYRING: /* group keyrings are not yet supported */ key_ref = ERR_PTR(-EINVAL); goto error; case KEY_SPEC_REQKEY_AUTH_KEY: key = cred->request_key_auth; if (!key) goto error; atomic_inc(&key->usage); key_ref = make_key_ref(key, 1); break; case KEY_SPEC_REQUESTOR_KEYRING: if (!cred->request_key_auth) goto error; down_read(&cred->request_key_auth->sem); if (test_bit(KEY_FLAG_REVOKED, &cred->request_key_auth->flags)) { key_ref = ERR_PTR(-EKEYREVOKED); key = NULL; } else { rka = cred->request_key_auth->payload.data; key = rka->dest_keyring; atomic_inc(&key->usage); } up_read(&cred->request_key_auth->sem); if (!key) goto error; key_ref = make_key_ref(key, 1); break; default: key_ref = ERR_PTR(-EINVAL); if (id < 1) goto error; key = key_lookup(id); if (IS_ERR(key)) { key_ref = ERR_CAST(key); goto error; } key_ref = make_key_ref(key, 0); /* check to see if we possess the key */ skey_ref = search_process_keyrings(key->type, key, lookup_user_key_possessed, cred); if (!IS_ERR(skey_ref)) { key_put(key); key_ref = skey_ref; } break; } /* unlink does not use the nominated key in any way, so can skip all * the permission checks as it is only concerned with the keyring */ if (lflags & KEY_LOOKUP_FOR_UNLINK) { ret = 0; goto error; } if (!(lflags & KEY_LOOKUP_PARTIAL)) { ret = wait_for_key_construction(key, true); switch (ret) { case -ERESTARTSYS: goto invalid_key; default: if (perm) goto invalid_key; case 0: break; } } else if (perm) { ret = key_validate(key); if (ret < 0) goto invalid_key; } ret = -EIO; 
if (!(lflags & KEY_LOOKUP_PARTIAL) && !test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) goto invalid_key; /* check the permissions */ ret = key_task_permission(key_ref, cred, perm); if (ret < 0) goto invalid_key; key->last_used_at = current_kernel_time().tv_sec; error: put_cred(cred); return key_ref; invalid_key: key_ref_put(key_ref); key_ref = ERR_PTR(ret); goto error; /* if we attempted to install a keyring, then it may have caused new * creds to be installed */ reget_creds: put_cred(cred); goto try_again; } /* * Join the named keyring as the session keyring if possible else attempt to * create a new one of that name and join that. * * If the name is NULL, an empty anonymous keyring will be installed as the * session keyring. * * Named session keyrings are joined with a semaphore held to prevent the * keyrings from going away whilst the attempt is made to going them and also * to prevent a race in creating compatible session keyrings. */ long join_session_keyring(const char *name) { const struct cred *old; struct cred *new; struct key *keyring; long ret, serial; new = prepare_creds(); if (!new) return -ENOMEM; old = current_cred(); /* if no name is provided, install an anonymous keyring */ if (!name) { ret = install_session_keyring_to_cred(new, NULL); if (ret < 0) goto error; serial = new->session_keyring->serial; ret = commit_creds(new); if (ret == 0) ret = serial; goto okay; } /* allow the user to join or create a named keyring */ mutex_lock(&key_session_mutex); /* look for an existing keyring of this name */ keyring = find_keyring_by_name(name, false); if (PTR_ERR(keyring) == -ENOKEY) { /* not found - try and create a new one */ keyring = keyring_alloc( name, old->uid, old->gid, old, KEY_POS_ALL | KEY_USR_VIEW | KEY_USR_READ | KEY_USR_LINK, KEY_ALLOC_IN_QUOTA, NULL); if (IS_ERR(keyring)) { ret = PTR_ERR(keyring); goto error2; } } else if (IS_ERR(keyring)) { ret = PTR_ERR(keyring); goto error2; } else if (keyring == new->session_keyring) { ret = 0; goto error2; } /* we've got a keyring - now to install it */ ret = install_session_keyring_to_cred(new, keyring); if (ret < 0) goto error2; commit_creds(new); mutex_unlock(&key_session_mutex); ret = keyring->serial; key_put(keyring); okay: return ret; error2: mutex_unlock(&key_session_mutex); error: abort_creds(new); return ret; } /* * Replace a process's session keyring on behalf of one of its children when * the target process is about to resume userspace execution. */ void key_change_session_keyring(struct callback_head *twork) { const struct cred *old = current_cred(); struct cred *new = container_of(twork, struct cred, rcu); if (unlikely(current->flags & PF_EXITING)) { put_cred(new); return; } new-> uid = old-> uid; new-> euid = old-> euid; new-> suid = old-> suid; new->fsuid = old->fsuid; new-> gid = old-> gid; new-> egid = old-> egid; new-> sgid = old-> sgid; new->fsgid = old->fsgid; new->user = get_uid(old->user); new->user_ns = get_user_ns(old->user_ns); new->group_info = get_group_info(old->group_info); new->securebits = old->securebits; new->cap_inheritable = old->cap_inheritable; new->cap_permitted = old->cap_permitted; new->cap_effective = old->cap_effective; new->cap_bset = old->cap_bset; new->jit_keyring = old->jit_keyring; new->thread_keyring = key_get(old->thread_keyring); new->process_keyring = key_get(old->process_keyring); security_transfer_creds(new, old); commit_creds(new); }
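The session-keyring machinery above (join_session_keyring(), install_session_keyring_to_cred() and the KEY_SPEC_* handling in lookup_user_key()) is driven from userspace through the add_key(2) and keyctl(2) system calls. The following minimal sketch is not part of this kernel file; it is a hypothetical userspace program, assuming libkeyutils is available (build with -lkeyutils), and the keyring name "demo-session" and key description "demo:token" are made-up examples. It joins a named session keyring, adds a key to it via the KEY_SPEC_SESSION_KEYRING shortcut, and reads the payload back.

/* Userspace sketch (assumes libkeyutils): join a named session keyring,
 * then add and read back a user-type key.  Build: cc demo.c -lkeyutils */
#include <keyutils.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	key_serial_t session, key;
	void *payload = NULL;
	long len;

	/* Exercises join_session_keyring() in the kernel via keyctl(2). */
	session = keyctl_join_session_keyring("demo-session");
	if (session < 0) {
		perror("keyctl_join_session_keyring");
		return EXIT_FAILURE;
	}

	/* add_key(2) links the new key into the just-joined session keyring;
	 * KEY_SPEC_SESSION_KEYRING is resolved by lookup_user_key() above. */
	key = add_key("user", "demo:token", "s3cret", strlen("s3cret"),
		      KEY_SPEC_SESSION_KEYRING);
	if (key < 0) {
		perror("add_key");
		return EXIT_FAILURE;
	}

	/* Read the payload back; keyctl_read_alloc() mallocs the buffer. */
	len = keyctl_read_alloc(key, &payload);
	if (len < 0) {
		perror("keyctl_read_alloc");
		return EXIT_FAILURE;
	}
	printf("key %d in session keyring %d holds %ld bytes\n",
	       key, session, len);
	free(payload);
	return EXIT_SUCCESS;
}

Running the program a second time with the same keyring name takes the find_keyring_by_name() path in join_session_keyring() rather than the keyring_alloc() path, since the named session keyring already exists.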
./CrossVul/dataset_final_sorted/CWE-362/c/good_5592_0
crossvul-cpp_data_bad_5221_2
/* Copyright (c) 2000-2008 MySQL AB, 2009 Sun Microsystems, Inc. Use is subject to license terms. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ /* Static variables for mysys library. All definied here for easy making of a shared library */ #include "mysys_priv.h" #include "my_static.h" #include "my_alarm.h" my_bool timed_mutexes= 0; /* from my_init */ char * home_dir=0; char *mysql_data_home= (char*) "."; const char *my_progname= NULL, *my_progname_short= NULL; char curr_dir[FN_REFLEN]= {0}, home_dir_buff[FN_REFLEN]= {0}; ulong my_stream_opened=0,my_file_opened=0, my_tmp_file_created=0; ulong my_file_total_opened= 0; int my_umask=0664, my_umask_dir=0777; myf my_global_flags= 0; my_bool my_assert_on_error= 0; struct st_my_file_info my_file_info_default[MY_NFILE]; uint my_file_limit= MY_NFILE; struct st_my_file_info *my_file_info= my_file_info_default; /* From mf_brkhant */ int my_dont_interrupt=0; volatile int _my_signals=0; struct st_remember _my_sig_remember[MAX_SIGNALS]={{0,0}}; /* from mf_reccache.c */ ulong my_default_record_cache_size=RECORD_CACHE_SIZE; /* from soundex.c */ /* ABCDEFGHIJKLMNOPQRSTUVWXYZ */ /* :::::::::::::::::::::::::: */ const char *soundex_map= "01230120022455012623010202"; /* from my_malloc */ USED_MEM* my_once_root_block=0; /* pointer to first block */ uint my_once_extra=ONCE_ALLOC_INIT; /* Memory to alloc / block */ /* from my_largepage.c */ #ifdef HAVE_LARGE_PAGES my_bool my_use_large_pages= 0; uint my_large_page_size= 0; #endif /* from my_alarm */ int volatile my_have_got_alarm=0; /* declare variable to reset */ ulong my_time_to_wait_for_lock=2; /* In seconds */ /* from errors.c */ #ifdef SHARED_LIBRARY const char *globerrs[GLOBERRS]; /* my_error_messages is here */ #endif void (*my_abort_hook)(int) = (void(*)(int)) exit; void (*error_handler_hook)(uint error, const char *str, myf MyFlags)= my_message_stderr; void (*fatal_error_handler_hook)(uint error, const char *str, myf MyFlags)= my_message_stderr; static const char *proc_info_dummy(void *a __attribute__((unused)), const char *b __attribute__((unused)), const char *c __attribute__((unused)), const char *d __attribute__((unused)), const unsigned int e __attribute__((unused))) { return 0; } /* this is to be able to call set_thd_proc_info from the C code */ const char *(*proc_info_hook)(void *, const char *, const char *, const char *, const unsigned int)= proc_info_dummy; void (*debug_sync_C_callback_ptr)(MYSQL_THD, const char *, size_t)= 0; /* How to disable options */ my_bool my_disable_locking=0; my_bool my_disable_sync=0; my_bool my_disable_async_io=0; my_bool my_disable_flush_key_blocks=0; my_bool my_disable_symlinks=0; /* Note that PSI_hook and PSI_server are unconditionally (no ifdef HAVE_PSI_INTERFACE) defined. 
This is to ensure binary compatibility between the server and plugins, in the case when: - the server is not compiled with HAVE_PSI_INTERFACE - a plugin is compiled with HAVE_PSI_INTERFACE See the doxygen documentation for the performance schema. */ /** Hook for the instrumentation interface. Code implementing the instrumentation interface should register here. */ struct PSI_bootstrap *PSI_hook= NULL; /** Instance of the instrumentation interface for the MySQL server. @todo This is currently a global variable, which is handy when compiling instrumented code that is bundled with the server. When dynamic plugins are truly supported, this variable will need to be replaced by a macro, so that each XYZ plugin can have its own xyz_psi_server variable, obtained from PSI_bootstrap::get_interface() with the version used at compile time for plugin XYZ. */ PSI *PSI_server= NULL;
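The PSI_hook/proc_info_hook pattern above, a global function pointer that defaults to a harmless dummy so call sites never have to test for NULL, and that the server or a plugin overwrites at registration time, can be shown in isolation. The sketch below is a self-contained, hypothetical illustration of that idiom only; none of its names (progress_hook, progress_default, progress_verbose) come from the MySQL sources.

#include <stdio.h>

/* Hypothetical hook type: report progress of some long-running task. */
typedef void (*progress_hook_t)(const char *stage, unsigned int percent);

/* Safe default: doing nothing keeps every call site free of NULL checks. */
static void progress_default(const char *stage, unsigned int percent)
{
	(void)stage;
	(void)percent;
}

/* The hook starts out pointing at the default, mirroring proc_info_dummy. */
progress_hook_t progress_hook = progress_default;

/* An instrumented implementation that a "plugin" could install instead. */
static void progress_verbose(const char *stage, unsigned int percent)
{
	fprintf(stderr, "[%3u%%] %s\n", percent, stage);
}

int main(void)
{
	progress_hook("parsing", 10);          /* silent: default hook */
	progress_hook = progress_verbose;      /* registration step */
	progress_hook("executing", 80);        /* now prints to stderr */
	return 0;
}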
./CrossVul/dataset_final_sorted/CWE-362/c/bad_5221_2
crossvul-cpp_data_good_2406_3
/* * Copyright (C) 2007 Red Hat. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License v2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 021110-1307, USA. */ #include <linux/init.h> #include <linux/fs.h> #include <linux/slab.h> #include <linux/rwsem.h> #include <linux/xattr.h> #include <linux/security.h> #include <linux/posix_acl_xattr.h> #include "ctree.h" #include "btrfs_inode.h" #include "transaction.h" #include "xattr.h" #include "disk-io.h" #include "props.h" #include "locking.h" ssize_t __btrfs_getxattr(struct inode *inode, const char *name, void *buffer, size_t size) { struct btrfs_dir_item *di; struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_path *path; struct extent_buffer *leaf; int ret = 0; unsigned long data_ptr; path = btrfs_alloc_path(); if (!path) return -ENOMEM; /* lookup the xattr by name */ di = btrfs_lookup_xattr(NULL, root, path, btrfs_ino(inode), name, strlen(name), 0); if (!di) { ret = -ENODATA; goto out; } else if (IS_ERR(di)) { ret = PTR_ERR(di); goto out; } leaf = path->nodes[0]; /* if size is 0, that means we want the size of the attr */ if (!size) { ret = btrfs_dir_data_len(leaf, di); goto out; } /* now get the data out of our dir_item */ if (btrfs_dir_data_len(leaf, di) > size) { ret = -ERANGE; goto out; } /* * The way things are packed into the leaf is like this * |struct btrfs_dir_item|name|data| * where name is the xattr name, so security.foo, and data is the * content of the xattr. data_ptr points to the location in memory * where the data starts in the in memory leaf */ data_ptr = (unsigned long)((char *)(di + 1) + btrfs_dir_name_len(leaf, di)); read_extent_buffer(leaf, buffer, data_ptr, btrfs_dir_data_len(leaf, di)); ret = btrfs_dir_data_len(leaf, di); out: btrfs_free_path(path); return ret; } static int do_setxattr(struct btrfs_trans_handle *trans, struct inode *inode, const char *name, const void *value, size_t size, int flags) { struct btrfs_dir_item *di = NULL; struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_path *path; size_t name_len = strlen(name); int ret = 0; if (name_len + size > BTRFS_MAX_XATTR_SIZE(root)) return -ENOSPC; path = btrfs_alloc_path(); if (!path) return -ENOMEM; path->skip_release_on_error = 1; if (!value) { di = btrfs_lookup_xattr(trans, root, path, btrfs_ino(inode), name, name_len, -1); if (!di && (flags & XATTR_REPLACE)) ret = -ENODATA; else if (di) ret = btrfs_delete_one_dir_name(trans, root, path, di); goto out; } /* * For a replace we can't just do the insert blindly. * Do a lookup first (read-only btrfs_search_slot), and return if xattr * doesn't exist. If it exists, fall down below to the insert/replace * path - we can't race with a concurrent xattr delete, because the VFS * locks the inode's i_mutex before calling setxattr or removexattr. 
*/ if (flags & XATTR_REPLACE) { ASSERT(mutex_is_locked(&inode->i_mutex)); di = btrfs_lookup_xattr(NULL, root, path, btrfs_ino(inode), name, name_len, 0); if (!di) { ret = -ENODATA; goto out; } btrfs_release_path(path); di = NULL; } ret = btrfs_insert_xattr_item(trans, root, path, btrfs_ino(inode), name, name_len, value, size); if (ret == -EOVERFLOW) { /* * We have an existing item in a leaf, split_leaf couldn't * expand it. That item might have or not a dir_item that * matches our target xattr, so lets check. */ ret = 0; btrfs_assert_tree_locked(path->nodes[0]); di = btrfs_match_dir_item_name(root, path, name, name_len); if (!di && !(flags & XATTR_REPLACE)) { ret = -ENOSPC; goto out; } } else if (ret == -EEXIST) { ret = 0; di = btrfs_match_dir_item_name(root, path, name, name_len); ASSERT(di); /* logic error */ } else if (ret) { goto out; } if (di && (flags & XATTR_CREATE)) { ret = -EEXIST; goto out; } if (di) { /* * We're doing a replace, and it must be atomic, that is, at * any point in time we have either the old or the new xattr * value in the tree. We don't want readers (getxattr and * listxattrs) to miss a value, this is specially important * for ACLs. */ const int slot = path->slots[0]; struct extent_buffer *leaf = path->nodes[0]; const u16 old_data_len = btrfs_dir_data_len(leaf, di); const u32 item_size = btrfs_item_size_nr(leaf, slot); const u32 data_size = sizeof(*di) + name_len + size; struct btrfs_item *item; unsigned long data_ptr; char *ptr; if (size > old_data_len) { if (btrfs_leaf_free_space(root, leaf) < (size - old_data_len)) { ret = -ENOSPC; goto out; } } if (old_data_len + name_len + sizeof(*di) == item_size) { /* No other xattrs packed in the same leaf item. */ if (size > old_data_len) btrfs_extend_item(root, path, size - old_data_len); else if (size < old_data_len) btrfs_truncate_item(root, path, data_size, 1); } else { /* There are other xattrs packed in the same item. */ ret = btrfs_delete_one_dir_name(trans, root, path, di); if (ret) goto out; btrfs_extend_item(root, path, data_size); } item = btrfs_item_nr(slot); ptr = btrfs_item_ptr(leaf, slot, char); ptr += btrfs_item_size(leaf, item) - data_size; di = (struct btrfs_dir_item *)ptr; btrfs_set_dir_data_len(leaf, di, size); data_ptr = ((unsigned long)(di + 1)) + name_len; write_extent_buffer(leaf, value, data_ptr, size); btrfs_mark_buffer_dirty(leaf); } else { /* * Insert, and we had space for the xattr, so path->slots[0] is * where our xattr dir_item is and btrfs_insert_xattr_item() * filled it. 
*/ } out: btrfs_free_path(path); return ret; } /* * @value: "" makes the attribute to empty, NULL removes it */ int __btrfs_setxattr(struct btrfs_trans_handle *trans, struct inode *inode, const char *name, const void *value, size_t size, int flags) { struct btrfs_root *root = BTRFS_I(inode)->root; int ret; if (trans) return do_setxattr(trans, inode, name, value, size, flags); trans = btrfs_start_transaction(root, 2); if (IS_ERR(trans)) return PTR_ERR(trans); ret = do_setxattr(trans, inode, name, value, size, flags); if (ret) goto out; inode_inc_iversion(inode); inode->i_ctime = CURRENT_TIME; set_bit(BTRFS_INODE_COPY_EVERYTHING, &BTRFS_I(inode)->runtime_flags); ret = btrfs_update_inode(trans, root, inode); BUG_ON(ret); out: btrfs_end_transaction(trans, root); return ret; } ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size) { struct btrfs_key key, found_key; struct inode *inode = dentry->d_inode; struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_path *path; struct extent_buffer *leaf; struct btrfs_dir_item *di; int ret = 0, slot; size_t total_size = 0, size_left = size; unsigned long name_ptr; size_t name_len; /* * ok we want all objects associated with this id. * NOTE: we set key.offset = 0; because we want to start with the * first xattr that we find and walk forward */ key.objectid = btrfs_ino(inode); key.type = BTRFS_XATTR_ITEM_KEY; key.offset = 0; path = btrfs_alloc_path(); if (!path) return -ENOMEM; path->reada = 2; /* search for our xattrs */ ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) goto err; while (1) { leaf = path->nodes[0]; slot = path->slots[0]; /* this is where we start walking through the path */ if (slot >= btrfs_header_nritems(leaf)) { /* * if we've reached the last slot in this leaf we need * to go to the next leaf and reset everything */ ret = btrfs_next_leaf(root, path); if (ret < 0) goto err; else if (ret > 0) break; continue; } btrfs_item_key_to_cpu(leaf, &found_key, slot); /* check to make sure this item is what we want */ if (found_key.objectid != key.objectid) break; if (found_key.type != BTRFS_XATTR_ITEM_KEY) break; di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item); if (verify_dir_item(root, leaf, di)) goto next; name_len = btrfs_dir_name_len(leaf, di); total_size += name_len + 1; /* we are just looking for how big our buffer needs to be */ if (!size) goto next; if (!buffer || (name_len + 1) > size_left) { ret = -ERANGE; goto err; } name_ptr = (unsigned long)(di + 1); read_extent_buffer(leaf, buffer, name_ptr, name_len); buffer[name_len] = '\0'; size_left -= name_len + 1; buffer += name_len + 1; next: path->slots[0]++; } ret = total_size; err: btrfs_free_path(path); return ret; } /* * List of handlers for synthetic system.* attributes. All real ondisk * attributes are handled directly. */ const struct xattr_handler *btrfs_xattr_handlers[] = { #ifdef CONFIG_BTRFS_FS_POSIX_ACL &posix_acl_access_xattr_handler, &posix_acl_default_xattr_handler, #endif NULL, }; /* * Check if the attribute is in a supported namespace. * * This applied after the check for the synthetic attributes in the system * namespace. 
*/ static bool btrfs_is_valid_xattr(const char *name) { return !strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) || !strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN) || !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) || !strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN) || !strncmp(name, XATTR_BTRFS_PREFIX, XATTR_BTRFS_PREFIX_LEN); } ssize_t btrfs_getxattr(struct dentry *dentry, const char *name, void *buffer, size_t size) { /* * If this is a request for a synthetic attribute in the system.* * namespace use the generic infrastructure to resolve a handler * for it via sb->s_xattr. */ if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN)) return generic_getxattr(dentry, name, buffer, size); if (!btrfs_is_valid_xattr(name)) return -EOPNOTSUPP; return __btrfs_getxattr(dentry->d_inode, name, buffer, size); } int btrfs_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags) { struct btrfs_root *root = BTRFS_I(dentry->d_inode)->root; /* * The permission on security.* and system.* is not checked * in permission(). */ if (btrfs_root_readonly(root)) return -EROFS; /* * If this is a request for a synthetic attribute in the system.* * namespace use the generic infrastructure to resolve a handler * for it via sb->s_xattr. */ if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN)) return generic_setxattr(dentry, name, value, size, flags); if (!btrfs_is_valid_xattr(name)) return -EOPNOTSUPP; if (!strncmp(name, XATTR_BTRFS_PREFIX, XATTR_BTRFS_PREFIX_LEN)) return btrfs_set_prop(dentry->d_inode, name, value, size, flags); if (size == 0) value = ""; /* empty EA, do not remove */ return __btrfs_setxattr(NULL, dentry->d_inode, name, value, size, flags); } int btrfs_removexattr(struct dentry *dentry, const char *name) { struct btrfs_root *root = BTRFS_I(dentry->d_inode)->root; /* * The permission on security.* and system.* is not checked * in permission(). */ if (btrfs_root_readonly(root)) return -EROFS; /* * If this is a request for a synthetic attribute in the system.* * namespace use the generic infrastructure to resolve a handler * for it via sb->s_xattr. */ if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN)) return generic_removexattr(dentry, name); if (!btrfs_is_valid_xattr(name)) return -EOPNOTSUPP; if (!strncmp(name, XATTR_BTRFS_PREFIX, XATTR_BTRFS_PREFIX_LEN)) return btrfs_set_prop(dentry->d_inode, name, NULL, 0, XATTR_REPLACE); return __btrfs_setxattr(NULL, dentry->d_inode, name, NULL, 0, XATTR_REPLACE); } static int btrfs_initxattrs(struct inode *inode, const struct xattr *xattr_array, void *fs_info) { const struct xattr *xattr; struct btrfs_trans_handle *trans = fs_info; char *name; int err = 0; for (xattr = xattr_array; xattr->name != NULL; xattr++) { name = kmalloc(XATTR_SECURITY_PREFIX_LEN + strlen(xattr->name) + 1, GFP_NOFS); if (!name) { err = -ENOMEM; break; } strcpy(name, XATTR_SECURITY_PREFIX); strcpy(name + XATTR_SECURITY_PREFIX_LEN, xattr->name); err = __btrfs_setxattr(trans, inode, name, xattr->value, xattr->value_len, 0); kfree(name); if (err < 0) break; } return err; } int btrfs_xattr_security_init(struct btrfs_trans_handle *trans, struct inode *inode, struct inode *dir, const struct qstr *qstr) { return security_inode_init_security(inode, dir, qstr, &btrfs_initxattrs, trans); }
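The handlers above (__btrfs_setxattr(), btrfs_getxattr(), btrfs_listxattr() and the btrfs_is_valid_xattr() namespace check) sit behind the generic setxattr(2), getxattr(2) and listxattr(2) system calls. As a rough userspace illustration, not part of the kernel sources, the sketch below stores, lists and reads back an attribute in the user. namespace, which the namespace check above accepts; the file path /mnt/btrfs/demo.txt is an arbitrary assumption, and XATTR_CREATE is the flag that do_setxattr() turns into -EEXIST when the attribute already exists.

/* Userspace sketch: set, list and read back a user.* xattr.
 * Assumes the file exists on a filesystem with xattr support. */
#include <sys/types.h>
#include <sys/xattr.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *path = "/mnt/btrfs/demo.txt";   /* hypothetical path */
	char value[256];
	char names[1024];
	ssize_t len;

	/* XATTR_CREATE fails with EEXIST if "user.comment" is already set,
	 * matching the (di && (flags & XATTR_CREATE)) branch in do_setxattr(). */
	if (setxattr(path, "user.comment", "hello", 5, XATTR_CREATE) != 0)
		perror("setxattr");

	/* Enumerate attribute names; served by btrfs_listxattr(). */
	len = listxattr(path, names, sizeof(names));
	if (len > 0) {
		for (char *p = names; p < names + len; p += strlen(p) + 1)
			printf("xattr: %s\n", p);
	}

	/* Read the value back; served by btrfs_getxattr(). */
	len = getxattr(path, "user.comment", value, sizeof(value) - 1);
	if (len >= 0) {
		value[len] = '\0';
		printf("user.comment = %s\n", value);
	} else {
		perror("getxattr");
	}
	return 0;
}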
./CrossVul/dataset_final_sorted/CWE-362/c/good_2406_3
crossvul-cpp_data_good_5339_0
/* * linux/fs/proc/base.c * * Copyright (C) 1991, 1992 Linus Torvalds * * proc base directory handling functions * * 1999, Al Viro. Rewritten. Now it covers the whole per-process part. * Instead of using magical inumbers to determine the kind of object * we allocate and fill in-core inodes upon lookup. They don't even * go into icache. We cache the reference to task_struct upon lookup too. * Eventually it should become a filesystem in its own. We don't use the * rest of procfs anymore. * * * Changelog: * 17-Jan-2005 * Allan Bezerra * Bruna Moreira <bruna.moreira@indt.org.br> * Edjard Mota <edjard.mota@indt.org.br> * Ilias Biris <ilias.biris@indt.org.br> * Mauricio Lin <mauricio.lin@indt.org.br> * * Embedded Linux Lab - 10LE Instituto Nokia de Tecnologia - INdT * * A new process specific entry (smaps) included in /proc. It shows the * size of rss for each memory area. The maps entry lacks information * about physical memory size (rss) for each mapped file, i.e., * rss information for executables and library files. * This additional information is useful for any tools that need to know * about physical memory consumption for a process specific library. * * Changelog: * 21-Feb-2005 * Embedded Linux Lab - 10LE Instituto Nokia de Tecnologia - INdT * Pud inclusion in the page table walking. * * ChangeLog: * 10-Mar-2005 * 10LE Instituto Nokia de Tecnologia - INdT: * A better way to walks through the page table as suggested by Hugh Dickins. * * Simo Piiroinen <simo.piiroinen@nokia.com>: * Smaps information related to shared, private, clean and dirty pages. * * Paul Mundt <paul.mundt@nokia.com>: * Overall revision about smaps. */ #include <asm/uaccess.h> #include <linux/errno.h> #include <linux/time.h> #include <linux/proc_fs.h> #include <linux/stat.h> #include <linux/task_io_accounting_ops.h> #include <linux/init.h> #include <linux/capability.h> #include <linux/file.h> #include <linux/fdtable.h> #include <linux/string.h> #include <linux/seq_file.h> #include <linux/namei.h> #include <linux/mnt_namespace.h> #include <linux/mm.h> #include <linux/swap.h> #include <linux/rcupdate.h> #include <linux/kallsyms.h> #include <linux/stacktrace.h> #include <linux/resource.h> #include <linux/module.h> #include <linux/mount.h> #include <linux/security.h> #include <linux/ptrace.h> #include <linux/tracehook.h> #include <linux/printk.h> #include <linux/cgroup.h> #include <linux/cpuset.h> #include <linux/audit.h> #include <linux/poll.h> #include <linux/nsproxy.h> #include <linux/oom.h> #include <linux/elf.h> #include <linux/pid_namespace.h> #include <linux/user_namespace.h> #include <linux/fs_struct.h> #include <linux/slab.h> #include <linux/flex_array.h> #include <linux/posix-timers.h> #ifdef CONFIG_HARDWALL #include <asm/hardwall.h> #endif #include <trace/events/oom.h> #include "internal.h" #include "fd.h" /* NOTE: * Implementing inode permission operations in /proc is almost * certainly an error. Permission checks need to happen during * each system call not at open time. The reason is that most of * what we wish to check for permissions in /proc varies at runtime. * * The classic example of a problem is opening file descriptors * in /proc for a task before it execs a suid executable. 
*/ struct pid_entry { const char *name; int len; umode_t mode; const struct inode_operations *iop; const struct file_operations *fop; union proc_op op; }; #define NOD(NAME, MODE, IOP, FOP, OP) { \ .name = (NAME), \ .len = sizeof(NAME) - 1, \ .mode = MODE, \ .iop = IOP, \ .fop = FOP, \ .op = OP, \ } #define DIR(NAME, MODE, iops, fops) \ NOD(NAME, (S_IFDIR|(MODE)), &iops, &fops, {} ) #define LNK(NAME, get_link) \ NOD(NAME, (S_IFLNK|S_IRWXUGO), \ &proc_pid_link_inode_operations, NULL, \ { .proc_get_link = get_link } ) #define REG(NAME, MODE, fops) \ NOD(NAME, (S_IFREG|(MODE)), NULL, &fops, {}) #define ONE(NAME, MODE, show) \ NOD(NAME, (S_IFREG|(MODE)), \ NULL, &proc_single_file_operations, \ { .proc_show = show } ) /* * Count the number of hardlinks for the pid_entry table, excluding the . * and .. links. */ static unsigned int pid_entry_count_dirs(const struct pid_entry *entries, unsigned int n) { unsigned int i; unsigned int count; count = 0; for (i = 0; i < n; ++i) { if (S_ISDIR(entries[i].mode)) ++count; } return count; } static int get_task_root(struct task_struct *task, struct path *root) { int result = -ENOENT; task_lock(task); if (task->fs) { get_fs_root(task->fs, root); result = 0; } task_unlock(task); return result; } static int proc_cwd_link(struct dentry *dentry, struct path *path) { struct task_struct *task = get_proc_task(d_inode(dentry)); int result = -ENOENT; if (task) { task_lock(task); if (task->fs) { get_fs_pwd(task->fs, path); result = 0; } task_unlock(task); put_task_struct(task); } return result; } static int proc_root_link(struct dentry *dentry, struct path *path) { struct task_struct *task = get_proc_task(d_inode(dentry)); int result = -ENOENT; if (task) { result = get_task_root(task, path); put_task_struct(task); } return result; } static ssize_t proc_pid_cmdline_read(struct file *file, char __user *buf, size_t _count, loff_t *pos) { struct task_struct *tsk; struct mm_struct *mm; char *page; unsigned long count = _count; unsigned long arg_start, arg_end, env_start, env_end; unsigned long len1, len2, len; unsigned long p; char c; ssize_t rv; BUG_ON(*pos < 0); tsk = get_proc_task(file_inode(file)); if (!tsk) return -ESRCH; mm = get_task_mm(tsk); put_task_struct(tsk); if (!mm) return 0; /* Check if process spawned far enough to have cmdline. */ if (!mm->env_end) { rv = 0; goto out_mmput; } page = (char *)__get_free_page(GFP_TEMPORARY); if (!page) { rv = -ENOMEM; goto out_mmput; } down_read(&mm->mmap_sem); arg_start = mm->arg_start; arg_end = mm->arg_end; env_start = mm->env_start; env_end = mm->env_end; up_read(&mm->mmap_sem); BUG_ON(arg_start > arg_end); BUG_ON(env_start > env_end); len1 = arg_end - arg_start; len2 = env_end - env_start; /* Empty ARGV. */ if (len1 == 0) { rv = 0; goto out_free_page; } /* * Inherently racy -- command line shares address space * with code and data. */ rv = access_remote_vm(mm, arg_end - 1, &c, 1, 0); if (rv <= 0) goto out_free_page; rv = 0; if (c == '\0') { /* Command line (set of strings) occupies whole ARGV. 
*/ if (len1 <= *pos) goto out_free_page; p = arg_start + *pos; len = len1 - *pos; while (count > 0 && len > 0) { unsigned int _count; int nr_read; _count = min3(count, len, PAGE_SIZE); nr_read = access_remote_vm(mm, p, page, _count, 0); if (nr_read < 0) rv = nr_read; if (nr_read <= 0) goto out_free_page; if (copy_to_user(buf, page, nr_read)) { rv = -EFAULT; goto out_free_page; } p += nr_read; len -= nr_read; buf += nr_read; count -= nr_read; rv += nr_read; } } else { /* * Command line (1 string) occupies ARGV and maybe * extends into ENVP. */ if (len1 + len2 <= *pos) goto skip_argv_envp; if (len1 <= *pos) goto skip_argv; p = arg_start + *pos; len = len1 - *pos; while (count > 0 && len > 0) { unsigned int _count, l; int nr_read; bool final; _count = min3(count, len, PAGE_SIZE); nr_read = access_remote_vm(mm, p, page, _count, 0); if (nr_read < 0) rv = nr_read; if (nr_read <= 0) goto out_free_page; /* * Command line can be shorter than whole ARGV * even if last "marker" byte says it is not. */ final = false; l = strnlen(page, nr_read); if (l < nr_read) { nr_read = l; final = true; } if (copy_to_user(buf, page, nr_read)) { rv = -EFAULT; goto out_free_page; } p += nr_read; len -= nr_read; buf += nr_read; count -= nr_read; rv += nr_read; if (final) goto out_free_page; } skip_argv: /* * Command line (1 string) occupies ARGV and * extends into ENVP. */ if (len1 <= *pos) { p = env_start + *pos - len1; len = len1 + len2 - *pos; } else { p = env_start; len = len2; } while (count > 0 && len > 0) { unsigned int _count, l; int nr_read; bool final; _count = min3(count, len, PAGE_SIZE); nr_read = access_remote_vm(mm, p, page, _count, 0); if (nr_read < 0) rv = nr_read; if (nr_read <= 0) goto out_free_page; /* Find EOS. */ final = false; l = strnlen(page, nr_read); if (l < nr_read) { nr_read = l; final = true; } if (copy_to_user(buf, page, nr_read)) { rv = -EFAULT; goto out_free_page; } p += nr_read; len -= nr_read; buf += nr_read; count -= nr_read; rv += nr_read; if (final) goto out_free_page; } skip_argv_envp: ; } out_free_page: free_page((unsigned long)page); out_mmput: mmput(mm); if (rv > 0) *pos += rv; return rv; } static const struct file_operations proc_pid_cmdline_ops = { .read = proc_pid_cmdline_read, .llseek = generic_file_llseek, }; static int proc_pid_auxv(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task) { struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ_FSCREDS); if (mm && !IS_ERR(mm)) { unsigned int nwords = 0; do { nwords += 2; } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */ seq_write(m, mm->saved_auxv, nwords * sizeof(mm->saved_auxv[0])); mmput(mm); return 0; } else return PTR_ERR(mm); } #ifdef CONFIG_KALLSYMS /* * Provides a wchan file via kallsyms in a proper one-value-per-file format. * Returns the resolved symbol. If that fails, simply return the address. 
*/ static int proc_pid_wchan(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task) { unsigned long wchan; char symname[KSYM_NAME_LEN]; wchan = get_wchan(task); if (wchan && ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS) && !lookup_symbol_name(wchan, symname)) seq_printf(m, "%s", symname); else seq_puts(m, "0\n"); return 0; } #endif /* CONFIG_KALLSYMS */ static int lock_trace(struct task_struct *task) { int err = mutex_lock_killable(&task->signal->cred_guard_mutex); if (err) return err; if (!ptrace_may_access(task, PTRACE_MODE_ATTACH_FSCREDS)) { mutex_unlock(&task->signal->cred_guard_mutex); return -EPERM; } return 0; } static void unlock_trace(struct task_struct *task) { mutex_unlock(&task->signal->cred_guard_mutex); } #ifdef CONFIG_STACKTRACE #define MAX_STACK_TRACE_DEPTH 64 static int proc_pid_stack(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task) { struct stack_trace trace; unsigned long *entries; int err; int i; entries = kmalloc(MAX_STACK_TRACE_DEPTH * sizeof(*entries), GFP_KERNEL); if (!entries) return -ENOMEM; trace.nr_entries = 0; trace.max_entries = MAX_STACK_TRACE_DEPTH; trace.entries = entries; trace.skip = 0; err = lock_trace(task); if (!err) { save_stack_trace_tsk(task, &trace); for (i = 0; i < trace.nr_entries; i++) { seq_printf(m, "[<%pK>] %pS\n", (void *)entries[i], (void *)entries[i]); } unlock_trace(task); } kfree(entries); return err; } #endif #ifdef CONFIG_SCHED_INFO /* * Provides /proc/PID/schedstat */ static int proc_pid_schedstat(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task) { if (unlikely(!sched_info_on())) seq_printf(m, "0 0 0\n"); else seq_printf(m, "%llu %llu %lu\n", (unsigned long long)task->se.sum_exec_runtime, (unsigned long long)task->sched_info.run_delay, task->sched_info.pcount); return 0; } #endif #ifdef CONFIG_LATENCYTOP static int lstats_show_proc(struct seq_file *m, void *v) { int i; struct inode *inode = m->private; struct task_struct *task = get_proc_task(inode); if (!task) return -ESRCH; seq_puts(m, "Latency Top version : v0.1\n"); for (i = 0; i < 32; i++) { struct latency_record *lr = &task->latency_record[i]; if (lr->backtrace[0]) { int q; seq_printf(m, "%i %li %li", lr->count, lr->time, lr->max); for (q = 0; q < LT_BACKTRACEDEPTH; q++) { unsigned long bt = lr->backtrace[q]; if (!bt) break; if (bt == ULONG_MAX) break; seq_printf(m, " %ps", (void *)bt); } seq_putc(m, '\n'); } } put_task_struct(task); return 0; } static int lstats_open(struct inode *inode, struct file *file) { return single_open(file, lstats_show_proc, inode); } static ssize_t lstats_write(struct file *file, const char __user *buf, size_t count, loff_t *offs) { struct task_struct *task = get_proc_task(file_inode(file)); if (!task) return -ESRCH; clear_all_latency_tracing(task); put_task_struct(task); return count; } static const struct file_operations proc_lstats_operations = { .open = lstats_open, .read = seq_read, .write = lstats_write, .llseek = seq_lseek, .release = single_release, }; #endif static int proc_oom_score(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task) { unsigned long totalpages = totalram_pages + total_swap_pages; unsigned long points = 0; read_lock(&tasklist_lock); if (pid_alive(task)) points = oom_badness(task, NULL, NULL, totalpages) * 1000 / totalpages; read_unlock(&tasklist_lock); seq_printf(m, "%lu\n", points); return 0; } struct limit_names { const char *name; const char *unit; }; static const struct 
limit_names lnames[RLIM_NLIMITS] = { [RLIMIT_CPU] = {"Max cpu time", "seconds"}, [RLIMIT_FSIZE] = {"Max file size", "bytes"}, [RLIMIT_DATA] = {"Max data size", "bytes"}, [RLIMIT_STACK] = {"Max stack size", "bytes"}, [RLIMIT_CORE] = {"Max core file size", "bytes"}, [RLIMIT_RSS] = {"Max resident set", "bytes"}, [RLIMIT_NPROC] = {"Max processes", "processes"}, [RLIMIT_NOFILE] = {"Max open files", "files"}, [RLIMIT_MEMLOCK] = {"Max locked memory", "bytes"}, [RLIMIT_AS] = {"Max address space", "bytes"}, [RLIMIT_LOCKS] = {"Max file locks", "locks"}, [RLIMIT_SIGPENDING] = {"Max pending signals", "signals"}, [RLIMIT_MSGQUEUE] = {"Max msgqueue size", "bytes"}, [RLIMIT_NICE] = {"Max nice priority", NULL}, [RLIMIT_RTPRIO] = {"Max realtime priority", NULL}, [RLIMIT_RTTIME] = {"Max realtime timeout", "us"}, }; /* Display limits for a process */ static int proc_pid_limits(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task) { unsigned int i; unsigned long flags; struct rlimit rlim[RLIM_NLIMITS]; if (!lock_task_sighand(task, &flags)) return 0; memcpy(rlim, task->signal->rlim, sizeof(struct rlimit) * RLIM_NLIMITS); unlock_task_sighand(task, &flags); /* * print the file header */ seq_printf(m, "%-25s %-20s %-20s %-10s\n", "Limit", "Soft Limit", "Hard Limit", "Units"); for (i = 0; i < RLIM_NLIMITS; i++) { if (rlim[i].rlim_cur == RLIM_INFINITY) seq_printf(m, "%-25s %-20s ", lnames[i].name, "unlimited"); else seq_printf(m, "%-25s %-20lu ", lnames[i].name, rlim[i].rlim_cur); if (rlim[i].rlim_max == RLIM_INFINITY) seq_printf(m, "%-20s ", "unlimited"); else seq_printf(m, "%-20lu ", rlim[i].rlim_max); if (lnames[i].unit) seq_printf(m, "%-10s\n", lnames[i].unit); else seq_putc(m, '\n'); } return 0; } #ifdef CONFIG_HAVE_ARCH_TRACEHOOK static int proc_pid_syscall(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task) { long nr; unsigned long args[6], sp, pc; int res; res = lock_trace(task); if (res) return res; if (task_current_syscall(task, &nr, args, 6, &sp, &pc)) seq_puts(m, "running\n"); else if (nr < 0) seq_printf(m, "%ld 0x%lx 0x%lx\n", nr, sp, pc); else seq_printf(m, "%ld 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx\n", nr, args[0], args[1], args[2], args[3], args[4], args[5], sp, pc); unlock_trace(task); return 0; } #endif /* CONFIG_HAVE_ARCH_TRACEHOOK */ /************************************************************************/ /* Here the fs part begins */ /************************************************************************/ /* permission checks */ static int proc_fd_access_allowed(struct inode *inode) { struct task_struct *task; int allowed = 0; /* Allow access to a task's file descriptors if it is us or we * may use ptrace attach to the process and find out that * information. */ task = get_proc_task(inode); if (task) { allowed = ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS); put_task_struct(task); } return allowed; } int proc_setattr(struct dentry *dentry, struct iattr *attr) { int error; struct inode *inode = d_inode(dentry); if (attr->ia_valid & ATTR_MODE) return -EPERM; error = inode_change_ok(inode, attr); if (error) return error; setattr_copy(inode, attr); mark_inode_dirty(inode); return 0; } /* * May current process learn task's sched/cmdline info (for hide_pid_min=1) * or euid/egid (for hide_pid_min=2)? 
*/ static bool has_pid_permissions(struct pid_namespace *pid, struct task_struct *task, int hide_pid_min) { if (pid->hide_pid < hide_pid_min) return true; if (in_group_p(pid->pid_gid)) return true; return ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS); } static int proc_pid_permission(struct inode *inode, int mask) { struct pid_namespace *pid = inode->i_sb->s_fs_info; struct task_struct *task; bool has_perms; task = get_proc_task(inode); if (!task) return -ESRCH; has_perms = has_pid_permissions(pid, task, 1); put_task_struct(task); if (!has_perms) { if (pid->hide_pid == 2) { /* * Let's make getdents(), stat(), and open() * consistent with each other. If a process * may not stat() a file, it shouldn't be seen * in procfs at all. */ return -ENOENT; } return -EPERM; } return generic_permission(inode, mask); } static const struct inode_operations proc_def_inode_operations = { .setattr = proc_setattr, }; static int proc_single_show(struct seq_file *m, void *v) { struct inode *inode = m->private; struct pid_namespace *ns; struct pid *pid; struct task_struct *task; int ret; ns = inode->i_sb->s_fs_info; pid = proc_pid(inode); task = get_pid_task(pid, PIDTYPE_PID); if (!task) return -ESRCH; ret = PROC_I(inode)->op.proc_show(m, ns, pid, task); put_task_struct(task); return ret; } static int proc_single_open(struct inode *inode, struct file *filp) { return single_open(filp, proc_single_show, inode); } static const struct file_operations proc_single_file_operations = { .open = proc_single_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; struct mm_struct *proc_mem_open(struct inode *inode, unsigned int mode) { struct task_struct *task = get_proc_task(inode); struct mm_struct *mm = ERR_PTR(-ESRCH); if (task) { mm = mm_access(task, mode | PTRACE_MODE_FSCREDS); put_task_struct(task); if (!IS_ERR_OR_NULL(mm)) { /* ensure this mm_struct can't be freed */ atomic_inc(&mm->mm_count); /* but do not pin its memory */ mmput(mm); } } return mm; } static int __mem_open(struct inode *inode, struct file *file, unsigned int mode) { struct mm_struct *mm = proc_mem_open(inode, mode); if (IS_ERR(mm)) return PTR_ERR(mm); file->private_data = mm; return 0; } static int mem_open(struct inode *inode, struct file *file) { int ret = __mem_open(inode, file, PTRACE_MODE_ATTACH); /* OK to pass negative loff_t, we can catch out-of-range */ file->f_mode |= FMODE_UNSIGNED_OFFSET; return ret; } static ssize_t mem_rw(struct file *file, char __user *buf, size_t count, loff_t *ppos, int write) { struct mm_struct *mm = file->private_data; unsigned long addr = *ppos; ssize_t copied; char *page; if (!mm) return 0; page = (char *)__get_free_page(GFP_TEMPORARY); if (!page) return -ENOMEM; copied = 0; if (!atomic_inc_not_zero(&mm->mm_users)) goto free; while (count > 0) { int this_len = min_t(int, count, PAGE_SIZE); if (write && copy_from_user(page, buf, this_len)) { copied = -EFAULT; break; } this_len = access_remote_vm(mm, addr, page, this_len, write); if (!this_len) { if (!copied) copied = -EIO; break; } if (!write && copy_to_user(buf, page, this_len)) { copied = -EFAULT; break; } buf += this_len; addr += this_len; copied += this_len; count -= this_len; } *ppos = addr; mmput(mm); free: free_page((unsigned long) page); return copied; } static ssize_t mem_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { return mem_rw(file, buf, count, ppos, 0); } static ssize_t mem_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { return mem_rw(file, (char __user*)buf, count, 
ppos, 1); } loff_t mem_lseek(struct file *file, loff_t offset, int orig) { switch (orig) { case 0: file->f_pos = offset; break; case 1: file->f_pos += offset; break; default: return -EINVAL; } force_successful_syscall_return(); return file->f_pos; } static int mem_release(struct inode *inode, struct file *file) { struct mm_struct *mm = file->private_data; if (mm) mmdrop(mm); return 0; } static const struct file_operations proc_mem_operations = { .llseek = mem_lseek, .read = mem_read, .write = mem_write, .open = mem_open, .release = mem_release, }; static int environ_open(struct inode *inode, struct file *file) { return __mem_open(inode, file, PTRACE_MODE_READ); } static ssize_t environ_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { char *page; unsigned long src = *ppos; int ret = 0; struct mm_struct *mm = file->private_data; unsigned long env_start, env_end; /* Ensure the process spawned far enough to have an environment. */ if (!mm || !mm->env_end) return 0; page = (char *)__get_free_page(GFP_TEMPORARY); if (!page) return -ENOMEM; ret = 0; if (!atomic_inc_not_zero(&mm->mm_users)) goto free; down_read(&mm->mmap_sem); env_start = mm->env_start; env_end = mm->env_end; up_read(&mm->mmap_sem); while (count > 0) { size_t this_len, max_len; int retval; if (src >= (env_end - env_start)) break; this_len = env_end - (env_start + src); max_len = min_t(size_t, PAGE_SIZE, count); this_len = min(max_len, this_len); retval = access_remote_vm(mm, (env_start + src), page, this_len, 0); if (retval <= 0) { ret = retval; break; } if (copy_to_user(buf, page, retval)) { ret = -EFAULT; break; } ret += retval; src += retval; buf += retval; count -= retval; } *ppos = src; mmput(mm); free: free_page((unsigned long) page); return ret; } static const struct file_operations proc_environ_operations = { .open = environ_open, .read = environ_read, .llseek = generic_file_llseek, .release = mem_release, }; static ssize_t oom_adj_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { struct task_struct *task = get_proc_task(file_inode(file)); char buffer[PROC_NUMBUF]; int oom_adj = OOM_ADJUST_MIN; size_t len; unsigned long flags; if (!task) return -ESRCH; if (lock_task_sighand(task, &flags)) { if (task->signal->oom_score_adj == OOM_SCORE_ADJ_MAX) oom_adj = OOM_ADJUST_MAX; else oom_adj = (task->signal->oom_score_adj * -OOM_DISABLE) / OOM_SCORE_ADJ_MAX; unlock_task_sighand(task, &flags); } put_task_struct(task); len = snprintf(buffer, sizeof(buffer), "%d\n", oom_adj); return simple_read_from_buffer(buf, count, ppos, buffer, len); } /* * /proc/pid/oom_adj exists solely for backwards compatibility with previous * kernels. The effective policy is defined by oom_score_adj, which has a * different scale: oom_adj grew exponentially and oom_score_adj grows linearly. * Values written to oom_adj are simply mapped linearly to oom_score_adj. * Processes that become oom disabled via oom_adj will still be oom disabled * with this implementation. * * oom_adj cannot be removed since existing userspace binaries use it. 
*/ static ssize_t oom_adj_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { struct task_struct *task; char buffer[PROC_NUMBUF]; int oom_adj; unsigned long flags; int err; memset(buffer, 0, sizeof(buffer)); if (count > sizeof(buffer) - 1) count = sizeof(buffer) - 1; if (copy_from_user(buffer, buf, count)) { err = -EFAULT; goto out; } err = kstrtoint(strstrip(buffer), 0, &oom_adj); if (err) goto out; if ((oom_adj < OOM_ADJUST_MIN || oom_adj > OOM_ADJUST_MAX) && oom_adj != OOM_DISABLE) { err = -EINVAL; goto out; } task = get_proc_task(file_inode(file)); if (!task) { err = -ESRCH; goto out; } task_lock(task); if (!task->mm) { err = -EINVAL; goto err_task_lock; } if (!lock_task_sighand(task, &flags)) { err = -ESRCH; goto err_task_lock; } /* * Scale /proc/pid/oom_score_adj appropriately ensuring that a maximum * value is always attainable. */ if (oom_adj == OOM_ADJUST_MAX) oom_adj = OOM_SCORE_ADJ_MAX; else oom_adj = (oom_adj * OOM_SCORE_ADJ_MAX) / -OOM_DISABLE; if (oom_adj < task->signal->oom_score_adj && !capable(CAP_SYS_RESOURCE)) { err = -EACCES; goto err_sighand; } /* * /proc/pid/oom_adj is provided for legacy purposes, ask users to use * /proc/pid/oom_score_adj instead. */ pr_warn_once("%s (%d): /proc/%d/oom_adj is deprecated, please use /proc/%d/oom_score_adj instead.\n", current->comm, task_pid_nr(current), task_pid_nr(task), task_pid_nr(task)); task->signal->oom_score_adj = oom_adj; trace_oom_score_adj_update(task); err_sighand: unlock_task_sighand(task, &flags); err_task_lock: task_unlock(task); put_task_struct(task); out: return err < 0 ? err : count; } static const struct file_operations proc_oom_adj_operations = { .read = oom_adj_read, .write = oom_adj_write, .llseek = generic_file_llseek, }; static ssize_t oom_score_adj_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { struct task_struct *task = get_proc_task(file_inode(file)); char buffer[PROC_NUMBUF]; short oom_score_adj = OOM_SCORE_ADJ_MIN; unsigned long flags; size_t len; if (!task) return -ESRCH; if (lock_task_sighand(task, &flags)) { oom_score_adj = task->signal->oom_score_adj; unlock_task_sighand(task, &flags); } put_task_struct(task); len = snprintf(buffer, sizeof(buffer), "%hd\n", oom_score_adj); return simple_read_from_buffer(buf, count, ppos, buffer, len); } static ssize_t oom_score_adj_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { struct task_struct *task; char buffer[PROC_NUMBUF]; unsigned long flags; int oom_score_adj; int err; memset(buffer, 0, sizeof(buffer)); if (count > sizeof(buffer) - 1) count = sizeof(buffer) - 1; if (copy_from_user(buffer, buf, count)) { err = -EFAULT; goto out; } err = kstrtoint(strstrip(buffer), 0, &oom_score_adj); if (err) goto out; if (oom_score_adj < OOM_SCORE_ADJ_MIN || oom_score_adj > OOM_SCORE_ADJ_MAX) { err = -EINVAL; goto out; } task = get_proc_task(file_inode(file)); if (!task) { err = -ESRCH; goto out; } task_lock(task); if (!task->mm) { err = -EINVAL; goto err_task_lock; } if (!lock_task_sighand(task, &flags)) { err = -ESRCH; goto err_task_lock; } if ((short)oom_score_adj < task->signal->oom_score_adj_min && !capable(CAP_SYS_RESOURCE)) { err = -EACCES; goto err_sighand; } task->signal->oom_score_adj = (short)oom_score_adj; if (has_capability_noaudit(current, CAP_SYS_RESOURCE)) task->signal->oom_score_adj_min = (short)oom_score_adj; trace_oom_score_adj_update(task); err_sighand: unlock_task_sighand(task, &flags); err_task_lock: task_unlock(task); put_task_struct(task); out: return err < 0 ? 
err : count; } static const struct file_operations proc_oom_score_adj_operations = { .read = oom_score_adj_read, .write = oom_score_adj_write, .llseek = default_llseek, }; #ifdef CONFIG_AUDITSYSCALL #define TMPBUFLEN 21 static ssize_t proc_loginuid_read(struct file * file, char __user * buf, size_t count, loff_t *ppos) { struct inode * inode = file_inode(file); struct task_struct *task = get_proc_task(inode); ssize_t length; char tmpbuf[TMPBUFLEN]; if (!task) return -ESRCH; length = scnprintf(tmpbuf, TMPBUFLEN, "%u", from_kuid(file->f_cred->user_ns, audit_get_loginuid(task))); put_task_struct(task); return simple_read_from_buffer(buf, count, ppos, tmpbuf, length); } static ssize_t proc_loginuid_write(struct file * file, const char __user * buf, size_t count, loff_t *ppos) { struct inode * inode = file_inode(file); uid_t loginuid; kuid_t kloginuid; int rv; rcu_read_lock(); if (current != pid_task(proc_pid(inode), PIDTYPE_PID)) { rcu_read_unlock(); return -EPERM; } rcu_read_unlock(); if (*ppos != 0) { /* No partial writes. */ return -EINVAL; } rv = kstrtou32_from_user(buf, count, 10, &loginuid); if (rv < 0) return rv; /* is userspace tring to explicitly UNSET the loginuid? */ if (loginuid == AUDIT_UID_UNSET) { kloginuid = INVALID_UID; } else { kloginuid = make_kuid(file->f_cred->user_ns, loginuid); if (!uid_valid(kloginuid)) return -EINVAL; } rv = audit_set_loginuid(kloginuid); if (rv < 0) return rv; return count; } static const struct file_operations proc_loginuid_operations = { .read = proc_loginuid_read, .write = proc_loginuid_write, .llseek = generic_file_llseek, }; static ssize_t proc_sessionid_read(struct file * file, char __user * buf, size_t count, loff_t *ppos) { struct inode * inode = file_inode(file); struct task_struct *task = get_proc_task(inode); ssize_t length; char tmpbuf[TMPBUFLEN]; if (!task) return -ESRCH; length = scnprintf(tmpbuf, TMPBUFLEN, "%u", audit_get_sessionid(task)); put_task_struct(task); return simple_read_from_buffer(buf, count, ppos, tmpbuf, length); } static const struct file_operations proc_sessionid_operations = { .read = proc_sessionid_read, .llseek = generic_file_llseek, }; #endif #ifdef CONFIG_FAULT_INJECTION static ssize_t proc_fault_inject_read(struct file * file, char __user * buf, size_t count, loff_t *ppos) { struct task_struct *task = get_proc_task(file_inode(file)); char buffer[PROC_NUMBUF]; size_t len; int make_it_fail; if (!task) return -ESRCH; make_it_fail = task->make_it_fail; put_task_struct(task); len = snprintf(buffer, sizeof(buffer), "%i\n", make_it_fail); return simple_read_from_buffer(buf, count, ppos, buffer, len); } static ssize_t proc_fault_inject_write(struct file * file, const char __user * buf, size_t count, loff_t *ppos) { struct task_struct *task; char buffer[PROC_NUMBUF]; int make_it_fail; int rv; if (!capable(CAP_SYS_RESOURCE)) return -EPERM; memset(buffer, 0, sizeof(buffer)); if (count > sizeof(buffer) - 1) count = sizeof(buffer) - 1; if (copy_from_user(buffer, buf, count)) return -EFAULT; rv = kstrtoint(strstrip(buffer), 0, &make_it_fail); if (rv < 0) return rv; if (make_it_fail < 0 || make_it_fail > 1) return -EINVAL; task = get_proc_task(file_inode(file)); if (!task) return -ESRCH; task->make_it_fail = make_it_fail; put_task_struct(task); return count; } static const struct file_operations proc_fault_inject_operations = { .read = proc_fault_inject_read, .write = proc_fault_inject_write, .llseek = generic_file_llseek, }; #endif #ifdef CONFIG_SCHED_DEBUG /* * Print out various scheduling related per-task fields: */ static 
int sched_show(struct seq_file *m, void *v) { struct inode *inode = m->private; struct task_struct *p; p = get_proc_task(inode); if (!p) return -ESRCH; proc_sched_show_task(p, m); put_task_struct(p); return 0; } static ssize_t sched_write(struct file *file, const char __user *buf, size_t count, loff_t *offset) { struct inode *inode = file_inode(file); struct task_struct *p; p = get_proc_task(inode); if (!p) return -ESRCH; proc_sched_set_task(p); put_task_struct(p); return count; } static int sched_open(struct inode *inode, struct file *filp) { return single_open(filp, sched_show, inode); } static const struct file_operations proc_pid_sched_operations = { .open = sched_open, .read = seq_read, .write = sched_write, .llseek = seq_lseek, .release = single_release, }; #endif #ifdef CONFIG_SCHED_AUTOGROUP /* * Print out autogroup related information: */ static int sched_autogroup_show(struct seq_file *m, void *v) { struct inode *inode = m->private; struct task_struct *p; p = get_proc_task(inode); if (!p) return -ESRCH; proc_sched_autogroup_show_task(p, m); put_task_struct(p); return 0; } static ssize_t sched_autogroup_write(struct file *file, const char __user *buf, size_t count, loff_t *offset) { struct inode *inode = file_inode(file); struct task_struct *p; char buffer[PROC_NUMBUF]; int nice; int err; memset(buffer, 0, sizeof(buffer)); if (count > sizeof(buffer) - 1) count = sizeof(buffer) - 1; if (copy_from_user(buffer, buf, count)) return -EFAULT; err = kstrtoint(strstrip(buffer), 0, &nice); if (err < 0) return err; p = get_proc_task(inode); if (!p) return -ESRCH; err = proc_sched_autogroup_set_nice(p, nice); if (err) count = err; put_task_struct(p); return count; } static int sched_autogroup_open(struct inode *inode, struct file *filp) { int ret; ret = single_open(filp, sched_autogroup_show, NULL); if (!ret) { struct seq_file *m = filp->private_data; m->private = inode; } return ret; } static const struct file_operations proc_pid_sched_autogroup_operations = { .open = sched_autogroup_open, .read = seq_read, .write = sched_autogroup_write, .llseek = seq_lseek, .release = single_release, }; #endif /* CONFIG_SCHED_AUTOGROUP */ static ssize_t comm_write(struct file *file, const char __user *buf, size_t count, loff_t *offset) { struct inode *inode = file_inode(file); struct task_struct *p; char buffer[TASK_COMM_LEN]; const size_t maxlen = sizeof(buffer) - 1; memset(buffer, 0, sizeof(buffer)); if (copy_from_user(buffer, buf, count > maxlen ? 
maxlen : count)) return -EFAULT; p = get_proc_task(inode); if (!p) return -ESRCH; if (same_thread_group(current, p)) set_task_comm(p, buffer); else count = -EINVAL; put_task_struct(p); return count; } static int comm_show(struct seq_file *m, void *v) { struct inode *inode = m->private; struct task_struct *p; p = get_proc_task(inode); if (!p) return -ESRCH; task_lock(p); seq_printf(m, "%s\n", p->comm); task_unlock(p); put_task_struct(p); return 0; } static int comm_open(struct inode *inode, struct file *filp) { return single_open(filp, comm_show, inode); } static const struct file_operations proc_pid_set_comm_operations = { .open = comm_open, .read = seq_read, .write = comm_write, .llseek = seq_lseek, .release = single_release, }; static int proc_exe_link(struct dentry *dentry, struct path *exe_path) { struct task_struct *task; struct mm_struct *mm; struct file *exe_file; task = get_proc_task(d_inode(dentry)); if (!task) return -ENOENT; mm = get_task_mm(task); put_task_struct(task); if (!mm) return -ENOENT; exe_file = get_mm_exe_file(mm); mmput(mm); if (exe_file) { *exe_path = exe_file->f_path; path_get(&exe_file->f_path); fput(exe_file); return 0; } else return -ENOENT; } static const char *proc_pid_get_link(struct dentry *dentry, struct inode *inode, struct delayed_call *done) { struct path path; int error = -EACCES; if (!dentry) return ERR_PTR(-ECHILD); /* Are we allowed to snoop on the tasks file descriptors? */ if (!proc_fd_access_allowed(inode)) goto out; error = PROC_I(inode)->op.proc_get_link(dentry, &path); if (error) goto out; nd_jump_link(&path); return NULL; out: return ERR_PTR(error); } static int do_proc_readlink(struct path *path, char __user *buffer, int buflen) { char *tmp = (char*)__get_free_page(GFP_TEMPORARY); char *pathname; int len; if (!tmp) return -ENOMEM; pathname = d_path(path, tmp, PAGE_SIZE); len = PTR_ERR(pathname); if (IS_ERR(pathname)) goto out; len = tmp + PAGE_SIZE - 1 - pathname; if (len > buflen) len = buflen; if (copy_to_user(buffer, pathname, len)) len = -EFAULT; out: free_page((unsigned long)tmp); return len; } static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int buflen) { int error = -EACCES; struct inode *inode = d_inode(dentry); struct path path; /* Are we allowed to snoop on the tasks file descriptors? */ if (!proc_fd_access_allowed(inode)) goto out; error = PROC_I(inode)->op.proc_get_link(dentry, &path); if (error) goto out; error = do_proc_readlink(&path, buffer, buflen); path_put(&path); out: return error; } const struct inode_operations proc_pid_link_inode_operations = { .readlink = proc_pid_readlink, .get_link = proc_pid_get_link, .setattr = proc_setattr, }; /* building an inode */ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *task) { struct inode * inode; struct proc_inode *ei; const struct cred *cred; /* We need a new inode */ inode = new_inode(sb); if (!inode) goto out; /* Common stuff */ ei = PROC_I(inode); inode->i_ino = get_next_ino(); inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; inode->i_op = &proc_def_inode_operations; /* * grab the reference to task. 
*/ ei->pid = get_task_pid(task, PIDTYPE_PID); if (!ei->pid) goto out_unlock; if (task_dumpable(task)) { rcu_read_lock(); cred = __task_cred(task); inode->i_uid = cred->euid; inode->i_gid = cred->egid; rcu_read_unlock(); } security_task_to_inode(task, inode); out: return inode; out_unlock: iput(inode); return NULL; } int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) { struct inode *inode = d_inode(dentry); struct task_struct *task; const struct cred *cred; struct pid_namespace *pid = dentry->d_sb->s_fs_info; generic_fillattr(inode, stat); rcu_read_lock(); stat->uid = GLOBAL_ROOT_UID; stat->gid = GLOBAL_ROOT_GID; task = pid_task(proc_pid(inode), PIDTYPE_PID); if (task) { if (!has_pid_permissions(pid, task, 2)) { rcu_read_unlock(); /* * This doesn't prevent learning whether PID exists, * it only makes getattr() consistent with readdir(). */ return -ENOENT; } if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) || task_dumpable(task)) { cred = __task_cred(task); stat->uid = cred->euid; stat->gid = cred->egid; } } rcu_read_unlock(); return 0; } /* dentry stuff */ /* * Exceptional case: normally we are not allowed to unhash a busy * directory. In this case, however, we can do it - no aliasing problems * due to the way we treat inodes. * * Rewrite the inode's ownerships here because the owning task may have * performed a setuid(), etc. * * Before the /proc/pid/status file was created the only way to read * the effective uid of a process was to stat /proc/pid. Reading * /proc/pid/status is slow enough that procps and other packages * kept stating /proc/pid. To keep the rules in /proc simple I have * made this apply to all per process world readable and executable * directories. */ int pid_revalidate(struct dentry *dentry, unsigned int flags) { struct inode *inode; struct task_struct *task; const struct cred *cred; if (flags & LOOKUP_RCU) return -ECHILD; inode = d_inode(dentry); task = get_proc_task(inode); if (task) { if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) || task_dumpable(task)) { rcu_read_lock(); cred = __task_cred(task); inode->i_uid = cred->euid; inode->i_gid = cred->egid; rcu_read_unlock(); } else { inode->i_uid = GLOBAL_ROOT_UID; inode->i_gid = GLOBAL_ROOT_GID; } inode->i_mode &= ~(S_ISUID | S_ISGID); security_task_to_inode(task, inode); put_task_struct(task); return 1; } return 0; } static inline bool proc_inode_is_dead(struct inode *inode) { return !proc_pid(inode)->tasks[PIDTYPE_PID].first; } int pid_delete_dentry(const struct dentry *dentry) { /* Is the task we represent dead? * If so, then don't put the dentry on the lru list, * kill it immediately. */ return proc_inode_is_dead(d_inode(dentry)); } const struct dentry_operations pid_dentry_operations = { .d_revalidate = pid_revalidate, .d_delete = pid_delete_dentry, }; /* Lookups */ /* * Fill a directory entry. * * If possible create the dcache entry and derive our inode number and * file type from the dcache entry. * * Since all of the proc inode numbers are dynamically generated, the inode * numbers do not exist until the inode is cached. This means creating the * dcache entry in readdir is necessary to keep the inode numbers * reported by readdir in sync with the inode numbers reported * by stat. 
*/ bool proc_fill_cache(struct file *file, struct dir_context *ctx, const char *name, int len, instantiate_t instantiate, struct task_struct *task, const void *ptr) { struct dentry *child, *dir = file->f_path.dentry; struct qstr qname = QSTR_INIT(name, len); struct inode *inode; unsigned type; ino_t ino; child = d_hash_and_lookup(dir, &qname); if (!child) { child = d_alloc(dir, &qname); if (!child) goto end_instantiate; if (instantiate(d_inode(dir), child, task, ptr) < 0) { dput(child); goto end_instantiate; } } inode = d_inode(child); ino = inode->i_ino; type = inode->i_mode >> 12; dput(child); return dir_emit(ctx, name, len, ino, type); end_instantiate: return dir_emit(ctx, name, len, 1, DT_UNKNOWN); } /* * dname_to_vma_addr - maps a dentry name into two unsigned longs * which represent vma start and end addresses. */ static int dname_to_vma_addr(struct dentry *dentry, unsigned long *start, unsigned long *end) { if (sscanf(dentry->d_name.name, "%lx-%lx", start, end) != 2) return -EINVAL; return 0; } static int map_files_d_revalidate(struct dentry *dentry, unsigned int flags) { unsigned long vm_start, vm_end; bool exact_vma_exists = false; struct mm_struct *mm = NULL; struct task_struct *task; const struct cred *cred; struct inode *inode; int status = 0; if (flags & LOOKUP_RCU) return -ECHILD; inode = d_inode(dentry); task = get_proc_task(inode); if (!task) goto out_notask; mm = mm_access(task, PTRACE_MODE_READ_FSCREDS); if (IS_ERR_OR_NULL(mm)) goto out; if (!dname_to_vma_addr(dentry, &vm_start, &vm_end)) { down_read(&mm->mmap_sem); exact_vma_exists = !!find_exact_vma(mm, vm_start, vm_end); up_read(&mm->mmap_sem); } mmput(mm); if (exact_vma_exists) { if (task_dumpable(task)) { rcu_read_lock(); cred = __task_cred(task); inode->i_uid = cred->euid; inode->i_gid = cred->egid; rcu_read_unlock(); } else { inode->i_uid = GLOBAL_ROOT_UID; inode->i_gid = GLOBAL_ROOT_GID; } security_task_to_inode(task, inode); status = 1; } out: put_task_struct(task); out_notask: return status; } static const struct dentry_operations tid_map_files_dentry_operations = { .d_revalidate = map_files_d_revalidate, .d_delete = pid_delete_dentry, }; static int map_files_get_link(struct dentry *dentry, struct path *path) { unsigned long vm_start, vm_end; struct vm_area_struct *vma; struct task_struct *task; struct mm_struct *mm; int rc; rc = -ENOENT; task = get_proc_task(d_inode(dentry)); if (!task) goto out; mm = get_task_mm(task); put_task_struct(task); if (!mm) goto out; rc = dname_to_vma_addr(dentry, &vm_start, &vm_end); if (rc) goto out_mmput; rc = -ENOENT; down_read(&mm->mmap_sem); vma = find_exact_vma(mm, vm_start, vm_end); if (vma && vma->vm_file) { *path = vma->vm_file->f_path; path_get(path); rc = 0; } up_read(&mm->mmap_sem); out_mmput: mmput(mm); out: return rc; } struct map_files_info { fmode_t mode; unsigned long len; unsigned char name[4*sizeof(long)+2]; /* max: %lx-%lx\0 */ }; /* * Only allow CAP_SYS_ADMIN to follow the links, due to concerns about how the * symlinks may be used to bypass permissions on ancestor directories in the * path to the file in question. 
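 * proc_map_files_get_link() below therefore rejects callers lacking CAP_SYS_ADMIN with -EPERM before delegating to proc_pid_get_link().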
*/ static const char * proc_map_files_get_link(struct dentry *dentry, struct inode *inode, struct delayed_call *done) { if (!capable(CAP_SYS_ADMIN)) return ERR_PTR(-EPERM); return proc_pid_get_link(dentry, inode, done); } /* * Identical to proc_pid_link_inode_operations except for get_link() */ static const struct inode_operations proc_map_files_link_inode_operations = { .readlink = proc_pid_readlink, .get_link = proc_map_files_get_link, .setattr = proc_setattr, }; static int proc_map_files_instantiate(struct inode *dir, struct dentry *dentry, struct task_struct *task, const void *ptr) { fmode_t mode = (fmode_t)(unsigned long)ptr; struct proc_inode *ei; struct inode *inode; inode = proc_pid_make_inode(dir->i_sb, task); if (!inode) return -ENOENT; ei = PROC_I(inode); ei->op.proc_get_link = map_files_get_link; inode->i_op = &proc_map_files_link_inode_operations; inode->i_size = 64; inode->i_mode = S_IFLNK; if (mode & FMODE_READ) inode->i_mode |= S_IRUSR; if (mode & FMODE_WRITE) inode->i_mode |= S_IWUSR; d_set_d_op(dentry, &tid_map_files_dentry_operations); d_add(dentry, inode); return 0; } static struct dentry *proc_map_files_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) { unsigned long vm_start, vm_end; struct vm_area_struct *vma; struct task_struct *task; int result; struct mm_struct *mm; result = -ENOENT; task = get_proc_task(dir); if (!task) goto out; result = -EACCES; if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS)) goto out_put_task; result = -ENOENT; if (dname_to_vma_addr(dentry, &vm_start, &vm_end)) goto out_put_task; mm = get_task_mm(task); if (!mm) goto out_put_task; down_read(&mm->mmap_sem); vma = find_exact_vma(mm, vm_start, vm_end); if (!vma) goto out_no_vma; if (vma->vm_file) result = proc_map_files_instantiate(dir, dentry, task, (void *)(unsigned long)vma->vm_file->f_mode); out_no_vma: up_read(&mm->mmap_sem); mmput(mm); out_put_task: put_task_struct(task); out: return ERR_PTR(result); } static const struct inode_operations proc_map_files_inode_operations = { .lookup = proc_map_files_lookup, .permission = proc_fd_permission, .setattr = proc_setattr, }; static int proc_map_files_readdir(struct file *file, struct dir_context *ctx) { struct vm_area_struct *vma; struct task_struct *task; struct mm_struct *mm; unsigned long nr_files, pos, i; struct flex_array *fa = NULL; struct map_files_info info; struct map_files_info *p; int ret; ret = -ENOENT; task = get_proc_task(file_inode(file)); if (!task) goto out; ret = -EACCES; if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS)) goto out_put_task; ret = 0; if (!dir_emit_dots(file, ctx)) goto out_put_task; mm = get_task_mm(task); if (!mm) goto out_put_task; down_read(&mm->mmap_sem); nr_files = 0; /* * We need two passes here: * * 1) Collect vmas of mapped files with mmap_sem taken * 2) Release mmap_sem and instantiate entries * * otherwise we get lockdep complained, since filldir() * routine might require mmap_sem taken in might_fault(). 
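 * (pos starts at 2 because positions 0 and 1 are the "." and ".." entries already emitted by dir_emit_dots() above.)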
*/ for (vma = mm->mmap, pos = 2; vma; vma = vma->vm_next) { if (vma->vm_file && ++pos > ctx->pos) nr_files++; } if (nr_files) { fa = flex_array_alloc(sizeof(info), nr_files, GFP_KERNEL); if (!fa || flex_array_prealloc(fa, 0, nr_files, GFP_KERNEL)) { ret = -ENOMEM; if (fa) flex_array_free(fa); up_read(&mm->mmap_sem); mmput(mm); goto out_put_task; } for (i = 0, vma = mm->mmap, pos = 2; vma; vma = vma->vm_next) { if (!vma->vm_file) continue; if (++pos <= ctx->pos) continue; info.mode = vma->vm_file->f_mode; info.len = snprintf(info.name, sizeof(info.name), "%lx-%lx", vma->vm_start, vma->vm_end); if (flex_array_put(fa, i++, &info, GFP_KERNEL)) BUG(); } } up_read(&mm->mmap_sem); for (i = 0; i < nr_files; i++) { p = flex_array_get(fa, i); if (!proc_fill_cache(file, ctx, p->name, p->len, proc_map_files_instantiate, task, (void *)(unsigned long)p->mode)) break; ctx->pos++; } if (fa) flex_array_free(fa); mmput(mm); out_put_task: put_task_struct(task); out: return ret; } static const struct file_operations proc_map_files_operations = { .read = generic_read_dir, .iterate = proc_map_files_readdir, .llseek = default_llseek, }; #ifdef CONFIG_CHECKPOINT_RESTORE struct timers_private { struct pid *pid; struct task_struct *task; struct sighand_struct *sighand; struct pid_namespace *ns; unsigned long flags; }; static void *timers_start(struct seq_file *m, loff_t *pos) { struct timers_private *tp = m->private; tp->task = get_pid_task(tp->pid, PIDTYPE_PID); if (!tp->task) return ERR_PTR(-ESRCH); tp->sighand = lock_task_sighand(tp->task, &tp->flags); if (!tp->sighand) return ERR_PTR(-ESRCH); return seq_list_start(&tp->task->signal->posix_timers, *pos); } static void *timers_next(struct seq_file *m, void *v, loff_t *pos) { struct timers_private *tp = m->private; return seq_list_next(v, &tp->task->signal->posix_timers, pos); } static void timers_stop(struct seq_file *m, void *v) { struct timers_private *tp = m->private; if (tp->sighand) { unlock_task_sighand(tp->task, &tp->flags); tp->sighand = NULL; } if (tp->task) { put_task_struct(tp->task); tp->task = NULL; } } static int show_timer(struct seq_file *m, void *v) { struct k_itimer *timer; struct timers_private *tp = m->private; int notify; static const char * const nstr[] = { [SIGEV_SIGNAL] = "signal", [SIGEV_NONE] = "none", [SIGEV_THREAD] = "thread", }; timer = list_entry((struct list_head *)v, struct k_itimer, list); notify = timer->it_sigev_notify; seq_printf(m, "ID: %d\n", timer->it_id); seq_printf(m, "signal: %d/%p\n", timer->sigq->info.si_signo, timer->sigq->info.si_value.sival_ptr); seq_printf(m, "notify: %s/%s.%d\n", nstr[notify & ~SIGEV_THREAD_ID], (notify & SIGEV_THREAD_ID) ? 
"tid" : "pid", pid_nr_ns(timer->it_pid, tp->ns)); seq_printf(m, "ClockID: %d\n", timer->it_clock); return 0; } static const struct seq_operations proc_timers_seq_ops = { .start = timers_start, .next = timers_next, .stop = timers_stop, .show = show_timer, }; static int proc_timers_open(struct inode *inode, struct file *file) { struct timers_private *tp; tp = __seq_open_private(file, &proc_timers_seq_ops, sizeof(struct timers_private)); if (!tp) return -ENOMEM; tp->pid = proc_pid(inode); tp->ns = inode->i_sb->s_fs_info; return 0; } static const struct file_operations proc_timers_operations = { .open = proc_timers_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_private, }; #endif static ssize_t timerslack_ns_write(struct file *file, const char __user *buf, size_t count, loff_t *offset) { struct inode *inode = file_inode(file); struct task_struct *p; u64 slack_ns; int err; err = kstrtoull_from_user(buf, count, 10, &slack_ns); if (err < 0) return err; p = get_proc_task(inode); if (!p) return -ESRCH; if (ptrace_may_access(p, PTRACE_MODE_ATTACH_FSCREDS)) { task_lock(p); if (slack_ns == 0) p->timer_slack_ns = p->default_timer_slack_ns; else p->timer_slack_ns = slack_ns; task_unlock(p); } else count = -EPERM; put_task_struct(p); return count; } static int timerslack_ns_show(struct seq_file *m, void *v) { struct inode *inode = m->private; struct task_struct *p; int err = 0; p = get_proc_task(inode); if (!p) return -ESRCH; if (ptrace_may_access(p, PTRACE_MODE_ATTACH_FSCREDS)) { task_lock(p); seq_printf(m, "%llu\n", p->timer_slack_ns); task_unlock(p); } else err = -EPERM; put_task_struct(p); return err; } static int timerslack_ns_open(struct inode *inode, struct file *filp) { return single_open(filp, timerslack_ns_show, inode); } static const struct file_operations proc_pid_set_timerslack_ns_operations = { .open = timerslack_ns_open, .read = seq_read, .write = timerslack_ns_write, .llseek = seq_lseek, .release = single_release, }; static int proc_pident_instantiate(struct inode *dir, struct dentry *dentry, struct task_struct *task, const void *ptr) { const struct pid_entry *p = ptr; struct inode *inode; struct proc_inode *ei; inode = proc_pid_make_inode(dir->i_sb, task); if (!inode) goto out; ei = PROC_I(inode); inode->i_mode = p->mode; if (S_ISDIR(inode->i_mode)) set_nlink(inode, 2); /* Use getattr to fix if necessary */ if (p->iop) inode->i_op = p->iop; if (p->fop) inode->i_fop = p->fop; ei->op = p->op; d_set_d_op(dentry, &pid_dentry_operations); d_add(dentry, inode); /* Close the race of the process dying before we return the dentry */ if (pid_revalidate(dentry, 0)) return 0; out: return -ENOENT; } static struct dentry *proc_pident_lookup(struct inode *dir, struct dentry *dentry, const struct pid_entry *ents, unsigned int nents) { int error; struct task_struct *task = get_proc_task(dir); const struct pid_entry *p, *last; error = -ENOENT; if (!task) goto out_no_task; /* * Yes, it does not scale. And it should not. Don't add * new entries into /proc/<tgid>/ without very good reasons. 
*/ last = &ents[nents - 1]; for (p = ents; p <= last; p++) { if (p->len != dentry->d_name.len) continue; if (!memcmp(dentry->d_name.name, p->name, p->len)) break; } if (p > last) goto out; error = proc_pident_instantiate(dir, dentry, task, p); out: put_task_struct(task); out_no_task: return ERR_PTR(error); } static int proc_pident_readdir(struct file *file, struct dir_context *ctx, const struct pid_entry *ents, unsigned int nents) { struct task_struct *task = get_proc_task(file_inode(file)); const struct pid_entry *p; if (!task) return -ENOENT; if (!dir_emit_dots(file, ctx)) goto out; if (ctx->pos >= nents + 2) goto out; for (p = ents + (ctx->pos - 2); p <= ents + nents - 1; p++) { if (!proc_fill_cache(file, ctx, p->name, p->len, proc_pident_instantiate, task, p)) break; ctx->pos++; } out: put_task_struct(task); return 0; } #ifdef CONFIG_SECURITY static ssize_t proc_pid_attr_read(struct file * file, char __user * buf, size_t count, loff_t *ppos) { struct inode * inode = file_inode(file); char *p = NULL; ssize_t length; struct task_struct *task = get_proc_task(inode); if (!task) return -ESRCH; length = security_getprocattr(task, (char*)file->f_path.dentry->d_name.name, &p); put_task_struct(task); if (length > 0) length = simple_read_from_buffer(buf, count, ppos, p, length); kfree(p); return length; } static ssize_t proc_pid_attr_write(struct file * file, const char __user * buf, size_t count, loff_t *ppos) { struct inode * inode = file_inode(file); void *page; ssize_t length; struct task_struct *task = get_proc_task(inode); length = -ESRCH; if (!task) goto out_no_task; if (count > PAGE_SIZE) count = PAGE_SIZE; /* No partial writes. */ length = -EINVAL; if (*ppos != 0) goto out; page = memdup_user(buf, count); if (IS_ERR(page)) { length = PTR_ERR(page); goto out; } /* Guard against adverse ptrace interaction */ length = mutex_lock_interruptible(&task->signal->cred_guard_mutex); if (length < 0) goto out_free; length = security_setprocattr(task, (char*)file->f_path.dentry->d_name.name, page, count); mutex_unlock(&task->signal->cred_guard_mutex); out_free: kfree(page); out: put_task_struct(task); out_no_task: return length; } static const struct file_operations proc_pid_attr_operations = { .read = proc_pid_attr_read, .write = proc_pid_attr_write, .llseek = generic_file_llseek, }; static const struct pid_entry attr_dir_stuff[] = { REG("current", S_IRUGO|S_IWUGO, proc_pid_attr_operations), REG("prev", S_IRUGO, proc_pid_attr_operations), REG("exec", S_IRUGO|S_IWUGO, proc_pid_attr_operations), REG("fscreate", S_IRUGO|S_IWUGO, proc_pid_attr_operations), REG("keycreate", S_IRUGO|S_IWUGO, proc_pid_attr_operations), REG("sockcreate", S_IRUGO|S_IWUGO, proc_pid_attr_operations), }; static int proc_attr_dir_readdir(struct file *file, struct dir_context *ctx) { return proc_pident_readdir(file, ctx, attr_dir_stuff, ARRAY_SIZE(attr_dir_stuff)); } static const struct file_operations proc_attr_dir_operations = { .read = generic_read_dir, .iterate = proc_attr_dir_readdir, .llseek = default_llseek, }; static struct dentry *proc_attr_dir_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) { return proc_pident_lookup(dir, dentry, attr_dir_stuff, ARRAY_SIZE(attr_dir_stuff)); } static const struct inode_operations proc_attr_dir_inode_operations = { .lookup = proc_attr_dir_lookup, .getattr = pid_getattr, .setattr = proc_setattr, }; #endif #ifdef CONFIG_ELF_CORE static ssize_t proc_coredump_filter_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { struct task_struct *task = 
get_proc_task(file_inode(file)); struct mm_struct *mm; char buffer[PROC_NUMBUF]; size_t len; int ret; if (!task) return -ESRCH; ret = 0; mm = get_task_mm(task); if (mm) { len = snprintf(buffer, sizeof(buffer), "%08lx\n", ((mm->flags & MMF_DUMP_FILTER_MASK) >> MMF_DUMP_FILTER_SHIFT)); mmput(mm); ret = simple_read_from_buffer(buf, count, ppos, buffer, len); } put_task_struct(task); return ret; } static ssize_t proc_coredump_filter_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { struct task_struct *task; struct mm_struct *mm; unsigned int val; int ret; int i; unsigned long mask; ret = kstrtouint_from_user(buf, count, 0, &val); if (ret < 0) return ret; ret = -ESRCH; task = get_proc_task(file_inode(file)); if (!task) goto out_no_task; mm = get_task_mm(task); if (!mm) goto out_no_mm; ret = 0; for (i = 0, mask = 1; i < MMF_DUMP_FILTER_BITS; i++, mask <<= 1) { if (val & mask) set_bit(i + MMF_DUMP_FILTER_SHIFT, &mm->flags); else clear_bit(i + MMF_DUMP_FILTER_SHIFT, &mm->flags); } mmput(mm); out_no_mm: put_task_struct(task); out_no_task: if (ret < 0) return ret; return count; } static const struct file_operations proc_coredump_filter_operations = { .read = proc_coredump_filter_read, .write = proc_coredump_filter_write, .llseek = generic_file_llseek, }; #endif #ifdef CONFIG_TASK_IO_ACCOUNTING static int do_io_accounting(struct task_struct *task, struct seq_file *m, int whole) { struct task_io_accounting acct = task->ioac; unsigned long flags; int result; result = mutex_lock_killable(&task->signal->cred_guard_mutex); if (result) return result; if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS)) { result = -EACCES; goto out_unlock; } if (whole && lock_task_sighand(task, &flags)) { struct task_struct *t = task; task_io_accounting_add(&acct, &task->signal->ioac); while_each_thread(task, t) task_io_accounting_add(&acct, &t->ioac); unlock_task_sighand(task, &flags); } seq_printf(m, "rchar: %llu\n" "wchar: %llu\n" "syscr: %llu\n" "syscw: %llu\n" "read_bytes: %llu\n" "write_bytes: %llu\n" "cancelled_write_bytes: %llu\n", (unsigned long long)acct.rchar, (unsigned long long)acct.wchar, (unsigned long long)acct.syscr, (unsigned long long)acct.syscw, (unsigned long long)acct.read_bytes, (unsigned long long)acct.write_bytes, (unsigned long long)acct.cancelled_write_bytes); result = 0; out_unlock: mutex_unlock(&task->signal->cred_guard_mutex); return result; } static int proc_tid_io_accounting(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task) { return do_io_accounting(task, m, 0); } static int proc_tgid_io_accounting(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task) { return do_io_accounting(task, m, 1); } #endif /* CONFIG_TASK_IO_ACCOUNTING */ #ifdef CONFIG_USER_NS static int proc_id_map_open(struct inode *inode, struct file *file, const struct seq_operations *seq_ops) { struct user_namespace *ns = NULL; struct task_struct *task; struct seq_file *seq; int ret = -EINVAL; task = get_proc_task(inode); if (task) { rcu_read_lock(); ns = get_user_ns(task_cred_xxx(task, user_ns)); rcu_read_unlock(); put_task_struct(task); } if (!ns) goto err; ret = seq_open(file, seq_ops); if (ret) goto err_put_ns; seq = file->private_data; seq->private = ns; return 0; err_put_ns: put_user_ns(ns); err: return ret; } static int proc_id_map_release(struct inode *inode, struct file *file) { struct seq_file *seq = file->private_data; struct user_namespace *ns = seq->private; put_user_ns(ns); return seq_release(inode, file); } 
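/* * The uid_map, gid_map and projid_map files below share proc_id_map_open(), * differing only in the seq_operations used to render the mapping; * proc_id_map_release() drops the user namespace reference taken at open * time before releasing the seq_file. */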
static int proc_uid_map_open(struct inode *inode, struct file *file) { return proc_id_map_open(inode, file, &proc_uid_seq_operations); } static int proc_gid_map_open(struct inode *inode, struct file *file) { return proc_id_map_open(inode, file, &proc_gid_seq_operations); } static int proc_projid_map_open(struct inode *inode, struct file *file) { return proc_id_map_open(inode, file, &proc_projid_seq_operations); } static const struct file_operations proc_uid_map_operations = { .open = proc_uid_map_open, .write = proc_uid_map_write, .read = seq_read, .llseek = seq_lseek, .release = proc_id_map_release, }; static const struct file_operations proc_gid_map_operations = { .open = proc_gid_map_open, .write = proc_gid_map_write, .read = seq_read, .llseek = seq_lseek, .release = proc_id_map_release, }; static const struct file_operations proc_projid_map_operations = { .open = proc_projid_map_open, .write = proc_projid_map_write, .read = seq_read, .llseek = seq_lseek, .release = proc_id_map_release, }; static int proc_setgroups_open(struct inode *inode, struct file *file) { struct user_namespace *ns = NULL; struct task_struct *task; int ret; ret = -ESRCH; task = get_proc_task(inode); if (task) { rcu_read_lock(); ns = get_user_ns(task_cred_xxx(task, user_ns)); rcu_read_unlock(); put_task_struct(task); } if (!ns) goto err; if (file->f_mode & FMODE_WRITE) { ret = -EACCES; if (!ns_capable(ns, CAP_SYS_ADMIN)) goto err_put_ns; } ret = single_open(file, &proc_setgroups_show, ns); if (ret) goto err_put_ns; return 0; err_put_ns: put_user_ns(ns); err: return ret; } static int proc_setgroups_release(struct inode *inode, struct file *file) { struct seq_file *seq = file->private_data; struct user_namespace *ns = seq->private; int ret = single_release(inode, file); put_user_ns(ns); return ret; } static const struct file_operations proc_setgroups_operations = { .open = proc_setgroups_open, .write = proc_setgroups_write, .read = seq_read, .llseek = seq_lseek, .release = proc_setgroups_release, }; #endif /* CONFIG_USER_NS */ static int proc_pid_personality(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task) { int err = lock_trace(task); if (!err) { seq_printf(m, "%08x\n", task->personality); unlock_trace(task); } return err; } /* * Thread groups */ static const struct file_operations proc_task_operations; static const struct inode_operations proc_task_inode_operations; static const struct pid_entry tgid_base_stuff[] = { DIR("task", S_IRUGO|S_IXUGO, proc_task_inode_operations, proc_task_operations), DIR("fd", S_IRUSR|S_IXUSR, proc_fd_inode_operations, proc_fd_operations), DIR("map_files", S_IRUSR|S_IXUSR, proc_map_files_inode_operations, proc_map_files_operations), DIR("fdinfo", S_IRUSR|S_IXUSR, proc_fdinfo_inode_operations, proc_fdinfo_operations), DIR("ns", S_IRUSR|S_IXUGO, proc_ns_dir_inode_operations, proc_ns_dir_operations), #ifdef CONFIG_NET DIR("net", S_IRUGO|S_IXUGO, proc_net_inode_operations, proc_net_operations), #endif REG("environ", S_IRUSR, proc_environ_operations), ONE("auxv", S_IRUSR, proc_pid_auxv), ONE("status", S_IRUGO, proc_pid_status), ONE("personality", S_IRUSR, proc_pid_personality), ONE("limits", S_IRUGO, proc_pid_limits), #ifdef CONFIG_SCHED_DEBUG REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations), #endif #ifdef CONFIG_SCHED_AUTOGROUP REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations), #endif REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations), #ifdef CONFIG_HAVE_ARCH_TRACEHOOK ONE("syscall", S_IRUSR, proc_pid_syscall), 
#endif REG("cmdline", S_IRUGO, proc_pid_cmdline_ops), ONE("stat", S_IRUGO, proc_tgid_stat), ONE("statm", S_IRUGO, proc_pid_statm), REG("maps", S_IRUGO, proc_pid_maps_operations), #ifdef CONFIG_NUMA REG("numa_maps", S_IRUGO, proc_pid_numa_maps_operations), #endif REG("mem", S_IRUSR|S_IWUSR, proc_mem_operations), LNK("cwd", proc_cwd_link), LNK("root", proc_root_link), LNK("exe", proc_exe_link), REG("mounts", S_IRUGO, proc_mounts_operations), REG("mountinfo", S_IRUGO, proc_mountinfo_operations), REG("mountstats", S_IRUSR, proc_mountstats_operations), #ifdef CONFIG_PROC_PAGE_MONITOR REG("clear_refs", S_IWUSR, proc_clear_refs_operations), REG("smaps", S_IRUGO, proc_pid_smaps_operations), REG("pagemap", S_IRUSR, proc_pagemap_operations), #endif #ifdef CONFIG_SECURITY DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations), #endif #ifdef CONFIG_KALLSYMS ONE("wchan", S_IRUGO, proc_pid_wchan), #endif #ifdef CONFIG_STACKTRACE ONE("stack", S_IRUSR, proc_pid_stack), #endif #ifdef CONFIG_SCHED_INFO ONE("schedstat", S_IRUGO, proc_pid_schedstat), #endif #ifdef CONFIG_LATENCYTOP REG("latency", S_IRUGO, proc_lstats_operations), #endif #ifdef CONFIG_PROC_PID_CPUSET ONE("cpuset", S_IRUGO, proc_cpuset_show), #endif #ifdef CONFIG_CGROUPS ONE("cgroup", S_IRUGO, proc_cgroup_show), #endif ONE("oom_score", S_IRUGO, proc_oom_score), REG("oom_adj", S_IRUGO|S_IWUSR, proc_oom_adj_operations), REG("oom_score_adj", S_IRUGO|S_IWUSR, proc_oom_score_adj_operations), #ifdef CONFIG_AUDITSYSCALL REG("loginuid", S_IWUSR|S_IRUGO, proc_loginuid_operations), REG("sessionid", S_IRUGO, proc_sessionid_operations), #endif #ifdef CONFIG_FAULT_INJECTION REG("make-it-fail", S_IRUGO|S_IWUSR, proc_fault_inject_operations), #endif #ifdef CONFIG_ELF_CORE REG("coredump_filter", S_IRUGO|S_IWUSR, proc_coredump_filter_operations), #endif #ifdef CONFIG_TASK_IO_ACCOUNTING ONE("io", S_IRUSR, proc_tgid_io_accounting), #endif #ifdef CONFIG_HARDWALL ONE("hardwall", S_IRUGO, proc_pid_hardwall), #endif #ifdef CONFIG_USER_NS REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations), REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations), REG("projid_map", S_IRUGO|S_IWUSR, proc_projid_map_operations), REG("setgroups", S_IRUGO|S_IWUSR, proc_setgroups_operations), #endif #ifdef CONFIG_CHECKPOINT_RESTORE REG("timers", S_IRUGO, proc_timers_operations), #endif REG("timerslack_ns", S_IRUGO|S_IWUGO, proc_pid_set_timerslack_ns_operations), }; static int proc_tgid_base_readdir(struct file *file, struct dir_context *ctx) { return proc_pident_readdir(file, ctx, tgid_base_stuff, ARRAY_SIZE(tgid_base_stuff)); } static const struct file_operations proc_tgid_base_operations = { .read = generic_read_dir, .iterate = proc_tgid_base_readdir, .llseek = default_llseek, }; static struct dentry *proc_tgid_base_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) { return proc_pident_lookup(dir, dentry, tgid_base_stuff, ARRAY_SIZE(tgid_base_stuff)); } static const struct inode_operations proc_tgid_base_inode_operations = { .lookup = proc_tgid_base_lookup, .getattr = pid_getattr, .setattr = proc_setattr, .permission = proc_pid_permission, }; static void proc_flush_task_mnt(struct vfsmount *mnt, pid_t pid, pid_t tgid) { struct dentry *dentry, *leader, *dir; char buf[PROC_NUMBUF]; struct qstr name; name.name = buf; name.len = snprintf(buf, sizeof(buf), "%d", pid); /* no ->d_hash() rejects on procfs */ dentry = d_hash_and_lookup(mnt->mnt_root, &name); if (dentry) { d_invalidate(dentry); dput(dentry); } if (pid == tgid) return; 
name.name = buf; name.len = snprintf(buf, sizeof(buf), "%d", tgid); leader = d_hash_and_lookup(mnt->mnt_root, &name); if (!leader) goto out; name.name = "task"; name.len = strlen(name.name); dir = d_hash_and_lookup(leader, &name); if (!dir) goto out_put_leader; name.name = buf; name.len = snprintf(buf, sizeof(buf), "%d", pid); dentry = d_hash_and_lookup(dir, &name); if (dentry) { d_invalidate(dentry); dput(dentry); } dput(dir); out_put_leader: dput(leader); out: return; } /** * proc_flush_task - Remove dcache entries for @task from the /proc dcache. * @task: task that should be flushed. * * When flushing dentries from proc, one needs to flush them from global * proc (proc_mnt) and from all the namespaces' procs this task was seen * in. This call is supposed to do all of this work. * * Looks in the dcache for * /proc/@pid * /proc/@tgid/task/@pid * if either directory is present flushes it and all of its children * from the dcache. * * It is safe and reasonable to cache /proc entries for a task until * that task exits. After that they just clog up the dcache with * useless entries, possibly causing useful dcache entries to be * flushed instead. This routine is provided to flush those useless * dcache entries at process exit time. * * NOTE: This routine is just an optimization so it does not guarantee * that no dcache entries will exist at process exit time; it * just makes it very unlikely that any will persist. */ void proc_flush_task(struct task_struct *task) { int i; struct pid *pid, *tgid; struct upid *upid; pid = task_pid(task); tgid = task_tgid(task); for (i = 0; i <= pid->level; i++) { upid = &pid->numbers[i]; proc_flush_task_mnt(upid->ns->proc_mnt, upid->nr, tgid->numbers[i].nr); } } static int proc_pid_instantiate(struct inode *dir, struct dentry * dentry, struct task_struct *task, const void *ptr) { struct inode *inode; inode = proc_pid_make_inode(dir->i_sb, task); if (!inode) goto out; inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO; inode->i_op = &proc_tgid_base_inode_operations; inode->i_fop = &proc_tgid_base_operations; inode->i_flags|=S_IMMUTABLE; set_nlink(inode, 2 + pid_entry_count_dirs(tgid_base_stuff, ARRAY_SIZE(tgid_base_stuff))); d_set_d_op(dentry, &pid_dentry_operations); d_add(dentry, inode); /* Close the race of the process dying before we return the dentry */ if (pid_revalidate(dentry, 0)) return 0; out: return -ENOENT; } struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, unsigned int flags) { int result = -ENOENT; struct task_struct *task; unsigned tgid; struct pid_namespace *ns; tgid = name_to_int(&dentry->d_name); if (tgid == ~0U) goto out; ns = dentry->d_sb->s_fs_info; rcu_read_lock(); task = find_task_by_pid_ns(tgid, ns); if (task) get_task_struct(task); rcu_read_unlock(); if (!task) goto out; result = proc_pid_instantiate(dir, dentry, task, NULL); put_task_struct(task); out: return ERR_PTR(result); } /* * Find the first task with tgid >= tgid * */ struct tgid_iter { unsigned int tgid; struct task_struct *task; }; static struct tgid_iter next_tgid(struct pid_namespace *ns, struct tgid_iter iter) { struct pid *pid; if (iter.task) put_task_struct(iter.task); rcu_read_lock(); retry: iter.task = NULL; pid = find_ge_pid(iter.tgid, ns); if (pid) { iter.tgid = pid_nr_ns(pid, ns); iter.task = pid_task(pid, PIDTYPE_PID); /* What we want to know is if the pid we have found is the * pid of a thread_group_leader. 
Testing for task * being a thread_group_leader is the obvious thing * todo but there is a window when it fails, due to * the pid transfer logic in de_thread. * * So we perform the straight forward test of seeing * if the pid we have found is the pid of a thread * group leader, and don't worry if the task we have * found doesn't happen to be a thread group leader. * As we don't care in the case of readdir. */ if (!iter.task || !has_group_leader_pid(iter.task)) { iter.tgid += 1; goto retry; } get_task_struct(iter.task); } rcu_read_unlock(); return iter; } #define TGID_OFFSET (FIRST_PROCESS_ENTRY + 2) /* for the /proc/ directory itself, after non-process stuff has been done */ int proc_pid_readdir(struct file *file, struct dir_context *ctx) { struct tgid_iter iter; struct pid_namespace *ns = file_inode(file)->i_sb->s_fs_info; loff_t pos = ctx->pos; if (pos >= PID_MAX_LIMIT + TGID_OFFSET) return 0; if (pos == TGID_OFFSET - 2) { struct inode *inode = d_inode(ns->proc_self); if (!dir_emit(ctx, "self", 4, inode->i_ino, DT_LNK)) return 0; ctx->pos = pos = pos + 1; } if (pos == TGID_OFFSET - 1) { struct inode *inode = d_inode(ns->proc_thread_self); if (!dir_emit(ctx, "thread-self", 11, inode->i_ino, DT_LNK)) return 0; ctx->pos = pos = pos + 1; } iter.tgid = pos - TGID_OFFSET; iter.task = NULL; for (iter = next_tgid(ns, iter); iter.task; iter.tgid += 1, iter = next_tgid(ns, iter)) { char name[PROC_NUMBUF]; int len; if (!has_pid_permissions(ns, iter.task, 2)) continue; len = snprintf(name, sizeof(name), "%d", iter.tgid); ctx->pos = iter.tgid + TGID_OFFSET; if (!proc_fill_cache(file, ctx, name, len, proc_pid_instantiate, iter.task, NULL)) { put_task_struct(iter.task); return 0; } } ctx->pos = PID_MAX_LIMIT + TGID_OFFSET; return 0; } /* * Tasks */ static const struct pid_entry tid_base_stuff[] = { DIR("fd", S_IRUSR|S_IXUSR, proc_fd_inode_operations, proc_fd_operations), DIR("fdinfo", S_IRUSR|S_IXUSR, proc_fdinfo_inode_operations, proc_fdinfo_operations), DIR("ns", S_IRUSR|S_IXUGO, proc_ns_dir_inode_operations, proc_ns_dir_operations), #ifdef CONFIG_NET DIR("net", S_IRUGO|S_IXUGO, proc_net_inode_operations, proc_net_operations), #endif REG("environ", S_IRUSR, proc_environ_operations), ONE("auxv", S_IRUSR, proc_pid_auxv), ONE("status", S_IRUGO, proc_pid_status), ONE("personality", S_IRUSR, proc_pid_personality), ONE("limits", S_IRUGO, proc_pid_limits), #ifdef CONFIG_SCHED_DEBUG REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations), #endif REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations), #ifdef CONFIG_HAVE_ARCH_TRACEHOOK ONE("syscall", S_IRUSR, proc_pid_syscall), #endif REG("cmdline", S_IRUGO, proc_pid_cmdline_ops), ONE("stat", S_IRUGO, proc_tid_stat), ONE("statm", S_IRUGO, proc_pid_statm), REG("maps", S_IRUGO, proc_tid_maps_operations), #ifdef CONFIG_PROC_CHILDREN REG("children", S_IRUGO, proc_tid_children_operations), #endif #ifdef CONFIG_NUMA REG("numa_maps", S_IRUGO, proc_tid_numa_maps_operations), #endif REG("mem", S_IRUSR|S_IWUSR, proc_mem_operations), LNK("cwd", proc_cwd_link), LNK("root", proc_root_link), LNK("exe", proc_exe_link), REG("mounts", S_IRUGO, proc_mounts_operations), REG("mountinfo", S_IRUGO, proc_mountinfo_operations), #ifdef CONFIG_PROC_PAGE_MONITOR REG("clear_refs", S_IWUSR, proc_clear_refs_operations), REG("smaps", S_IRUGO, proc_tid_smaps_operations), REG("pagemap", S_IRUSR, proc_pagemap_operations), #endif #ifdef CONFIG_SECURITY DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations), #endif #ifdef CONFIG_KALLSYMS ONE("wchan", S_IRUGO, 
proc_pid_wchan), #endif #ifdef CONFIG_STACKTRACE ONE("stack", S_IRUSR, proc_pid_stack), #endif #ifdef CONFIG_SCHED_INFO ONE("schedstat", S_IRUGO, proc_pid_schedstat), #endif #ifdef CONFIG_LATENCYTOP REG("latency", S_IRUGO, proc_lstats_operations), #endif #ifdef CONFIG_PROC_PID_CPUSET ONE("cpuset", S_IRUGO, proc_cpuset_show), #endif #ifdef CONFIG_CGROUPS ONE("cgroup", S_IRUGO, proc_cgroup_show), #endif ONE("oom_score", S_IRUGO, proc_oom_score), REG("oom_adj", S_IRUGO|S_IWUSR, proc_oom_adj_operations), REG("oom_score_adj", S_IRUGO|S_IWUSR, proc_oom_score_adj_operations), #ifdef CONFIG_AUDITSYSCALL REG("loginuid", S_IWUSR|S_IRUGO, proc_loginuid_operations), REG("sessionid", S_IRUGO, proc_sessionid_operations), #endif #ifdef CONFIG_FAULT_INJECTION REG("make-it-fail", S_IRUGO|S_IWUSR, proc_fault_inject_operations), #endif #ifdef CONFIG_TASK_IO_ACCOUNTING ONE("io", S_IRUSR, proc_tid_io_accounting), #endif #ifdef CONFIG_HARDWALL ONE("hardwall", S_IRUGO, proc_pid_hardwall), #endif #ifdef CONFIG_USER_NS REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations), REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations), REG("projid_map", S_IRUGO|S_IWUSR, proc_projid_map_operations), REG("setgroups", S_IRUGO|S_IWUSR, proc_setgroups_operations), #endif }; static int proc_tid_base_readdir(struct file *file, struct dir_context *ctx) { return proc_pident_readdir(file, ctx, tid_base_stuff, ARRAY_SIZE(tid_base_stuff)); } static struct dentry *proc_tid_base_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) { return proc_pident_lookup(dir, dentry, tid_base_stuff, ARRAY_SIZE(tid_base_stuff)); } static const struct file_operations proc_tid_base_operations = { .read = generic_read_dir, .iterate = proc_tid_base_readdir, .llseek = default_llseek, }; static const struct inode_operations proc_tid_base_inode_operations = { .lookup = proc_tid_base_lookup, .getattr = pid_getattr, .setattr = proc_setattr, }; static int proc_task_instantiate(struct inode *dir, struct dentry *dentry, struct task_struct *task, const void *ptr) { struct inode *inode; inode = proc_pid_make_inode(dir->i_sb, task); if (!inode) goto out; inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO; inode->i_op = &proc_tid_base_inode_operations; inode->i_fop = &proc_tid_base_operations; inode->i_flags|=S_IMMUTABLE; set_nlink(inode, 2 + pid_entry_count_dirs(tid_base_stuff, ARRAY_SIZE(tid_base_stuff))); d_set_d_op(dentry, &pid_dentry_operations); d_add(dentry, inode); /* Close the race of the process dying before we return the dentry */ if (pid_revalidate(dentry, 0)) return 0; out: return -ENOENT; } static struct dentry *proc_task_lookup(struct inode *dir, struct dentry * dentry, unsigned int flags) { int result = -ENOENT; struct task_struct *task; struct task_struct *leader = get_proc_task(dir); unsigned tid; struct pid_namespace *ns; if (!leader) goto out_no_task; tid = name_to_int(&dentry->d_name); if (tid == ~0U) goto out; ns = dentry->d_sb->s_fs_info; rcu_read_lock(); task = find_task_by_pid_ns(tid, ns); if (task) get_task_struct(task); rcu_read_unlock(); if (!task) goto out; if (!same_thread_group(leader, task)) goto out_drop_task; result = proc_task_instantiate(dir, dentry, task, NULL); out_drop_task: put_task_struct(task); out: put_task_struct(leader); out_no_task: return ERR_PTR(result); } /* * Find the first tid of a thread group to return to user space. * * Usually this is just the thread group leader, but if the users * buffer was too small or there was a seek into the middle of the * directory we have more work todo. 
* * In the case of a short read we start with find_task_by_pid. * * In the case of a seek we start with the leader and walk nr * threads past it. */ static struct task_struct *first_tid(struct pid *pid, int tid, loff_t f_pos, struct pid_namespace *ns) { struct task_struct *pos, *task; unsigned long nr = f_pos; if (nr != f_pos) /* 32bit overflow? */ return NULL; rcu_read_lock(); task = pid_task(pid, PIDTYPE_PID); if (!task) goto fail; /* Attempt to start with the tid of a thread */ if (tid && nr) { pos = find_task_by_pid_ns(tid, ns); if (pos && same_thread_group(pos, task)) goto found; } /* If nr exceeds the number of threads there is nothing todo */ if (nr >= get_nr_threads(task)) goto fail; /* If we haven't found our starting place yet start * with the leader and walk nr threads forward. */ pos = task = task->group_leader; do { if (!nr--) goto found; } while_each_thread(task, pos); fail: pos = NULL; goto out; found: get_task_struct(pos); out: rcu_read_unlock(); return pos; } /* * Find the next thread in the thread list. * Return NULL if there is an error or no next thread. * * The reference to the input task_struct is released. */ static struct task_struct *next_tid(struct task_struct *start) { struct task_struct *pos = NULL; rcu_read_lock(); if (pid_alive(start)) { pos = next_thread(start); if (thread_group_leader(pos)) pos = NULL; else get_task_struct(pos); } rcu_read_unlock(); put_task_struct(start); return pos; } /* for the /proc/TGID/task/ directories */ static int proc_task_readdir(struct file *file, struct dir_context *ctx) { struct inode *inode = file_inode(file); struct task_struct *task; struct pid_namespace *ns; int tid; if (proc_inode_is_dead(inode)) return -ENOENT; if (!dir_emit_dots(file, ctx)) return 0; /* f_version caches the tgid value that the last readdir call couldn't * return. lseek aka telldir automagically resets f_version to 0. */ ns = inode->i_sb->s_fs_info; tid = (int)file->f_version; file->f_version = 0; for (task = first_tid(proc_pid(inode), tid, ctx->pos - 2, ns); task; task = next_tid(task), ctx->pos++) { char name[PROC_NUMBUF]; int len; tid = task_pid_nr_ns(task, ns); len = snprintf(name, sizeof(name), "%d", tid); if (!proc_fill_cache(file, ctx, name, len, proc_task_instantiate, task, NULL)) { /* returning this tgid failed, save it as the first * pid for the next readir call */ file->f_version = (u64)tid; put_task_struct(task); break; } } return 0; } static int proc_task_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) { struct inode *inode = d_inode(dentry); struct task_struct *p = get_proc_task(inode); generic_fillattr(inode, stat); if (p) { stat->nlink += get_nr_threads(p); put_task_struct(p); } return 0; } static const struct inode_operations proc_task_inode_operations = { .lookup = proc_task_lookup, .getattr = proc_task_getattr, .setattr = proc_setattr, .permission = proc_pid_permission, }; static const struct file_operations proc_task_operations = { .read = generic_read_dir, .iterate = proc_task_readdir, .llseek = default_llseek, };
./CrossVul/dataset_final_sorted/CWE-362/c/good_5339_0
crossvul-cpp_data_bad_3460_0
/** * eCryptfs: Linux filesystem encryption layer * * Copyright (C) 1997-2003 Erez Zadok * Copyright (C) 2001-2003 Stony Brook University * Copyright (C) 2004-2007 International Business Machines Corp. * Author(s): Michael A. Halcrow <mahalcro@us.ibm.com> * Michael C. Thompson <mcthomps@us.ibm.com> * Tyler Hicks <tyhicks@ou.edu> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA * 02111-1307, USA. */ #include <linux/dcache.h> #include <linux/file.h> #include <linux/module.h> #include <linux/namei.h> #include <linux/skbuff.h> #include <linux/crypto.h> #include <linux/mount.h> #include <linux/pagemap.h> #include <linux/key.h> #include <linux/parser.h> #include <linux/fs_stack.h> #include <linux/slab.h> #include <linux/magic.h> #include "ecryptfs_kernel.h" /** * Module parameter that defines the ecryptfs_verbosity level. */ int ecryptfs_verbosity = 0; module_param(ecryptfs_verbosity, int, 0); MODULE_PARM_DESC(ecryptfs_verbosity, "Initial verbosity level (0 or 1; defaults to " "0, which is Quiet)"); /** * Module parameter that defines the number of message buffer elements */ unsigned int ecryptfs_message_buf_len = ECRYPTFS_DEFAULT_MSG_CTX_ELEMS; module_param(ecryptfs_message_buf_len, uint, 0); MODULE_PARM_DESC(ecryptfs_message_buf_len, "Number of message buffer elements"); /** * Module parameter that defines the maximum guaranteed amount of time to wait * for a response from ecryptfsd. The actual sleep time will be, more than * likely, a small amount greater than this specified value, but only less if * the message successfully arrives. */ signed long ecryptfs_message_wait_timeout = ECRYPTFS_MAX_MSG_CTX_TTL / HZ; module_param(ecryptfs_message_wait_timeout, long, 0); MODULE_PARM_DESC(ecryptfs_message_wait_timeout, "Maximum number of seconds that an operation will " "sleep while waiting for a message response from " "userspace"); /** * Module parameter that is an estimate of the maximum number of users * that will be concurrently using eCryptfs. Set this to the right * value to balance performance and memory use. */ unsigned int ecryptfs_number_of_users = ECRYPTFS_DEFAULT_NUM_USERS; module_param(ecryptfs_number_of_users, uint, 0); MODULE_PARM_DESC(ecryptfs_number_of_users, "An estimate of the number of " "concurrent users of eCryptfs"); void __ecryptfs_printk(const char *fmt, ...) { va_list args; va_start(args, fmt); if (fmt[1] == '7') { /* KERN_DEBUG */ if (ecryptfs_verbosity >= 1) vprintk(fmt, args); } else vprintk(fmt, args); va_end(args); } /** * ecryptfs_init_lower_file * @ecryptfs_dentry: Fully initialized eCryptfs dentry object, with * the lower dentry and the lower mount set * * eCryptfs only ever keeps a single open file for every lower * inode. All I/O operations to the lower inode occur through that * file. 
When the first eCryptfs dentry that interposes with the first * lower dentry for that inode is created, this function creates the * lower file struct and associates it with the eCryptfs * inode. When all eCryptfs files associated with the inode are released, the * file is closed. * * The lower file will be opened with read/write permissions, if * possible. Otherwise, it is opened read-only. * * This function does nothing if a lower file is already * associated with the eCryptfs inode. * * Returns zero on success; non-zero otherwise */ static int ecryptfs_init_lower_file(struct dentry *dentry, struct file **lower_file) { const struct cred *cred = current_cred(); struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry); struct vfsmount *lower_mnt = ecryptfs_dentry_to_lower_mnt(dentry); int rc; rc = ecryptfs_privileged_open(lower_file, lower_dentry, lower_mnt, cred); if (rc) { printk(KERN_ERR "Error opening lower file " "for lower_dentry [0x%p] and lower_mnt [0x%p]; " "rc = [%d]\n", lower_dentry, lower_mnt, rc); (*lower_file) = NULL; } return rc; } int ecryptfs_get_lower_file(struct dentry *dentry, struct inode *inode) { struct ecryptfs_inode_info *inode_info; int count, rc = 0; inode_info = ecryptfs_inode_to_private(inode); mutex_lock(&inode_info->lower_file_mutex); count = atomic_inc_return(&inode_info->lower_file_count); if (WARN_ON_ONCE(count < 1)) rc = -EINVAL; else if (count == 1) { rc = ecryptfs_init_lower_file(dentry, &inode_info->lower_file); if (rc) atomic_set(&inode_info->lower_file_count, 0); } mutex_unlock(&inode_info->lower_file_mutex); return rc; } void ecryptfs_put_lower_file(struct inode *inode) { struct ecryptfs_inode_info *inode_info; inode_info = ecryptfs_inode_to_private(inode); if (atomic_dec_and_mutex_lock(&inode_info->lower_file_count, &inode_info->lower_file_mutex)) { fput(inode_info->lower_file); inode_info->lower_file = NULL; mutex_unlock(&inode_info->lower_file_mutex); } } enum { ecryptfs_opt_sig, ecryptfs_opt_ecryptfs_sig, ecryptfs_opt_cipher, ecryptfs_opt_ecryptfs_cipher, ecryptfs_opt_ecryptfs_key_bytes, ecryptfs_opt_passthrough, ecryptfs_opt_xattr_metadata, ecryptfs_opt_encrypted_view, ecryptfs_opt_fnek_sig, ecryptfs_opt_fn_cipher, ecryptfs_opt_fn_cipher_key_bytes, ecryptfs_opt_unlink_sigs, ecryptfs_opt_mount_auth_tok_only, ecryptfs_opt_err }; static const match_table_t tokens = { {ecryptfs_opt_sig, "sig=%s"}, {ecryptfs_opt_ecryptfs_sig, "ecryptfs_sig=%s"}, {ecryptfs_opt_cipher, "cipher=%s"}, {ecryptfs_opt_ecryptfs_cipher, "ecryptfs_cipher=%s"}, {ecryptfs_opt_ecryptfs_key_bytes, "ecryptfs_key_bytes=%u"}, {ecryptfs_opt_passthrough, "ecryptfs_passthrough"}, {ecryptfs_opt_xattr_metadata, "ecryptfs_xattr_metadata"}, {ecryptfs_opt_encrypted_view, "ecryptfs_encrypted_view"}, {ecryptfs_opt_fnek_sig, "ecryptfs_fnek_sig=%s"}, {ecryptfs_opt_fn_cipher, "ecryptfs_fn_cipher=%s"}, {ecryptfs_opt_fn_cipher_key_bytes, "ecryptfs_fn_key_bytes=%u"}, {ecryptfs_opt_unlink_sigs, "ecryptfs_unlink_sigs"}, {ecryptfs_opt_mount_auth_tok_only, "ecryptfs_mount_auth_tok_only"}, {ecryptfs_opt_err, NULL} }; static int ecryptfs_init_global_auth_toks( struct ecryptfs_mount_crypt_stat *mount_crypt_stat) { struct ecryptfs_global_auth_tok *global_auth_tok; struct ecryptfs_auth_tok *auth_tok; int rc = 0; list_for_each_entry(global_auth_tok, &mount_crypt_stat->global_auth_tok_list, mount_crypt_stat_list) { rc = ecryptfs_keyring_auth_tok_for_sig( &global_auth_tok->global_auth_tok_key, &auth_tok, global_auth_tok->sig); if (rc) { printk(KERN_ERR "Could not find valid key in user " "session keyring 
for sig specified in mount " "option: [%s]\n", global_auth_tok->sig); global_auth_tok->flags |= ECRYPTFS_AUTH_TOK_INVALID; goto out; } else { global_auth_tok->flags &= ~ECRYPTFS_AUTH_TOK_INVALID; up_write(&(global_auth_tok->global_auth_tok_key)->sem); } } out: return rc; } static void ecryptfs_init_mount_crypt_stat( struct ecryptfs_mount_crypt_stat *mount_crypt_stat) { memset((void *)mount_crypt_stat, 0, sizeof(struct ecryptfs_mount_crypt_stat)); INIT_LIST_HEAD(&mount_crypt_stat->global_auth_tok_list); mutex_init(&mount_crypt_stat->global_auth_tok_list_mutex); mount_crypt_stat->flags |= ECRYPTFS_MOUNT_CRYPT_STAT_INITIALIZED; } /** * ecryptfs_parse_options * @sb: The ecryptfs super block * @options: The options passed to the kernel * * Parse mount options: * debug=N - ecryptfs_verbosity level for debug output * sig=XXX - description(signature) of the key to use * * Returns the dentry object of the lower-level (lower/interposed) * directory; We want to mount our stackable file system on top of * that lower directory. * * The signature of the key to use must be the description of a key * already in the keyring. Mounting will fail if the key can not be * found. * * Returns zero on success; non-zero on error */ static int ecryptfs_parse_options(struct ecryptfs_sb_info *sbi, char *options) { char *p; int rc = 0; int sig_set = 0; int cipher_name_set = 0; int fn_cipher_name_set = 0; int cipher_key_bytes; int cipher_key_bytes_set = 0; int fn_cipher_key_bytes; int fn_cipher_key_bytes_set = 0; struct ecryptfs_mount_crypt_stat *mount_crypt_stat = &sbi->mount_crypt_stat; substring_t args[MAX_OPT_ARGS]; int token; char *sig_src; char *cipher_name_dst; char *cipher_name_src; char *fn_cipher_name_dst; char *fn_cipher_name_src; char *fnek_dst; char *fnek_src; char *cipher_key_bytes_src; char *fn_cipher_key_bytes_src; if (!options) { rc = -EINVAL; goto out; } ecryptfs_init_mount_crypt_stat(mount_crypt_stat); while ((p = strsep(&options, ",")) != NULL) { if (!*p) continue; token = match_token(p, tokens, args); switch (token) { case ecryptfs_opt_sig: case ecryptfs_opt_ecryptfs_sig: sig_src = args[0].from; rc = ecryptfs_add_global_auth_tok(mount_crypt_stat, sig_src, 0); if (rc) { printk(KERN_ERR "Error attempting to register " "global sig; rc = [%d]\n", rc); goto out; } sig_set = 1; break; case ecryptfs_opt_cipher: case ecryptfs_opt_ecryptfs_cipher: cipher_name_src = args[0].from; cipher_name_dst = mount_crypt_stat-> global_default_cipher_name; strncpy(cipher_name_dst, cipher_name_src, ECRYPTFS_MAX_CIPHER_NAME_SIZE); cipher_name_dst[ECRYPTFS_MAX_CIPHER_NAME_SIZE] = '\0'; cipher_name_set = 1; break; case ecryptfs_opt_ecryptfs_key_bytes: cipher_key_bytes_src = args[0].from; cipher_key_bytes = (int)simple_strtol(cipher_key_bytes_src, &cipher_key_bytes_src, 0); mount_crypt_stat->global_default_cipher_key_size = cipher_key_bytes; cipher_key_bytes_set = 1; break; case ecryptfs_opt_passthrough: mount_crypt_stat->flags |= ECRYPTFS_PLAINTEXT_PASSTHROUGH_ENABLED; break; case ecryptfs_opt_xattr_metadata: mount_crypt_stat->flags |= ECRYPTFS_XATTR_METADATA_ENABLED; break; case ecryptfs_opt_encrypted_view: mount_crypt_stat->flags |= ECRYPTFS_XATTR_METADATA_ENABLED; mount_crypt_stat->flags |= ECRYPTFS_ENCRYPTED_VIEW_ENABLED; break; case ecryptfs_opt_fnek_sig: fnek_src = args[0].from; fnek_dst = mount_crypt_stat->global_default_fnek_sig; strncpy(fnek_dst, fnek_src, ECRYPTFS_SIG_SIZE_HEX); mount_crypt_stat->global_default_fnek_sig[ ECRYPTFS_SIG_SIZE_HEX] = '\0'; rc = ecryptfs_add_global_auth_tok( mount_crypt_stat, 
mount_crypt_stat->global_default_fnek_sig, ECRYPTFS_AUTH_TOK_FNEK); if (rc) { printk(KERN_ERR "Error attempting to register " "global fnek sig [%s]; rc = [%d]\n", mount_crypt_stat->global_default_fnek_sig, rc); goto out; } mount_crypt_stat->flags |= (ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES | ECRYPTFS_GLOBAL_ENCFN_USE_MOUNT_FNEK); break; case ecryptfs_opt_fn_cipher: fn_cipher_name_src = args[0].from; fn_cipher_name_dst = mount_crypt_stat->global_default_fn_cipher_name; strncpy(fn_cipher_name_dst, fn_cipher_name_src, ECRYPTFS_MAX_CIPHER_NAME_SIZE); mount_crypt_stat->global_default_fn_cipher_name[ ECRYPTFS_MAX_CIPHER_NAME_SIZE] = '\0'; fn_cipher_name_set = 1; break; case ecryptfs_opt_fn_cipher_key_bytes: fn_cipher_key_bytes_src = args[0].from; fn_cipher_key_bytes = (int)simple_strtol(fn_cipher_key_bytes_src, &fn_cipher_key_bytes_src, 0); mount_crypt_stat->global_default_fn_cipher_key_bytes = fn_cipher_key_bytes; fn_cipher_key_bytes_set = 1; break; case ecryptfs_opt_unlink_sigs: mount_crypt_stat->flags |= ECRYPTFS_UNLINK_SIGS; break; case ecryptfs_opt_mount_auth_tok_only: mount_crypt_stat->flags |= ECRYPTFS_GLOBAL_MOUNT_AUTH_TOK_ONLY; break; case ecryptfs_opt_err: default: printk(KERN_WARNING "%s: eCryptfs: unrecognized option [%s]\n", __func__, p); } } if (!sig_set) { rc = -EINVAL; ecryptfs_printk(KERN_ERR, "You must supply at least one valid " "auth tok signature as a mount " "parameter; see the eCryptfs README\n"); goto out; } if (!cipher_name_set) { int cipher_name_len = strlen(ECRYPTFS_DEFAULT_CIPHER); BUG_ON(cipher_name_len >= ECRYPTFS_MAX_CIPHER_NAME_SIZE); strcpy(mount_crypt_stat->global_default_cipher_name, ECRYPTFS_DEFAULT_CIPHER); } if ((mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES) && !fn_cipher_name_set) strcpy(mount_crypt_stat->global_default_fn_cipher_name, mount_crypt_stat->global_default_cipher_name); if (!cipher_key_bytes_set) mount_crypt_stat->global_default_cipher_key_size = 0; if ((mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES) && !fn_cipher_key_bytes_set) mount_crypt_stat->global_default_fn_cipher_key_bytes = mount_crypt_stat->global_default_cipher_key_size; mutex_lock(&key_tfm_list_mutex); if (!ecryptfs_tfm_exists(mount_crypt_stat->global_default_cipher_name, NULL)) { rc = ecryptfs_add_new_key_tfm( NULL, mount_crypt_stat->global_default_cipher_name, mount_crypt_stat->global_default_cipher_key_size); if (rc) { printk(KERN_ERR "Error attempting to initialize " "cipher with name = [%s] and key size = [%td]; " "rc = [%d]\n", mount_crypt_stat->global_default_cipher_name, mount_crypt_stat->global_default_cipher_key_size, rc); rc = -EINVAL; mutex_unlock(&key_tfm_list_mutex); goto out; } } if ((mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES) && !ecryptfs_tfm_exists( mount_crypt_stat->global_default_fn_cipher_name, NULL)) { rc = ecryptfs_add_new_key_tfm( NULL, mount_crypt_stat->global_default_fn_cipher_name, mount_crypt_stat->global_default_fn_cipher_key_bytes); if (rc) { printk(KERN_ERR "Error attempting to initialize " "cipher with name = [%s] and key size = [%td]; " "rc = [%d]\n", mount_crypt_stat->global_default_fn_cipher_name, mount_crypt_stat->global_default_fn_cipher_key_bytes, rc); rc = -EINVAL; mutex_unlock(&key_tfm_list_mutex); goto out; } } mutex_unlock(&key_tfm_list_mutex); rc = ecryptfs_init_global_auth_toks(mount_crypt_stat); if (rc) printk(KERN_WARNING "One or more global auth toks could not " "properly register; rc = [%d]\n", rc); out: return rc; } struct kmem_cache *ecryptfs_sb_info_cache; static struct file_system_type 
ecryptfs_fs_type; /** * ecryptfs_get_sb * @fs_type * @flags * @dev_name: The path to mount over * @raw_data: The options passed into the kernel */ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *raw_data) { struct super_block *s; struct ecryptfs_sb_info *sbi; struct ecryptfs_dentry_info *root_info; const char *err = "Getting sb failed"; struct inode *inode; struct path path; int rc; sbi = kmem_cache_zalloc(ecryptfs_sb_info_cache, GFP_KERNEL); if (!sbi) { rc = -ENOMEM; goto out; } rc = ecryptfs_parse_options(sbi, raw_data); if (rc) { err = "Error parsing options"; goto out; } s = sget(fs_type, NULL, set_anon_super, NULL); if (IS_ERR(s)) { rc = PTR_ERR(s); goto out; } s->s_flags = flags; rc = bdi_setup_and_register(&sbi->bdi, "ecryptfs", BDI_CAP_MAP_COPY); if (rc) goto out1; ecryptfs_set_superblock_private(s, sbi); s->s_bdi = &sbi->bdi; /* ->kill_sb() will take care of sbi after that point */ sbi = NULL; s->s_op = &ecryptfs_sops; s->s_d_op = &ecryptfs_dops; err = "Reading sb failed"; rc = kern_path(dev_name, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &path); if (rc) { ecryptfs_printk(KERN_WARNING, "kern_path() failed\n"); goto out1; } if (path.dentry->d_sb->s_type == &ecryptfs_fs_type) { rc = -EINVAL; printk(KERN_ERR "Mount on filesystem of type " "eCryptfs explicitly disallowed due to " "known incompatibilities\n"); goto out_free; } ecryptfs_set_superblock_lower(s, path.dentry->d_sb); s->s_maxbytes = path.dentry->d_sb->s_maxbytes; s->s_blocksize = path.dentry->d_sb->s_blocksize; s->s_magic = ECRYPTFS_SUPER_MAGIC; inode = ecryptfs_get_inode(path.dentry->d_inode, s); rc = PTR_ERR(inode); if (IS_ERR(inode)) goto out_free; s->s_root = d_alloc_root(inode); if (!s->s_root) { iput(inode); rc = -ENOMEM; goto out_free; } rc = -ENOMEM; root_info = kmem_cache_zalloc(ecryptfs_dentry_info_cache, GFP_KERNEL); if (!root_info) goto out_free; /* ->kill_sb() will take care of root_info */ ecryptfs_set_dentry_private(s->s_root, root_info); ecryptfs_set_dentry_lower(s->s_root, path.dentry); ecryptfs_set_dentry_lower_mnt(s->s_root, path.mnt); s->s_flags |= MS_ACTIVE; return dget(s->s_root); out_free: path_put(&path); out1: deactivate_locked_super(s); out: if (sbi) { ecryptfs_destroy_mount_crypt_stat(&sbi->mount_crypt_stat); kmem_cache_free(ecryptfs_sb_info_cache, sbi); } printk(KERN_ERR "%s; rc = [%d]\n", err, rc); return ERR_PTR(rc); } /** * ecryptfs_kill_block_super * @sb: The ecryptfs super block * * Used to bring the superblock down and free the private data. 
*/ static void ecryptfs_kill_block_super(struct super_block *sb) { struct ecryptfs_sb_info *sb_info = ecryptfs_superblock_to_private(sb); kill_anon_super(sb); if (!sb_info) return; ecryptfs_destroy_mount_crypt_stat(&sb_info->mount_crypt_stat); bdi_destroy(&sb_info->bdi); kmem_cache_free(ecryptfs_sb_info_cache, sb_info); } static struct file_system_type ecryptfs_fs_type = { .owner = THIS_MODULE, .name = "ecryptfs", .mount = ecryptfs_mount, .kill_sb = ecryptfs_kill_block_super, .fs_flags = 0 }; /** * inode_info_init_once * * Initializes the ecryptfs_inode_info_cache when it is created */ static void inode_info_init_once(void *vptr) { struct ecryptfs_inode_info *ei = (struct ecryptfs_inode_info *)vptr; inode_init_once(&ei->vfs_inode); } static struct ecryptfs_cache_info { struct kmem_cache **cache; const char *name; size_t size; void (*ctor)(void *obj); } ecryptfs_cache_infos[] = { { .cache = &ecryptfs_auth_tok_list_item_cache, .name = "ecryptfs_auth_tok_list_item", .size = sizeof(struct ecryptfs_auth_tok_list_item), }, { .cache = &ecryptfs_file_info_cache, .name = "ecryptfs_file_cache", .size = sizeof(struct ecryptfs_file_info), }, { .cache = &ecryptfs_dentry_info_cache, .name = "ecryptfs_dentry_info_cache", .size = sizeof(struct ecryptfs_dentry_info), }, { .cache = &ecryptfs_inode_info_cache, .name = "ecryptfs_inode_cache", .size = sizeof(struct ecryptfs_inode_info), .ctor = inode_info_init_once, }, { .cache = &ecryptfs_sb_info_cache, .name = "ecryptfs_sb_cache", .size = sizeof(struct ecryptfs_sb_info), }, { .cache = &ecryptfs_header_cache, .name = "ecryptfs_headers", .size = PAGE_CACHE_SIZE, }, { .cache = &ecryptfs_xattr_cache, .name = "ecryptfs_xattr_cache", .size = PAGE_CACHE_SIZE, }, { .cache = &ecryptfs_key_record_cache, .name = "ecryptfs_key_record_cache", .size = sizeof(struct ecryptfs_key_record), }, { .cache = &ecryptfs_key_sig_cache, .name = "ecryptfs_key_sig_cache", .size = sizeof(struct ecryptfs_key_sig), }, { .cache = &ecryptfs_global_auth_tok_cache, .name = "ecryptfs_global_auth_tok_cache", .size = sizeof(struct ecryptfs_global_auth_tok), }, { .cache = &ecryptfs_key_tfm_cache, .name = "ecryptfs_key_tfm_cache", .size = sizeof(struct ecryptfs_key_tfm), }, { .cache = &ecryptfs_open_req_cache, .name = "ecryptfs_open_req_cache", .size = sizeof(struct ecryptfs_open_req), }, }; static void ecryptfs_free_kmem_caches(void) { int i; for (i = 0; i < ARRAY_SIZE(ecryptfs_cache_infos); i++) { struct ecryptfs_cache_info *info; info = &ecryptfs_cache_infos[i]; if (*(info->cache)) kmem_cache_destroy(*(info->cache)); } } /** * ecryptfs_init_kmem_caches * * Returns zero on success; non-zero otherwise */ static int ecryptfs_init_kmem_caches(void) { int i; for (i = 0; i < ARRAY_SIZE(ecryptfs_cache_infos); i++) { struct ecryptfs_cache_info *info; info = &ecryptfs_cache_infos[i]; *(info->cache) = kmem_cache_create(info->name, info->size, 0, SLAB_HWCACHE_ALIGN, info->ctor); if (!*(info->cache)) { ecryptfs_free_kmem_caches(); ecryptfs_printk(KERN_WARNING, "%s: " "kmem_cache_create failed\n", info->name); return -ENOMEM; } } return 0; } static struct kobject *ecryptfs_kobj; static ssize_t version_show(struct kobject *kobj, struct kobj_attribute *attr, char *buff) { return snprintf(buff, PAGE_SIZE, "%d\n", ECRYPTFS_VERSIONING_MASK); } static struct kobj_attribute version_attr = __ATTR_RO(version); static struct attribute *attributes[] = { &version_attr.attr, NULL, }; static struct attribute_group attr_group = { .attrs = attributes, }; static int do_sysfs_registration(void) { int rc; ecryptfs_kobj = 
kobject_create_and_add("ecryptfs", fs_kobj); if (!ecryptfs_kobj) { printk(KERN_ERR "Unable to create ecryptfs kset\n"); rc = -ENOMEM; goto out; } rc = sysfs_create_group(ecryptfs_kobj, &attr_group); if (rc) { printk(KERN_ERR "Unable to create ecryptfs version attributes\n"); kobject_put(ecryptfs_kobj); } out: return rc; } static void do_sysfs_unregistration(void) { sysfs_remove_group(ecryptfs_kobj, &attr_group); kobject_put(ecryptfs_kobj); } static int __init ecryptfs_init(void) { int rc; if (ECRYPTFS_DEFAULT_EXTENT_SIZE > PAGE_CACHE_SIZE) { rc = -EINVAL; ecryptfs_printk(KERN_ERR, "The eCryptfs extent size is " "larger than the host's page size, and so " "eCryptfs cannot run on this system. The " "default eCryptfs extent size is [%u] bytes; " "the page size is [%lu] bytes.\n", ECRYPTFS_DEFAULT_EXTENT_SIZE, (unsigned long)PAGE_CACHE_SIZE); goto out; } rc = ecryptfs_init_kmem_caches(); if (rc) { printk(KERN_ERR "Failed to allocate one or more kmem_cache objects\n"); goto out; } rc = register_filesystem(&ecryptfs_fs_type); if (rc) { printk(KERN_ERR "Failed to register filesystem\n"); goto out_free_kmem_caches; } rc = do_sysfs_registration(); if (rc) { printk(KERN_ERR "sysfs registration failed\n"); goto out_unregister_filesystem; } rc = ecryptfs_init_kthread(); if (rc) { printk(KERN_ERR "%s: kthread initialization failed; " "rc = [%d]\n", __func__, rc); goto out_do_sysfs_unregistration; } rc = ecryptfs_init_messaging(); if (rc) { printk(KERN_ERR "Failure occurred while attempting to " "initialize the communications channel to " "ecryptfsd\n"); goto out_destroy_kthread; } rc = ecryptfs_init_crypto(); if (rc) { printk(KERN_ERR "Failure whilst attempting to init crypto; " "rc = [%d]\n", rc); goto out_release_messaging; } if (ecryptfs_verbosity > 0) printk(KERN_CRIT "eCryptfs verbosity set to %d. Secret values " "will be written to the syslog!\n", ecryptfs_verbosity); goto out; out_release_messaging: ecryptfs_release_messaging(); out_destroy_kthread: ecryptfs_destroy_kthread(); out_do_sysfs_unregistration: do_sysfs_unregistration(); out_unregister_filesystem: unregister_filesystem(&ecryptfs_fs_type); out_free_kmem_caches: ecryptfs_free_kmem_caches(); out: return rc; } static void __exit ecryptfs_exit(void) { int rc; rc = ecryptfs_destroy_crypto(); if (rc) printk(KERN_ERR "Failure whilst attempting to destroy crypto; " "rc = [%d]\n", rc); ecryptfs_release_messaging(); ecryptfs_destroy_kthread(); do_sysfs_unregistration(); unregister_filesystem(&ecryptfs_fs_type); ecryptfs_free_kmem_caches(); } MODULE_AUTHOR("Michael A. Halcrow <mhalcrow@us.ibm.com>"); MODULE_DESCRIPTION("eCryptfs"); MODULE_LICENSE("GPL"); module_init(ecryptfs_init) module_exit(ecryptfs_exit)
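/*
 * Minimal usage sketch (illustrative only, not taken from this file): the
 * option strings matched by ecryptfs_parse_options() are defined in the
 * tokens table earlier in this file.  Assuming the conventional names
 * "ecryptfs_sig=", "ecryptfs_cipher=" and "ecryptfs_key_bytes=" for the
 * ecryptfs_opt_sig, ecryptfs_opt_cipher and ecryptfs_opt_ecryptfs_key_bytes
 * cases handled above, a mount that exercises ecryptfs_parse_options() and
 * ecryptfs_mount() end to end would look like:
 *
 *   mount -t ecryptfs /lower /upper \
 *       -o ecryptfs_sig=0123456789abcdef,ecryptfs_cipher=aes,ecryptfs_key_bytes=16
 *
 * The signature must name a key already present in the keyring (see the
 * comment above ecryptfs_parse_options()); otherwise
 * ecryptfs_init_global_auth_toks() marks that auth tok
 * ECRYPTFS_AUTH_TOK_INVALID and a warning is logged.
 */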
./CrossVul/dataset_final_sorted/CWE-362/c/bad_3460_0
crossvul-cpp_data_bad_4964_0
/* * Timers abstract layer * Copyright (c) by Jaroslav Kysela <perex@perex.cz> * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/delay.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/time.h> #include <linux/mutex.h> #include <linux/device.h> #include <linux/module.h> #include <linux/string.h> #include <sound/core.h> #include <sound/timer.h> #include <sound/control.h> #include <sound/info.h> #include <sound/minors.h> #include <sound/initval.h> #include <linux/kmod.h> #if IS_ENABLED(CONFIG_SND_HRTIMER) #define DEFAULT_TIMER_LIMIT 4 #elif IS_ENABLED(CONFIG_SND_RTCTIMER) #define DEFAULT_TIMER_LIMIT 2 #else #define DEFAULT_TIMER_LIMIT 1 #endif static int timer_limit = DEFAULT_TIMER_LIMIT; static int timer_tstamp_monotonic = 1; MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>, Takashi Iwai <tiwai@suse.de>"); MODULE_DESCRIPTION("ALSA timer interface"); MODULE_LICENSE("GPL"); module_param(timer_limit, int, 0444); MODULE_PARM_DESC(timer_limit, "Maximum global timers in system."); module_param(timer_tstamp_monotonic, int, 0444); MODULE_PARM_DESC(timer_tstamp_monotonic, "Use posix monotonic clock source for timestamps (default)."); MODULE_ALIAS_CHARDEV(CONFIG_SND_MAJOR, SNDRV_MINOR_TIMER); MODULE_ALIAS("devname:snd/timer"); struct snd_timer_user { struct snd_timer_instance *timeri; int tread; /* enhanced read with timestamps and events */ unsigned long ticks; unsigned long overrun; int qhead; int qtail; int qused; int queue_size; struct snd_timer_read *queue; struct snd_timer_tread *tqueue; spinlock_t qlock; unsigned long last_resolution; unsigned int filter; struct timespec tstamp; /* trigger tstamp */ wait_queue_head_t qchange_sleep; struct fasync_struct *fasync; struct mutex ioctl_lock; }; /* list of timers */ static LIST_HEAD(snd_timer_list); /* list of slave instances */ static LIST_HEAD(snd_timer_slave_list); /* lock for slave active lists */ static DEFINE_SPINLOCK(slave_active_lock); static DEFINE_MUTEX(register_mutex); static int snd_timer_free(struct snd_timer *timer); static int snd_timer_dev_free(struct snd_device *device); static int snd_timer_dev_register(struct snd_device *device); static int snd_timer_dev_disconnect(struct snd_device *device); static void snd_timer_reschedule(struct snd_timer * timer, unsigned long ticks_left); /* * create a timer instance with the given owner string. * when timer is not NULL, increments the module counter */ static struct snd_timer_instance *snd_timer_instance_new(char *owner, struct snd_timer *timer) { struct snd_timer_instance *timeri; timeri = kzalloc(sizeof(*timeri), GFP_KERNEL); if (timeri == NULL) return NULL; timeri->owner = kstrdup(owner, GFP_KERNEL); if (! 
timeri->owner) { kfree(timeri); return NULL; } INIT_LIST_HEAD(&timeri->open_list); INIT_LIST_HEAD(&timeri->active_list); INIT_LIST_HEAD(&timeri->ack_list); INIT_LIST_HEAD(&timeri->slave_list_head); INIT_LIST_HEAD(&timeri->slave_active_head); timeri->timer = timer; if (timer && !try_module_get(timer->module)) { kfree(timeri->owner); kfree(timeri); return NULL; } return timeri; } /* * find a timer instance from the given timer id */ static struct snd_timer *snd_timer_find(struct snd_timer_id *tid) { struct snd_timer *timer = NULL; list_for_each_entry(timer, &snd_timer_list, device_list) { if (timer->tmr_class != tid->dev_class) continue; if ((timer->tmr_class == SNDRV_TIMER_CLASS_CARD || timer->tmr_class == SNDRV_TIMER_CLASS_PCM) && (timer->card == NULL || timer->card->number != tid->card)) continue; if (timer->tmr_device != tid->device) continue; if (timer->tmr_subdevice != tid->subdevice) continue; return timer; } return NULL; } #ifdef CONFIG_MODULES static void snd_timer_request(struct snd_timer_id *tid) { switch (tid->dev_class) { case SNDRV_TIMER_CLASS_GLOBAL: if (tid->device < timer_limit) request_module("snd-timer-%i", tid->device); break; case SNDRV_TIMER_CLASS_CARD: case SNDRV_TIMER_CLASS_PCM: if (tid->card < snd_ecards_limit) request_module("snd-card-%i", tid->card); break; default: break; } } #endif /* * look for a master instance matching with the slave id of the given slave. * when found, relink the open_link of the slave. * * call this with register_mutex down. */ static void snd_timer_check_slave(struct snd_timer_instance *slave) { struct snd_timer *timer; struct snd_timer_instance *master; /* FIXME: it's really dumb to look up all entries.. */ list_for_each_entry(timer, &snd_timer_list, device_list) { list_for_each_entry(master, &timer->open_list_head, open_list) { if (slave->slave_class == master->slave_class && slave->slave_id == master->slave_id) { list_move_tail(&slave->open_list, &master->slave_list_head); spin_lock_irq(&slave_active_lock); slave->master = master; slave->timer = master->timer; spin_unlock_irq(&slave_active_lock); return; } } } } /* * look for slave instances matching with the slave id of the given master. * when found, relink the open_link of slaves. * * call this with register_mutex down. */ static void snd_timer_check_master(struct snd_timer_instance *master) { struct snd_timer_instance *slave, *tmp; /* check all pending slaves */ list_for_each_entry_safe(slave, tmp, &snd_timer_slave_list, open_list) { if (slave->slave_class == master->slave_class && slave->slave_id == master->slave_id) { list_move_tail(&slave->open_list, &master->slave_list_head); spin_lock_irq(&slave_active_lock); slave->master = master; slave->timer = master->timer; if (slave->flags & SNDRV_TIMER_IFLG_RUNNING) list_add_tail(&slave->active_list, &master->slave_active_head); spin_unlock_irq(&slave_active_lock); } } } /* * open a timer instance * when opening a master, the slave id must be here given. 
*/ int snd_timer_open(struct snd_timer_instance **ti, char *owner, struct snd_timer_id *tid, unsigned int slave_id) { struct snd_timer *timer; struct snd_timer_instance *timeri = NULL; if (tid->dev_class == SNDRV_TIMER_CLASS_SLAVE) { /* open a slave instance */ if (tid->dev_sclass <= SNDRV_TIMER_SCLASS_NONE || tid->dev_sclass > SNDRV_TIMER_SCLASS_OSS_SEQUENCER) { pr_debug("ALSA: timer: invalid slave class %i\n", tid->dev_sclass); return -EINVAL; } mutex_lock(&register_mutex); timeri = snd_timer_instance_new(owner, NULL); if (!timeri) { mutex_unlock(&register_mutex); return -ENOMEM; } timeri->slave_class = tid->dev_sclass; timeri->slave_id = tid->device; timeri->flags |= SNDRV_TIMER_IFLG_SLAVE; list_add_tail(&timeri->open_list, &snd_timer_slave_list); snd_timer_check_slave(timeri); mutex_unlock(&register_mutex); *ti = timeri; return 0; } /* open a master instance */ mutex_lock(&register_mutex); timer = snd_timer_find(tid); #ifdef CONFIG_MODULES if (!timer) { mutex_unlock(&register_mutex); snd_timer_request(tid); mutex_lock(&register_mutex); timer = snd_timer_find(tid); } #endif if (!timer) { mutex_unlock(&register_mutex); return -ENODEV; } if (!list_empty(&timer->open_list_head)) { timeri = list_entry(timer->open_list_head.next, struct snd_timer_instance, open_list); if (timeri->flags & SNDRV_TIMER_IFLG_EXCLUSIVE) { mutex_unlock(&register_mutex); return -EBUSY; } } timeri = snd_timer_instance_new(owner, timer); if (!timeri) { mutex_unlock(&register_mutex); return -ENOMEM; } timeri->slave_class = tid->dev_sclass; timeri->slave_id = slave_id; if (list_empty(&timer->open_list_head) && timer->hw.open) timer->hw.open(timer); list_add_tail(&timeri->open_list, &timer->open_list_head); snd_timer_check_master(timeri); mutex_unlock(&register_mutex); *ti = timeri; return 0; } static int _snd_timer_stop(struct snd_timer_instance *timeri, int keep_flag, int event); /* * close a timer instance */ int snd_timer_close(struct snd_timer_instance *timeri) { struct snd_timer *timer = NULL; struct snd_timer_instance *slave, *tmp; if (snd_BUG_ON(!timeri)) return -ENXIO; /* force to stop the timer */ snd_timer_stop(timeri); if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) { /* wait, until the active callback is finished */ spin_lock_irq(&slave_active_lock); while (timeri->flags & SNDRV_TIMER_IFLG_CALLBACK) { spin_unlock_irq(&slave_active_lock); udelay(10); spin_lock_irq(&slave_active_lock); } spin_unlock_irq(&slave_active_lock); mutex_lock(&register_mutex); list_del(&timeri->open_list); mutex_unlock(&register_mutex); } else { timer = timeri->timer; if (snd_BUG_ON(!timer)) goto out; /* wait, until the active callback is finished */ spin_lock_irq(&timer->lock); while (timeri->flags & SNDRV_TIMER_IFLG_CALLBACK) { spin_unlock_irq(&timer->lock); udelay(10); spin_lock_irq(&timer->lock); } spin_unlock_irq(&timer->lock); mutex_lock(&register_mutex); list_del(&timeri->open_list); if (timer && list_empty(&timer->open_list_head) && timer->hw.close) timer->hw.close(timer); /* remove slave links */ list_for_each_entry_safe(slave, tmp, &timeri->slave_list_head, open_list) { spin_lock_irq(&slave_active_lock); _snd_timer_stop(slave, 1, SNDRV_TIMER_EVENT_RESOLUTION); list_move_tail(&slave->open_list, &snd_timer_slave_list); slave->master = NULL; slave->timer = NULL; spin_unlock_irq(&slave_active_lock); } mutex_unlock(&register_mutex); } out: if (timeri->private_free) timeri->private_free(timeri); kfree(timeri->owner); kfree(timeri); if (timer) module_put(timer->module); return 0; } unsigned long snd_timer_resolution(struct 
snd_timer_instance *timeri) { struct snd_timer * timer; if (timeri == NULL) return 0; if ((timer = timeri->timer) != NULL) { if (timer->hw.c_resolution) return timer->hw.c_resolution(timer); return timer->hw.resolution; } return 0; } static void snd_timer_notify1(struct snd_timer_instance *ti, int event) { struct snd_timer *timer; unsigned long flags; unsigned long resolution = 0; struct snd_timer_instance *ts; struct timespec tstamp; if (timer_tstamp_monotonic) ktime_get_ts(&tstamp); else getnstimeofday(&tstamp); if (snd_BUG_ON(event < SNDRV_TIMER_EVENT_START || event > SNDRV_TIMER_EVENT_PAUSE)) return; if (event == SNDRV_TIMER_EVENT_START || event == SNDRV_TIMER_EVENT_CONTINUE) resolution = snd_timer_resolution(ti); if (ti->ccallback) ti->ccallback(ti, event, &tstamp, resolution); if (ti->flags & SNDRV_TIMER_IFLG_SLAVE) return; timer = ti->timer; if (timer == NULL) return; if (timer->hw.flags & SNDRV_TIMER_HW_SLAVE) return; spin_lock_irqsave(&timer->lock, flags); list_for_each_entry(ts, &ti->slave_active_head, active_list) if (ts->ccallback) ts->ccallback(ti, event + 100, &tstamp, resolution); spin_unlock_irqrestore(&timer->lock, flags); } static int snd_timer_start1(struct snd_timer *timer, struct snd_timer_instance *timeri, unsigned long sticks) { list_move_tail(&timeri->active_list, &timer->active_list_head); if (timer->running) { if (timer->hw.flags & SNDRV_TIMER_HW_SLAVE) goto __start_now; timer->flags |= SNDRV_TIMER_FLG_RESCHED; timeri->flags |= SNDRV_TIMER_IFLG_START; return 1; /* delayed start */ } else { timer->sticks = sticks; timer->hw.start(timer); __start_now: timer->running++; timeri->flags |= SNDRV_TIMER_IFLG_RUNNING; return 0; } } static int snd_timer_start_slave(struct snd_timer_instance *timeri) { unsigned long flags; spin_lock_irqsave(&slave_active_lock, flags); timeri->flags |= SNDRV_TIMER_IFLG_RUNNING; if (timeri->master) list_add_tail(&timeri->active_list, &timeri->master->slave_active_head); spin_unlock_irqrestore(&slave_active_lock, flags); return 1; /* delayed start */ } /* * start the timer instance */ int snd_timer_start(struct snd_timer_instance *timeri, unsigned int ticks) { struct snd_timer *timer; int result = -EINVAL; unsigned long flags; if (timeri == NULL || ticks < 1) return -EINVAL; if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) { result = snd_timer_start_slave(timeri); snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START); return result; } timer = timeri->timer; if (timer == NULL) return -EINVAL; spin_lock_irqsave(&timer->lock, flags); timeri->ticks = timeri->cticks = ticks; timeri->pticks = 0; result = snd_timer_start1(timer, timeri, ticks); spin_unlock_irqrestore(&timer->lock, flags); snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START); return result; } static int _snd_timer_stop(struct snd_timer_instance * timeri, int keep_flag, int event) { struct snd_timer *timer; unsigned long flags; if (snd_BUG_ON(!timeri)) return -ENXIO; if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) { if (!keep_flag) { spin_lock_irqsave(&slave_active_lock, flags); timeri->flags &= ~SNDRV_TIMER_IFLG_RUNNING; spin_unlock_irqrestore(&slave_active_lock, flags); } goto __end; } timer = timeri->timer; if (!timer) return -EINVAL; spin_lock_irqsave(&timer->lock, flags); list_del_init(&timeri->ack_list); list_del_init(&timeri->active_list); if ((timeri->flags & SNDRV_TIMER_IFLG_RUNNING) && !(--timer->running)) { timer->hw.stop(timer); if (timer->flags & SNDRV_TIMER_FLG_RESCHED) { timer->flags &= ~SNDRV_TIMER_FLG_RESCHED; snd_timer_reschedule(timer, 0); if (timer->flags & SNDRV_TIMER_FLG_CHANGE) 
{ timer->flags &= ~SNDRV_TIMER_FLG_CHANGE; timer->hw.start(timer); } } } if (!keep_flag) timeri->flags &= ~(SNDRV_TIMER_IFLG_RUNNING | SNDRV_TIMER_IFLG_START); spin_unlock_irqrestore(&timer->lock, flags); __end: if (event != SNDRV_TIMER_EVENT_RESOLUTION) snd_timer_notify1(timeri, event); return 0; } /* * stop the timer instance. * * do not call this from the timer callback! */ int snd_timer_stop(struct snd_timer_instance *timeri) { struct snd_timer *timer; unsigned long flags; int err; err = _snd_timer_stop(timeri, 0, SNDRV_TIMER_EVENT_STOP); if (err < 0) return err; timer = timeri->timer; if (!timer) return -EINVAL; spin_lock_irqsave(&timer->lock, flags); timeri->cticks = timeri->ticks; timeri->pticks = 0; spin_unlock_irqrestore(&timer->lock, flags); return 0; } /* * start again.. the tick is kept. */ int snd_timer_continue(struct snd_timer_instance *timeri) { struct snd_timer *timer; int result = -EINVAL; unsigned long flags; if (timeri == NULL) return result; if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) return snd_timer_start_slave(timeri); timer = timeri->timer; if (! timer) return -EINVAL; spin_lock_irqsave(&timer->lock, flags); if (!timeri->cticks) timeri->cticks = 1; timeri->pticks = 0; result = snd_timer_start1(timer, timeri, timer->sticks); spin_unlock_irqrestore(&timer->lock, flags); snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_CONTINUE); return result; } /* * pause.. remember the ticks left */ int snd_timer_pause(struct snd_timer_instance * timeri) { return _snd_timer_stop(timeri, 0, SNDRV_TIMER_EVENT_PAUSE); } /* * reschedule the timer * * start pending instances and check the scheduling ticks. * when the scheduling ticks is changed set CHANGE flag to reprogram the timer. */ static void snd_timer_reschedule(struct snd_timer * timer, unsigned long ticks_left) { struct snd_timer_instance *ti; unsigned long ticks = ~0UL; list_for_each_entry(ti, &timer->active_list_head, active_list) { if (ti->flags & SNDRV_TIMER_IFLG_START) { ti->flags &= ~SNDRV_TIMER_IFLG_START; ti->flags |= SNDRV_TIMER_IFLG_RUNNING; timer->running++; } if (ti->flags & SNDRV_TIMER_IFLG_RUNNING) { if (ticks > ti->cticks) ticks = ti->cticks; } } if (ticks == ~0UL) { timer->flags &= ~SNDRV_TIMER_FLG_RESCHED; return; } if (ticks > timer->hw.ticks) ticks = timer->hw.ticks; if (ticks_left != ticks) timer->flags |= SNDRV_TIMER_FLG_CHANGE; timer->sticks = ticks; } /* * timer tasklet * */ static void snd_timer_tasklet(unsigned long arg) { struct snd_timer *timer = (struct snd_timer *) arg; struct snd_timer_instance *ti; struct list_head *p; unsigned long resolution, ticks; unsigned long flags; spin_lock_irqsave(&timer->lock, flags); /* now process all callbacks */ while (!list_empty(&timer->sack_list_head)) { p = timer->sack_list_head.next; /* get first item */ ti = list_entry(p, struct snd_timer_instance, ack_list); /* remove from ack_list and make empty */ list_del_init(p); ticks = ti->pticks; ti->pticks = 0; resolution = ti->resolution; ti->flags |= SNDRV_TIMER_IFLG_CALLBACK; spin_unlock(&timer->lock); if (ti->callback) ti->callback(ti, resolution, ticks); spin_lock(&timer->lock); ti->flags &= ~SNDRV_TIMER_IFLG_CALLBACK; } spin_unlock_irqrestore(&timer->lock, flags); } /* * timer interrupt * * ticks_left is usually equal to timer->sticks. 
* */ void snd_timer_interrupt(struct snd_timer * timer, unsigned long ticks_left) { struct snd_timer_instance *ti, *ts, *tmp; unsigned long resolution, ticks; struct list_head *p, *ack_list_head; unsigned long flags; int use_tasklet = 0; if (timer == NULL) return; spin_lock_irqsave(&timer->lock, flags); /* remember the current resolution */ if (timer->hw.c_resolution) resolution = timer->hw.c_resolution(timer); else resolution = timer->hw.resolution; /* loop for all active instances * Here we cannot use list_for_each_entry because the active_list of a * processed instance is relinked to done_list_head before the callback * is called. */ list_for_each_entry_safe(ti, tmp, &timer->active_list_head, active_list) { if (!(ti->flags & SNDRV_TIMER_IFLG_RUNNING)) continue; ti->pticks += ticks_left; ti->resolution = resolution; if (ti->cticks < ticks_left) ti->cticks = 0; else ti->cticks -= ticks_left; if (ti->cticks) /* not expired */ continue; if (ti->flags & SNDRV_TIMER_IFLG_AUTO) { ti->cticks = ti->ticks; } else { ti->flags &= ~SNDRV_TIMER_IFLG_RUNNING; if (--timer->running) list_del_init(&ti->active_list); } if ((timer->hw.flags & SNDRV_TIMER_HW_TASKLET) || (ti->flags & SNDRV_TIMER_IFLG_FAST)) ack_list_head = &timer->ack_list_head; else ack_list_head = &timer->sack_list_head; if (list_empty(&ti->ack_list)) list_add_tail(&ti->ack_list, ack_list_head); list_for_each_entry(ts, &ti->slave_active_head, active_list) { ts->pticks = ti->pticks; ts->resolution = resolution; if (list_empty(&ts->ack_list)) list_add_tail(&ts->ack_list, ack_list_head); } } if (timer->flags & SNDRV_TIMER_FLG_RESCHED) snd_timer_reschedule(timer, timer->sticks); if (timer->running) { if (timer->hw.flags & SNDRV_TIMER_HW_STOP) { timer->hw.stop(timer); timer->flags |= SNDRV_TIMER_FLG_CHANGE; } if (!(timer->hw.flags & SNDRV_TIMER_HW_AUTO) || (timer->flags & SNDRV_TIMER_FLG_CHANGE)) { /* restart timer */ timer->flags &= ~SNDRV_TIMER_FLG_CHANGE; timer->hw.start(timer); } } else { timer->hw.stop(timer); } /* now process all fast callbacks */ while (!list_empty(&timer->ack_list_head)) { p = timer->ack_list_head.next; /* get first item */ ti = list_entry(p, struct snd_timer_instance, ack_list); /* remove from ack_list and make empty */ list_del_init(p); ticks = ti->pticks; ti->pticks = 0; ti->flags |= SNDRV_TIMER_IFLG_CALLBACK; spin_unlock(&timer->lock); if (ti->callback) ti->callback(ti, resolution, ticks); spin_lock(&timer->lock); ti->flags &= ~SNDRV_TIMER_IFLG_CALLBACK; } /* do we have any slow callbacks? 
*/ use_tasklet = !list_empty(&timer->sack_list_head); spin_unlock_irqrestore(&timer->lock, flags); if (use_tasklet) tasklet_schedule(&timer->task_queue); } /* */ int snd_timer_new(struct snd_card *card, char *id, struct snd_timer_id *tid, struct snd_timer **rtimer) { struct snd_timer *timer; int err; static struct snd_device_ops ops = { .dev_free = snd_timer_dev_free, .dev_register = snd_timer_dev_register, .dev_disconnect = snd_timer_dev_disconnect, }; if (snd_BUG_ON(!tid)) return -EINVAL; if (rtimer) *rtimer = NULL; timer = kzalloc(sizeof(*timer), GFP_KERNEL); if (!timer) return -ENOMEM; timer->tmr_class = tid->dev_class; timer->card = card; timer->tmr_device = tid->device; timer->tmr_subdevice = tid->subdevice; if (id) strlcpy(timer->id, id, sizeof(timer->id)); INIT_LIST_HEAD(&timer->device_list); INIT_LIST_HEAD(&timer->open_list_head); INIT_LIST_HEAD(&timer->active_list_head); INIT_LIST_HEAD(&timer->ack_list_head); INIT_LIST_HEAD(&timer->sack_list_head); spin_lock_init(&timer->lock); tasklet_init(&timer->task_queue, snd_timer_tasklet, (unsigned long)timer); if (card != NULL) { timer->module = card->module; err = snd_device_new(card, SNDRV_DEV_TIMER, timer, &ops); if (err < 0) { snd_timer_free(timer); return err; } } if (rtimer) *rtimer = timer; return 0; } static int snd_timer_free(struct snd_timer *timer) { if (!timer) return 0; mutex_lock(&register_mutex); if (! list_empty(&timer->open_list_head)) { struct list_head *p, *n; struct snd_timer_instance *ti; pr_warn("ALSA: timer %p is busy?\n", timer); list_for_each_safe(p, n, &timer->open_list_head) { list_del_init(p); ti = list_entry(p, struct snd_timer_instance, open_list); ti->timer = NULL; } } list_del(&timer->device_list); mutex_unlock(&register_mutex); if (timer->private_free) timer->private_free(timer); kfree(timer); return 0; } static int snd_timer_dev_free(struct snd_device *device) { struct snd_timer *timer = device->device_data; return snd_timer_free(timer); } static int snd_timer_dev_register(struct snd_device *dev) { struct snd_timer *timer = dev->device_data; struct snd_timer *timer1; if (snd_BUG_ON(!timer || !timer->hw.start || !timer->hw.stop)) return -ENXIO; if (!(timer->hw.flags & SNDRV_TIMER_HW_SLAVE) && !timer->hw.resolution && timer->hw.c_resolution == NULL) return -EINVAL; mutex_lock(&register_mutex); list_for_each_entry(timer1, &snd_timer_list, device_list) { if (timer1->tmr_class > timer->tmr_class) break; if (timer1->tmr_class < timer->tmr_class) continue; if (timer1->card && timer->card) { if (timer1->card->number > timer->card->number) break; if (timer1->card->number < timer->card->number) continue; } if (timer1->tmr_device > timer->tmr_device) break; if (timer1->tmr_device < timer->tmr_device) continue; if (timer1->tmr_subdevice > timer->tmr_subdevice) break; if (timer1->tmr_subdevice < timer->tmr_subdevice) continue; /* conflicts.. */ mutex_unlock(&register_mutex); return -EBUSY; } list_add_tail(&timer->device_list, &timer1->device_list); mutex_unlock(&register_mutex); return 0; } static int snd_timer_dev_disconnect(struct snd_device *device) { struct snd_timer *timer = device->device_data; mutex_lock(&register_mutex); list_del_init(&timer->device_list); mutex_unlock(&register_mutex); return 0; } void snd_timer_notify(struct snd_timer *timer, int event, struct timespec *tstamp) { unsigned long flags; unsigned long resolution = 0; struct snd_timer_instance *ti, *ts; if (! 
(timer->hw.flags & SNDRV_TIMER_HW_SLAVE)) return; if (snd_BUG_ON(event < SNDRV_TIMER_EVENT_MSTART || event > SNDRV_TIMER_EVENT_MRESUME)) return; spin_lock_irqsave(&timer->lock, flags); if (event == SNDRV_TIMER_EVENT_MSTART || event == SNDRV_TIMER_EVENT_MCONTINUE || event == SNDRV_TIMER_EVENT_MRESUME) { if (timer->hw.c_resolution) resolution = timer->hw.c_resolution(timer); else resolution = timer->hw.resolution; } list_for_each_entry(ti, &timer->active_list_head, active_list) { if (ti->ccallback) ti->ccallback(ti, event, tstamp, resolution); list_for_each_entry(ts, &ti->slave_active_head, active_list) if (ts->ccallback) ts->ccallback(ts, event, tstamp, resolution); } spin_unlock_irqrestore(&timer->lock, flags); } /* * exported functions for global timers */ int snd_timer_global_new(char *id, int device, struct snd_timer **rtimer) { struct snd_timer_id tid; tid.dev_class = SNDRV_TIMER_CLASS_GLOBAL; tid.dev_sclass = SNDRV_TIMER_SCLASS_NONE; tid.card = -1; tid.device = device; tid.subdevice = 0; return snd_timer_new(NULL, id, &tid, rtimer); } int snd_timer_global_free(struct snd_timer *timer) { return snd_timer_free(timer); } int snd_timer_global_register(struct snd_timer *timer) { struct snd_device dev; memset(&dev, 0, sizeof(dev)); dev.device_data = timer; return snd_timer_dev_register(&dev); } /* * System timer */ struct snd_timer_system_private { struct timer_list tlist; unsigned long last_expires; unsigned long last_jiffies; unsigned long correction; }; static void snd_timer_s_function(unsigned long data) { struct snd_timer *timer = (struct snd_timer *)data; struct snd_timer_system_private *priv = timer->private_data; unsigned long jiff = jiffies; if (time_after(jiff, priv->last_expires)) priv->correction += (long)jiff - (long)priv->last_expires; snd_timer_interrupt(timer, (long)jiff - (long)priv->last_jiffies); } static int snd_timer_s_start(struct snd_timer * timer) { struct snd_timer_system_private *priv; unsigned long njiff; priv = (struct snd_timer_system_private *) timer->private_data; njiff = (priv->last_jiffies = jiffies); if (priv->correction > timer->sticks - 1) { priv->correction -= timer->sticks - 1; njiff++; } else { njiff += timer->sticks - priv->correction; priv->correction = 0; } priv->last_expires = priv->tlist.expires = njiff; add_timer(&priv->tlist); return 0; } static int snd_timer_s_stop(struct snd_timer * timer) { struct snd_timer_system_private *priv; unsigned long jiff; priv = (struct snd_timer_system_private *) timer->private_data; del_timer(&priv->tlist); jiff = jiffies; if (time_before(jiff, priv->last_expires)) timer->sticks = priv->last_expires - jiff; else timer->sticks = 1; priv->correction = 0; return 0; } static struct snd_timer_hardware snd_timer_system = { .flags = SNDRV_TIMER_HW_FIRST | SNDRV_TIMER_HW_TASKLET, .resolution = 1000000000L / HZ, .ticks = 10000000L, .start = snd_timer_s_start, .stop = snd_timer_s_stop }; static void snd_timer_free_system(struct snd_timer *timer) { kfree(timer->private_data); } static int snd_timer_register_system(void) { struct snd_timer *timer; struct snd_timer_system_private *priv; int err; err = snd_timer_global_new("system", SNDRV_TIMER_GLOBAL_SYSTEM, &timer); if (err < 0) return err; strcpy(timer->name, "system timer"); timer->hw = snd_timer_system; priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (priv == NULL) { snd_timer_free(timer); return -ENOMEM; } setup_timer(&priv->tlist, snd_timer_s_function, (unsigned long) timer); timer->private_data = priv; timer->private_free = snd_timer_free_system; return 
snd_timer_global_register(timer); } #ifdef CONFIG_SND_PROC_FS /* * Info interface */ static void snd_timer_proc_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct snd_timer *timer; struct snd_timer_instance *ti; mutex_lock(&register_mutex); list_for_each_entry(timer, &snd_timer_list, device_list) { switch (timer->tmr_class) { case SNDRV_TIMER_CLASS_GLOBAL: snd_iprintf(buffer, "G%i: ", timer->tmr_device); break; case SNDRV_TIMER_CLASS_CARD: snd_iprintf(buffer, "C%i-%i: ", timer->card->number, timer->tmr_device); break; case SNDRV_TIMER_CLASS_PCM: snd_iprintf(buffer, "P%i-%i-%i: ", timer->card->number, timer->tmr_device, timer->tmr_subdevice); break; default: snd_iprintf(buffer, "?%i-%i-%i-%i: ", timer->tmr_class, timer->card ? timer->card->number : -1, timer->tmr_device, timer->tmr_subdevice); } snd_iprintf(buffer, "%s :", timer->name); if (timer->hw.resolution) snd_iprintf(buffer, " %lu.%03luus (%lu ticks)", timer->hw.resolution / 1000, timer->hw.resolution % 1000, timer->hw.ticks); if (timer->hw.flags & SNDRV_TIMER_HW_SLAVE) snd_iprintf(buffer, " SLAVE"); snd_iprintf(buffer, "\n"); list_for_each_entry(ti, &timer->open_list_head, open_list) snd_iprintf(buffer, " Client %s : %s\n", ti->owner ? ti->owner : "unknown", ti->flags & (SNDRV_TIMER_IFLG_START | SNDRV_TIMER_IFLG_RUNNING) ? "running" : "stopped"); } mutex_unlock(&register_mutex); } static struct snd_info_entry *snd_timer_proc_entry; static void __init snd_timer_proc_init(void) { struct snd_info_entry *entry; entry = snd_info_create_module_entry(THIS_MODULE, "timers", NULL); if (entry != NULL) { entry->c.text.read = snd_timer_proc_read; if (snd_info_register(entry) < 0) { snd_info_free_entry(entry); entry = NULL; } } snd_timer_proc_entry = entry; } static void __exit snd_timer_proc_done(void) { snd_info_free_entry(snd_timer_proc_entry); } #else /* !CONFIG_SND_PROC_FS */ #define snd_timer_proc_init() #define snd_timer_proc_done() #endif /* * USER SPACE interface */ static void snd_timer_user_interrupt(struct snd_timer_instance *timeri, unsigned long resolution, unsigned long ticks) { struct snd_timer_user *tu = timeri->callback_data; struct snd_timer_read *r; int prev; spin_lock(&tu->qlock); if (tu->qused > 0) { prev = tu->qtail == 0 ? 
tu->queue_size - 1 : tu->qtail - 1; r = &tu->queue[prev]; if (r->resolution == resolution) { r->ticks += ticks; goto __wake; } } if (tu->qused >= tu->queue_size) { tu->overrun++; } else { r = &tu->queue[tu->qtail++]; tu->qtail %= tu->queue_size; r->resolution = resolution; r->ticks = ticks; tu->qused++; } __wake: spin_unlock(&tu->qlock); kill_fasync(&tu->fasync, SIGIO, POLL_IN); wake_up(&tu->qchange_sleep); } static void snd_timer_user_append_to_tqueue(struct snd_timer_user *tu, struct snd_timer_tread *tread) { if (tu->qused >= tu->queue_size) { tu->overrun++; } else { memcpy(&tu->tqueue[tu->qtail++], tread, sizeof(*tread)); tu->qtail %= tu->queue_size; tu->qused++; } } static void snd_timer_user_ccallback(struct snd_timer_instance *timeri, int event, struct timespec *tstamp, unsigned long resolution) { struct snd_timer_user *tu = timeri->callback_data; struct snd_timer_tread r1; unsigned long flags; if (event >= SNDRV_TIMER_EVENT_START && event <= SNDRV_TIMER_EVENT_PAUSE) tu->tstamp = *tstamp; if ((tu->filter & (1 << event)) == 0 || !tu->tread) return; r1.event = event; r1.tstamp = *tstamp; r1.val = resolution; spin_lock_irqsave(&tu->qlock, flags); snd_timer_user_append_to_tqueue(tu, &r1); spin_unlock_irqrestore(&tu->qlock, flags); kill_fasync(&tu->fasync, SIGIO, POLL_IN); wake_up(&tu->qchange_sleep); } static void snd_timer_user_tinterrupt(struct snd_timer_instance *timeri, unsigned long resolution, unsigned long ticks) { struct snd_timer_user *tu = timeri->callback_data; struct snd_timer_tread *r, r1; struct timespec tstamp; int prev, append = 0; memset(&tstamp, 0, sizeof(tstamp)); spin_lock(&tu->qlock); if ((tu->filter & ((1 << SNDRV_TIMER_EVENT_RESOLUTION) | (1 << SNDRV_TIMER_EVENT_TICK))) == 0) { spin_unlock(&tu->qlock); return; } if (tu->last_resolution != resolution || ticks > 0) { if (timer_tstamp_monotonic) ktime_get_ts(&tstamp); else getnstimeofday(&tstamp); } if ((tu->filter & (1 << SNDRV_TIMER_EVENT_RESOLUTION)) && tu->last_resolution != resolution) { r1.event = SNDRV_TIMER_EVENT_RESOLUTION; r1.tstamp = tstamp; r1.val = resolution; snd_timer_user_append_to_tqueue(tu, &r1); tu->last_resolution = resolution; append++; } if ((tu->filter & (1 << SNDRV_TIMER_EVENT_TICK)) == 0) goto __wake; if (ticks == 0) goto __wake; if (tu->qused > 0) { prev = tu->qtail == 0 ? 
tu->queue_size - 1 : tu->qtail - 1; r = &tu->tqueue[prev]; if (r->event == SNDRV_TIMER_EVENT_TICK) { r->tstamp = tstamp; r->val += ticks; append++; goto __wake; } } r1.event = SNDRV_TIMER_EVENT_TICK; r1.tstamp = tstamp; r1.val = ticks; snd_timer_user_append_to_tqueue(tu, &r1); append++; __wake: spin_unlock(&tu->qlock); if (append == 0) return; kill_fasync(&tu->fasync, SIGIO, POLL_IN); wake_up(&tu->qchange_sleep); } static int snd_timer_user_open(struct inode *inode, struct file *file) { struct snd_timer_user *tu; int err; err = nonseekable_open(inode, file); if (err < 0) return err; tu = kzalloc(sizeof(*tu), GFP_KERNEL); if (tu == NULL) return -ENOMEM; spin_lock_init(&tu->qlock); init_waitqueue_head(&tu->qchange_sleep); mutex_init(&tu->ioctl_lock); tu->ticks = 1; tu->queue_size = 128; tu->queue = kmalloc(tu->queue_size * sizeof(struct snd_timer_read), GFP_KERNEL); if (tu->queue == NULL) { kfree(tu); return -ENOMEM; } file->private_data = tu; return 0; } static int snd_timer_user_release(struct inode *inode, struct file *file) { struct snd_timer_user *tu; if (file->private_data) { tu = file->private_data; file->private_data = NULL; mutex_lock(&tu->ioctl_lock); if (tu->timeri) snd_timer_close(tu->timeri); mutex_unlock(&tu->ioctl_lock); kfree(tu->queue); kfree(tu->tqueue); kfree(tu); } return 0; } static void snd_timer_user_zero_id(struct snd_timer_id *id) { id->dev_class = SNDRV_TIMER_CLASS_NONE; id->dev_sclass = SNDRV_TIMER_SCLASS_NONE; id->card = -1; id->device = -1; id->subdevice = -1; } static void snd_timer_user_copy_id(struct snd_timer_id *id, struct snd_timer *timer) { id->dev_class = timer->tmr_class; id->dev_sclass = SNDRV_TIMER_SCLASS_NONE; id->card = timer->card ? timer->card->number : -1; id->device = timer->tmr_device; id->subdevice = timer->tmr_subdevice; } static int snd_timer_user_next_device(struct snd_timer_id __user *_tid) { struct snd_timer_id id; struct snd_timer *timer; struct list_head *p; if (copy_from_user(&id, _tid, sizeof(id))) return -EFAULT; mutex_lock(&register_mutex); if (id.dev_class < 0) { /* first item */ if (list_empty(&snd_timer_list)) snd_timer_user_zero_id(&id); else { timer = list_entry(snd_timer_list.next, struct snd_timer, device_list); snd_timer_user_copy_id(&id, timer); } } else { switch (id.dev_class) { case SNDRV_TIMER_CLASS_GLOBAL: id.device = id.device < 0 ? 
0 : id.device + 1; list_for_each(p, &snd_timer_list) { timer = list_entry(p, struct snd_timer, device_list); if (timer->tmr_class > SNDRV_TIMER_CLASS_GLOBAL) { snd_timer_user_copy_id(&id, timer); break; } if (timer->tmr_device >= id.device) { snd_timer_user_copy_id(&id, timer); break; } } if (p == &snd_timer_list) snd_timer_user_zero_id(&id); break; case SNDRV_TIMER_CLASS_CARD: case SNDRV_TIMER_CLASS_PCM: if (id.card < 0) { id.card = 0; } else { if (id.card < 0) { id.card = 0; } else { if (id.device < 0) { id.device = 0; } else { if (id.subdevice < 0) { id.subdevice = 0; } else { id.subdevice++; } } } } list_for_each(p, &snd_timer_list) { timer = list_entry(p, struct snd_timer, device_list); if (timer->tmr_class > id.dev_class) { snd_timer_user_copy_id(&id, timer); break; } if (timer->tmr_class < id.dev_class) continue; if (timer->card->number > id.card) { snd_timer_user_copy_id(&id, timer); break; } if (timer->card->number < id.card) continue; if (timer->tmr_device > id.device) { snd_timer_user_copy_id(&id, timer); break; } if (timer->tmr_device < id.device) continue; if (timer->tmr_subdevice > id.subdevice) { snd_timer_user_copy_id(&id, timer); break; } if (timer->tmr_subdevice < id.subdevice) continue; snd_timer_user_copy_id(&id, timer); break; } if (p == &snd_timer_list) snd_timer_user_zero_id(&id); break; default: snd_timer_user_zero_id(&id); } } mutex_unlock(&register_mutex); if (copy_to_user(_tid, &id, sizeof(*_tid))) return -EFAULT; return 0; } static int snd_timer_user_ginfo(struct file *file, struct snd_timer_ginfo __user *_ginfo) { struct snd_timer_ginfo *ginfo; struct snd_timer_id tid; struct snd_timer *t; struct list_head *p; int err = 0; ginfo = memdup_user(_ginfo, sizeof(*ginfo)); if (IS_ERR(ginfo)) return PTR_ERR(ginfo); tid = ginfo->tid; memset(ginfo, 0, sizeof(*ginfo)); ginfo->tid = tid; mutex_lock(&register_mutex); t = snd_timer_find(&tid); if (t != NULL) { ginfo->card = t->card ? 
t->card->number : -1; if (t->hw.flags & SNDRV_TIMER_HW_SLAVE) ginfo->flags |= SNDRV_TIMER_FLG_SLAVE; strlcpy(ginfo->id, t->id, sizeof(ginfo->id)); strlcpy(ginfo->name, t->name, sizeof(ginfo->name)); ginfo->resolution = t->hw.resolution; if (t->hw.resolution_min > 0) { ginfo->resolution_min = t->hw.resolution_min; ginfo->resolution_max = t->hw.resolution_max; } list_for_each(p, &t->open_list_head) { ginfo->clients++; } } else { err = -ENODEV; } mutex_unlock(&register_mutex); if (err >= 0 && copy_to_user(_ginfo, ginfo, sizeof(*ginfo))) err = -EFAULT; kfree(ginfo); return err; } static int snd_timer_user_gparams(struct file *file, struct snd_timer_gparams __user *_gparams) { struct snd_timer_gparams gparams; struct snd_timer *t; int err; if (copy_from_user(&gparams, _gparams, sizeof(gparams))) return -EFAULT; mutex_lock(&register_mutex); t = snd_timer_find(&gparams.tid); if (!t) { err = -ENODEV; goto _error; } if (!list_empty(&t->open_list_head)) { err = -EBUSY; goto _error; } if (!t->hw.set_period) { err = -ENOSYS; goto _error; } err = t->hw.set_period(t, gparams.period_num, gparams.period_den); _error: mutex_unlock(&register_mutex); return err; } static int snd_timer_user_gstatus(struct file *file, struct snd_timer_gstatus __user *_gstatus) { struct snd_timer_gstatus gstatus; struct snd_timer_id tid; struct snd_timer *t; int err = 0; if (copy_from_user(&gstatus, _gstatus, sizeof(gstatus))) return -EFAULT; tid = gstatus.tid; memset(&gstatus, 0, sizeof(gstatus)); gstatus.tid = tid; mutex_lock(&register_mutex); t = snd_timer_find(&tid); if (t != NULL) { if (t->hw.c_resolution) gstatus.resolution = t->hw.c_resolution(t); else gstatus.resolution = t->hw.resolution; if (t->hw.precise_resolution) { t->hw.precise_resolution(t, &gstatus.resolution_num, &gstatus.resolution_den); } else { gstatus.resolution_num = gstatus.resolution; gstatus.resolution_den = 1000000000uL; } } else { err = -ENODEV; } mutex_unlock(&register_mutex); if (err >= 0 && copy_to_user(_gstatus, &gstatus, sizeof(gstatus))) err = -EFAULT; return err; } static int snd_timer_user_tselect(struct file *file, struct snd_timer_select __user *_tselect) { struct snd_timer_user *tu; struct snd_timer_select tselect; char str[32]; int err = 0; tu = file->private_data; if (tu->timeri) { snd_timer_close(tu->timeri); tu->timeri = NULL; } if (copy_from_user(&tselect, _tselect, sizeof(tselect))) { err = -EFAULT; goto __err; } sprintf(str, "application %i", current->pid); if (tselect.id.dev_class != SNDRV_TIMER_CLASS_SLAVE) tselect.id.dev_sclass = SNDRV_TIMER_SCLASS_APPLICATION; err = snd_timer_open(&tu->timeri, str, &tselect.id, current->pid); if (err < 0) goto __err; kfree(tu->queue); tu->queue = NULL; kfree(tu->tqueue); tu->tqueue = NULL; if (tu->tread) { tu->tqueue = kmalloc(tu->queue_size * sizeof(struct snd_timer_tread), GFP_KERNEL); if (tu->tqueue == NULL) err = -ENOMEM; } else { tu->queue = kmalloc(tu->queue_size * sizeof(struct snd_timer_read), GFP_KERNEL); if (tu->queue == NULL) err = -ENOMEM; } if (err < 0) { snd_timer_close(tu->timeri); tu->timeri = NULL; } else { tu->timeri->flags |= SNDRV_TIMER_IFLG_FAST; tu->timeri->callback = tu->tread ? 
snd_timer_user_tinterrupt : snd_timer_user_interrupt; tu->timeri->ccallback = snd_timer_user_ccallback; tu->timeri->callback_data = (void *)tu; } __err: return err; } static int snd_timer_user_info(struct file *file, struct snd_timer_info __user *_info) { struct snd_timer_user *tu; struct snd_timer_info *info; struct snd_timer *t; int err = 0; tu = file->private_data; if (!tu->timeri) return -EBADFD; t = tu->timeri->timer; if (!t) return -EBADFD; info = kzalloc(sizeof(*info), GFP_KERNEL); if (! info) return -ENOMEM; info->card = t->card ? t->card->number : -1; if (t->hw.flags & SNDRV_TIMER_HW_SLAVE) info->flags |= SNDRV_TIMER_FLG_SLAVE; strlcpy(info->id, t->id, sizeof(info->id)); strlcpy(info->name, t->name, sizeof(info->name)); info->resolution = t->hw.resolution; if (copy_to_user(_info, info, sizeof(*_info))) err = -EFAULT; kfree(info); return err; } static int snd_timer_user_params(struct file *file, struct snd_timer_params __user *_params) { struct snd_timer_user *tu; struct snd_timer_params params; struct snd_timer *t; struct snd_timer_read *tr; struct snd_timer_tread *ttr; int err; tu = file->private_data; if (!tu->timeri) return -EBADFD; t = tu->timeri->timer; if (!t) return -EBADFD; if (copy_from_user(&params, _params, sizeof(params))) return -EFAULT; if (!(t->hw.flags & SNDRV_TIMER_HW_SLAVE) && params.ticks < 1) { err = -EINVAL; goto _end; } if (params.queue_size > 0 && (params.queue_size < 32 || params.queue_size > 1024)) { err = -EINVAL; goto _end; } if (params.filter & ~((1<<SNDRV_TIMER_EVENT_RESOLUTION)| (1<<SNDRV_TIMER_EVENT_TICK)| (1<<SNDRV_TIMER_EVENT_START)| (1<<SNDRV_TIMER_EVENT_STOP)| (1<<SNDRV_TIMER_EVENT_CONTINUE)| (1<<SNDRV_TIMER_EVENT_PAUSE)| (1<<SNDRV_TIMER_EVENT_SUSPEND)| (1<<SNDRV_TIMER_EVENT_RESUME)| (1<<SNDRV_TIMER_EVENT_MSTART)| (1<<SNDRV_TIMER_EVENT_MSTOP)| (1<<SNDRV_TIMER_EVENT_MCONTINUE)| (1<<SNDRV_TIMER_EVENT_MPAUSE)| (1<<SNDRV_TIMER_EVENT_MSUSPEND)| (1<<SNDRV_TIMER_EVENT_MRESUME))) { err = -EINVAL; goto _end; } snd_timer_stop(tu->timeri); spin_lock_irq(&t->lock); tu->timeri->flags &= ~(SNDRV_TIMER_IFLG_AUTO| SNDRV_TIMER_IFLG_EXCLUSIVE| SNDRV_TIMER_IFLG_EARLY_EVENT); if (params.flags & SNDRV_TIMER_PSFLG_AUTO) tu->timeri->flags |= SNDRV_TIMER_IFLG_AUTO; if (params.flags & SNDRV_TIMER_PSFLG_EXCLUSIVE) tu->timeri->flags |= SNDRV_TIMER_IFLG_EXCLUSIVE; if (params.flags & SNDRV_TIMER_PSFLG_EARLY_EVENT) tu->timeri->flags |= SNDRV_TIMER_IFLG_EARLY_EVENT; spin_unlock_irq(&t->lock); if (params.queue_size > 0 && (unsigned int)tu->queue_size != params.queue_size) { if (tu->tread) { ttr = kmalloc(params.queue_size * sizeof(*ttr), GFP_KERNEL); if (ttr) { kfree(tu->tqueue); tu->queue_size = params.queue_size; tu->tqueue = ttr; } } else { tr = kmalloc(params.queue_size * sizeof(*tr), GFP_KERNEL); if (tr) { kfree(tu->queue); tu->queue_size = params.queue_size; tu->queue = tr; } } } tu->qhead = tu->qtail = tu->qused = 0; if (tu->timeri->flags & SNDRV_TIMER_IFLG_EARLY_EVENT) { if (tu->tread) { struct snd_timer_tread tread; tread.event = SNDRV_TIMER_EVENT_EARLY; tread.tstamp.tv_sec = 0; tread.tstamp.tv_nsec = 0; tread.val = 0; snd_timer_user_append_to_tqueue(tu, &tread); } else { struct snd_timer_read *r = &tu->queue[0]; r->resolution = 0; r->ticks = 0; tu->qused++; tu->qtail++; } } tu->filter = params.filter; tu->ticks = params.ticks; err = 0; _end: if (copy_to_user(_params, &params, sizeof(params))) return -EFAULT; return err; } static int snd_timer_user_status(struct file *file, struct snd_timer_status __user *_status) { struct snd_timer_user *tu; struct snd_timer_status 
status; tu = file->private_data; if (!tu->timeri) return -EBADFD; memset(&status, 0, sizeof(status)); status.tstamp = tu->tstamp; status.resolution = snd_timer_resolution(tu->timeri); status.lost = tu->timeri->lost; status.overrun = tu->overrun; spin_lock_irq(&tu->qlock); status.queue = tu->qused; spin_unlock_irq(&tu->qlock); if (copy_to_user(_status, &status, sizeof(status))) return -EFAULT; return 0; } static int snd_timer_user_start(struct file *file) { int err; struct snd_timer_user *tu; tu = file->private_data; if (!tu->timeri) return -EBADFD; snd_timer_stop(tu->timeri); tu->timeri->lost = 0; tu->last_resolution = 0; return (err = snd_timer_start(tu->timeri, tu->ticks)) < 0 ? err : 0; } static int snd_timer_user_stop(struct file *file) { int err; struct snd_timer_user *tu; tu = file->private_data; if (!tu->timeri) return -EBADFD; return (err = snd_timer_stop(tu->timeri)) < 0 ? err : 0; } static int snd_timer_user_continue(struct file *file) { int err; struct snd_timer_user *tu; tu = file->private_data; if (!tu->timeri) return -EBADFD; tu->timeri->lost = 0; return (err = snd_timer_continue(tu->timeri)) < 0 ? err : 0; } static int snd_timer_user_pause(struct file *file) { int err; struct snd_timer_user *tu; tu = file->private_data; if (!tu->timeri) return -EBADFD; return (err = snd_timer_pause(tu->timeri)) < 0 ? err : 0; } enum { SNDRV_TIMER_IOCTL_START_OLD = _IO('T', 0x20), SNDRV_TIMER_IOCTL_STOP_OLD = _IO('T', 0x21), SNDRV_TIMER_IOCTL_CONTINUE_OLD = _IO('T', 0x22), SNDRV_TIMER_IOCTL_PAUSE_OLD = _IO('T', 0x23), }; static long __snd_timer_user_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct snd_timer_user *tu; void __user *argp = (void __user *)arg; int __user *p = argp; tu = file->private_data; switch (cmd) { case SNDRV_TIMER_IOCTL_PVERSION: return put_user(SNDRV_TIMER_VERSION, p) ? -EFAULT : 0; case SNDRV_TIMER_IOCTL_NEXT_DEVICE: return snd_timer_user_next_device(argp); case SNDRV_TIMER_IOCTL_TREAD: { int xarg; if (tu->timeri) /* too late */ return -EBUSY; if (get_user(xarg, p)) return -EFAULT; tu->tread = xarg ? 
1 : 0; return 0; } case SNDRV_TIMER_IOCTL_GINFO: return snd_timer_user_ginfo(file, argp); case SNDRV_TIMER_IOCTL_GPARAMS: return snd_timer_user_gparams(file, argp); case SNDRV_TIMER_IOCTL_GSTATUS: return snd_timer_user_gstatus(file, argp); case SNDRV_TIMER_IOCTL_SELECT: return snd_timer_user_tselect(file, argp); case SNDRV_TIMER_IOCTL_INFO: return snd_timer_user_info(file, argp); case SNDRV_TIMER_IOCTL_PARAMS: return snd_timer_user_params(file, argp); case SNDRV_TIMER_IOCTL_STATUS: return snd_timer_user_status(file, argp); case SNDRV_TIMER_IOCTL_START: case SNDRV_TIMER_IOCTL_START_OLD: return snd_timer_user_start(file); case SNDRV_TIMER_IOCTL_STOP: case SNDRV_TIMER_IOCTL_STOP_OLD: return snd_timer_user_stop(file); case SNDRV_TIMER_IOCTL_CONTINUE: case SNDRV_TIMER_IOCTL_CONTINUE_OLD: return snd_timer_user_continue(file); case SNDRV_TIMER_IOCTL_PAUSE: case SNDRV_TIMER_IOCTL_PAUSE_OLD: return snd_timer_user_pause(file); } return -ENOTTY; } static long snd_timer_user_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct snd_timer_user *tu = file->private_data; long ret; mutex_lock(&tu->ioctl_lock); ret = __snd_timer_user_ioctl(file, cmd, arg); mutex_unlock(&tu->ioctl_lock); return ret; } static int snd_timer_user_fasync(int fd, struct file * file, int on) { struct snd_timer_user *tu; tu = file->private_data; return fasync_helper(fd, file, on, &tu->fasync); } static ssize_t snd_timer_user_read(struct file *file, char __user *buffer, size_t count, loff_t *offset) { struct snd_timer_user *tu; long result = 0, unit; int err = 0; tu = file->private_data; unit = tu->tread ? sizeof(struct snd_timer_tread) : sizeof(struct snd_timer_read); spin_lock_irq(&tu->qlock); while ((long)count - result >= unit) { while (!tu->qused) { wait_queue_t wait; if ((file->f_flags & O_NONBLOCK) != 0 || result > 0) { err = -EAGAIN; break; } set_current_state(TASK_INTERRUPTIBLE); init_waitqueue_entry(&wait, current); add_wait_queue(&tu->qchange_sleep, &wait); spin_unlock_irq(&tu->qlock); schedule(); spin_lock_irq(&tu->qlock); remove_wait_queue(&tu->qchange_sleep, &wait); if (signal_pending(current)) { err = -ERESTARTSYS; break; } } spin_unlock_irq(&tu->qlock); if (err < 0) goto _error; if (tu->tread) { if (copy_to_user(buffer, &tu->tqueue[tu->qhead++], sizeof(struct snd_timer_tread))) { err = -EFAULT; goto _error; } } else { if (copy_to_user(buffer, &tu->queue[tu->qhead++], sizeof(struct snd_timer_read))) { err = -EFAULT; goto _error; } } tu->qhead %= tu->queue_size; result += unit; buffer += unit; spin_lock_irq(&tu->qlock); tu->qused--; } spin_unlock_irq(&tu->qlock); _error: return result > 0 ? 
result : err; } static unsigned int snd_timer_user_poll(struct file *file, poll_table * wait) { unsigned int mask; struct snd_timer_user *tu; tu = file->private_data; poll_wait(file, &tu->qchange_sleep, wait); mask = 0; if (tu->qused) mask |= POLLIN | POLLRDNORM; return mask; } #ifdef CONFIG_COMPAT #include "timer_compat.c" #else #define snd_timer_user_ioctl_compat NULL #endif static const struct file_operations snd_timer_f_ops = { .owner = THIS_MODULE, .read = snd_timer_user_read, .open = snd_timer_user_open, .release = snd_timer_user_release, .llseek = no_llseek, .poll = snd_timer_user_poll, .unlocked_ioctl = snd_timer_user_ioctl, .compat_ioctl = snd_timer_user_ioctl_compat, .fasync = snd_timer_user_fasync, }; /* unregister the system timer */ static void snd_timer_free_all(void) { struct snd_timer *timer, *n; list_for_each_entry_safe(timer, n, &snd_timer_list, device_list) snd_timer_free(timer); } static struct device timer_dev; /* * ENTRY functions */ static int __init alsa_timer_init(void) { int err; snd_device_initialize(&timer_dev, NULL); dev_set_name(&timer_dev, "timer"); #ifdef SNDRV_OSS_INFO_DEV_TIMERS snd_oss_info_register(SNDRV_OSS_INFO_DEV_TIMERS, SNDRV_CARDS - 1, "system timer"); #endif err = snd_timer_register_system(); if (err < 0) { pr_err("ALSA: unable to register system timer (%i)\n", err); put_device(&timer_dev); return err; } err = snd_register_device(SNDRV_DEVICE_TYPE_TIMER, NULL, 0, &snd_timer_f_ops, NULL, &timer_dev); if (err < 0) { pr_err("ALSA: unable to register timer device (%i)\n", err); snd_timer_free_all(); put_device(&timer_dev); return err; } snd_timer_proc_init(); return 0; } static void __exit alsa_timer_exit(void) { snd_unregister_device(&timer_dev); snd_timer_free_all(); put_device(&timer_dev); snd_timer_proc_done(); #ifdef SNDRV_OSS_INFO_DEV_TIMERS snd_oss_info_unregister(SNDRV_OSS_INFO_DEV_TIMERS, SNDRV_CARDS - 1); #endif } module_init(alsa_timer_init) module_exit(alsa_timer_exit) EXPORT_SYMBOL(snd_timer_open); EXPORT_SYMBOL(snd_timer_close); EXPORT_SYMBOL(snd_timer_resolution); EXPORT_SYMBOL(snd_timer_start); EXPORT_SYMBOL(snd_timer_stop); EXPORT_SYMBOL(snd_timer_continue); EXPORT_SYMBOL(snd_timer_pause); EXPORT_SYMBOL(snd_timer_new); EXPORT_SYMBOL(snd_timer_notify); EXPORT_SYMBOL(snd_timer_global_new); EXPORT_SYMBOL(snd_timer_global_free); EXPORT_SYMBOL(snd_timer_global_register); EXPORT_SYMBOL(snd_timer_interrupt);
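/*
 * Minimal in-kernel usage sketch (illustrative only; my_tick(), my_start()
 * and my_stop() are hypothetical driver code, not part of this file).  It
 * uses only the exported API above: snd_timer_open() on the global system
 * timer, a fast tick callback, snd_timer_start()/snd_timer_stop() and
 * snd_timer_close().  A "tick" is one period of the underlying hardware
 * timer (timer->hw.resolution nanoseconds for the system timer registered
 * by snd_timer_register_system()).
 *
 *   static struct snd_timer_instance *ti;
 *
 *   static void my_tick(struct snd_timer_instance *t,
 *                       unsigned long resolution, unsigned long ticks)
 *   {
 *           pr_debug("tick: %lu ticks of %lu ns\n", ticks, resolution);
 *   }
 *
 *   static int my_start(void)
 *   {
 *           struct snd_timer_id tid = {
 *                   .dev_class = SNDRV_TIMER_CLASS_GLOBAL,
 *                   .dev_sclass = SNDRV_TIMER_SCLASS_NONE,
 *                   .card = -1,
 *                   .device = SNDRV_TIMER_GLOBAL_SYSTEM,
 *                   .subdevice = 0,
 *           };
 *           int err = snd_timer_open(&ti, "my_driver", &tid, 0);
 *
 *           if (err < 0)
 *                   return err;
 *           ti->callback = my_tick;
 *           ti->callback_data = NULL;
 *           return snd_timer_start(ti, 10);
 *   }
 *
 *   static void my_stop(void)
 *   {
 *           snd_timer_stop(ti);
 *           snd_timer_close(ti);
 *   }
 */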
./CrossVul/dataset_final_sorted/CWE-362/c/bad_4964_0
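The ALSA timer file above ends with the user-facing character-device plumbing: ioctls are serialized under tu->ioctl_lock, poll() reports POLLIN when tu->qused is non-zero, and read() sleeps on tu->qchange_sleep until events are queued. A minimal user-space sketch of that interface follows. It is a hedged illustration, not taken from this file: the device node /dev/snd/timer, the SNDRV_TIMER_* constants, and the struct snd_timer_select/snd_timer_params/snd_timer_read layouts are assumed to come from the uapi header <sound/asound.h>.

/*
 * Hedged user-space sketch: consume events from the ALSA timer
 * character device whose ioctl/poll/read paths are shown above.
 * Assumptions (not taken from this file): the device node is
 * /dev/snd/timer and the SNDRV_TIMER_* ioctls and structure layouts
 * come from <sound/asound.h>.
 */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <poll.h>
#include <sys/ioctl.h>
#include <sound/asound.h>

int main(void)
{
	struct snd_timer_select sel;
	struct snd_timer_params params;
	struct snd_timer_read ev[16];
	struct pollfd pfd;
	ssize_t n;
	int fd, i;

	fd = open("/dev/snd/timer", O_RDONLY);
	if (fd < 0) {
		perror("open /dev/snd/timer");
		return 1;
	}

	/* Bind this fd to the global system timer (SNDRV_TIMER_IOCTL_SELECT). */
	memset(&sel, 0, sizeof(sel));
	sel.id.dev_class = SNDRV_TIMER_CLASS_GLOBAL;
	sel.id.device = SNDRV_TIMER_GLOBAL_SYSTEM;
	if (ioctl(fd, SNDRV_TIMER_IOCTL_SELECT, &sel) < 0) {
		perror("SNDRV_TIMER_IOCTL_SELECT");
		return 1;
	}

	/* One event per tick, small event queue, then start the timer. */
	memset(&params, 0, sizeof(params));
	params.ticks = 1;
	params.queue_size = 128;
	if (ioctl(fd, SNDRV_TIMER_IOCTL_PARAMS, &params) < 0 ||
	    ioctl(fd, SNDRV_TIMER_IOCTL_START) < 0) {
		perror("SNDRV_TIMER_IOCTL_PARAMS/START");
		return 1;
	}

	/* poll() maps to snd_timer_user_poll(), read() to snd_timer_user_read(). */
	pfd.fd = fd;
	pfd.events = POLLIN;
	while (poll(&pfd, 1, -1) > 0) {
		n = read(fd, ev, sizeof(ev));
		if (n <= 0)
			break;
		for (i = 0; i < (int)(n / sizeof(ev[0])); i++)
			printf("resolution=%u ticks=%u\n",
			       ev[i].resolution, ev[i].ticks);
	}
	close(fd);
	return 0;
}

Because this fd was not switched to "tread" mode, read() returns the older struct snd_timer_read records, matching the unit selection at the top of snd_timer_user_read() above.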
crossvul-cpp_data_bad_5214_0
/* * linux/fs/ioctl.c * * Copyright (C) 1991, 1992 Linus Torvalds */ #include <linux/syscalls.h> #include <linux/mm.h> #include <linux/capability.h> #include <linux/file.h> #include <linux/fs.h> #include <linux/security.h> #include <linux/export.h> #include <linux/uaccess.h> #include <linux/writeback.h> #include <linux/buffer_head.h> #include <linux/falloc.h> #include "internal.h" #include <asm/ioctls.h> /* So that the fiemap access checks can't overflow on 32 bit machines. */ #define FIEMAP_MAX_EXTENTS (UINT_MAX / sizeof(struct fiemap_extent)) /** * vfs_ioctl - call filesystem specific ioctl methods * @filp: open file to invoke ioctl method on * @cmd: ioctl command to execute * @arg: command-specific argument for ioctl * * Invokes filesystem specific ->unlocked_ioctl, if one exists; otherwise * returns -ENOTTY. * * Returns 0 on success, -errno on error. */ long vfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { int error = -ENOTTY; if (!filp->f_op->unlocked_ioctl) goto out; error = filp->f_op->unlocked_ioctl(filp, cmd, arg); if (error == -ENOIOCTLCMD) error = -ENOTTY; out: return error; } static int ioctl_fibmap(struct file *filp, int __user *p) { struct address_space *mapping = filp->f_mapping; int res, block; /* do we support this mess? */ if (!mapping->a_ops->bmap) return -EINVAL; if (!capable(CAP_SYS_RAWIO)) return -EPERM; res = get_user(block, p); if (res) return res; res = mapping->a_ops->bmap(mapping, block); return put_user(res, p); } /** * fiemap_fill_next_extent - Fiemap helper function * @fieinfo: Fiemap context passed into ->fiemap * @logical: Extent logical start offset, in bytes * @phys: Extent physical start offset, in bytes * @len: Extent length, in bytes * @flags: FIEMAP_EXTENT flags that describe this extent * * Called from file system ->fiemap callback. Will populate extent * info as passed in via arguments and copy to user memory. On * success, extent count on fieinfo is incremented. * * Returns 0 on success, -errno on error, 1 if this was the last * extent that will fit in user array. */ #define SET_UNKNOWN_FLAGS (FIEMAP_EXTENT_DELALLOC) #define SET_NO_UNMOUNTED_IO_FLAGS (FIEMAP_EXTENT_DATA_ENCRYPTED) #define SET_NOT_ALIGNED_FLAGS (FIEMAP_EXTENT_DATA_TAIL|FIEMAP_EXTENT_DATA_INLINE) int fiemap_fill_next_extent(struct fiemap_extent_info *fieinfo, u64 logical, u64 phys, u64 len, u32 flags) { struct fiemap_extent extent; struct fiemap_extent __user *dest = fieinfo->fi_extents_start; /* only count the extents */ if (fieinfo->fi_extents_max == 0) { fieinfo->fi_extents_mapped++; return (flags & FIEMAP_EXTENT_LAST) ? 1 : 0; } if (fieinfo->fi_extents_mapped >= fieinfo->fi_extents_max) return 1; if (flags & SET_UNKNOWN_FLAGS) flags |= FIEMAP_EXTENT_UNKNOWN; if (flags & SET_NO_UNMOUNTED_IO_FLAGS) flags |= FIEMAP_EXTENT_ENCODED; if (flags & SET_NOT_ALIGNED_FLAGS) flags |= FIEMAP_EXTENT_NOT_ALIGNED; memset(&extent, 0, sizeof(extent)); extent.fe_logical = logical; extent.fe_physical = phys; extent.fe_length = len; extent.fe_flags = flags; dest += fieinfo->fi_extents_mapped; if (copy_to_user(dest, &extent, sizeof(extent))) return -EFAULT; fieinfo->fi_extents_mapped++; if (fieinfo->fi_extents_mapped == fieinfo->fi_extents_max) return 1; return (flags & FIEMAP_EXTENT_LAST) ? 1 : 0; } EXPORT_SYMBOL(fiemap_fill_next_extent); /** * fiemap_check_flags - check validity of requested flags for fiemap * @fieinfo: Fiemap context passed into ->fiemap * @fs_flags: Set of fiemap flags that the file system understands * * Called from file system ->fiemap callback. 
This will compute the * intersection of valid fiemap flags and those that the fs supports. That * value is then compared against the user supplied flags. In case of bad user * flags, the invalid values will be written into the fieinfo structure, and * -EBADR is returned, which tells ioctl_fiemap() to return those values to * userspace. For this reason, a return code of -EBADR should be preserved. * * Returns 0 on success, -EBADR on bad flags. */ int fiemap_check_flags(struct fiemap_extent_info *fieinfo, u32 fs_flags) { u32 incompat_flags; incompat_flags = fieinfo->fi_flags & ~(FIEMAP_FLAGS_COMPAT & fs_flags); if (incompat_flags) { fieinfo->fi_flags = incompat_flags; return -EBADR; } return 0; } EXPORT_SYMBOL(fiemap_check_flags); static int fiemap_check_ranges(struct super_block *sb, u64 start, u64 len, u64 *new_len) { u64 maxbytes = (u64) sb->s_maxbytes; *new_len = len; if (len == 0) return -EINVAL; if (start > maxbytes) return -EFBIG; /* * Shrink request scope to what the fs can actually handle. */ if (len > maxbytes || (maxbytes - len) < start) *new_len = maxbytes - start; return 0; } static int ioctl_fiemap(struct file *filp, unsigned long arg) { struct fiemap fiemap; struct fiemap __user *ufiemap = (struct fiemap __user *) arg; struct fiemap_extent_info fieinfo = { 0, }; struct inode *inode = file_inode(filp); struct super_block *sb = inode->i_sb; u64 len; int error; if (!inode->i_op->fiemap) return -EOPNOTSUPP; if (copy_from_user(&fiemap, ufiemap, sizeof(fiemap))) return -EFAULT; if (fiemap.fm_extent_count > FIEMAP_MAX_EXTENTS) return -EINVAL; error = fiemap_check_ranges(sb, fiemap.fm_start, fiemap.fm_length, &len); if (error) return error; fieinfo.fi_flags = fiemap.fm_flags; fieinfo.fi_extents_max = fiemap.fm_extent_count; fieinfo.fi_extents_start = ufiemap->fm_extents; if (fiemap.fm_extent_count != 0 && !access_ok(VERIFY_WRITE, fieinfo.fi_extents_start, fieinfo.fi_extents_max * sizeof(struct fiemap_extent))) return -EFAULT; if (fieinfo.fi_flags & FIEMAP_FLAG_SYNC) filemap_write_and_wait(inode->i_mapping); error = inode->i_op->fiemap(inode, &fieinfo, fiemap.fm_start, len); fiemap.fm_flags = fieinfo.fi_flags; fiemap.fm_mapped_extents = fieinfo.fi_extents_mapped; if (copy_to_user(ufiemap, &fiemap, sizeof(fiemap))) error = -EFAULT; return error; } static long ioctl_file_clone(struct file *dst_file, unsigned long srcfd, u64 off, u64 olen, u64 destoff) { struct fd src_file = fdget(srcfd); int ret; if (!src_file.file) return -EBADF; ret = vfs_clone_file_range(src_file.file, off, dst_file, destoff, olen); fdput(src_file); return ret; } static long ioctl_file_clone_range(struct file *file, void __user *argp) { struct file_clone_range args; if (copy_from_user(&args, argp, sizeof(args))) return -EFAULT; return ioctl_file_clone(file, args.src_fd, args.src_offset, args.src_length, args.dest_offset); } #ifdef CONFIG_BLOCK static inline sector_t logical_to_blk(struct inode *inode, loff_t offset) { return (offset >> inode->i_blkbits); } static inline loff_t blk_to_logical(struct inode *inode, sector_t blk) { return (blk << inode->i_blkbits); } /** * __generic_block_fiemap - FIEMAP for block based inodes (no locking) * @inode: the inode to map * @fieinfo: the fiemap info struct that will be passed back to userspace * @start: where to start mapping in the inode * @len: how much space to map * @get_block: the fs's get_block function * * This does FIEMAP for block based inodes. 
Basically it will just loop * through get_block until we hit the number of extents we want to map, or we * go past the end of the file and hit a hole. * * If it is possible to have data blocks beyond a hole past @inode->i_size, then * please do not use this function, it will stop at the first unmapped block * beyond i_size. * * If you use this function directly, you need to do your own locking. Use * generic_block_fiemap if you want the locking done for you. */ int __generic_block_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, loff_t start, loff_t len, get_block_t *get_block) { struct buffer_head map_bh; sector_t start_blk, last_blk; loff_t isize = i_size_read(inode); u64 logical = 0, phys = 0, size = 0; u32 flags = FIEMAP_EXTENT_MERGED; bool past_eof = false, whole_file = false; int ret = 0; ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC); if (ret) return ret; /* * Either the i_mutex or other appropriate locking needs to be held * since we expect isize to not change at all through the duration of * this call. */ if (len >= isize) { whole_file = true; len = isize; } /* * Some filesystems can't deal with being asked to map less than * blocksize, so make sure our len is at least block length. */ if (logical_to_blk(inode, len) == 0) len = blk_to_logical(inode, 1); start_blk = logical_to_blk(inode, start); last_blk = logical_to_blk(inode, start + len - 1); do { /* * we set b_size to the total size we want so it will map as * many contiguous blocks as possible at once */ memset(&map_bh, 0, sizeof(struct buffer_head)); map_bh.b_size = len; ret = get_block(inode, start_blk, &map_bh, 0); if (ret) break; /* HOLE */ if (!buffer_mapped(&map_bh)) { start_blk++; /* * We want to handle the case where there is an * allocated block at the front of the file, and then * nothing but holes up to the end of the file properly, * to make sure that extent at the front gets properly * marked with FIEMAP_EXTENT_LAST */ if (!past_eof && blk_to_logical(inode, start_blk) >= isize) past_eof = 1; /* * First hole after going past the EOF, this is our * last extent */ if (past_eof && size) { flags = FIEMAP_EXTENT_MERGED|FIEMAP_EXTENT_LAST; ret = fiemap_fill_next_extent(fieinfo, logical, phys, size, flags); } else if (size) { ret = fiemap_fill_next_extent(fieinfo, logical, phys, size, flags); size = 0; } /* if we have holes up to/past EOF then we're done */ if (start_blk > last_blk || past_eof || ret) break; } else { /* * We have gone over the length of what we wanted to * map, and it wasn't the entire file, so add the extent * we got last time and exit. * * This is for the case where say we want to map all the * way up to the second to the last block in a file, but * the last block is a hole, making the second to last * block FIEMAP_EXTENT_LAST. In this case we want to * see if there is a hole after the second to last block * so we can mark it properly. If we found data after * we exceeded the length we were requesting, then we * are good to go, just add the extent to the fieinfo * and break */ if (start_blk > last_blk && !whole_file) { ret = fiemap_fill_next_extent(fieinfo, logical, phys, size, flags); break; } /* * if size != 0 then we know we already have an extent * to add, so add it. 
*/ if (size) { ret = fiemap_fill_next_extent(fieinfo, logical, phys, size, flags); if (ret) break; } logical = blk_to_logical(inode, start_blk); phys = blk_to_logical(inode, map_bh.b_blocknr); size = map_bh.b_size; flags = FIEMAP_EXTENT_MERGED; start_blk += logical_to_blk(inode, size); /* * If we are past the EOF, then we need to make sure as * soon as we find a hole that the last extent we found * is marked with FIEMAP_EXTENT_LAST */ if (!past_eof && logical + size >= isize) past_eof = true; } cond_resched(); if (fatal_signal_pending(current)) { ret = -EINTR; break; } } while (1); /* If ret is 1 then we just hit the end of the extent array */ if (ret == 1) ret = 0; return ret; } EXPORT_SYMBOL(__generic_block_fiemap); /** * generic_block_fiemap - FIEMAP for block based inodes * @inode: The inode to map * @fieinfo: The mapping information * @start: The initial block to map * @len: The length of the extect to attempt to map * @get_block: The block mapping function for the fs * * Calls __generic_block_fiemap to map the inode, after taking * the inode's mutex lock. */ int generic_block_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, u64 start, u64 len, get_block_t *get_block) { int ret; inode_lock(inode); ret = __generic_block_fiemap(inode, fieinfo, start, len, get_block); inode_unlock(inode); return ret; } EXPORT_SYMBOL(generic_block_fiemap); #endif /* CONFIG_BLOCK */ /* * This provides compatibility with legacy XFS pre-allocation ioctls * which predate the fallocate syscall. * * Only the l_start, l_len and l_whence fields of the 'struct space_resv' * are used here, rest are ignored. */ int ioctl_preallocate(struct file *filp, void __user *argp) { struct inode *inode = file_inode(filp); struct space_resv sr; if (copy_from_user(&sr, argp, sizeof(sr))) return -EFAULT; switch (sr.l_whence) { case SEEK_SET: break; case SEEK_CUR: sr.l_start += filp->f_pos; break; case SEEK_END: sr.l_start += i_size_read(inode); break; default: return -EINVAL; } return vfs_fallocate(filp, FALLOC_FL_KEEP_SIZE, sr.l_start, sr.l_len); } static int file_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct inode *inode = file_inode(filp); int __user *p = (int __user *)arg; switch (cmd) { case FIBMAP: return ioctl_fibmap(filp, p); case FIONREAD: return put_user(i_size_read(inode) - filp->f_pos, p); case FS_IOC_RESVSP: case FS_IOC_RESVSP64: return ioctl_preallocate(filp, p); } return vfs_ioctl(filp, cmd, arg); } static int ioctl_fionbio(struct file *filp, int __user *argp) { unsigned int flag; int on, error; error = get_user(on, argp); if (error) return error; flag = O_NONBLOCK; #ifdef __sparc__ /* SunOS compatibility item. */ if (O_NONBLOCK != O_NDELAY) flag |= O_NDELAY; #endif spin_lock(&filp->f_lock); if (on) filp->f_flags |= flag; else filp->f_flags &= ~flag; spin_unlock(&filp->f_lock); return error; } static int ioctl_fioasync(unsigned int fd, struct file *filp, int __user *argp) { unsigned int flag; int on, error; error = get_user(on, argp); if (error) return error; flag = on ? FASYNC : 0; /* Did FASYNC state change ? */ if ((flag ^ filp->f_flags) & FASYNC) { if (filp->f_op->fasync) /* fasync() adjusts filp->f_flags */ error = filp->f_op->fasync(fd, filp, on); else error = -ENOTTY; } return error < 0 ? error : 0; } static int ioctl_fsfreeze(struct file *filp) { struct super_block *sb = file_inode(filp)->i_sb; if (!capable(CAP_SYS_ADMIN)) return -EPERM; /* If filesystem doesn't support freeze feature, return. 
*/ if (sb->s_op->freeze_fs == NULL && sb->s_op->freeze_super == NULL) return -EOPNOTSUPP; /* Freeze */ if (sb->s_op->freeze_super) return sb->s_op->freeze_super(sb); return freeze_super(sb); } static int ioctl_fsthaw(struct file *filp) { struct super_block *sb = file_inode(filp)->i_sb; if (!capable(CAP_SYS_ADMIN)) return -EPERM; /* Thaw */ if (sb->s_op->thaw_super) return sb->s_op->thaw_super(sb); return thaw_super(sb); } static long ioctl_file_dedupe_range(struct file *file, void __user *arg) { struct file_dedupe_range __user *argp = arg; struct file_dedupe_range *same = NULL; int ret; unsigned long size; u16 count; if (get_user(count, &argp->dest_count)) { ret = -EFAULT; goto out; } size = offsetof(struct file_dedupe_range __user, info[count]); same = memdup_user(argp, size); if (IS_ERR(same)) { ret = PTR_ERR(same); same = NULL; goto out; } ret = vfs_dedupe_file_range(file, same); if (ret) goto out; ret = copy_to_user(argp, same, size); if (ret) ret = -EFAULT; out: kfree(same); return ret; } /* * When you add any new common ioctls to the switches above and below * please update compat_sys_ioctl() too. * * do_vfs_ioctl() is not for drivers and not intended to be EXPORT_SYMBOL()'d. * It's just a simple helper for sys_ioctl and compat_sys_ioctl. */ int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd, unsigned long arg) { int error = 0; int __user *argp = (int __user *)arg; struct inode *inode = file_inode(filp); switch (cmd) { case FIOCLEX: set_close_on_exec(fd, 1); break; case FIONCLEX: set_close_on_exec(fd, 0); break; case FIONBIO: error = ioctl_fionbio(filp, argp); break; case FIOASYNC: error = ioctl_fioasync(fd, filp, argp); break; case FIOQSIZE: if (S_ISDIR(inode->i_mode) || S_ISREG(inode->i_mode) || S_ISLNK(inode->i_mode)) { loff_t res = inode_get_bytes(inode); error = copy_to_user(argp, &res, sizeof(res)) ? -EFAULT : 0; } else error = -ENOTTY; break; case FIFREEZE: error = ioctl_fsfreeze(filp); break; case FITHAW: error = ioctl_fsthaw(filp); break; case FS_IOC_FIEMAP: return ioctl_fiemap(filp, arg); case FIGETBSZ: return put_user(inode->i_sb->s_blocksize, argp); case FICLONE: return ioctl_file_clone(filp, arg, 0, 0, 0); case FICLONERANGE: return ioctl_file_clone_range(filp, argp); case FIDEDUPERANGE: return ioctl_file_dedupe_range(filp, argp); default: if (S_ISREG(inode->i_mode)) error = file_ioctl(filp, cmd, arg); else error = vfs_ioctl(filp, cmd, arg); break; } return error; } SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd, unsigned long, arg) { int error; struct fd f = fdget(fd); if (!f.file) return -EBADF; error = security_file_ioctl(f.file, cmd, arg); if (!error) error = do_vfs_ioctl(f.file, fd, cmd, arg); fdput(f); return error; }
./CrossVul/dataset_final_sorted/CWE-362/c/bad_5214_0
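The fs/ioctl.c code above shows the kernel half of FS_IOC_FIEMAP: ioctl_fiemap() copies the struct fiemap header in, the filesystem reports extents through fiemap_fill_next_extent() directly into the user buffer, and the updated header is copied back. Below is a minimal user-space sketch of the other half of that contract. It is a hedged example; the structure layouts and the ioctl number are assumed to come from the uapi headers <linux/fs.h> and <linux/fiemap.h>.

/*
 * Hedged user-space sketch of the FS_IOC_FIEMAP interface handled by
 * ioctl_fiemap() above: a header plus a flexible array of extents is
 * passed down, and fm_mapped_extents entries come back filled in.
 */
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>
#include <linux/fiemap.h>

int main(int argc, char **argv)
{
	unsigned int max_extents = 32, i;
	struct fiemap *fm;
	int fd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <file>\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Header plus room for max_extents extent records. */
	fm = calloc(1, sizeof(*fm) + max_extents * sizeof(struct fiemap_extent));
	if (!fm)
		return 1;
	fm->fm_start = 0;
	fm->fm_length = FIEMAP_MAX_OFFSET;	/* map the whole file */
	fm->fm_flags = FIEMAP_FLAG_SYNC;	/* flush dirty data first */
	fm->fm_extent_count = max_extents;

	if (ioctl(fd, FS_IOC_FIEMAP, fm) < 0) {
		perror("FS_IOC_FIEMAP");
		return 1;
	}

	for (i = 0; i < fm->fm_mapped_extents; i++) {
		struct fiemap_extent *fe = &fm->fm_extents[i];

		printf("logical=%llu physical=%llu len=%llu flags=0x%x\n",
		       (unsigned long long)fe->fe_logical,
		       (unsigned long long)fe->fe_physical,
		       (unsigned long long)fe->fe_length,
		       fe->fe_flags);
	}
	free(fm);
	close(fd);
	return 0;
}

Setting fm_extent_count to 0, as the kernel code above allows, only counts extents without copying any back, which is a cheap way to size the buffer before a second call.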
crossvul-cpp_data_bad_5572_1
/* * linux/kernel/ptrace.c * * (C) Copyright 1999 Linus Torvalds * * Common interfaces for "ptrace()" which we do not want * to continually duplicate across every architecture. */ #include <linux/capability.h> #include <linux/export.h> #include <linux/sched.h> #include <linux/errno.h> #include <linux/mm.h> #include <linux/highmem.h> #include <linux/pagemap.h> #include <linux/ptrace.h> #include <linux/security.h> #include <linux/signal.h> #include <linux/audit.h> #include <linux/pid_namespace.h> #include <linux/syscalls.h> #include <linux/uaccess.h> #include <linux/regset.h> #include <linux/hw_breakpoint.h> #include <linux/cn_proc.h> static int ptrace_trapping_sleep_fn(void *flags) { schedule(); return 0; } /* * ptrace a task: make the debugger its new parent and * move it to the ptrace list. * * Must be called with the tasklist lock write-held. */ void __ptrace_link(struct task_struct *child, struct task_struct *new_parent) { BUG_ON(!list_empty(&child->ptrace_entry)); list_add(&child->ptrace_entry, &new_parent->ptraced); child->parent = new_parent; } /** * __ptrace_unlink - unlink ptracee and restore its execution state * @child: ptracee to be unlinked * * Remove @child from the ptrace list, move it back to the original parent, * and restore the execution state so that it conforms to the group stop * state. * * Unlinking can happen via two paths - explicit PTRACE_DETACH or ptracer * exiting. For PTRACE_DETACH, unless the ptracee has been killed between * ptrace_check_attach() and here, it's guaranteed to be in TASK_TRACED. * If the ptracer is exiting, the ptracee can be in any state. * * After detach, the ptracee should be in a state which conforms to the * group stop. If the group is stopped or in the process of stopping, the * ptracee should be put into TASK_STOPPED; otherwise, it should be woken * up from TASK_TRACED. * * If the ptracee is in TASK_TRACED and needs to be moved to TASK_STOPPED, * it goes through TRACED -> RUNNING -> STOPPED transition which is similar * to but in the opposite direction of what happens while attaching to a * stopped task. However, in this direction, the intermediate RUNNING * state is not hidden even from the current ptracer and if it immediately * re-attaches and performs a WNOHANG wait(2), it may fail. * * CONTEXT: * write_lock_irq(tasklist_lock) */ void __ptrace_unlink(struct task_struct *child) { BUG_ON(!child->ptrace); child->ptrace = 0; child->parent = child->real_parent; list_del_init(&child->ptrace_entry); spin_lock(&child->sighand->siglock); /* * Clear all pending traps and TRAPPING. TRAPPING should be * cleared regardless of JOBCTL_STOP_PENDING. Do it explicitly. */ task_clear_jobctl_pending(child, JOBCTL_TRAP_MASK); task_clear_jobctl_trapping(child); /* * Reinstate JOBCTL_STOP_PENDING if group stop is in effect and * @child isn't dead. */ if (!(child->flags & PF_EXITING) && (child->signal->flags & SIGNAL_STOP_STOPPED || child->signal->group_stop_count)) { child->jobctl |= JOBCTL_STOP_PENDING; /* * This is only possible if this thread was cloned by the * traced task running in the stopped group, set the signal * for the future reports. * FIXME: we should change ptrace_init_task() to handle this * case. */ if (!(child->jobctl & JOBCTL_STOP_SIGMASK)) child->jobctl |= SIGSTOP; } /* * If transition to TASK_STOPPED is pending or in TASK_TRACED, kick * @child in the butt. Note that @resume should be used iff @child * is in TASK_TRACED; otherwise, we might unduly disrupt * TASK_KILLABLE sleeps. 
*/ if (child->jobctl & JOBCTL_STOP_PENDING || task_is_traced(child)) ptrace_signal_wake_up(child, true); spin_unlock(&child->sighand->siglock); } /** * ptrace_check_attach - check whether ptracee is ready for ptrace operation * @child: ptracee to check for * @ignore_state: don't check whether @child is currently %TASK_TRACED * * Check whether @child is being ptraced by %current and ready for further * ptrace operations. If @ignore_state is %false, @child also should be in * %TASK_TRACED state and on return the child is guaranteed to be traced * and not executing. If @ignore_state is %true, @child can be in any * state. * * CONTEXT: * Grabs and releases tasklist_lock and @child->sighand->siglock. * * RETURNS: * 0 on success, -ESRCH if %child is not ready. */ static int ptrace_check_attach(struct task_struct *child, bool ignore_state) { int ret = -ESRCH; /* * We take the read lock around doing both checks to close a * possible race where someone else was tracing our child and * detached between these two checks. After this locked check, * we are sure that this is our traced child and that can only * be changed by us so it's not changing right after this. */ read_lock(&tasklist_lock); if ((child->ptrace & PT_PTRACED) && child->parent == current) { /* * child->sighand can't be NULL, release_task() * does ptrace_unlink() before __exit_signal(). */ spin_lock_irq(&child->sighand->siglock); WARN_ON_ONCE(task_is_stopped(child)); if (ignore_state || (task_is_traced(child) && !(child->jobctl & JOBCTL_LISTENING))) ret = 0; spin_unlock_irq(&child->sighand->siglock); } read_unlock(&tasklist_lock); if (!ret && !ignore_state) ret = wait_task_inactive(child, TASK_TRACED) ? 0 : -ESRCH; /* All systems go.. */ return ret; } static int ptrace_has_cap(struct user_namespace *ns, unsigned int mode) { if (mode & PTRACE_MODE_NOAUDIT) return has_ns_capability_noaudit(current, ns, CAP_SYS_PTRACE); else return has_ns_capability(current, ns, CAP_SYS_PTRACE); } /* Returns 0 on success, -errno on denial. */ static int __ptrace_may_access(struct task_struct *task, unsigned int mode) { const struct cred *cred = current_cred(), *tcred; /* May we inspect the given task? * This check is used both for attaching with ptrace * and for allowing access to sensitive information in /proc. * * ptrace_attach denies several cases that /proc allows * because setting up the necessary parent/child relationship * or halting the specified task is impossible. 
*/ int dumpable = 0; /* Don't let security modules deny introspection */ if (task == current) return 0; rcu_read_lock(); tcred = __task_cred(task); if (uid_eq(cred->uid, tcred->euid) && uid_eq(cred->uid, tcred->suid) && uid_eq(cred->uid, tcred->uid) && gid_eq(cred->gid, tcred->egid) && gid_eq(cred->gid, tcred->sgid) && gid_eq(cred->gid, tcred->gid)) goto ok; if (ptrace_has_cap(tcred->user_ns, mode)) goto ok; rcu_read_unlock(); return -EPERM; ok: rcu_read_unlock(); smp_rmb(); if (task->mm) dumpable = get_dumpable(task->mm); rcu_read_lock(); if (!dumpable && !ptrace_has_cap(__task_cred(task)->user_ns, mode)) { rcu_read_unlock(); return -EPERM; } rcu_read_unlock(); return security_ptrace_access_check(task, mode); } bool ptrace_may_access(struct task_struct *task, unsigned int mode) { int err; task_lock(task); err = __ptrace_may_access(task, mode); task_unlock(task); return !err; } static int ptrace_attach(struct task_struct *task, long request, unsigned long addr, unsigned long flags) { bool seize = (request == PTRACE_SEIZE); int retval; retval = -EIO; if (seize) { if (addr != 0) goto out; if (flags & ~(unsigned long)PTRACE_O_MASK) goto out; flags = PT_PTRACED | PT_SEIZED | (flags << PT_OPT_FLAG_SHIFT); } else { flags = PT_PTRACED; } audit_ptrace(task); retval = -EPERM; if (unlikely(task->flags & PF_KTHREAD)) goto out; if (same_thread_group(task, current)) goto out; /* * Protect exec's credential calculations against our interference; * SUID, SGID and LSM creds get determined differently * under ptrace. */ retval = -ERESTARTNOINTR; if (mutex_lock_interruptible(&task->signal->cred_guard_mutex)) goto out; task_lock(task); retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH); task_unlock(task); if (retval) goto unlock_creds; write_lock_irq(&tasklist_lock); retval = -EPERM; if (unlikely(task->exit_state)) goto unlock_tasklist; if (task->ptrace) goto unlock_tasklist; if (seize) flags |= PT_SEIZED; rcu_read_lock(); if (ns_capable(__task_cred(task)->user_ns, CAP_SYS_PTRACE)) flags |= PT_PTRACE_CAP; rcu_read_unlock(); task->ptrace = flags; __ptrace_link(task, current); /* SEIZE doesn't trap tracee on attach */ if (!seize) send_sig_info(SIGSTOP, SEND_SIG_FORCED, task); spin_lock(&task->sighand->siglock); /* * If the task is already STOPPED, set JOBCTL_TRAP_STOP and * TRAPPING, and kick it so that it transits to TRACED. TRAPPING * will be cleared if the child completes the transition or any * event which clears the group stop states happens. We'll wait * for the transition to complete before returning from this * function. * * This hides STOPPED -> RUNNING -> TRACED transition from the * attaching thread but a different thread in the same group can * still observe the transient RUNNING state. IOW, if another * thread's WNOHANG wait(2) on the stopped tracee races against * ATTACH, the wait(2) may fail due to the transient RUNNING. * * The following task_is_stopped() test is safe as both transitions * in and out of STOPPED are protected by siglock. 
*/ if (task_is_stopped(task) && task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING)) signal_wake_up_state(task, __TASK_STOPPED); spin_unlock(&task->sighand->siglock); retval = 0; unlock_tasklist: write_unlock_irq(&tasklist_lock); unlock_creds: mutex_unlock(&task->signal->cred_guard_mutex); out: if (!retval) { wait_on_bit(&task->jobctl, JOBCTL_TRAPPING_BIT, ptrace_trapping_sleep_fn, TASK_UNINTERRUPTIBLE); proc_ptrace_connector(task, PTRACE_ATTACH); } return retval; } /** * ptrace_traceme -- helper for PTRACE_TRACEME * * Performs checks and sets PT_PTRACED. * Should be used by all ptrace implementations for PTRACE_TRACEME. */ static int ptrace_traceme(void) { int ret = -EPERM; write_lock_irq(&tasklist_lock); /* Are we already being traced? */ if (!current->ptrace) { ret = security_ptrace_traceme(current->parent); /* * Check PF_EXITING to ensure ->real_parent has not passed * exit_ptrace(). Otherwise we don't report the error but * pretend ->real_parent untraces us right after return. */ if (!ret && !(current->real_parent->flags & PF_EXITING)) { current->ptrace = PT_PTRACED; __ptrace_link(current, current->real_parent); } } write_unlock_irq(&tasklist_lock); return ret; } /* * Called with irqs disabled, returns true if childs should reap themselves. */ static int ignoring_children(struct sighand_struct *sigh) { int ret; spin_lock(&sigh->siglock); ret = (sigh->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) || (sigh->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT); spin_unlock(&sigh->siglock); return ret; } /* * Called with tasklist_lock held for writing. * Unlink a traced task, and clean it up if it was a traced zombie. * Return true if it needs to be reaped with release_task(). * (We can't call release_task() here because we already hold tasklist_lock.) * * If it's a zombie, our attachedness prevented normal parent notification * or self-reaping. Do notification now if it would have happened earlier. * If it should reap itself, return true. * * If it's our own child, there is no notification to do. But if our normal * children self-reap, then this child was prevented by ptrace and we must * reap it now, in that case we must also wake up sub-threads sleeping in * do_wait(). */ static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p) { bool dead; __ptrace_unlink(p); if (p->exit_state != EXIT_ZOMBIE) return false; dead = !thread_group_leader(p); if (!dead && thread_group_empty(p)) { if (!same_thread_group(p->real_parent, tracer)) dead = do_notify_parent(p, p->exit_signal); else if (ignoring_children(tracer->sighand)) { __wake_up_parent(p, tracer); dead = true; } } /* Mark it as in the process of being reaped. */ if (dead) p->exit_state = EXIT_DEAD; return dead; } static int ptrace_detach(struct task_struct *child, unsigned int data) { bool dead = false; if (!valid_signal(data)) return -EIO; /* Architecture-specific hardware disable .. */ ptrace_disable(child); clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); write_lock_irq(&tasklist_lock); /* * This child can be already killed. Make sure de_thread() or * our sub-thread doing do_wait() didn't do release_task() yet. */ if (child->ptrace) { child->exit_code = data; dead = __ptrace_detach(current, child); } write_unlock_irq(&tasklist_lock); proc_ptrace_connector(child, PTRACE_DETACH); if (unlikely(dead)) release_task(child); return 0; } /* * Detach all tasks we were using ptrace on. Called with tasklist held * for writing, and returns with it held too. But note it can release * and reacquire the lock. 
*/ void exit_ptrace(struct task_struct *tracer) __releases(&tasklist_lock) __acquires(&tasklist_lock) { struct task_struct *p, *n; LIST_HEAD(ptrace_dead); if (likely(list_empty(&tracer->ptraced))) return; list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) { if (unlikely(p->ptrace & PT_EXITKILL)) send_sig_info(SIGKILL, SEND_SIG_FORCED, p); if (__ptrace_detach(tracer, p)) list_add(&p->ptrace_entry, &ptrace_dead); } write_unlock_irq(&tasklist_lock); BUG_ON(!list_empty(&tracer->ptraced)); list_for_each_entry_safe(p, n, &ptrace_dead, ptrace_entry) { list_del_init(&p->ptrace_entry); release_task(p); } write_lock_irq(&tasklist_lock); } int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len) { int copied = 0; while (len > 0) { char buf[128]; int this_len, retval; this_len = (len > sizeof(buf)) ? sizeof(buf) : len; retval = access_process_vm(tsk, src, buf, this_len, 0); if (!retval) { if (copied) break; return -EIO; } if (copy_to_user(dst, buf, retval)) return -EFAULT; copied += retval; src += retval; dst += retval; len -= retval; } return copied; } int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len) { int copied = 0; while (len > 0) { char buf[128]; int this_len, retval; this_len = (len > sizeof(buf)) ? sizeof(buf) : len; if (copy_from_user(buf, src, this_len)) return -EFAULT; retval = access_process_vm(tsk, dst, buf, this_len, 1); if (!retval) { if (copied) break; return -EIO; } copied += retval; src += retval; dst += retval; len -= retval; } return copied; } static int ptrace_setoptions(struct task_struct *child, unsigned long data) { unsigned flags; if (data & ~(unsigned long)PTRACE_O_MASK) return -EINVAL; /* Avoid intermediate state when all opts are cleared */ flags = child->ptrace; flags &= ~(PTRACE_O_MASK << PT_OPT_FLAG_SHIFT); flags |= (data << PT_OPT_FLAG_SHIFT); child->ptrace = flags; return 0; } static int ptrace_getsiginfo(struct task_struct *child, siginfo_t *info) { unsigned long flags; int error = -ESRCH; if (lock_task_sighand(child, &flags)) { error = -EINVAL; if (likely(child->last_siginfo != NULL)) { *info = *child->last_siginfo; error = 0; } unlock_task_sighand(child, &flags); } return error; } static int ptrace_setsiginfo(struct task_struct *child, const siginfo_t *info) { unsigned long flags; int error = -ESRCH; if (lock_task_sighand(child, &flags)) { error = -EINVAL; if (likely(child->last_siginfo != NULL)) { *child->last_siginfo = *info; error = 0; } unlock_task_sighand(child, &flags); } return error; } #ifdef PTRACE_SINGLESTEP #define is_singlestep(request) ((request) == PTRACE_SINGLESTEP) #else #define is_singlestep(request) 0 #endif #ifdef PTRACE_SINGLEBLOCK #define is_singleblock(request) ((request) == PTRACE_SINGLEBLOCK) #else #define is_singleblock(request) 0 #endif #ifdef PTRACE_SYSEMU #define is_sysemu_singlestep(request) ((request) == PTRACE_SYSEMU_SINGLESTEP) #else #define is_sysemu_singlestep(request) 0 #endif static int ptrace_resume(struct task_struct *child, long request, unsigned long data) { if (!valid_signal(data)) return -EIO; if (request == PTRACE_SYSCALL) set_tsk_thread_flag(child, TIF_SYSCALL_TRACE); else clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); #ifdef TIF_SYSCALL_EMU if (request == PTRACE_SYSEMU || request == PTRACE_SYSEMU_SINGLESTEP) set_tsk_thread_flag(child, TIF_SYSCALL_EMU); else clear_tsk_thread_flag(child, TIF_SYSCALL_EMU); #endif if (is_singleblock(request)) { if (unlikely(!arch_has_block_step())) return -EIO; user_enable_block_step(child); } else if 
(is_singlestep(request) || is_sysemu_singlestep(request)) { if (unlikely(!arch_has_single_step())) return -EIO; user_enable_single_step(child); } else { user_disable_single_step(child); } child->exit_code = data; wake_up_state(child, __TASK_TRACED); return 0; } #ifdef CONFIG_HAVE_ARCH_TRACEHOOK static const struct user_regset * find_regset(const struct user_regset_view *view, unsigned int type) { const struct user_regset *regset; int n; for (n = 0; n < view->n; ++n) { regset = view->regsets + n; if (regset->core_note_type == type) return regset; } return NULL; } static int ptrace_regset(struct task_struct *task, int req, unsigned int type, struct iovec *kiov) { const struct user_regset_view *view = task_user_regset_view(task); const struct user_regset *regset = find_regset(view, type); int regset_no; if (!regset || (kiov->iov_len % regset->size) != 0) return -EINVAL; regset_no = regset - view->regsets; kiov->iov_len = min(kiov->iov_len, (__kernel_size_t) (regset->n * regset->size)); if (req == PTRACE_GETREGSET) return copy_regset_to_user(task, view, regset_no, 0, kiov->iov_len, kiov->iov_base); else return copy_regset_from_user(task, view, regset_no, 0, kiov->iov_len, kiov->iov_base); } #endif int ptrace_request(struct task_struct *child, long request, unsigned long addr, unsigned long data) { bool seized = child->ptrace & PT_SEIZED; int ret = -EIO; siginfo_t siginfo, *si; void __user *datavp = (void __user *) data; unsigned long __user *datalp = datavp; unsigned long flags; switch (request) { case PTRACE_PEEKTEXT: case PTRACE_PEEKDATA: return generic_ptrace_peekdata(child, addr, data); case PTRACE_POKETEXT: case PTRACE_POKEDATA: return generic_ptrace_pokedata(child, addr, data); #ifdef PTRACE_OLDSETOPTIONS case PTRACE_OLDSETOPTIONS: #endif case PTRACE_SETOPTIONS: ret = ptrace_setoptions(child, data); break; case PTRACE_GETEVENTMSG: ret = put_user(child->ptrace_message, datalp); break; case PTRACE_GETSIGINFO: ret = ptrace_getsiginfo(child, &siginfo); if (!ret) ret = copy_siginfo_to_user(datavp, &siginfo); break; case PTRACE_SETSIGINFO: if (copy_from_user(&siginfo, datavp, sizeof siginfo)) ret = -EFAULT; else ret = ptrace_setsiginfo(child, &siginfo); break; case PTRACE_INTERRUPT: /* * Stop tracee without any side-effect on signal or job * control. At least one trap is guaranteed to happen * after this request. If @child is already trapped, the * current trap is not disturbed and another trap will * happen after the current trap is ended with PTRACE_CONT. * * The actual trap might not be PTRACE_EVENT_STOP trap but * the pending condition is cleared regardless. */ if (unlikely(!seized || !lock_task_sighand(child, &flags))) break; /* * INTERRUPT doesn't disturb existing trap sans one * exception. If ptracer issued LISTEN for the current * STOP, this INTERRUPT should clear LISTEN and re-trap * tracee into STOP. */ if (likely(task_set_jobctl_pending(child, JOBCTL_TRAP_STOP))) ptrace_signal_wake_up(child, child->jobctl & JOBCTL_LISTENING); unlock_task_sighand(child, &flags); ret = 0; break; case PTRACE_LISTEN: /* * Listen for events. Tracee must be in STOP. It's not * resumed per-se but is not considered to be in TRACED by * wait(2) or ptrace(2). If an async event (e.g. group * stop state change) happens, tracee will enter STOP trap * again. Alternatively, ptracer can issue INTERRUPT to * finish listening and re-trap tracee into STOP. 
*/ if (unlikely(!seized || !lock_task_sighand(child, &flags))) break; si = child->last_siginfo; if (likely(si && (si->si_code >> 8) == PTRACE_EVENT_STOP)) { child->jobctl |= JOBCTL_LISTENING; /* * If NOTIFY is set, it means event happened between * start of this trap and now. Trigger re-trap. */ if (child->jobctl & JOBCTL_TRAP_NOTIFY) ptrace_signal_wake_up(child, true); ret = 0; } unlock_task_sighand(child, &flags); break; case PTRACE_DETACH: /* detach a process that was attached. */ ret = ptrace_detach(child, data); break; #ifdef CONFIG_BINFMT_ELF_FDPIC case PTRACE_GETFDPIC: { struct mm_struct *mm = get_task_mm(child); unsigned long tmp = 0; ret = -ESRCH; if (!mm) break; switch (addr) { case PTRACE_GETFDPIC_EXEC: tmp = mm->context.exec_fdpic_loadmap; break; case PTRACE_GETFDPIC_INTERP: tmp = mm->context.interp_fdpic_loadmap; break; default: break; } mmput(mm); ret = put_user(tmp, datalp); break; } #endif #ifdef PTRACE_SINGLESTEP case PTRACE_SINGLESTEP: #endif #ifdef PTRACE_SINGLEBLOCK case PTRACE_SINGLEBLOCK: #endif #ifdef PTRACE_SYSEMU case PTRACE_SYSEMU: case PTRACE_SYSEMU_SINGLESTEP: #endif case PTRACE_SYSCALL: case PTRACE_CONT: return ptrace_resume(child, request, data); case PTRACE_KILL: if (child->exit_state) /* already dead */ return 0; return ptrace_resume(child, request, SIGKILL); #ifdef CONFIG_HAVE_ARCH_TRACEHOOK case PTRACE_GETREGSET: case PTRACE_SETREGSET: { struct iovec kiov; struct iovec __user *uiov = datavp; if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov))) return -EFAULT; if (__get_user(kiov.iov_base, &uiov->iov_base) || __get_user(kiov.iov_len, &uiov->iov_len)) return -EFAULT; ret = ptrace_regset(child, request, addr, &kiov); if (!ret) ret = __put_user(kiov.iov_len, &uiov->iov_len); break; } #endif default: break; } return ret; } static struct task_struct *ptrace_get_task_struct(pid_t pid) { struct task_struct *child; rcu_read_lock(); child = find_task_by_vpid(pid); if (child) get_task_struct(child); rcu_read_unlock(); if (!child) return ERR_PTR(-ESRCH); return child; } #ifndef arch_ptrace_attach #define arch_ptrace_attach(child) do { } while (0) #endif SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr, unsigned long, data) { struct task_struct *child; long ret; if (request == PTRACE_TRACEME) { ret = ptrace_traceme(); if (!ret) arch_ptrace_attach(current); goto out; } child = ptrace_get_task_struct(pid); if (IS_ERR(child)) { ret = PTR_ERR(child); goto out; } if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) { ret = ptrace_attach(child, request, addr, data); /* * Some architectures need to do book-keeping after * a ptrace attach. */ if (!ret) arch_ptrace_attach(child); goto out_put_task_struct; } ret = ptrace_check_attach(child, request == PTRACE_KILL || request == PTRACE_INTERRUPT); if (ret < 0) goto out_put_task_struct; ret = arch_ptrace(child, request, addr, data); out_put_task_struct: put_task_struct(child); out: return ret; } int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr, unsigned long data) { unsigned long tmp; int copied; copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0); if (copied != sizeof(tmp)) return -EIO; return put_user(tmp, (unsigned long __user *)data); } int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr, unsigned long data) { int copied; copied = access_process_vm(tsk, addr, &data, sizeof(data), 1); return (copied == sizeof(data)) ? 
0 : -EIO; } #if defined CONFIG_COMPAT #include <linux/compat.h> int compat_ptrace_request(struct task_struct *child, compat_long_t request, compat_ulong_t addr, compat_ulong_t data) { compat_ulong_t __user *datap = compat_ptr(data); compat_ulong_t word; siginfo_t siginfo; int ret; switch (request) { case PTRACE_PEEKTEXT: case PTRACE_PEEKDATA: ret = access_process_vm(child, addr, &word, sizeof(word), 0); if (ret != sizeof(word)) ret = -EIO; else ret = put_user(word, datap); break; case PTRACE_POKETEXT: case PTRACE_POKEDATA: ret = access_process_vm(child, addr, &data, sizeof(data), 1); ret = (ret != sizeof(data) ? -EIO : 0); break; case PTRACE_GETEVENTMSG: ret = put_user((compat_ulong_t) child->ptrace_message, datap); break; case PTRACE_GETSIGINFO: ret = ptrace_getsiginfo(child, &siginfo); if (!ret) ret = copy_siginfo_to_user32( (struct compat_siginfo __user *) datap, &siginfo); break; case PTRACE_SETSIGINFO: memset(&siginfo, 0, sizeof siginfo); if (copy_siginfo_from_user32( &siginfo, (struct compat_siginfo __user *) datap)) ret = -EFAULT; else ret = ptrace_setsiginfo(child, &siginfo); break; #ifdef CONFIG_HAVE_ARCH_TRACEHOOK case PTRACE_GETREGSET: case PTRACE_SETREGSET: { struct iovec kiov; struct compat_iovec __user *uiov = (struct compat_iovec __user *) datap; compat_uptr_t ptr; compat_size_t len; if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov))) return -EFAULT; if (__get_user(ptr, &uiov->iov_base) || __get_user(len, &uiov->iov_len)) return -EFAULT; kiov.iov_base = compat_ptr(ptr); kiov.iov_len = len; ret = ptrace_regset(child, request, addr, &kiov); if (!ret) ret = __put_user(kiov.iov_len, &uiov->iov_len); break; } #endif default: ret = ptrace_request(child, request, addr, data); } return ret; } asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid, compat_long_t addr, compat_long_t data) { struct task_struct *child; long ret; if (request == PTRACE_TRACEME) { ret = ptrace_traceme(); goto out; } child = ptrace_get_task_struct(pid); if (IS_ERR(child)) { ret = PTR_ERR(child); goto out; } if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) { ret = ptrace_attach(child, request, addr, data); /* * Some architectures need to do book-keeping after * a ptrace attach. */ if (!ret) arch_ptrace_attach(child); goto out_put_task_struct; } ret = ptrace_check_attach(child, request == PTRACE_KILL || request == PTRACE_INTERRUPT); if (!ret) ret = compat_arch_ptrace(child, request, addr, data); out_put_task_struct: put_task_struct(child); out: return ret; } #endif /* CONFIG_COMPAT */ #ifdef CONFIG_HAVE_HW_BREAKPOINT int ptrace_get_breakpoints(struct task_struct *tsk) { if (atomic_inc_not_zero(&tsk->ptrace_bp_refcnt)) return 0; return -1; } void ptrace_put_breakpoints(struct task_struct *tsk) { if (atomic_dec_and_test(&tsk->ptrace_bp_refcnt)) flush_ptrace_hw_breakpoint(tsk); } #endif /* CONFIG_HAVE_HW_BREAKPOINT */
./CrossVul/dataset_final_sorted/CWE-362/c/bad_5572_1
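The ptrace core above implements, among others, PTRACE_TRACEME (ptrace_traceme), the traced-and-stopped precondition check (ptrace_check_attach), word reads via generic_ptrace_peekdata(), and PTRACE_DETACH. A short hedged user-space sketch of that sequence follows: the child volunteers for tracing and stops itself, and the parent reads one word out of the child's address space before detaching. The global variable name and the printed value are illustrative only; the ptrace requests themselves are the standard ones dispatched above.

/*
 * Hedged user-space sketch of the request path implemented above:
 * PTRACE_TRACEME -> ptrace_traceme(), the stopped/traced check in
 * ptrace_check_attach(), PTRACE_PEEKDATA -> generic_ptrace_peekdata(),
 * and PTRACE_DETACH -> ptrace_detach().
 */
#include <stdio.h>
#include <unistd.h>
#include <signal.h>
#include <errno.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <sys/types.h>

static volatile long shared_word;	/* illustrative name, lives in the data segment */

int main(void)
{
	pid_t child;
	long word;
	int status;

	child = fork();
	if (child < 0) {
		perror("fork");
		return 1;
	}

	if (child == 0) {
		/* Child: opt in to tracing, set a value for the parent to
		 * read, then stop so the parent can issue data transfers. */
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		shared_word = 0x42;
		raise(SIGSTOP);
		_exit(0);
	}

	/* Parent: wait until the child is in a traced stop, which is the
	 * state ptrace_check_attach() requires for PEEKDATA to succeed. */
	waitpid(child, &status, 0);
	if (!WIFSTOPPED(status)) {
		fprintf(stderr, "child did not stop\n");
		return 1;
	}

	/* fork() duplicated the address space, so &shared_word is the same
	 * address in both processes; the peek reads the child's copy. */
	errno = 0;
	word = ptrace(PTRACE_PEEKDATA, child, (void *)&shared_word, NULL);
	if (word == -1 && errno)
		perror("PTRACE_PEEKDATA");
	else
		printf("child's shared_word = %#lx\n", word);

	/* Detach with signal 0, letting the child run to completion. */
	ptrace(PTRACE_DETACH, child, NULL, NULL);
	waitpid(child, &status, 0);
	return 0;
}

Detaching with a zero signal, as here, corresponds to ptrace_detach(child, 0) above: the pending stop is discarded and the child simply resumes and exits.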
crossvul-cpp_data_bad_4726_0
/* * Kernel-based Virtual Machine driver for Linux * * derived from drivers/kvm/kvm_main.c * * Copyright (C) 2006 Qumranet, Inc. * Copyright (C) 2008 Qumranet, Inc. * Copyright IBM Corporation, 2008 * Copyright 2010 Red Hat, Inc. and/or its affiliates. * * Authors: * Avi Kivity <avi@qumranet.com> * Yaniv Kamay <yaniv@qumranet.com> * Amit Shah <amit.shah@qumranet.com> * Ben-Ami Yassour <benami@il.ibm.com> * * This work is licensed under the terms of the GNU GPL, version 2. See * the COPYING file in the top-level directory. * */ #include <linux/kvm_host.h> #include "irq.h" #include "mmu.h" #include "i8254.h" #include "tss.h" #include "kvm_cache_regs.h" #include "x86.h" #include <linux/clocksource.h> #include <linux/interrupt.h> #include <linux/kvm.h> #include <linux/fs.h> #include <linux/vmalloc.h> #include <linux/module.h> #include <linux/mman.h> #include <linux/highmem.h> #include <linux/iommu.h> #include <linux/intel-iommu.h> #include <linux/cpufreq.h> #include <linux/user-return-notifier.h> #include <linux/srcu.h> #include <linux/slab.h> #include <linux/perf_event.h> #include <linux/uaccess.h> #include <linux/hash.h> #include <trace/events/kvm.h> #define CREATE_TRACE_POINTS #include "trace.h" #include <asm/debugreg.h> #include <asm/msr.h> #include <asm/desc.h> #include <asm/mtrr.h> #include <asm/mce.h> #include <asm/i387.h> #include <asm/xcr.h> #include <asm/pvclock.h> #include <asm/div64.h> #define MAX_IO_MSRS 256 #define CR0_RESERVED_BITS \ (~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \ | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \ | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG)) #define CR4_RESERVED_BITS \ (~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\ | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE \ | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR \ | X86_CR4_OSXSAVE \ | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE)) #define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR) #define KVM_MAX_MCE_BANKS 32 #define KVM_MCE_CAP_SUPPORTED (MCG_CTL_P | MCG_SER_P) /* EFER defaults: * - enable syscall per default because its emulated by KVM * - enable LME and LMA per default on 64 bit KVM */ #ifdef CONFIG_X86_64 static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffafeULL; #else static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffffeULL; #endif #define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU static void update_cr8_intercept(struct kvm_vcpu *vcpu); static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid, struct kvm_cpuid_entry2 __user *entries); struct kvm_x86_ops *kvm_x86_ops; EXPORT_SYMBOL_GPL(kvm_x86_ops); int ignore_msrs = 0; module_param_named(ignore_msrs, ignore_msrs, bool, S_IRUGO | S_IWUSR); #define KVM_NR_SHARED_MSRS 16 struct kvm_shared_msrs_global { int nr; u32 msrs[KVM_NR_SHARED_MSRS]; }; struct kvm_shared_msrs { struct user_return_notifier urn; bool registered; struct kvm_shared_msr_values { u64 host; u64 curr; } values[KVM_NR_SHARED_MSRS]; }; static struct kvm_shared_msrs_global __read_mostly shared_msrs_global; static DEFINE_PER_CPU(struct kvm_shared_msrs, shared_msrs); struct kvm_stats_debugfs_item debugfs_entries[] = { { "pf_fixed", VCPU_STAT(pf_fixed) }, { "pf_guest", VCPU_STAT(pf_guest) }, { "tlb_flush", VCPU_STAT(tlb_flush) }, { "invlpg", VCPU_STAT(invlpg) }, { "exits", VCPU_STAT(exits) }, { "io_exits", VCPU_STAT(io_exits) }, { "mmio_exits", VCPU_STAT(mmio_exits) }, { "signal_exits", VCPU_STAT(signal_exits) }, { "irq_window", 
VCPU_STAT(irq_window_exits) }, { "nmi_window", VCPU_STAT(nmi_window_exits) }, { "halt_exits", VCPU_STAT(halt_exits) }, { "halt_wakeup", VCPU_STAT(halt_wakeup) }, { "hypercalls", VCPU_STAT(hypercalls) }, { "request_irq", VCPU_STAT(request_irq_exits) }, { "irq_exits", VCPU_STAT(irq_exits) }, { "host_state_reload", VCPU_STAT(host_state_reload) }, { "efer_reload", VCPU_STAT(efer_reload) }, { "fpu_reload", VCPU_STAT(fpu_reload) }, { "insn_emulation", VCPU_STAT(insn_emulation) }, { "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) }, { "irq_injections", VCPU_STAT(irq_injections) }, { "nmi_injections", VCPU_STAT(nmi_injections) }, { "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) }, { "mmu_pte_write", VM_STAT(mmu_pte_write) }, { "mmu_pte_updated", VM_STAT(mmu_pte_updated) }, { "mmu_pde_zapped", VM_STAT(mmu_pde_zapped) }, { "mmu_flooded", VM_STAT(mmu_flooded) }, { "mmu_recycled", VM_STAT(mmu_recycled) }, { "mmu_cache_miss", VM_STAT(mmu_cache_miss) }, { "mmu_unsync", VM_STAT(mmu_unsync) }, { "remote_tlb_flush", VM_STAT(remote_tlb_flush) }, { "largepages", VM_STAT(lpages) }, { NULL } }; u64 __read_mostly host_xcr0; static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu) { int i; for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU); i++) vcpu->arch.apf.gfns[i] = ~0; } static void kvm_on_user_return(struct user_return_notifier *urn) { unsigned slot; struct kvm_shared_msrs *locals = container_of(urn, struct kvm_shared_msrs, urn); struct kvm_shared_msr_values *values; for (slot = 0; slot < shared_msrs_global.nr; ++slot) { values = &locals->values[slot]; if (values->host != values->curr) { wrmsrl(shared_msrs_global.msrs[slot], values->host); values->curr = values->host; } } locals->registered = false; user_return_notifier_unregister(urn); } static void shared_msr_update(unsigned slot, u32 msr) { struct kvm_shared_msrs *smsr; u64 value; smsr = &__get_cpu_var(shared_msrs); /* only read, and nobody should modify it at this time, * so don't need lock */ if (slot >= shared_msrs_global.nr) { printk(KERN_ERR "kvm: invalid MSR slot!"); return; } rdmsrl_safe(msr, &value); smsr->values[slot].host = value; smsr->values[slot].curr = value; } void kvm_define_shared_msr(unsigned slot, u32 msr) { if (slot >= shared_msrs_global.nr) shared_msrs_global.nr = slot + 1; shared_msrs_global.msrs[slot] = msr; /* we need ensured the shared_msr_global have been updated */ smp_wmb(); } EXPORT_SYMBOL_GPL(kvm_define_shared_msr); static void kvm_shared_msr_cpu_online(void) { unsigned i; for (i = 0; i < shared_msrs_global.nr; ++i) shared_msr_update(i, shared_msrs_global.msrs[i]); } void kvm_set_shared_msr(unsigned slot, u64 value, u64 mask) { struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs); if (((value ^ smsr->values[slot].curr) & mask) == 0) return; smsr->values[slot].curr = value; wrmsrl(shared_msrs_global.msrs[slot], value); if (!smsr->registered) { smsr->urn.on_user_return = kvm_on_user_return; user_return_notifier_register(&smsr->urn); smsr->registered = true; } } EXPORT_SYMBOL_GPL(kvm_set_shared_msr); static void drop_user_return_notifiers(void *ignore) { struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs); if (smsr->registered) kvm_on_user_return(&smsr->urn); } u64 kvm_get_apic_base(struct kvm_vcpu *vcpu) { if (irqchip_in_kernel(vcpu->kvm)) return vcpu->arch.apic_base; else return vcpu->arch.apic_base; } EXPORT_SYMBOL_GPL(kvm_get_apic_base); void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data) { /* TODO: reserve bits check */ if (irqchip_in_kernel(vcpu->kvm)) kvm_lapic_set_base(vcpu, data); 
else vcpu->arch.apic_base = data; } EXPORT_SYMBOL_GPL(kvm_set_apic_base); #define EXCPT_BENIGN 0 #define EXCPT_CONTRIBUTORY 1 #define EXCPT_PF 2 static int exception_class(int vector) { switch (vector) { case PF_VECTOR: return EXCPT_PF; case DE_VECTOR: case TS_VECTOR: case NP_VECTOR: case SS_VECTOR: case GP_VECTOR: return EXCPT_CONTRIBUTORY; default: break; } return EXCPT_BENIGN; } static void kvm_multiple_exception(struct kvm_vcpu *vcpu, unsigned nr, bool has_error, u32 error_code, bool reinject) { u32 prev_nr; int class1, class2; kvm_make_request(KVM_REQ_EVENT, vcpu); if (!vcpu->arch.exception.pending) { queue: vcpu->arch.exception.pending = true; vcpu->arch.exception.has_error_code = has_error; vcpu->arch.exception.nr = nr; vcpu->arch.exception.error_code = error_code; vcpu->arch.exception.reinject = reinject; return; } /* to check exception */ prev_nr = vcpu->arch.exception.nr; if (prev_nr == DF_VECTOR) { /* triple fault -> shutdown */ kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); return; } class1 = exception_class(prev_nr); class2 = exception_class(nr); if ((class1 == EXCPT_CONTRIBUTORY && class2 == EXCPT_CONTRIBUTORY) || (class1 == EXCPT_PF && class2 != EXCPT_BENIGN)) { /* generate double fault per SDM Table 5-5 */ vcpu->arch.exception.pending = true; vcpu->arch.exception.has_error_code = true; vcpu->arch.exception.nr = DF_VECTOR; vcpu->arch.exception.error_code = 0; } else /* replace previous exception with a new one in a hope that instruction re-execution will regenerate lost exception */ goto queue; } void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr) { kvm_multiple_exception(vcpu, nr, false, 0, false); } EXPORT_SYMBOL_GPL(kvm_queue_exception); void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr) { kvm_multiple_exception(vcpu, nr, false, 0, true); } EXPORT_SYMBOL_GPL(kvm_requeue_exception); void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault) { ++vcpu->stat.pf_guest; vcpu->arch.cr2 = fault->address; kvm_queue_exception_e(vcpu, PF_VECTOR, fault->error_code); } void kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault) { if (mmu_is_nested(vcpu) && !fault->nested_page_fault) vcpu->arch.nested_mmu.inject_page_fault(vcpu, fault); else vcpu->arch.mmu.inject_page_fault(vcpu, fault); } void kvm_inject_nmi(struct kvm_vcpu *vcpu) { kvm_make_request(KVM_REQ_EVENT, vcpu); vcpu->arch.nmi_pending = 1; } EXPORT_SYMBOL_GPL(kvm_inject_nmi); void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code) { kvm_multiple_exception(vcpu, nr, true, error_code, false); } EXPORT_SYMBOL_GPL(kvm_queue_exception_e); void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code) { kvm_multiple_exception(vcpu, nr, true, error_code, true); } EXPORT_SYMBOL_GPL(kvm_requeue_exception_e); /* * Checks if cpl <= required_cpl; if true, return true. Otherwise queue * a #GP and return false. */ bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl) { if (kvm_x86_ops->get_cpl(vcpu) <= required_cpl) return true; kvm_queue_exception_e(vcpu, GP_VECTOR, 0); return false; } EXPORT_SYMBOL_GPL(kvm_require_cpl); /* * This function will be used to read from the physical memory of the currently * running guest. The difference to kvm_read_guest_page is that this function * can read from guest physical or from the guest's guest physical memory. 
*/ int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, gfn_t ngfn, void *data, int offset, int len, u32 access) { gfn_t real_gfn; gpa_t ngpa; ngpa = gfn_to_gpa(ngfn); real_gfn = mmu->translate_gpa(vcpu, ngpa, access); if (real_gfn == UNMAPPED_GVA) return -EFAULT; real_gfn = gpa_to_gfn(real_gfn); return kvm_read_guest_page(vcpu->kvm, real_gfn, data, offset, len); } EXPORT_SYMBOL_GPL(kvm_read_guest_page_mmu); int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset, int len, u32 access) { return kvm_read_guest_page_mmu(vcpu, vcpu->arch.walk_mmu, gfn, data, offset, len, access); } /* * Load the pae pdptrs. Return true is they are all valid. */ int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3) { gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT; unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2; int i; int ret; u64 pdpte[ARRAY_SIZE(mmu->pdptrs)]; ret = kvm_read_guest_page_mmu(vcpu, mmu, pdpt_gfn, pdpte, offset * sizeof(u64), sizeof(pdpte), PFERR_USER_MASK|PFERR_WRITE_MASK); if (ret < 0) { ret = 0; goto out; } for (i = 0; i < ARRAY_SIZE(pdpte); ++i) { if (is_present_gpte(pdpte[i]) && (pdpte[i] & vcpu->arch.mmu.rsvd_bits_mask[0][2])) { ret = 0; goto out; } } ret = 1; memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs)); __set_bit(VCPU_EXREG_PDPTR, (unsigned long *)&vcpu->arch.regs_avail); __set_bit(VCPU_EXREG_PDPTR, (unsigned long *)&vcpu->arch.regs_dirty); out: return ret; } EXPORT_SYMBOL_GPL(load_pdptrs); static bool pdptrs_changed(struct kvm_vcpu *vcpu) { u64 pdpte[ARRAY_SIZE(vcpu->arch.walk_mmu->pdptrs)]; bool changed = true; int offset; gfn_t gfn; int r; if (is_long_mode(vcpu) || !is_pae(vcpu)) return false; if (!test_bit(VCPU_EXREG_PDPTR, (unsigned long *)&vcpu->arch.regs_avail)) return true; gfn = (vcpu->arch.cr3 & ~31u) >> PAGE_SHIFT; offset = (vcpu->arch.cr3 & ~31u) & (PAGE_SIZE - 1); r = kvm_read_nested_guest_page(vcpu, gfn, pdpte, offset, sizeof(pdpte), PFERR_USER_MASK | PFERR_WRITE_MASK); if (r < 0) goto out; changed = memcmp(pdpte, vcpu->arch.walk_mmu->pdptrs, sizeof(pdpte)) != 0; out: return changed; } int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) { unsigned long old_cr0 = kvm_read_cr0(vcpu); unsigned long update_bits = X86_CR0_PG | X86_CR0_WP | X86_CR0_CD | X86_CR0_NW; cr0 |= X86_CR0_ET; #ifdef CONFIG_X86_64 if (cr0 & 0xffffffff00000000UL) return 1; #endif cr0 &= ~CR0_RESERVED_BITS; if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) return 1; if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) return 1; if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) { #ifdef CONFIG_X86_64 if ((vcpu->arch.efer & EFER_LME)) { int cs_db, cs_l; if (!is_pae(vcpu)) return 1; kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l); if (cs_l) return 1; } else #endif if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3)) return 1; } kvm_x86_ops->set_cr0(vcpu, cr0); if ((cr0 ^ old_cr0) & X86_CR0_PG) kvm_clear_async_pf_completion_queue(vcpu); if ((cr0 ^ old_cr0) & update_bits) kvm_mmu_reset_context(vcpu); return 0; } EXPORT_SYMBOL_GPL(kvm_set_cr0); void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw) { (void)kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0eul) | (msw & 0x0f)); } EXPORT_SYMBOL_GPL(kvm_lmsw); int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) { u64 xcr0; /* Only support XCR_XFEATURE_ENABLED_MASK(xcr0) now */ if (index != XCR_XFEATURE_ENABLED_MASK) return 1; xcr0 = xcr; if (kvm_x86_ops->get_cpl(vcpu) != 0) return 1; if (!(xcr0 & XSTATE_FP)) return 1; if ((xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE)) return 
1; if (xcr0 & ~host_xcr0) return 1; vcpu->arch.xcr0 = xcr0; vcpu->guest_xcr0_loaded = 0; return 0; } int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) { if (__kvm_set_xcr(vcpu, index, xcr)) { kvm_inject_gp(vcpu, 0); return 1; } return 0; } EXPORT_SYMBOL_GPL(kvm_set_xcr); static bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu) { struct kvm_cpuid_entry2 *best; best = kvm_find_cpuid_entry(vcpu, 1, 0); return best && (best->ecx & bit(X86_FEATURE_XSAVE)); } static void update_cpuid(struct kvm_vcpu *vcpu) { struct kvm_cpuid_entry2 *best; best = kvm_find_cpuid_entry(vcpu, 1, 0); if (!best) return; /* Update OSXSAVE bit */ if (cpu_has_xsave && best->function == 0x1) { best->ecx &= ~(bit(X86_FEATURE_OSXSAVE)); if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE)) best->ecx |= bit(X86_FEATURE_OSXSAVE); } } int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) { unsigned long old_cr4 = kvm_read_cr4(vcpu); unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE; if (cr4 & CR4_RESERVED_BITS) return 1; if (!guest_cpuid_has_xsave(vcpu) && (cr4 & X86_CR4_OSXSAVE)) return 1; if (is_long_mode(vcpu)) { if (!(cr4 & X86_CR4_PAE)) return 1; } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE) && ((cr4 ^ old_cr4) & pdptr_bits) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3)) return 1; if (cr4 & X86_CR4_VMXE) return 1; kvm_x86_ops->set_cr4(vcpu, cr4); if ((cr4 ^ old_cr4) & pdptr_bits) kvm_mmu_reset_context(vcpu); if ((cr4 ^ old_cr4) & X86_CR4_OSXSAVE) update_cpuid(vcpu); return 0; } EXPORT_SYMBOL_GPL(kvm_set_cr4); int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) { if (cr3 == vcpu->arch.cr3 && !pdptrs_changed(vcpu)) { kvm_mmu_sync_roots(vcpu); kvm_mmu_flush_tlb(vcpu); return 0; } if (is_long_mode(vcpu)) { if (cr3 & CR3_L_MODE_RESERVED_BITS) return 1; } else { if (is_pae(vcpu)) { if (cr3 & CR3_PAE_RESERVED_BITS) return 1; if (is_paging(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3)) return 1; } /* * We don't check reserved bits in nonpae mode, because * this isn't enforced, and VMware depends on this. */ } /* * Does the new cr3 value map to physical memory? (Note, we * catch an invalid cr3 even in real-mode, because it would * cause trouble later on when we turn on paging anyway.) * * A real CPU would silently accept an invalid cr3 and would * attempt to use it - with largely undefined (and often hard * to debug) behavior on the guest side. */ if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT))) return 1; vcpu->arch.cr3 = cr3; vcpu->arch.mmu.new_cr3(vcpu); return 0; } EXPORT_SYMBOL_GPL(kvm_set_cr3); int __kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8) { if (cr8 & CR8_RESERVED_BITS) return 1; if (irqchip_in_kernel(vcpu->kvm)) kvm_lapic_set_tpr(vcpu, cr8); else vcpu->arch.cr8 = cr8; return 0; } void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8) { if (__kvm_set_cr8(vcpu, cr8)) kvm_inject_gp(vcpu, 0); } EXPORT_SYMBOL_GPL(kvm_set_cr8); unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu) { if (irqchip_in_kernel(vcpu->kvm)) return kvm_lapic_get_cr8(vcpu); else return vcpu->arch.cr8; } EXPORT_SYMBOL_GPL(kvm_get_cr8); static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val) { switch (dr) { case 0 ... 
3: vcpu->arch.db[dr] = val; if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) vcpu->arch.eff_db[dr] = val; break; case 4: if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) return 1; /* #UD */ /* fall through */ case 6: if (val & 0xffffffff00000000ULL) return -1; /* #GP */ vcpu->arch.dr6 = (val & DR6_VOLATILE) | DR6_FIXED_1; break; case 5: if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) return 1; /* #UD */ /* fall through */ default: /* 7 */ if (val & 0xffffffff00000000ULL) return -1; /* #GP */ vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1; if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) { kvm_x86_ops->set_dr7(vcpu, vcpu->arch.dr7); vcpu->arch.switch_db_regs = (val & DR7_BP_EN_MASK); } break; } return 0; } int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val) { int res; res = __kvm_set_dr(vcpu, dr, val); if (res > 0) kvm_queue_exception(vcpu, UD_VECTOR); else if (res < 0) kvm_inject_gp(vcpu, 0); return res; } EXPORT_SYMBOL_GPL(kvm_set_dr); static int _kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val) { switch (dr) { case 0 ... 3: *val = vcpu->arch.db[dr]; break; case 4: if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) return 1; /* fall through */ case 6: *val = vcpu->arch.dr6; break; case 5: if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) return 1; /* fall through */ default: /* 7 */ *val = vcpu->arch.dr7; break; } return 0; } int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val) { if (_kvm_get_dr(vcpu, dr, val)) { kvm_queue_exception(vcpu, UD_VECTOR); return 1; } return 0; } EXPORT_SYMBOL_GPL(kvm_get_dr); /* * List of msr numbers which we expose to userspace through KVM_GET_MSRS * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST. * * This list is modified at module load time to reflect the * capabilities of the host cpu. This capabilities test skips MSRs that are * kvm-specific. Those are put in the beginning of the list. */ #define KVM_SAVE_MSRS_BEGIN 8 static u32 msrs_to_save[] = { MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK, MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW, HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL, HV_X64_MSR_APIC_ASSIST_PAGE, MSR_KVM_ASYNC_PF_EN, MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP, MSR_STAR, #ifdef CONFIG_X86_64 MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR, #endif MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA }; static unsigned num_msrs_to_save; static u32 emulated_msrs[] = { MSR_IA32_MISC_ENABLE, MSR_IA32_MCG_STATUS, MSR_IA32_MCG_CTL, }; static int set_efer(struct kvm_vcpu *vcpu, u64 efer) { u64 old_efer = vcpu->arch.efer; if (efer & efer_reserved_bits) return 1; if (is_paging(vcpu) && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME)) return 1; if (efer & EFER_FFXSR) { struct kvm_cpuid_entry2 *feat; feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0); if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT))) return 1; } if (efer & EFER_SVME) { struct kvm_cpuid_entry2 *feat; feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0); if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM))) return 1; } efer &= ~EFER_LMA; efer |= vcpu->arch.efer & EFER_LMA; kvm_x86_ops->set_efer(vcpu, efer); vcpu->arch.mmu.base_role.nxe = (efer & EFER_NX) && !tdp_enabled; /* Update reserved bits */ if ((efer ^ old_efer) & EFER_NX) kvm_mmu_reset_context(vcpu); return 0; } void kvm_enable_efer_bits(u64 mask) { efer_reserved_bits &= ~mask; } EXPORT_SYMBOL_GPL(kvm_enable_efer_bits); /* * Writes msr value into into the appropriate "register". * Returns 0 on success, non-0 otherwise. * Assumes vcpu_load() was already called. 
*/ int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data) { return kvm_x86_ops->set_msr(vcpu, msr_index, data); } /* * Adapt set_msr() to msr_io()'s calling convention */ static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data) { return kvm_set_msr(vcpu, index, *data); } static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock) { int version; int r; struct pvclock_wall_clock wc; struct timespec boot; if (!wall_clock) return; r = kvm_read_guest(kvm, wall_clock, &version, sizeof(version)); if (r) return; if (version & 1) ++version; /* first time write, random junk */ ++version; kvm_write_guest(kvm, wall_clock, &version, sizeof(version)); /* * The guest calculates current wall clock time by adding * system time (updated by kvm_guest_time_update below) to the * wall clock specified here. guest system time equals host * system time for us, thus we must fill in host boot time here. */ getboottime(&boot); wc.sec = boot.tv_sec; wc.nsec = boot.tv_nsec; wc.version = version; kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc)); version++; kvm_write_guest(kvm, wall_clock, &version, sizeof(version)); } static uint32_t div_frac(uint32_t dividend, uint32_t divisor) { uint32_t quotient, remainder; /* Don't try to replace with do_div(), this one calculates * "(dividend << 32) / divisor" */ __asm__ ( "divl %4" : "=a" (quotient), "=d" (remainder) : "0" (0), "1" (dividend), "r" (divisor) ); return quotient; } static void kvm_get_time_scale(uint32_t scaled_khz, uint32_t base_khz, s8 *pshift, u32 *pmultiplier) { uint64_t scaled64; int32_t shift = 0; uint64_t tps64; uint32_t tps32; tps64 = base_khz * 1000LL; scaled64 = scaled_khz * 1000LL; while (tps64 > scaled64*2 || tps64 & 0xffffffff00000000ULL) { tps64 >>= 1; shift--; } tps32 = (uint32_t)tps64; while (tps32 <= scaled64 || scaled64 & 0xffffffff00000000ULL) { if (scaled64 & 0xffffffff00000000ULL || tps32 & 0x80000000) scaled64 >>= 1; else tps32 <<= 1; shift++; } *pshift = shift; *pmultiplier = div_frac(scaled64, tps32); pr_debug("%s: base_khz %u => %u, shift %d, mul %u\n", __func__, base_khz, scaled_khz, shift, *pmultiplier); } static inline u64 get_kernel_ns(void) { struct timespec ts; WARN_ON(preemptible()); ktime_get_ts(&ts); monotonic_to_bootbased(&ts); return timespec_to_ns(&ts); } static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz); unsigned long max_tsc_khz; static inline int kvm_tsc_changes_freq(void) { int cpu = get_cpu(); int ret = !boot_cpu_has(X86_FEATURE_CONSTANT_TSC) && cpufreq_quick_get(cpu) != 0; put_cpu(); return ret; } static inline u64 nsec_to_cycles(u64 nsec) { u64 ret; WARN_ON(preemptible()); if (kvm_tsc_changes_freq()) printk_once(KERN_WARNING "kvm: unreliable cycle conversion on adjustable rate TSC\n"); ret = nsec * __get_cpu_var(cpu_tsc_khz); do_div(ret, USEC_PER_SEC); return ret; } static void kvm_arch_set_tsc_khz(struct kvm *kvm, u32 this_tsc_khz) { /* Compute a scale to convert nanoseconds in TSC cycles */ kvm_get_time_scale(this_tsc_khz, NSEC_PER_SEC / 1000, &kvm->arch.virtual_tsc_shift, &kvm->arch.virtual_tsc_mult); kvm->arch.virtual_tsc_khz = this_tsc_khz; } static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns) { u64 tsc = pvclock_scale_delta(kernel_ns-vcpu->arch.last_tsc_nsec, vcpu->kvm->arch.virtual_tsc_mult, vcpu->kvm->arch.virtual_tsc_shift); tsc += vcpu->arch.last_tsc_write; return tsc; } void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data) { struct kvm *kvm = vcpu->kvm; u64 offset, ns, elapsed; unsigned long flags; s64 sdiff; spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags); 
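/*
 * tsc_write_lock serializes guest TSC writes from all VCPUs, so the
 * last_tsc_{nsec,write,offset} bookkeeping below is updated consistently
 * with respect to the matching heuristic that follows.  As a rough,
 * illustrative figure for the 5 second window used by that heuristic:
 * with cpu_tsc_khz around 2,000,000 (a 2 GHz host),
 * nsec_to_cycles(5ULL * NSEC_PER_SEC) comes out near 10^10 cycles.
 */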
offset = data - native_read_tsc(); ns = get_kernel_ns(); elapsed = ns - kvm->arch.last_tsc_nsec; sdiff = data - kvm->arch.last_tsc_write; if (sdiff < 0) sdiff = -sdiff; /* * Special case: close write to TSC within 5 seconds of * another CPU is interpreted as an attempt to synchronize. * The 5 seconds is to accommodate host load / swapping as * well as any reset of TSC during the boot process. * * In that case, for a reliable TSC, we can match TSC offsets, * or make a best guess using elapsed value. */ if (sdiff < nsec_to_cycles(5ULL * NSEC_PER_SEC) && elapsed < 5ULL * NSEC_PER_SEC) { if (!check_tsc_unstable()) { offset = kvm->arch.last_tsc_offset; pr_debug("kvm: matched tsc offset for %llu\n", data); } else { u64 delta = nsec_to_cycles(elapsed); offset += delta; pr_debug("kvm: adjusted tsc offset by %llu\n", delta); } ns = kvm->arch.last_tsc_nsec; } kvm->arch.last_tsc_nsec = ns; kvm->arch.last_tsc_write = data; kvm->arch.last_tsc_offset = offset; kvm_x86_ops->write_tsc_offset(vcpu, offset); spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags); /* Reset of TSC must disable overshoot protection below */ vcpu->arch.hv_clock.tsc_timestamp = 0; vcpu->arch.last_tsc_write = data; vcpu->arch.last_tsc_nsec = ns; } EXPORT_SYMBOL_GPL(kvm_write_tsc); static int kvm_guest_time_update(struct kvm_vcpu *v) { unsigned long flags; struct kvm_vcpu_arch *vcpu = &v->arch; void *shared_kaddr; unsigned long this_tsc_khz; s64 kernel_ns, max_kernel_ns; u64 tsc_timestamp; /* Keep irq disabled to prevent changes to the clock */ local_irq_save(flags); kvm_get_msr(v, MSR_IA32_TSC, &tsc_timestamp); kernel_ns = get_kernel_ns(); this_tsc_khz = __get_cpu_var(cpu_tsc_khz); if (unlikely(this_tsc_khz == 0)) { local_irq_restore(flags); kvm_make_request(KVM_REQ_CLOCK_UPDATE, v); return 1; } /* * We may have to catch up the TSC to match elapsed wall clock * time for two reasons, even if kvmclock is used. * 1) CPU could have been running below the maximum TSC rate * 2) Broken TSC compensation resets the base at each VCPU * entry to avoid unknown leaps of TSC even when running * again on the same CPU. This may cause apparent elapsed * time to disappear, and the guest to stand still or run * very slowly. */ if (vcpu->tsc_catchup) { u64 tsc = compute_guest_tsc(v, kernel_ns); if (tsc > tsc_timestamp) { kvm_x86_ops->adjust_tsc_offset(v, tsc - tsc_timestamp); tsc_timestamp = tsc; } } local_irq_restore(flags); if (!vcpu->time_page) return 0; /* * Time as measured by the TSC may go backwards when resetting the base * tsc_timestamp. The reason for this is that the TSC resolution is * higher than the resolution of the other clock scales. Thus, many * possible measurements of the TSC correspond to one measurement of any * other clock, and so a spread of values is possible. This is not a * problem for the computation of the nanosecond clock; with TSC rates * around 1GHZ, there can only be a few cycles which correspond to one * nanosecond value, and any path through this code will inevitably * take longer than that. However, with the kernel_ns value itself, * the precision may be much lower, down to HZ granularity. If the * first sampling of TSC against kernel_ns ends in the low part of the * range, and the second in the high end of the range, we can get: * * (TSC - offset_low) * S + kns_old > (TSC - offset_high) * S + kns_new * * As the sampling errors potentially range in the thousands of cycles, * it is possible such a time value has already been observed by the * guest.
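 * (As an illustration, with numbers not taken from the code: at a
 * roughly 1 GHz TSC, an error of a few thousand cycles is only a few
 * microseconds, so if the first (TSC, kernel_ns) pair happens to be
 * sampled late in a kernel_ns granule and the second pair early in the
 * next one, the recomputed system time can land behind a value the
 * guest has already read.)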
To protect against this, we must compute the system time as * observed by the guest and ensure the new system time is greater. */ max_kernel_ns = 0; if (vcpu->hv_clock.tsc_timestamp && vcpu->last_guest_tsc) { max_kernel_ns = vcpu->last_guest_tsc - vcpu->hv_clock.tsc_timestamp; max_kernel_ns = pvclock_scale_delta(max_kernel_ns, vcpu->hv_clock.tsc_to_system_mul, vcpu->hv_clock.tsc_shift); max_kernel_ns += vcpu->last_kernel_ns; } if (unlikely(vcpu->hw_tsc_khz != this_tsc_khz)) { kvm_get_time_scale(NSEC_PER_SEC / 1000, this_tsc_khz, &vcpu->hv_clock.tsc_shift, &vcpu->hv_clock.tsc_to_system_mul); vcpu->hw_tsc_khz = this_tsc_khz; } if (max_kernel_ns > kernel_ns) kernel_ns = max_kernel_ns; /* With all the info we got, fill in the values */ vcpu->hv_clock.tsc_timestamp = tsc_timestamp; vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset; vcpu->last_kernel_ns = kernel_ns; vcpu->last_guest_tsc = tsc_timestamp; vcpu->hv_clock.flags = 0; /* * The interface expects us to write an even number signaling that the * update is finished. Since the guest won't see the intermediate * state, we just increase by 2 at the end. */ vcpu->hv_clock.version += 2; shared_kaddr = kmap_atomic(vcpu->time_page, KM_USER0); memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock, sizeof(vcpu->hv_clock)); kunmap_atomic(shared_kaddr, KM_USER0); mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT); return 0; } static bool msr_mtrr_valid(unsigned msr) { switch (msr) { case 0x200 ... 0x200 + 2 * KVM_NR_VAR_MTRR - 1: case MSR_MTRRfix64K_00000: case MSR_MTRRfix16K_80000: case MSR_MTRRfix16K_A0000: case MSR_MTRRfix4K_C0000: case MSR_MTRRfix4K_C8000: case MSR_MTRRfix4K_D0000: case MSR_MTRRfix4K_D8000: case MSR_MTRRfix4K_E0000: case MSR_MTRRfix4K_E8000: case MSR_MTRRfix4K_F0000: case MSR_MTRRfix4K_F8000: case MSR_MTRRdefType: case MSR_IA32_CR_PAT: return true; case 0x2f8: return true; } return false; } static bool valid_pat_type(unsigned t) { return t < 8 && (1 << t) & 0xf3; /* 0, 1, 4, 5, 6, 7 */ } static bool valid_mtrr_type(unsigned t) { return t < 8 && (1 << t) & 0x73; /* 0, 1, 4, 5, 6 */ } static bool mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data) { int i; if (!msr_mtrr_valid(msr)) return false; if (msr == MSR_IA32_CR_PAT) { for (i = 0; i < 8; i++) if (!valid_pat_type((data >> (i * 8)) & 0xff)) return false; return true; } else if (msr == MSR_MTRRdefType) { if (data & ~0xcff) return false; return valid_mtrr_type(data & 0xff); } else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) { for (i = 0; i < 8 ; i++) if (!valid_mtrr_type((data >> (i * 8)) & 0xff)) return false; return true; } /* variable MTRRs */ return valid_mtrr_type(data & 0xff); } static int set_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 data) { u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges; if (!mtrr_valid(vcpu, msr, data)) return 1; if (msr == MSR_MTRRdefType) { vcpu->arch.mtrr_state.def_type = data; vcpu->arch.mtrr_state.enabled = (data & 0xc00) >> 10; } else if (msr == MSR_MTRRfix64K_00000) p[0] = data; else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000) p[1 + msr - MSR_MTRRfix16K_80000] = data; else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000) p[3 + msr - MSR_MTRRfix4K_C0000] = data; else if (msr == MSR_IA32_CR_PAT) vcpu->arch.pat = data; else { /* Variable MTRRs */ int idx, is_mtrr_mask; u64 *pt; idx = (msr - 0x200) / 2; is_mtrr_mask = msr - 0x200 - 2 * idx; if (!is_mtrr_mask) pt = (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo; else pt = (u64 
*)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo; *pt = data; } kvm_mmu_reset_context(vcpu); return 0; } static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data) { u64 mcg_cap = vcpu->arch.mcg_cap; unsigned bank_num = mcg_cap & 0xff; switch (msr) { case MSR_IA32_MCG_STATUS: vcpu->arch.mcg_status = data; break; case MSR_IA32_MCG_CTL: if (!(mcg_cap & MCG_CTL_P)) return 1; if (data != 0 && data != ~(u64)0) return -1; vcpu->arch.mcg_ctl = data; break; default: if (msr >= MSR_IA32_MC0_CTL && msr < MSR_IA32_MC0_CTL + 4 * bank_num) { u32 offset = msr - MSR_IA32_MC0_CTL; /* only 0 or all 1s can be written to IA32_MCi_CTL * some Linux kernels though clear bit 10 in bank 4 to * workaround a BIOS/GART TBL issue on AMD K8s, ignore * this to avoid an uncatched #GP in the guest */ if ((offset & 0x3) == 0 && data != 0 && (data | (1 << 10)) != ~(u64)0) return -1; vcpu->arch.mce_banks[offset] = data; break; } return 1; } return 0; } static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data) { struct kvm *kvm = vcpu->kvm; int lm = is_long_mode(vcpu); u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64 : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32; u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64 : kvm->arch.xen_hvm_config.blob_size_32; u32 page_num = data & ~PAGE_MASK; u64 page_addr = data & PAGE_MASK; u8 *page; int r; r = -E2BIG; if (page_num >= blob_size) goto out; r = -ENOMEM; page = kzalloc(PAGE_SIZE, GFP_KERNEL); if (!page) goto out; r = -EFAULT; if (copy_from_user(page, blob_addr + (page_num * PAGE_SIZE), PAGE_SIZE)) goto out_free; if (kvm_write_guest(kvm, page_addr, page, PAGE_SIZE)) goto out_free; r = 0; out_free: kfree(page); out: return r; } static bool kvm_hv_hypercall_enabled(struct kvm *kvm) { return kvm->arch.hv_hypercall & HV_X64_MSR_HYPERCALL_ENABLE; } static bool kvm_hv_msr_partition_wide(u32 msr) { bool r = false; switch (msr) { case HV_X64_MSR_GUEST_OS_ID: case HV_X64_MSR_HYPERCALL: r = true; break; } return r; } static int set_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data) { struct kvm *kvm = vcpu->kvm; switch (msr) { case HV_X64_MSR_GUEST_OS_ID: kvm->arch.hv_guest_os_id = data; /* setting guest os id to zero disables hypercall page */ if (!kvm->arch.hv_guest_os_id) kvm->arch.hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE; break; case HV_X64_MSR_HYPERCALL: { u64 gfn; unsigned long addr; u8 instructions[4]; /* if guest os id is not set hypercall should remain disabled */ if (!kvm->arch.hv_guest_os_id) break; if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) { kvm->arch.hv_hypercall = data; break; } gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT; addr = gfn_to_hva(kvm, gfn); if (kvm_is_error_hva(addr)) return 1; kvm_x86_ops->patch_hypercall(vcpu, instructions); ((unsigned char *)instructions)[3] = 0xc3; /* ret */ if (copy_to_user((void __user *)addr, instructions, 4)) return 1; kvm->arch.hv_hypercall = data; break; } default: pr_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x " "data 0x%llx\n", msr, data); return 1; } return 0; } static int set_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 data) { switch (msr) { case HV_X64_MSR_APIC_ASSIST_PAGE: { unsigned long addr; if (!(data & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE)) { vcpu->arch.hv_vapic = data; break; } addr = gfn_to_hva(vcpu->kvm, data >> HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT); if (kvm_is_error_hva(addr)) return 1; if (clear_user((void __user *)addr, PAGE_SIZE)) return 1; vcpu->arch.hv_vapic = data; break; } case HV_X64_MSR_EOI: return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data); 
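/*
 * HV_X64_MSR_EOI above and HV_X64_MSR_ICR / HV_X64_MSR_TPR below are
 * synthetic Hyper-V MSRs that are simply forwarded to the corresponding
 * local APIC registers (EOI, ICR, TASKPRI) via kvm_hv_vapic_msr_write().
 */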
case HV_X64_MSR_ICR: return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data); case HV_X64_MSR_TPR: return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data); default: pr_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x " "data 0x%llx\n", msr, data); return 1; } return 0; } static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data) { gpa_t gpa = data & ~0x3f; /* Bits 2:5 are resrved, Should be zero */ if (data & 0x3c) return 1; vcpu->arch.apf.msr_val = data; if (!(data & KVM_ASYNC_PF_ENABLED)) { kvm_clear_async_pf_completion_queue(vcpu); kvm_async_pf_hash_reset(vcpu); return 0; } if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa)) return 1; vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS); kvm_async_pf_wakeup_all(vcpu); return 0; } int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data) { switch (msr) { case MSR_EFER: return set_efer(vcpu, data); case MSR_K7_HWCR: data &= ~(u64)0x40; /* ignore flush filter disable */ data &= ~(u64)0x100; /* ignore ignne emulation enable */ if (data != 0) { pr_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n", data); return 1; } break; case MSR_FAM10H_MMIO_CONF_BASE: if (data != 0) { pr_unimpl(vcpu, "unimplemented MMIO_CONF_BASE wrmsr: " "0x%llx\n", data); return 1; } break; case MSR_AMD64_NB_CFG: break; case MSR_IA32_DEBUGCTLMSR: if (!data) { /* We support the non-activated case already */ break; } else if (data & ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_BTF)) { /* Values other than LBR and BTF are vendor-specific, thus reserved and should throw a #GP */ return 1; } pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n", __func__, data); break; case MSR_IA32_UCODE_REV: case MSR_IA32_UCODE_WRITE: case MSR_VM_HSAVE_PA: case MSR_AMD64_PATCH_LOADER: break; case 0x200 ... 0x2ff: return set_msr_mtrr(vcpu, msr, data); case MSR_IA32_APICBASE: kvm_set_apic_base(vcpu, data); break; case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff: return kvm_x2apic_msr_write(vcpu, msr, data); case MSR_IA32_MISC_ENABLE: vcpu->arch.ia32_misc_enable_msr = data; break; case MSR_KVM_WALL_CLOCK_NEW: case MSR_KVM_WALL_CLOCK: vcpu->kvm->arch.wall_clock = data; kvm_write_wall_clock(vcpu->kvm, data); break; case MSR_KVM_SYSTEM_TIME_NEW: case MSR_KVM_SYSTEM_TIME: { if (vcpu->arch.time_page) { kvm_release_page_dirty(vcpu->arch.time_page); vcpu->arch.time_page = NULL; } vcpu->arch.time = data; kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); /* we verify if the enable bit is set... */ if (!(data & 1)) break; /* ...but clean it before doing the actual write */ vcpu->arch.time_offset = data & ~(PAGE_MASK | 1); vcpu->arch.time_page = gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT); if (is_error_page(vcpu->arch.time_page)) { kvm_release_page_clean(vcpu->arch.time_page); vcpu->arch.time_page = NULL; } break; } case MSR_KVM_ASYNC_PF_EN: if (kvm_pv_enable_async_pf(vcpu, data)) return 1; break; case MSR_IA32_MCG_CTL: case MSR_IA32_MCG_STATUS: case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1: return set_msr_mce(vcpu, msr, data); /* Performance counters are not protected by a CPUID bit, * so we should check all of them in the generic path for the sake of * cross vendor migration. * Writing a zero into the event select MSRs disables them, * which we perfectly emulate ;-). Any other value should be at least * reported, some guests depend on them. 
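 * Concretely, the cases below cover both the Intel P6-family
 * (MSR_P6_xxx) and AMD K7-family (MSR_K7_xxx) event-select and counter
 * MSRs: the event-select MSRs accept a write of zero silently and only
 * log any other value, while writes to the counters themselves are
 * always logged and then ignored.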
*/ case MSR_P6_EVNTSEL0: case MSR_P6_EVNTSEL1: case MSR_K7_EVNTSEL0: case MSR_K7_EVNTSEL1: case MSR_K7_EVNTSEL2: case MSR_K7_EVNTSEL3: if (data != 0) pr_unimpl(vcpu, "unimplemented perfctr wrmsr: " "0x%x data 0x%llx\n", msr, data); break; /* at least RHEL 4 unconditionally writes to the perfctr registers, * so we ignore writes to make it happy. */ case MSR_P6_PERFCTR0: case MSR_P6_PERFCTR1: case MSR_K7_PERFCTR0: case MSR_K7_PERFCTR1: case MSR_K7_PERFCTR2: case MSR_K7_PERFCTR3: pr_unimpl(vcpu, "unimplemented perfctr wrmsr: " "0x%x data 0x%llx\n", msr, data); break; case MSR_K7_CLK_CTL: /* * Ignore all writes to this no longer documented MSR. * Writes are only relevant for old K7 processors, * all pre-dating SVM, but a recommended workaround from * AMD for these chips. It is possible to speicify the * affected processor models on the command line, hence * the need to ignore the workaround. */ break; case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15: if (kvm_hv_msr_partition_wide(msr)) { int r; mutex_lock(&vcpu->kvm->lock); r = set_msr_hyperv_pw(vcpu, msr, data); mutex_unlock(&vcpu->kvm->lock); return r; } else return set_msr_hyperv(vcpu, msr, data); break; default: if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr)) return xen_hvm_config(vcpu, data); if (!ignore_msrs) { pr_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n", msr, data); return 1; } else { pr_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n", msr, data); break; } } return 0; } EXPORT_SYMBOL_GPL(kvm_set_msr_common); /* * Reads an msr value (of 'msr_index') into 'pdata'. * Returns 0 on success, non-0 otherwise. * Assumes vcpu_load() was already called. */ int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata) { return kvm_x86_ops->get_msr(vcpu, msr_index, pdata); } static int get_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) { u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges; if (!msr_mtrr_valid(msr)) return 1; if (msr == MSR_MTRRdefType) *pdata = vcpu->arch.mtrr_state.def_type + (vcpu->arch.mtrr_state.enabled << 10); else if (msr == MSR_MTRRfix64K_00000) *pdata = p[0]; else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000) *pdata = p[1 + msr - MSR_MTRRfix16K_80000]; else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000) *pdata = p[3 + msr - MSR_MTRRfix4K_C0000]; else if (msr == MSR_IA32_CR_PAT) *pdata = vcpu->arch.pat; else { /* Variable MTRRs */ int idx, is_mtrr_mask; u64 *pt; idx = (msr - 0x200) / 2; is_mtrr_mask = msr - 0x200 - 2 * idx; if (!is_mtrr_mask) pt = (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo; else pt = (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo; *pdata = *pt; } return 0; } static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) { u64 data; u64 mcg_cap = vcpu->arch.mcg_cap; unsigned bank_num = mcg_cap & 0xff; switch (msr) { case MSR_IA32_P5_MC_ADDR: case MSR_IA32_P5_MC_TYPE: data = 0; break; case MSR_IA32_MCG_CAP: data = vcpu->arch.mcg_cap; break; case MSR_IA32_MCG_CTL: if (!(mcg_cap & MCG_CTL_P)) return 1; data = vcpu->arch.mcg_ctl; break; case MSR_IA32_MCG_STATUS: data = vcpu->arch.mcg_status; break; default: if (msr >= MSR_IA32_MC0_CTL && msr < MSR_IA32_MC0_CTL + 4 * bank_num) { u32 offset = msr - MSR_IA32_MC0_CTL; data = vcpu->arch.mce_banks[offset]; break; } return 1; } *pdata = data; return 0; } static int get_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) { u64 data = 0; struct kvm *kvm = vcpu->kvm; switch (msr) { case HV_X64_MSR_GUEST_OS_ID: data = kvm->arch.hv_guest_os_id; break; case HV_X64_MSR_HYPERCALL: data = 
kvm->arch.hv_hypercall; break; default: pr_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr); return 1; } *pdata = data; return 0; } static int get_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) { u64 data = 0; switch (msr) { case HV_X64_MSR_VP_INDEX: { int r; struct kvm_vcpu *v; kvm_for_each_vcpu(r, v, vcpu->kvm) if (v == vcpu) data = r; break; } case HV_X64_MSR_EOI: return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata); case HV_X64_MSR_ICR: return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata); case HV_X64_MSR_TPR: return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata); default: pr_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr); return 1; } *pdata = data; return 0; } int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) { u64 data; switch (msr) { case MSR_IA32_PLATFORM_ID: case MSR_IA32_UCODE_REV: case MSR_IA32_EBL_CR_POWERON: case MSR_IA32_DEBUGCTLMSR: case MSR_IA32_LASTBRANCHFROMIP: case MSR_IA32_LASTBRANCHTOIP: case MSR_IA32_LASTINTFROMIP: case MSR_IA32_LASTINTTOIP: case MSR_K8_SYSCFG: case MSR_K7_HWCR: case MSR_VM_HSAVE_PA: case MSR_P6_PERFCTR0: case MSR_P6_PERFCTR1: case MSR_P6_EVNTSEL0: case MSR_P6_EVNTSEL1: case MSR_K7_EVNTSEL0: case MSR_K7_PERFCTR0: case MSR_K8_INT_PENDING_MSG: case MSR_AMD64_NB_CFG: case MSR_FAM10H_MMIO_CONF_BASE: data = 0; break; case MSR_MTRRcap: data = 0x500 | KVM_NR_VAR_MTRR; break; case 0x200 ... 0x2ff: return get_msr_mtrr(vcpu, msr, pdata); case 0xcd: /* fsb frequency */ data = 3; break; /* * MSR_EBC_FREQUENCY_ID * Conservative value valid for even the basic CPU models. * Models 0,1: 000 in bits 23:21 indicating a bus speed of * 100MHz, model 2 000 in bits 18:16 indicating 100MHz, * and 266MHz for model 3, or 4. Set Core Clock * Frequency to System Bus Frequency Ratio to 1 (bits * 31:24) even though these are only valid for CPU * models > 2, however guests may end up dividing or * multiplying by zero otherwise. */ case MSR_EBC_FREQUENCY_ID: data = 1 << 24; break; case MSR_IA32_APICBASE: data = kvm_get_apic_base(vcpu); break; case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff: return kvm_x2apic_msr_read(vcpu, msr, pdata); break; case MSR_IA32_MISC_ENABLE: data = vcpu->arch.ia32_misc_enable_msr; break; case MSR_IA32_PERF_STATUS: /* TSC increment by tick */ data = 1000ULL; /* CPU multiplier */ data |= (((uint64_t)4ULL) << 40); break; case MSR_EFER: data = vcpu->arch.efer; break; case MSR_KVM_WALL_CLOCK: case MSR_KVM_WALL_CLOCK_NEW: data = vcpu->kvm->arch.wall_clock; break; case MSR_KVM_SYSTEM_TIME: case MSR_KVM_SYSTEM_TIME_NEW: data = vcpu->arch.time; break; case MSR_KVM_ASYNC_PF_EN: data = vcpu->arch.apf.msr_val; break; case MSR_IA32_P5_MC_ADDR: case MSR_IA32_P5_MC_TYPE: case MSR_IA32_MCG_CAP: case MSR_IA32_MCG_CTL: case MSR_IA32_MCG_STATUS: case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1: return get_msr_mce(vcpu, msr, pdata); case MSR_K7_CLK_CTL: /* * Provide expected ramp-up count for K7. All other * are set to zero, indicating minimum divisors for * every field. * * This prevents guest kernels on AMD host with CPU * type 6, model 8 and higher from exploding due to * the rdmsr failing. */ data = 0x20000000; break; case HV_X64_MSR_GUEST_OS_ID ... 
HV_X64_MSR_SINT15: if (kvm_hv_msr_partition_wide(msr)) { int r; mutex_lock(&vcpu->kvm->lock); r = get_msr_hyperv_pw(vcpu, msr, pdata); mutex_unlock(&vcpu->kvm->lock); return r; } else return get_msr_hyperv(vcpu, msr, pdata); break; default: if (!ignore_msrs) { pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr); return 1; } else { pr_unimpl(vcpu, "ignored rdmsr: 0x%x\n", msr); data = 0; } break; } *pdata = data; return 0; } EXPORT_SYMBOL_GPL(kvm_get_msr_common); /* * Read or write a bunch of msrs. All parameters are kernel addresses. * * @return number of msrs set successfully. */ static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs, struct kvm_msr_entry *entries, int (*do_msr)(struct kvm_vcpu *vcpu, unsigned index, u64 *data)) { int i, idx; idx = srcu_read_lock(&vcpu->kvm->srcu); for (i = 0; i < msrs->nmsrs; ++i) if (do_msr(vcpu, entries[i].index, &entries[i].data)) break; srcu_read_unlock(&vcpu->kvm->srcu, idx); return i; } /* * Read or write a bunch of msrs. Parameters are user addresses. * * @return number of msrs set successfully. */ static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs, int (*do_msr)(struct kvm_vcpu *vcpu, unsigned index, u64 *data), int writeback) { struct kvm_msrs msrs; struct kvm_msr_entry *entries; int r, n; unsigned size; r = -EFAULT; if (copy_from_user(&msrs, user_msrs, sizeof msrs)) goto out; r = -E2BIG; if (msrs.nmsrs >= MAX_IO_MSRS) goto out; r = -ENOMEM; size = sizeof(struct kvm_msr_entry) * msrs.nmsrs; entries = kmalloc(size, GFP_KERNEL); if (!entries) goto out; r = -EFAULT; if (copy_from_user(entries, user_msrs->entries, size)) goto out_free; r = n = __msr_io(vcpu, &msrs, entries, do_msr); if (r < 0) goto out_free; r = -EFAULT; if (writeback && copy_to_user(user_msrs->entries, entries, size)) goto out_free; r = n; out_free: kfree(entries); out: return r; } int kvm_dev_ioctl_check_extension(long ext) { int r; switch (ext) { case KVM_CAP_IRQCHIP: case KVM_CAP_HLT: case KVM_CAP_MMU_SHADOW_CACHE_CONTROL: case KVM_CAP_SET_TSS_ADDR: case KVM_CAP_EXT_CPUID: case KVM_CAP_CLOCKSOURCE: case KVM_CAP_PIT: case KVM_CAP_NOP_IO_DELAY: case KVM_CAP_MP_STATE: case KVM_CAP_SYNC_MMU: case KVM_CAP_REINJECT_CONTROL: case KVM_CAP_IRQ_INJECT_STATUS: case KVM_CAP_ASSIGN_DEV_IRQ: case KVM_CAP_IRQFD: case KVM_CAP_IOEVENTFD: case KVM_CAP_PIT2: case KVM_CAP_PIT_STATE2: case KVM_CAP_SET_IDENTITY_MAP_ADDR: case KVM_CAP_XEN_HVM: case KVM_CAP_ADJUST_CLOCK: case KVM_CAP_VCPU_EVENTS: case KVM_CAP_HYPERV: case KVM_CAP_HYPERV_VAPIC: case KVM_CAP_HYPERV_SPIN: case KVM_CAP_PCI_SEGMENT: case KVM_CAP_DEBUGREGS: case KVM_CAP_X86_ROBUST_SINGLESTEP: case KVM_CAP_XSAVE: case KVM_CAP_ASYNC_PF: r = 1; break; case KVM_CAP_COALESCED_MMIO: r = KVM_COALESCED_MMIO_PAGE_OFFSET; break; case KVM_CAP_VAPIC: r = !kvm_x86_ops->cpu_has_accelerated_tpr(); break; case KVM_CAP_NR_VCPUS: r = KVM_MAX_VCPUS; break; case KVM_CAP_NR_MEMSLOTS: r = KVM_MEMORY_SLOTS; break; case KVM_CAP_PV_MMU: /* obsolete */ r = 0; break; case KVM_CAP_IOMMU: r = iommu_found(); break; case KVM_CAP_MCE: r = KVM_MAX_MCE_BANKS; break; case KVM_CAP_XCRS: r = cpu_has_xsave; break; default: r = 0; break; } return r; } long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { void __user *argp = (void __user *)arg; long r; switch (ioctl) { case KVM_GET_MSR_INDEX_LIST: { struct kvm_msr_list __user *user_msr_list = argp; struct kvm_msr_list msr_list; unsigned n; r = -EFAULT; if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list)) goto out; n = msr_list.nmsrs; msr_list.nmsrs = 
num_msrs_to_save + ARRAY_SIZE(emulated_msrs); if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list)) goto out; r = -E2BIG; if (n < msr_list.nmsrs) goto out; r = -EFAULT; if (copy_to_user(user_msr_list->indices, &msrs_to_save, num_msrs_to_save * sizeof(u32))) goto out; if (copy_to_user(user_msr_list->indices + num_msrs_to_save, &emulated_msrs, ARRAY_SIZE(emulated_msrs) * sizeof(u32))) goto out; r = 0; break; } case KVM_GET_SUPPORTED_CPUID: { struct kvm_cpuid2 __user *cpuid_arg = argp; struct kvm_cpuid2 cpuid; r = -EFAULT; if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid)) goto out; r = kvm_dev_ioctl_get_supported_cpuid(&cpuid, cpuid_arg->entries); if (r) goto out; r = -EFAULT; if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid)) goto out; r = 0; break; } case KVM_X86_GET_MCE_CAP_SUPPORTED: { u64 mce_cap; mce_cap = KVM_MCE_CAP_SUPPORTED; r = -EFAULT; if (copy_to_user(argp, &mce_cap, sizeof mce_cap)) goto out; r = 0; break; } default: r = -EINVAL; } out: return r; } static void wbinvd_ipi(void *garbage) { wbinvd(); } static bool need_emulate_wbinvd(struct kvm_vcpu *vcpu) { return vcpu->kvm->arch.iommu_domain && !(vcpu->kvm->arch.iommu_flags & KVM_IOMMU_CACHE_COHERENCY); } void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) { /* Address WBINVD may be executed by guest */ if (need_emulate_wbinvd(vcpu)) { if (kvm_x86_ops->has_wbinvd_exit()) cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask); else if (vcpu->cpu != -1 && vcpu->cpu != cpu) smp_call_function_single(vcpu->cpu, wbinvd_ipi, NULL, 1); } kvm_x86_ops->vcpu_load(vcpu, cpu); if (unlikely(vcpu->cpu != cpu) || check_tsc_unstable()) { /* Make sure TSC doesn't go backwards */ s64 tsc_delta = !vcpu->arch.last_host_tsc ? 0 : native_read_tsc() - vcpu->arch.last_host_tsc; if (tsc_delta < 0) mark_tsc_unstable("KVM discovered backwards TSC"); if (check_tsc_unstable()) { kvm_x86_ops->adjust_tsc_offset(vcpu, -tsc_delta); vcpu->arch.tsc_catchup = 1; kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); } if (vcpu->cpu != cpu) kvm_migrate_timers(vcpu); vcpu->cpu = cpu; } } void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) { kvm_x86_ops->vcpu_put(vcpu); kvm_put_guest_fpu(vcpu); vcpu->arch.last_host_tsc = native_read_tsc(); } static int is_efer_nx(void) { unsigned long long efer = 0; rdmsrl_safe(MSR_EFER, &efer); return efer & EFER_NX; } static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu) { int i; struct kvm_cpuid_entry2 *e, *entry; entry = NULL; for (i = 0; i < vcpu->arch.cpuid_nent; ++i) { e = &vcpu->arch.cpuid_entries[i]; if (e->function == 0x80000001) { entry = e; break; } } if (entry && (entry->edx & (1 << 20)) && !is_efer_nx()) { entry->edx &= ~(1 << 20); printk(KERN_INFO "kvm: guest NX capability removed\n"); } } /* when an old userspace process fills a new kernel module */ static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid *cpuid, struct kvm_cpuid_entry __user *entries) { int r, i; struct kvm_cpuid_entry *cpuid_entries; r = -E2BIG; if (cpuid->nent > KVM_MAX_CPUID_ENTRIES) goto out; r = -ENOMEM; cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * cpuid->nent); if (!cpuid_entries) goto out; r = -EFAULT; if (copy_from_user(cpuid_entries, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry))) goto out_free; for (i = 0; i < cpuid->nent; i++) { vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function; vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax; vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx; vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx; vcpu->arch.cpuid_entries[i].edx = 
cpuid_entries[i].edx; vcpu->arch.cpuid_entries[i].index = 0; vcpu->arch.cpuid_entries[i].flags = 0; vcpu->arch.cpuid_entries[i].padding[0] = 0; vcpu->arch.cpuid_entries[i].padding[1] = 0; vcpu->arch.cpuid_entries[i].padding[2] = 0; } vcpu->arch.cpuid_nent = cpuid->nent; cpuid_fix_nx_cap(vcpu); r = 0; kvm_apic_set_version(vcpu); kvm_x86_ops->cpuid_update(vcpu); update_cpuid(vcpu); out_free: vfree(cpuid_entries); out: return r; } static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid, struct kvm_cpuid_entry2 __user *entries) { int r; r = -E2BIG; if (cpuid->nent > KVM_MAX_CPUID_ENTRIES) goto out; r = -EFAULT; if (copy_from_user(&vcpu->arch.cpuid_entries, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2))) goto out; vcpu->arch.cpuid_nent = cpuid->nent; kvm_apic_set_version(vcpu); kvm_x86_ops->cpuid_update(vcpu); update_cpuid(vcpu); return 0; out: return r; } static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid, struct kvm_cpuid_entry2 __user *entries) { int r; r = -E2BIG; if (cpuid->nent < vcpu->arch.cpuid_nent) goto out; r = -EFAULT; if (copy_to_user(entries, &vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2))) goto out; return 0; out: cpuid->nent = vcpu->arch.cpuid_nent; return r; } static void cpuid_mask(u32 *word, int wordnum) { *word &= boot_cpu_data.x86_capability[wordnum]; } static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function, u32 index) { entry->function = function; entry->index = index; cpuid_count(entry->function, entry->index, &entry->eax, &entry->ebx, &entry->ecx, &entry->edx); entry->flags = 0; } #define F(x) bit(X86_FEATURE_##x) static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, u32 index, int *nent, int maxnent) { unsigned f_nx = is_efer_nx() ? F(NX) : 0; #ifdef CONFIG_X86_64 unsigned f_gbpages = (kvm_x86_ops->get_lpage_level() == PT_PDPE_LEVEL) ? F(GBPAGES) : 0; unsigned f_lm = F(LM); #else unsigned f_gbpages = 0; unsigned f_lm = 0; #endif unsigned f_rdtscp = kvm_x86_ops->rdtscp_supported() ? 
F(RDTSCP) : 0; /* cpuid 1.edx */ const u32 kvm_supported_word0_x86_features = F(FPU) | F(VME) | F(DE) | F(PSE) | F(TSC) | F(MSR) | F(PAE) | F(MCE) | F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) | F(MTRR) | F(PGE) | F(MCA) | F(CMOV) | F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLSH) | 0 /* Reserved, DS, ACPI */ | F(MMX) | F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) | 0 /* HTT, TM, Reserved, PBE */; /* cpuid 0x80000001.edx */ const u32 kvm_supported_word1_x86_features = F(FPU) | F(VME) | F(DE) | F(PSE) | F(TSC) | F(MSR) | F(PAE) | F(MCE) | F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) | F(MTRR) | F(PGE) | F(MCA) | F(CMOV) | F(PAT) | F(PSE36) | 0 /* Reserved */ | f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) | F(FXSR) | F(FXSR_OPT) | f_gbpages | f_rdtscp | 0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW); /* cpuid 1.ecx */ const u32 kvm_supported_word4_x86_features = F(XMM3) | F(PCLMULQDQ) | 0 /* DTES64, MONITOR */ | 0 /* DS-CPL, VMX, SMX, EST */ | 0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ | 0 /* Reserved */ | F(CX16) | 0 /* xTPR Update, PDCM */ | 0 /* Reserved, DCA */ | F(XMM4_1) | F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) | 0 /* Reserved*/ | F(AES) | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX) | F(F16C); /* cpuid 0x80000001.ecx */ const u32 kvm_supported_word6_x86_features = F(LAHF_LM) | F(CMP_LEGACY) | 0 /*SVM*/ | 0 /* ExtApicSpace */ | F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) | F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(XOP) | 0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM); /* all calls to cpuid_count() should be made on the same cpu */ get_cpu(); do_cpuid_1_ent(entry, function, index); ++*nent; switch (function) { case 0: entry->eax = min(entry->eax, (u32)0xd); break; case 1: entry->edx &= kvm_supported_word0_x86_features; cpuid_mask(&entry->edx, 0); entry->ecx &= kvm_supported_word4_x86_features; cpuid_mask(&entry->ecx, 4); /* we support x2apic emulation even if host does not support * it since we emulate x2apic in software */ entry->ecx |= F(X2APIC); break; /* function 2 entries are STATEFUL. That is, repeated cpuid commands * may return different values. This forces us to get_cpu() before * issuing the first command, and also to emulate this annoying behavior * in kvm_emulate_cpuid() using KVM_CPUID_FLAG_STATE_READ_NEXT */ case 2: { int t, times = entry->eax & 0xff; entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC; entry->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT; for (t = 1; t < times && *nent < maxnent; ++t) { do_cpuid_1_ent(&entry[t], function, 0); entry[t].flags |= KVM_CPUID_FLAG_STATEFUL_FUNC; ++*nent; } break; } /* function 4 and 0xb have additional index. 
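 * Leaf 4 (deterministic cache parameters) and leaf 0xb (extended
 * topology) are enumerated by bumping the index (ECX) until a
 * terminating entry is returned - cache type 0 for leaf 4, level type 0
 * for leaf 0xb - which is what the loops below do.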
*/ case 4: { int i, cache_type; entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX; /* read more entries until cache_type is zero */ for (i = 1; *nent < maxnent; ++i) { cache_type = entry[i - 1].eax & 0x1f; if (!cache_type) break; do_cpuid_1_ent(&entry[i], function, i); entry[i].flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX; ++*nent; } break; } case 0xb: { int i, level_type; entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX; /* read more entries until level_type is zero */ for (i = 1; *nent < maxnent; ++i) { level_type = entry[i - 1].ecx & 0xff00; if (!level_type) break; do_cpuid_1_ent(&entry[i], function, i); entry[i].flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX; ++*nent; } break; } case 0xd: { int i; entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX; for (i = 1; *nent < maxnent; ++i) { if (entry[i - 1].eax == 0 && i != 2) break; do_cpuid_1_ent(&entry[i], function, i); entry[i].flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX; ++*nent; } break; } case KVM_CPUID_SIGNATURE: { char signature[12] = "KVMKVMKVM\0\0"; u32 *sigptr = (u32 *)signature; entry->eax = 0; entry->ebx = sigptr[0]; entry->ecx = sigptr[1]; entry->edx = sigptr[2]; break; } case KVM_CPUID_FEATURES: entry->eax = (1 << KVM_FEATURE_CLOCKSOURCE) | (1 << KVM_FEATURE_NOP_IO_DELAY) | (1 << KVM_FEATURE_CLOCKSOURCE2) | (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT); entry->ebx = 0; entry->ecx = 0; entry->edx = 0; break; case 0x80000000: entry->eax = min(entry->eax, 0x8000001a); break; case 0x80000001: entry->edx &= kvm_supported_word1_x86_features; cpuid_mask(&entry->edx, 1); entry->ecx &= kvm_supported_word6_x86_features; cpuid_mask(&entry->ecx, 6); break; } kvm_x86_ops->set_supported_cpuid(function, entry); put_cpu(); } #undef F static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid, struct kvm_cpuid_entry2 __user *entries) { struct kvm_cpuid_entry2 *cpuid_entries; int limit, nent = 0, r = -E2BIG; u32 func; if (cpuid->nent < 1) goto out; if (cpuid->nent > KVM_MAX_CPUID_ENTRIES) cpuid->nent = KVM_MAX_CPUID_ENTRIES; r = -ENOMEM; cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent); if (!cpuid_entries) goto out; do_cpuid_ent(&cpuid_entries[0], 0, 0, &nent, cpuid->nent); limit = cpuid_entries[0].eax; for (func = 1; func <= limit && nent < cpuid->nent; ++func) do_cpuid_ent(&cpuid_entries[nent], func, 0, &nent, cpuid->nent); r = -E2BIG; if (nent >= cpuid->nent) goto out_free; do_cpuid_ent(&cpuid_entries[nent], 0x80000000, 0, &nent, cpuid->nent); limit = cpuid_entries[nent - 1].eax; for (func = 0x80000001; func <= limit && nent < cpuid->nent; ++func) do_cpuid_ent(&cpuid_entries[nent], func, 0, &nent, cpuid->nent); r = -E2BIG; if (nent >= cpuid->nent) goto out_free; do_cpuid_ent(&cpuid_entries[nent], KVM_CPUID_SIGNATURE, 0, &nent, cpuid->nent); r = -E2BIG; if (nent >= cpuid->nent) goto out_free; do_cpuid_ent(&cpuid_entries[nent], KVM_CPUID_FEATURES, 0, &nent, cpuid->nent); r = -E2BIG; if (nent >= cpuid->nent) goto out_free; r = -EFAULT; if (copy_to_user(entries, cpuid_entries, nent * sizeof(struct kvm_cpuid_entry2))) goto out_free; cpuid->nent = nent; r = 0; out_free: vfree(cpuid_entries); out: return r; } static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s) { memcpy(s->regs, vcpu->arch.apic->regs, sizeof *s); return 0; } static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s) { memcpy(vcpu->arch.apic->regs, s->regs, sizeof *s); kvm_apic_post_state_restore(vcpu); update_cr8_intercept(vcpu); return 0; } static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct 
kvm_interrupt *irq) { if (irq->irq < 0 || irq->irq >= 256) return -EINVAL; if (irqchip_in_kernel(vcpu->kvm)) return -ENXIO; kvm_queue_interrupt(vcpu, irq->irq, false); kvm_make_request(KVM_REQ_EVENT, vcpu); return 0; } static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu) { kvm_inject_nmi(vcpu); return 0; } static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu, struct kvm_tpr_access_ctl *tac) { if (tac->flags) return -EINVAL; vcpu->arch.tpr_access_reporting = !!tac->enabled; return 0; } static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu, u64 mcg_cap) { int r; unsigned bank_num = mcg_cap & 0xff, bank; r = -EINVAL; if (!bank_num || bank_num >= KVM_MAX_MCE_BANKS) goto out; if (mcg_cap & ~(KVM_MCE_CAP_SUPPORTED | 0xff | 0xff0000)) goto out; r = 0; vcpu->arch.mcg_cap = mcg_cap; /* Init IA32_MCG_CTL to all 1s */ if (mcg_cap & MCG_CTL_P) vcpu->arch.mcg_ctl = ~(u64)0; /* Init IA32_MCi_CTL to all 1s */ for (bank = 0; bank < bank_num; bank++) vcpu->arch.mce_banks[bank*4] = ~(u64)0; out: return r; } static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu, struct kvm_x86_mce *mce) { u64 mcg_cap = vcpu->arch.mcg_cap; unsigned bank_num = mcg_cap & 0xff; u64 *banks = vcpu->arch.mce_banks; if (mce->bank >= bank_num || !(mce->status & MCI_STATUS_VAL)) return -EINVAL; /* * if IA32_MCG_CTL is not all 1s, the uncorrected error * reporting is disabled */ if ((mce->status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) && vcpu->arch.mcg_ctl != ~(u64)0) return 0; banks += 4 * mce->bank; /* * if IA32_MCi_CTL is not all 1s, the uncorrected error * reporting is disabled for the bank */ if ((mce->status & MCI_STATUS_UC) && banks[0] != ~(u64)0) return 0; if (mce->status & MCI_STATUS_UC) { if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) || !kvm_read_cr4_bits(vcpu, X86_CR4_MCE)) { printk(KERN_DEBUG "kvm: set_mce: " "injects mce exception while " "previous one is in progress!\n"); kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); return 0; } if (banks[1] & MCI_STATUS_VAL) mce->status |= MCI_STATUS_OVER; banks[2] = mce->addr; banks[3] = mce->misc; vcpu->arch.mcg_status = mce->mcg_status; banks[1] = mce->status; kvm_queue_exception(vcpu, MC_VECTOR); } else if (!(banks[1] & MCI_STATUS_VAL) || !(banks[1] & MCI_STATUS_UC)) { if (banks[1] & MCI_STATUS_VAL) mce->status |= MCI_STATUS_OVER; banks[2] = mce->addr; banks[3] = mce->misc; banks[1] = mce->status; } else banks[1] |= MCI_STATUS_OVER; return 0; } static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu, struct kvm_vcpu_events *events) { events->exception.injected = vcpu->arch.exception.pending && !kvm_exception_is_soft(vcpu->arch.exception.nr); events->exception.nr = vcpu->arch.exception.nr; events->exception.has_error_code = vcpu->arch.exception.has_error_code; events->exception.pad = 0; events->exception.error_code = vcpu->arch.exception.error_code; events->interrupt.injected = vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft; events->interrupt.nr = vcpu->arch.interrupt.nr; events->interrupt.soft = 0; events->interrupt.shadow = kvm_x86_ops->get_interrupt_shadow(vcpu, KVM_X86_SHADOW_INT_MOV_SS | KVM_X86_SHADOW_INT_STI); events->nmi.injected = vcpu->arch.nmi_injected; events->nmi.pending = vcpu->arch.nmi_pending; events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu); events->nmi.pad = 0; events->sipi_vector = vcpu->arch.sipi_vector; events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING | KVM_VCPUEVENT_VALID_SIPI_VECTOR | KVM_VCPUEVENT_VALID_SHADOW); memset(&events->reserved, 0, sizeof(events->reserved)); } static int 
kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu, struct kvm_vcpu_events *events) { if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING | KVM_VCPUEVENT_VALID_SIPI_VECTOR | KVM_VCPUEVENT_VALID_SHADOW)) return -EINVAL; vcpu->arch.exception.pending = events->exception.injected; vcpu->arch.exception.nr = events->exception.nr; vcpu->arch.exception.has_error_code = events->exception.has_error_code; vcpu->arch.exception.error_code = events->exception.error_code; vcpu->arch.interrupt.pending = events->interrupt.injected; vcpu->arch.interrupt.nr = events->interrupt.nr; vcpu->arch.interrupt.soft = events->interrupt.soft; if (vcpu->arch.interrupt.pending && irqchip_in_kernel(vcpu->kvm)) kvm_pic_clear_isr_ack(vcpu->kvm); if (events->flags & KVM_VCPUEVENT_VALID_SHADOW) kvm_x86_ops->set_interrupt_shadow(vcpu, events->interrupt.shadow); vcpu->arch.nmi_injected = events->nmi.injected; if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING) vcpu->arch.nmi_pending = events->nmi.pending; kvm_x86_ops->set_nmi_mask(vcpu, events->nmi.masked); if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR) vcpu->arch.sipi_vector = events->sipi_vector; kvm_make_request(KVM_REQ_EVENT, vcpu); return 0; } static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu, struct kvm_debugregs *dbgregs) { memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db)); dbgregs->dr6 = vcpu->arch.dr6; dbgregs->dr7 = vcpu->arch.dr7; dbgregs->flags = 0; memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved)); } static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu, struct kvm_debugregs *dbgregs) { if (dbgregs->flags) return -EINVAL; memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db)); vcpu->arch.dr6 = dbgregs->dr6; vcpu->arch.dr7 = dbgregs->dr7; return 0; } static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu, struct kvm_xsave *guest_xsave) { if (cpu_has_xsave) memcpy(guest_xsave->region, &vcpu->arch.guest_fpu.state->xsave, xstate_size); else { memcpy(guest_xsave->region, &vcpu->arch.guest_fpu.state->fxsave, sizeof(struct i387_fxsave_struct)); *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] = XSTATE_FPSSE; } } static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu, struct kvm_xsave *guest_xsave) { u64 xstate_bv = *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)]; if (cpu_has_xsave) memcpy(&vcpu->arch.guest_fpu.state->xsave, guest_xsave->region, xstate_size); else { if (xstate_bv & ~XSTATE_FPSSE) return -EINVAL; memcpy(&vcpu->arch.guest_fpu.state->fxsave, guest_xsave->region, sizeof(struct i387_fxsave_struct)); } return 0; } static void kvm_vcpu_ioctl_x86_get_xcrs(struct kvm_vcpu *vcpu, struct kvm_xcrs *guest_xcrs) { if (!cpu_has_xsave) { guest_xcrs->nr_xcrs = 0; return; } guest_xcrs->nr_xcrs = 1; guest_xcrs->flags = 0; guest_xcrs->xcrs[0].xcr = XCR_XFEATURE_ENABLED_MASK; guest_xcrs->xcrs[0].value = vcpu->arch.xcr0; } static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu, struct kvm_xcrs *guest_xcrs) { int i, r = 0; if (!cpu_has_xsave) return -EINVAL; if (guest_xcrs->nr_xcrs > KVM_MAX_XCRS || guest_xcrs->flags) return -EINVAL; for (i = 0; i < guest_xcrs->nr_xcrs; i++) /* Only support XCR0 currently */ if (guest_xcrs->xcrs[0].xcr == XCR_XFEATURE_ENABLED_MASK) { r = __kvm_set_xcr(vcpu, XCR_XFEATURE_ENABLED_MASK, guest_xcrs->xcrs[0].value); break; } if (r) r = -EINVAL; return r; } long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { struct kvm_vcpu *vcpu = filp->private_data; void __user *argp = (void __user *)arg; int r; union { struct 
kvm_lapic_state *lapic; struct kvm_xsave *xsave; struct kvm_xcrs *xcrs; void *buffer; } u; u.buffer = NULL; switch (ioctl) { case KVM_GET_LAPIC: { r = -EINVAL; if (!vcpu->arch.apic) goto out; u.lapic = kzalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL); r = -ENOMEM; if (!u.lapic) goto out; r = kvm_vcpu_ioctl_get_lapic(vcpu, u.lapic); if (r) goto out; r = -EFAULT; if (copy_to_user(argp, u.lapic, sizeof(struct kvm_lapic_state))) goto out; r = 0; break; } case KVM_SET_LAPIC: { r = -EINVAL; if (!vcpu->arch.apic) goto out; u.lapic = kmalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL); r = -ENOMEM; if (!u.lapic) goto out; r = -EFAULT; if (copy_from_user(u.lapic, argp, sizeof(struct kvm_lapic_state))) goto out; r = kvm_vcpu_ioctl_set_lapic(vcpu, u.lapic); if (r) goto out; r = 0; break; } case KVM_INTERRUPT: { struct kvm_interrupt irq; r = -EFAULT; if (copy_from_user(&irq, argp, sizeof irq)) goto out; r = kvm_vcpu_ioctl_interrupt(vcpu, &irq); if (r) goto out; r = 0; break; } case KVM_NMI: { r = kvm_vcpu_ioctl_nmi(vcpu); if (r) goto out; r = 0; break; } case KVM_SET_CPUID: { struct kvm_cpuid __user *cpuid_arg = argp; struct kvm_cpuid cpuid; r = -EFAULT; if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid)) goto out; r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries); if (r) goto out; break; } case KVM_SET_CPUID2: { struct kvm_cpuid2 __user *cpuid_arg = argp; struct kvm_cpuid2 cpuid; r = -EFAULT; if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid)) goto out; r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid, cpuid_arg->entries); if (r) goto out; break; } case KVM_GET_CPUID2: { struct kvm_cpuid2 __user *cpuid_arg = argp; struct kvm_cpuid2 cpuid; r = -EFAULT; if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid)) goto out; r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid, cpuid_arg->entries); if (r) goto out; r = -EFAULT; if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid)) goto out; r = 0; break; } case KVM_GET_MSRS: r = msr_io(vcpu, argp, kvm_get_msr, 1); break; case KVM_SET_MSRS: r = msr_io(vcpu, argp, do_set_msr, 0); break; case KVM_TPR_ACCESS_REPORTING: { struct kvm_tpr_access_ctl tac; r = -EFAULT; if (copy_from_user(&tac, argp, sizeof tac)) goto out; r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac); if (r) goto out; r = -EFAULT; if (copy_to_user(argp, &tac, sizeof tac)) goto out; r = 0; break; }; case KVM_SET_VAPIC_ADDR: { struct kvm_vapic_addr va; r = -EINVAL; if (!irqchip_in_kernel(vcpu->kvm)) goto out; r = -EFAULT; if (copy_from_user(&va, argp, sizeof va)) goto out; r = 0; kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); break; } case KVM_X86_SETUP_MCE: { u64 mcg_cap; r = -EFAULT; if (copy_from_user(&mcg_cap, argp, sizeof mcg_cap)) goto out; r = kvm_vcpu_ioctl_x86_setup_mce(vcpu, mcg_cap); break; } case KVM_X86_SET_MCE: { struct kvm_x86_mce mce; r = -EFAULT; if (copy_from_user(&mce, argp, sizeof mce)) goto out; r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce); break; } case KVM_GET_VCPU_EVENTS: { struct kvm_vcpu_events events; kvm_vcpu_ioctl_x86_get_vcpu_events(vcpu, &events); r = -EFAULT; if (copy_to_user(argp, &events, sizeof(struct kvm_vcpu_events))) break; r = 0; break; } case KVM_SET_VCPU_EVENTS: { struct kvm_vcpu_events events; r = -EFAULT; if (copy_from_user(&events, argp, sizeof(struct kvm_vcpu_events))) break; r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events); break; } case KVM_GET_DEBUGREGS: { struct kvm_debugregs dbgregs; kvm_vcpu_ioctl_x86_get_debugregs(vcpu, &dbgregs); r = -EFAULT; if (copy_to_user(argp, &dbgregs, sizeof(struct kvm_debugregs))) break; r = 0; break; } case 
KVM_SET_DEBUGREGS: { struct kvm_debugregs dbgregs; r = -EFAULT; if (copy_from_user(&dbgregs, argp, sizeof(struct kvm_debugregs))) break; r = kvm_vcpu_ioctl_x86_set_debugregs(vcpu, &dbgregs); break; } case KVM_GET_XSAVE: { u.xsave = kzalloc(sizeof(struct kvm_xsave), GFP_KERNEL); r = -ENOMEM; if (!u.xsave) break; kvm_vcpu_ioctl_x86_get_xsave(vcpu, u.xsave); r = -EFAULT; if (copy_to_user(argp, u.xsave, sizeof(struct kvm_xsave))) break; r = 0; break; } case KVM_SET_XSAVE: { u.xsave = kzalloc(sizeof(struct kvm_xsave), GFP_KERNEL); r = -ENOMEM; if (!u.xsave) break; r = -EFAULT; if (copy_from_user(u.xsave, argp, sizeof(struct kvm_xsave))) break; r = kvm_vcpu_ioctl_x86_set_xsave(vcpu, u.xsave); break; } case KVM_GET_XCRS: { u.xcrs = kzalloc(sizeof(struct kvm_xcrs), GFP_KERNEL); r = -ENOMEM; if (!u.xcrs) break; kvm_vcpu_ioctl_x86_get_xcrs(vcpu, u.xcrs); r = -EFAULT; if (copy_to_user(argp, u.xcrs, sizeof(struct kvm_xcrs))) break; r = 0; break; } case KVM_SET_XCRS: { u.xcrs = kzalloc(sizeof(struct kvm_xcrs), GFP_KERNEL); r = -ENOMEM; if (!u.xcrs) break; r = -EFAULT; if (copy_from_user(u.xcrs, argp, sizeof(struct kvm_xcrs))) break; r = kvm_vcpu_ioctl_x86_set_xcrs(vcpu, u.xcrs); break; } default: r = -EINVAL; } out: kfree(u.buffer); return r; } static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr) { int ret; if (addr > (unsigned int)(-3 * PAGE_SIZE)) return -1; ret = kvm_x86_ops->set_tss_addr(kvm, addr); return ret; } static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm, u64 ident_addr) { kvm->arch.ept_identity_map_addr = ident_addr; return 0; } static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm, u32 kvm_nr_mmu_pages) { if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES) return -EINVAL; mutex_lock(&kvm->slots_lock); spin_lock(&kvm->mmu_lock); kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages); kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages; spin_unlock(&kvm->mmu_lock); mutex_unlock(&kvm->slots_lock); return 0; } static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm) { return kvm->arch.n_max_mmu_pages; } static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip) { int r; r = 0; switch (chip->chip_id) { case KVM_IRQCHIP_PIC_MASTER: memcpy(&chip->chip.pic, &pic_irqchip(kvm)->pics[0], sizeof(struct kvm_pic_state)); break; case KVM_IRQCHIP_PIC_SLAVE: memcpy(&chip->chip.pic, &pic_irqchip(kvm)->pics[1], sizeof(struct kvm_pic_state)); break; case KVM_IRQCHIP_IOAPIC: r = kvm_get_ioapic(kvm, &chip->chip.ioapic); break; default: r = -EINVAL; break; } return r; } static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip) { int r; r = 0; switch (chip->chip_id) { case KVM_IRQCHIP_PIC_MASTER: spin_lock(&pic_irqchip(kvm)->lock); memcpy(&pic_irqchip(kvm)->pics[0], &chip->chip.pic, sizeof(struct kvm_pic_state)); spin_unlock(&pic_irqchip(kvm)->lock); break; case KVM_IRQCHIP_PIC_SLAVE: spin_lock(&pic_irqchip(kvm)->lock); memcpy(&pic_irqchip(kvm)->pics[1], &chip->chip.pic, sizeof(struct kvm_pic_state)); spin_unlock(&pic_irqchip(kvm)->lock); break; case KVM_IRQCHIP_IOAPIC: r = kvm_set_ioapic(kvm, &chip->chip.ioapic); break; default: r = -EINVAL; break; } kvm_pic_update_irq(pic_irqchip(kvm)); return r; } static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps) { int r = 0; mutex_lock(&kvm->arch.vpit->pit_state.lock); memcpy(ps, &kvm->arch.vpit->pit_state, sizeof(struct kvm_pit_state)); mutex_unlock(&kvm->arch.vpit->pit_state.lock); return r; } static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps) { int r = 0; 
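/*
 * The incoming channel state is copied in under pit_state.lock and
 * channel 0's count is then reloaded via kvm_pit_load_count() so the
 * in-kernel PIT timer picks up the restored value.
 */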
mutex_lock(&kvm->arch.vpit->pit_state.lock); memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state)); kvm_pit_load_count(kvm, 0, ps->channels[0].count, 0); mutex_unlock(&kvm->arch.vpit->pit_state.lock); return r; } static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps) { int r = 0; mutex_lock(&kvm->arch.vpit->pit_state.lock); memcpy(ps->channels, &kvm->arch.vpit->pit_state.channels, sizeof(ps->channels)); ps->flags = kvm->arch.vpit->pit_state.flags; mutex_unlock(&kvm->arch.vpit->pit_state.lock); memset(&ps->reserved, 0, sizeof(ps->reserved)); return r; } static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps) { int r = 0, start = 0; u32 prev_legacy, cur_legacy; mutex_lock(&kvm->arch.vpit->pit_state.lock); prev_legacy = kvm->arch.vpit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY; cur_legacy = ps->flags & KVM_PIT_FLAGS_HPET_LEGACY; if (!prev_legacy && cur_legacy) start = 1; memcpy(&kvm->arch.vpit->pit_state.channels, &ps->channels, sizeof(kvm->arch.vpit->pit_state.channels)); kvm->arch.vpit->pit_state.flags = ps->flags; kvm_pit_load_count(kvm, 0, kvm->arch.vpit->pit_state.channels[0].count, start); mutex_unlock(&kvm->arch.vpit->pit_state.lock); return r; } static int kvm_vm_ioctl_reinject(struct kvm *kvm, struct kvm_reinject_control *control) { if (!kvm->arch.vpit) return -ENXIO; mutex_lock(&kvm->arch.vpit->pit_state.lock); kvm->arch.vpit->pit_state.pit_timer.reinject = control->pit_reinject; mutex_unlock(&kvm->arch.vpit->pit_state.lock); return 0; } /* * Get (and clear) the dirty memory log for a memory slot. */ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) { int r, i; struct kvm_memory_slot *memslot; unsigned long n; unsigned long is_dirty = 0; mutex_lock(&kvm->slots_lock); r = -EINVAL; if (log->slot >= KVM_MEMORY_SLOTS) goto out; memslot = &kvm->memslots->memslots[log->slot]; r = -ENOENT; if (!memslot->dirty_bitmap) goto out; n = kvm_dirty_bitmap_bytes(memslot); for (i = 0; !is_dirty && i < n/sizeof(long); i++) is_dirty = memslot->dirty_bitmap[i]; /* If nothing is dirty, don't bother messing with page tables. */ if (is_dirty) { struct kvm_memslots *slots, *old_slots; unsigned long *dirty_bitmap; dirty_bitmap = memslot->dirty_bitmap_head; if (memslot->dirty_bitmap == dirty_bitmap) dirty_bitmap += n / sizeof(long); memset(dirty_bitmap, 0, n); r = -ENOMEM; slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL); if (!slots) goto out; memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots)); slots->memslots[log->slot].dirty_bitmap = dirty_bitmap; slots->generation++; old_slots = kvm->memslots; rcu_assign_pointer(kvm->memslots, slots); synchronize_srcu_expedited(&kvm->srcu); dirty_bitmap = old_slots->memslots[log->slot].dirty_bitmap; kfree(old_slots); spin_lock(&kvm->mmu_lock); kvm_mmu_slot_remove_write_access(kvm, log->slot); spin_unlock(&kvm->mmu_lock); r = -EFAULT; if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n)) goto out; } else { r = -EFAULT; if (clear_user(log->dirty_bitmap, n)) goto out; } r = 0; out: mutex_unlock(&kvm->slots_lock); return r; } long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { struct kvm *kvm = filp->private_data; void __user *argp = (void __user *)arg; int r = -ENOTTY; /* * This union makes it completely explicit to gcc-3.x * that these two variables' stack usage should be * combined, not added together. 
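 * Only one member is live for any given ioctl below (ps for
 * KVM_GET/SET_PIT, ps2 for KVM_GET/SET_PIT2, pit_config for
 * KVM_CREATE_PIT*), so the union keeps the on-stack footprint small.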
*/ union { struct kvm_pit_state ps; struct kvm_pit_state2 ps2; struct kvm_pit_config pit_config; } u; switch (ioctl) { case KVM_SET_TSS_ADDR: r = kvm_vm_ioctl_set_tss_addr(kvm, arg); if (r < 0) goto out; break; case KVM_SET_IDENTITY_MAP_ADDR: { u64 ident_addr; r = -EFAULT; if (copy_from_user(&ident_addr, argp, sizeof ident_addr)) goto out; r = kvm_vm_ioctl_set_identity_map_addr(kvm, ident_addr); if (r < 0) goto out; break; } case KVM_SET_NR_MMU_PAGES: r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg); if (r) goto out; break; case KVM_GET_NR_MMU_PAGES: r = kvm_vm_ioctl_get_nr_mmu_pages(kvm); break; case KVM_CREATE_IRQCHIP: { struct kvm_pic *vpic; mutex_lock(&kvm->lock); r = -EEXIST; if (kvm->arch.vpic) goto create_irqchip_unlock; r = -ENOMEM; vpic = kvm_create_pic(kvm); if (vpic) { r = kvm_ioapic_init(kvm); if (r) { kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &vpic->dev); kfree(vpic); goto create_irqchip_unlock; } } else goto create_irqchip_unlock; smp_wmb(); kvm->arch.vpic = vpic; smp_wmb(); r = kvm_setup_default_irq_routing(kvm); if (r) { mutex_lock(&kvm->irq_lock); kvm_ioapic_destroy(kvm); kvm_destroy_pic(kvm); mutex_unlock(&kvm->irq_lock); } create_irqchip_unlock: mutex_unlock(&kvm->lock); break; } case KVM_CREATE_PIT: u.pit_config.flags = KVM_PIT_SPEAKER_DUMMY; goto create_pit; case KVM_CREATE_PIT2: r = -EFAULT; if (copy_from_user(&u.pit_config, argp, sizeof(struct kvm_pit_config))) goto out; create_pit: mutex_lock(&kvm->slots_lock); r = -EEXIST; if (kvm->arch.vpit) goto create_pit_unlock; r = -ENOMEM; kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags); if (kvm->arch.vpit) r = 0; create_pit_unlock: mutex_unlock(&kvm->slots_lock); break; case KVM_IRQ_LINE_STATUS: case KVM_IRQ_LINE: { struct kvm_irq_level irq_event; r = -EFAULT; if (copy_from_user(&irq_event, argp, sizeof irq_event)) goto out; r = -ENXIO; if (irqchip_in_kernel(kvm)) { __s32 status; status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irq_event.irq, irq_event.level); if (ioctl == KVM_IRQ_LINE_STATUS) { r = -EFAULT; irq_event.status = status; if (copy_to_user(argp, &irq_event, sizeof irq_event)) goto out; } r = 0; } break; } case KVM_GET_IRQCHIP: { /* 0: PIC master, 1: PIC slave, 2: IOAPIC */ struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL); r = -ENOMEM; if (!chip) goto out; r = -EFAULT; if (copy_from_user(chip, argp, sizeof *chip)) goto get_irqchip_out; r = -ENXIO; if (!irqchip_in_kernel(kvm)) goto get_irqchip_out; r = kvm_vm_ioctl_get_irqchip(kvm, chip); if (r) goto get_irqchip_out; r = -EFAULT; if (copy_to_user(argp, chip, sizeof *chip)) goto get_irqchip_out; r = 0; get_irqchip_out: kfree(chip); if (r) goto out; break; } case KVM_SET_IRQCHIP: { /* 0: PIC master, 1: PIC slave, 2: IOAPIC */ struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL); r = -ENOMEM; if (!chip) goto out; r = -EFAULT; if (copy_from_user(chip, argp, sizeof *chip)) goto set_irqchip_out; r = -ENXIO; if (!irqchip_in_kernel(kvm)) goto set_irqchip_out; r = kvm_vm_ioctl_set_irqchip(kvm, chip); if (r) goto set_irqchip_out; r = 0; set_irqchip_out: kfree(chip); if (r) goto out; break; } case KVM_GET_PIT: { r = -EFAULT; if (copy_from_user(&u.ps, argp, sizeof(struct kvm_pit_state))) goto out; r = -ENXIO; if (!kvm->arch.vpit) goto out; r = kvm_vm_ioctl_get_pit(kvm, &u.ps); if (r) goto out; r = -EFAULT; if (copy_to_user(argp, &u.ps, sizeof(struct kvm_pit_state))) goto out; r = 0; break; } case KVM_SET_PIT: { r = -EFAULT; if (copy_from_user(&u.ps, argp, sizeof u.ps)) goto out; r = -ENXIO; if (!kvm->arch.vpit) goto out; r = 
kvm_vm_ioctl_set_pit(kvm, &u.ps); if (r) goto out; r = 0; break; } case KVM_GET_PIT2: { r = -ENXIO; if (!kvm->arch.vpit) goto out; r = kvm_vm_ioctl_get_pit2(kvm, &u.ps2); if (r) goto out; r = -EFAULT; if (copy_to_user(argp, &u.ps2, sizeof(u.ps2))) goto out; r = 0; break; } case KVM_SET_PIT2: { r = -EFAULT; if (copy_from_user(&u.ps2, argp, sizeof(u.ps2))) goto out; r = -ENXIO; if (!kvm->arch.vpit) goto out; r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2); if (r) goto out; r = 0; break; } case KVM_REINJECT_CONTROL: { struct kvm_reinject_control control; r = -EFAULT; if (copy_from_user(&control, argp, sizeof(control))) goto out; r = kvm_vm_ioctl_reinject(kvm, &control); if (r) goto out; r = 0; break; } case KVM_XEN_HVM_CONFIG: { r = -EFAULT; if (copy_from_user(&kvm->arch.xen_hvm_config, argp, sizeof(struct kvm_xen_hvm_config))) goto out; r = -EINVAL; if (kvm->arch.xen_hvm_config.flags) goto out; r = 0; break; } case KVM_SET_CLOCK: { struct kvm_clock_data user_ns; u64 now_ns; s64 delta; r = -EFAULT; if (copy_from_user(&user_ns, argp, sizeof(user_ns))) goto out; r = -EINVAL; if (user_ns.flags) goto out; r = 0; local_irq_disable(); now_ns = get_kernel_ns(); delta = user_ns.clock - now_ns; local_irq_enable(); kvm->arch.kvmclock_offset = delta; break; } case KVM_GET_CLOCK: { struct kvm_clock_data user_ns; u64 now_ns; local_irq_disable(); now_ns = get_kernel_ns(); user_ns.clock = kvm->arch.kvmclock_offset + now_ns; local_irq_enable(); user_ns.flags = 0; memset(&user_ns.pad, 0, sizeof(user_ns.pad)); r = -EFAULT; if (copy_to_user(argp, &user_ns, sizeof(user_ns))) goto out; r = 0; break; } default: ; } out: return r; } static void kvm_init_msr_list(void) { u32 dummy[2]; unsigned i, j; /* skip the first msrs in the list. KVM-specific */ for (i = j = KVM_SAVE_MSRS_BEGIN; i < ARRAY_SIZE(msrs_to_save); i++) { if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0) continue; if (j < i) msrs_to_save[j] = msrs_to_save[i]; j++; } num_msrs_to_save = j; } static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len, const void *v) { if (vcpu->arch.apic && !kvm_iodevice_write(&vcpu->arch.apic->dev, addr, len, v)) return 0; return kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, addr, len, v); } static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v) { if (vcpu->arch.apic && !kvm_iodevice_read(&vcpu->arch.apic->dev, addr, len, v)) return 0; return kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, addr, len, v); } static void kvm_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg) { kvm_x86_ops->set_segment(vcpu, var, seg); } void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg) { kvm_x86_ops->get_segment(vcpu, var, seg); } static gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access) { return gpa; } static gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access) { gpa_t t_gpa; struct x86_exception exception; BUG_ON(!mmu_is_nested(vcpu)); /* NPT walks are always user-walks */ access |= PFERR_USER_MASK; t_gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, &exception); return t_gpa; } gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, struct x86_exception *exception) { u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); } gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, struct x86_exception *exception) { u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? 
PFERR_USER_MASK : 0; access |= PFERR_FETCH_MASK; return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); } gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, struct x86_exception *exception) { u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; access |= PFERR_WRITE_MASK; return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); } /* uses this to access any guest's mapped memory without checking CPL */ gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, struct x86_exception *exception) { return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, exception); } static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes, struct kvm_vcpu *vcpu, u32 access, struct x86_exception *exception) { void *data = val; int r = X86EMUL_CONTINUE; while (bytes) { gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access, exception); unsigned offset = addr & (PAGE_SIZE-1); unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset); int ret; if (gpa == UNMAPPED_GVA) return X86EMUL_PROPAGATE_FAULT; ret = kvm_read_guest(vcpu->kvm, gpa, data, toread); if (ret < 0) { r = X86EMUL_IO_NEEDED; goto out; } bytes -= toread; data += toread; addr += toread; } out: return r; } /* used for instruction fetching */ static int kvm_fetch_guest_virt(gva_t addr, void *val, unsigned int bytes, struct kvm_vcpu *vcpu, struct x86_exception *exception) { u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access | PFERR_FETCH_MASK, exception); } static int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes, struct kvm_vcpu *vcpu, struct x86_exception *exception) { u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, exception); } static int kvm_read_guest_virt_system(gva_t addr, void *val, unsigned int bytes, struct kvm_vcpu *vcpu, struct x86_exception *exception) { return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, exception); } static int kvm_write_guest_virt_system(gva_t addr, void *val, unsigned int bytes, struct kvm_vcpu *vcpu, struct x86_exception *exception) { void *data = val; int r = X86EMUL_CONTINUE; while (bytes) { gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, PFERR_WRITE_MASK, exception); unsigned offset = addr & (PAGE_SIZE-1); unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset); int ret; if (gpa == UNMAPPED_GVA) return X86EMUL_PROPAGATE_FAULT; ret = kvm_write_guest(vcpu->kvm, gpa, data, towrite); if (ret < 0) { r = X86EMUL_IO_NEEDED; goto out; } bytes -= towrite; data += towrite; addr += towrite; } out: return r; } static int emulator_read_emulated(unsigned long addr, void *val, unsigned int bytes, struct x86_exception *exception, struct kvm_vcpu *vcpu) { gpa_t gpa; if (vcpu->mmio_read_completed) { memcpy(val, vcpu->mmio_data, bytes); trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes, vcpu->mmio_phys_addr, *(u64 *)val); vcpu->mmio_read_completed = 0; return X86EMUL_CONTINUE; } gpa = kvm_mmu_gva_to_gpa_read(vcpu, addr, exception); if (gpa == UNMAPPED_GVA) return X86EMUL_PROPAGATE_FAULT; /* For APIC access vmexit */ if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE) goto mmio; if (kvm_read_guest_virt(addr, val, bytes, vcpu, exception) == X86EMUL_CONTINUE) return X86EMUL_CONTINUE; mmio: /* * Is this MMIO handled locally? 
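 * vcpu_mmio_read() first tries the in-kernel local APIC and then any
 * device registered on KVM_MMIO_BUS; only if both decline do we fall
 * through below and ask userspace to complete the access via
 * KVM_EXIT_MMIO.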
*/ if (!vcpu_mmio_read(vcpu, gpa, bytes, val)) { trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes, gpa, *(u64 *)val); return X86EMUL_CONTINUE; } trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, 0); vcpu->mmio_needed = 1; vcpu->run->exit_reason = KVM_EXIT_MMIO; vcpu->run->mmio.phys_addr = vcpu->mmio_phys_addr = gpa; vcpu->run->mmio.len = vcpu->mmio_size = bytes; vcpu->run->mmio.is_write = vcpu->mmio_is_write = 0; return X86EMUL_IO_NEEDED; } int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa, const void *val, int bytes) { int ret; ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes); if (ret < 0) return 0; kvm_mmu_pte_write(vcpu, gpa, val, bytes, 1); return 1; } static int emulator_write_emulated_onepage(unsigned long addr, const void *val, unsigned int bytes, struct x86_exception *exception, struct kvm_vcpu *vcpu) { gpa_t gpa; gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, exception); if (gpa == UNMAPPED_GVA) return X86EMUL_PROPAGATE_FAULT; /* For APIC access vmexit */ if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE) goto mmio; if (emulator_write_phys(vcpu, gpa, val, bytes)) return X86EMUL_CONTINUE; mmio: trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, *(u64 *)val); /* * Is this MMIO handled locally? */ if (!vcpu_mmio_write(vcpu, gpa, bytes, val)) return X86EMUL_CONTINUE; vcpu->mmio_needed = 1; vcpu->run->exit_reason = KVM_EXIT_MMIO; vcpu->run->mmio.phys_addr = vcpu->mmio_phys_addr = gpa; vcpu->run->mmio.len = vcpu->mmio_size = bytes; vcpu->run->mmio.is_write = vcpu->mmio_is_write = 1; memcpy(vcpu->run->mmio.data, val, bytes); return X86EMUL_CONTINUE; } int emulator_write_emulated(unsigned long addr, const void *val, unsigned int bytes, struct x86_exception *exception, struct kvm_vcpu *vcpu) { /* Crossing a page boundary? */ if (((addr + bytes - 1) ^ addr) & PAGE_MASK) { int rc, now; now = -addr & ~PAGE_MASK; rc = emulator_write_emulated_onepage(addr, val, now, exception, vcpu); if (rc != X86EMUL_CONTINUE) return rc; addr += now; val += now; bytes -= now; } return emulator_write_emulated_onepage(addr, val, bytes, exception, vcpu); } #define CMPXCHG_TYPE(t, ptr, old, new) \ (cmpxchg((t *)(ptr), *(t *)(old), *(t *)(new)) == *(t *)(old)) #ifdef CONFIG_X86_64 # define CMPXCHG64(ptr, old, new) CMPXCHG_TYPE(u64, ptr, old, new) #else # define CMPXCHG64(ptr, old, new) \ (cmpxchg64((u64 *)(ptr), *(u64 *)(old), *(u64 *)(new)) == *(u64 *)(old)) #endif static int emulator_cmpxchg_emulated(unsigned long addr, const void *old, const void *new, unsigned int bytes, struct x86_exception *exception, struct kvm_vcpu *vcpu) { gpa_t gpa; struct page *page; char *kaddr; bool exchanged; /* guests cmpxchg8b have to be emulated atomically */ if (bytes > 8 || (bytes & (bytes - 1))) goto emul_write; gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL); if (gpa == UNMAPPED_GVA || (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE) goto emul_write; if (((gpa + bytes - 1) & PAGE_MASK) != (gpa & PAGE_MASK)) goto emul_write; page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT); if (is_error_page(page)) { kvm_release_page_clean(page); goto emul_write; } kaddr = kmap_atomic(page, KM_USER0); kaddr += offset_in_page(gpa); switch (bytes) { case 1: exchanged = CMPXCHG_TYPE(u8, kaddr, old, new); break; case 2: exchanged = CMPXCHG_TYPE(u16, kaddr, old, new); break; case 4: exchanged = CMPXCHG_TYPE(u32, kaddr, old, new); break; case 8: exchanged = CMPXCHG64(kaddr, old, new); break; default: BUG(); } kunmap_atomic(kaddr, KM_USER0); kvm_release_page_dirty(page); if (!exchanged) return X86EMUL_CMPXCHG_FAILED; kvm_mmu_pte_write(vcpu, gpa, new, 
bytes, 1); return X86EMUL_CONTINUE; emul_write: printk_once(KERN_WARNING "kvm: emulating exchange as write\n"); return emulator_write_emulated(addr, new, bytes, exception, vcpu); } static int kernel_pio(struct kvm_vcpu *vcpu, void *pd) { /* TODO: String I/O for in kernel device */ int r; if (vcpu->arch.pio.in) r = kvm_io_bus_read(vcpu->kvm, KVM_PIO_BUS, vcpu->arch.pio.port, vcpu->arch.pio.size, pd); else r = kvm_io_bus_write(vcpu->kvm, KVM_PIO_BUS, vcpu->arch.pio.port, vcpu->arch.pio.size, pd); return r; } static int emulator_pio_in_emulated(int size, unsigned short port, void *val, unsigned int count, struct kvm_vcpu *vcpu) { if (vcpu->arch.pio.count) goto data_avail; trace_kvm_pio(0, port, size, 1); vcpu->arch.pio.port = port; vcpu->arch.pio.in = 1; vcpu->arch.pio.count = count; vcpu->arch.pio.size = size; if (!kernel_pio(vcpu, vcpu->arch.pio_data)) { data_avail: memcpy(val, vcpu->arch.pio_data, size * count); vcpu->arch.pio.count = 0; return 1; } vcpu->run->exit_reason = KVM_EXIT_IO; vcpu->run->io.direction = KVM_EXIT_IO_IN; vcpu->run->io.size = size; vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE; vcpu->run->io.count = count; vcpu->run->io.port = port; return 0; } static int emulator_pio_out_emulated(int size, unsigned short port, const void *val, unsigned int count, struct kvm_vcpu *vcpu) { trace_kvm_pio(1, port, size, 1); vcpu->arch.pio.port = port; vcpu->arch.pio.in = 0; vcpu->arch.pio.count = count; vcpu->arch.pio.size = size; memcpy(vcpu->arch.pio_data, val, size * count); if (!kernel_pio(vcpu, vcpu->arch.pio_data)) { vcpu->arch.pio.count = 0; return 1; } vcpu->run->exit_reason = KVM_EXIT_IO; vcpu->run->io.direction = KVM_EXIT_IO_OUT; vcpu->run->io.size = size; vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE; vcpu->run->io.count = count; vcpu->run->io.port = port; return 0; } static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg) { return kvm_x86_ops->get_segment_base(vcpu, seg); } int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address) { kvm_mmu_invlpg(vcpu, address); return X86EMUL_CONTINUE; } int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu) { if (!need_emulate_wbinvd(vcpu)) return X86EMUL_CONTINUE; if (kvm_x86_ops->has_wbinvd_exit()) { int cpu = get_cpu(); cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask); smp_call_function_many(vcpu->arch.wbinvd_dirty_mask, wbinvd_ipi, NULL, 1); put_cpu(); cpumask_clear(vcpu->arch.wbinvd_dirty_mask); } else wbinvd(); return X86EMUL_CONTINUE; } EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd); int emulate_clts(struct kvm_vcpu *vcpu) { kvm_x86_ops->set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS)); kvm_x86_ops->fpu_activate(vcpu); return X86EMUL_CONTINUE; } int emulator_get_dr(int dr, unsigned long *dest, struct kvm_vcpu *vcpu) { return _kvm_get_dr(vcpu, dr, dest); } int emulator_set_dr(int dr, unsigned long value, struct kvm_vcpu *vcpu) { return __kvm_set_dr(vcpu, dr, value); } static u64 mk_cr_64(u64 curr_cr, u32 new_val) { return (curr_cr & ~((1ULL << 32) - 1)) | new_val; } static unsigned long emulator_get_cr(int cr, struct kvm_vcpu *vcpu) { unsigned long value; switch (cr) { case 0: value = kvm_read_cr0(vcpu); break; case 2: value = vcpu->arch.cr2; break; case 3: value = vcpu->arch.cr3; break; case 4: value = kvm_read_cr4(vcpu); break; case 8: value = kvm_get_cr8(vcpu); break; default: vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr); return 0; } return value; } static int emulator_set_cr(int cr, unsigned long val, struct kvm_vcpu *vcpu) { int res = 0; switch (cr) { case 0: res = kvm_set_cr0(vcpu, 
mk_cr_64(kvm_read_cr0(vcpu), val)); break; case 2: vcpu->arch.cr2 = val; break; case 3: res = kvm_set_cr3(vcpu, val); break; case 4: res = kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val)); break; case 8: res = __kvm_set_cr8(vcpu, val & 0xfUL); break; default: vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr); res = -1; } return res; } static int emulator_get_cpl(struct kvm_vcpu *vcpu) { return kvm_x86_ops->get_cpl(vcpu); } static void emulator_get_gdt(struct desc_ptr *dt, struct kvm_vcpu *vcpu) { kvm_x86_ops->get_gdt(vcpu, dt); } static void emulator_get_idt(struct desc_ptr *dt, struct kvm_vcpu *vcpu) { kvm_x86_ops->get_idt(vcpu, dt); } static unsigned long emulator_get_cached_segment_base(int seg, struct kvm_vcpu *vcpu) { return get_segment_base(vcpu, seg); } static bool emulator_get_cached_descriptor(struct desc_struct *desc, int seg, struct kvm_vcpu *vcpu) { struct kvm_segment var; kvm_get_segment(vcpu, &var, seg); if (var.unusable) return false; if (var.g) var.limit >>= 12; set_desc_limit(desc, var.limit); set_desc_base(desc, (unsigned long)var.base); desc->type = var.type; desc->s = var.s; desc->dpl = var.dpl; desc->p = var.present; desc->avl = var.avl; desc->l = var.l; desc->d = var.db; desc->g = var.g; return true; } static void emulator_set_cached_descriptor(struct desc_struct *desc, int seg, struct kvm_vcpu *vcpu) { struct kvm_segment var; /* needed to preserve selector */ kvm_get_segment(vcpu, &var, seg); var.base = get_desc_base(desc); var.limit = get_desc_limit(desc); if (desc->g) var.limit = (var.limit << 12) | 0xfff; var.type = desc->type; var.present = desc->p; var.dpl = desc->dpl; var.db = desc->d; var.s = desc->s; var.l = desc->l; var.g = desc->g; var.avl = desc->avl; var.present = desc->p; var.unusable = !var.present; var.padding = 0; kvm_set_segment(vcpu, &var, seg); return; } static u16 emulator_get_segment_selector(int seg, struct kvm_vcpu *vcpu) { struct kvm_segment kvm_seg; kvm_get_segment(vcpu, &kvm_seg, seg); return kvm_seg.selector; } static void emulator_set_segment_selector(u16 sel, int seg, struct kvm_vcpu *vcpu) { struct kvm_segment kvm_seg; kvm_get_segment(vcpu, &kvm_seg, seg); kvm_seg.selector = sel; kvm_set_segment(vcpu, &kvm_seg, seg); } static struct x86_emulate_ops emulate_ops = { .read_std = kvm_read_guest_virt_system, .write_std = kvm_write_guest_virt_system, .fetch = kvm_fetch_guest_virt, .read_emulated = emulator_read_emulated, .write_emulated = emulator_write_emulated, .cmpxchg_emulated = emulator_cmpxchg_emulated, .pio_in_emulated = emulator_pio_in_emulated, .pio_out_emulated = emulator_pio_out_emulated, .get_cached_descriptor = emulator_get_cached_descriptor, .set_cached_descriptor = emulator_set_cached_descriptor, .get_segment_selector = emulator_get_segment_selector, .set_segment_selector = emulator_set_segment_selector, .get_cached_segment_base = emulator_get_cached_segment_base, .get_gdt = emulator_get_gdt, .get_idt = emulator_get_idt, .get_cr = emulator_get_cr, .set_cr = emulator_set_cr, .cpl = emulator_get_cpl, .get_dr = emulator_get_dr, .set_dr = emulator_set_dr, .set_msr = kvm_set_msr, .get_msr = kvm_get_msr, }; static void cache_all_regs(struct kvm_vcpu *vcpu) { kvm_register_read(vcpu, VCPU_REGS_RAX); kvm_register_read(vcpu, VCPU_REGS_RSP); kvm_register_read(vcpu, VCPU_REGS_RIP); vcpu->arch.regs_dirty = ~0; } static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask) { u32 int_shadow = kvm_x86_ops->get_interrupt_shadow(vcpu, mask); /* * an sti; sti; sequence only disable interrupts for the first * instruction. 
So, if the last instruction, be it emulated or * not, left the system with the INT_STI flag enabled, it * means that the last instruction is an sti. We should not * leave the flag on in this case. The same goes for mov ss */ if (!(int_shadow & mask)) kvm_x86_ops->set_interrupt_shadow(vcpu, mask); } static void inject_emulated_exception(struct kvm_vcpu *vcpu) { struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; if (ctxt->exception.vector == PF_VECTOR) kvm_propagate_fault(vcpu, &ctxt->exception); else if (ctxt->exception.error_code_valid) kvm_queue_exception_e(vcpu, ctxt->exception.vector, ctxt->exception.error_code); else kvm_queue_exception(vcpu, ctxt->exception.vector); } static void init_emulate_ctxt(struct kvm_vcpu *vcpu) { struct decode_cache *c = &vcpu->arch.emulate_ctxt.decode; int cs_db, cs_l; cache_all_regs(vcpu); kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l); vcpu->arch.emulate_ctxt.vcpu = vcpu; vcpu->arch.emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu); vcpu->arch.emulate_ctxt.eip = kvm_rip_read(vcpu); vcpu->arch.emulate_ctxt.mode = (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL : (vcpu->arch.emulate_ctxt.eflags & X86_EFLAGS_VM) ? X86EMUL_MODE_VM86 : cs_l ? X86EMUL_MODE_PROT64 : cs_db ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16; memset(c, 0, sizeof(struct decode_cache)); memcpy(c->regs, vcpu->arch.regs, sizeof c->regs); } int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq) { struct decode_cache *c = &vcpu->arch.emulate_ctxt.decode; int ret; init_emulate_ctxt(vcpu); vcpu->arch.emulate_ctxt.decode.op_bytes = 2; vcpu->arch.emulate_ctxt.decode.ad_bytes = 2; vcpu->arch.emulate_ctxt.decode.eip = vcpu->arch.emulate_ctxt.eip; ret = emulate_int_real(&vcpu->arch.emulate_ctxt, &emulate_ops, irq); if (ret != X86EMUL_CONTINUE) return EMULATE_FAIL; vcpu->arch.emulate_ctxt.eip = c->eip; memcpy(vcpu->arch.regs, c->regs, sizeof c->regs); kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.eip); kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags); if (irq == NMI_VECTOR) vcpu->arch.nmi_pending = false; else vcpu->arch.interrupt.pending = false; return EMULATE_DONE; } EXPORT_SYMBOL_GPL(kvm_inject_realmode_interrupt); static int handle_emulation_failure(struct kvm_vcpu *vcpu) { ++vcpu->stat.insn_emulation_fail; trace_kvm_emulate_insn_failed(vcpu); vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; vcpu->run->internal.ndata = 0; kvm_queue_exception(vcpu, UD_VECTOR); return EMULATE_FAIL; } static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t gva) { gpa_t gpa; if (tdp_enabled) return false; /* * if emulation was due to access to a shadowed page table * and it failed, try to unshadow the page and re-enter the * guest to let the CPU execute the instruction. */ if (kvm_mmu_unprotect_page_virt(vcpu, gva)) return true; gpa = kvm_mmu_gva_to_gpa_system(vcpu, gva, NULL); if (gpa == UNMAPPED_GVA) return true; /* let cpu generate fault */ if (!kvm_is_error_hva(gfn_to_hva(vcpu->kvm, gpa >> PAGE_SHIFT))) return true; return false; } int emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2, u16 error_code, int emulation_type) { int r; struct decode_cache *c = &vcpu->arch.emulate_ctxt.decode; kvm_clear_exception_queue(vcpu); vcpu->arch.mmio_fault_cr2 = cr2; /* * TODO: fix emulate.c to use guest_read/write_register * instead of direct ->regs accesses, can save a hundred cycles * on Intel for instructions that don't read/change RSP, * for example.
*/ cache_all_regs(vcpu); if (!(emulation_type & EMULTYPE_NO_DECODE)) { init_emulate_ctxt(vcpu); vcpu->arch.emulate_ctxt.interruptibility = 0; vcpu->arch.emulate_ctxt.have_exception = false; vcpu->arch.emulate_ctxt.perm_ok = false; r = x86_decode_insn(&vcpu->arch.emulate_ctxt); if (r == X86EMUL_PROPAGATE_FAULT) goto done; trace_kvm_emulate_insn_start(vcpu); /* Only allow emulation of specific instructions on #UD * (namely VMMCALL, sysenter, sysexit, syscall) */ if (emulation_type & EMULTYPE_TRAP_UD) { if (!c->twobyte) return EMULATE_FAIL; switch (c->b) { case 0x01: /* VMMCALL */ if (c->modrm_mod != 3 || c->modrm_rm != 1) return EMULATE_FAIL; break; case 0x34: /* sysenter */ case 0x35: /* sysexit */ if (c->modrm_mod != 0 || c->modrm_rm != 0) return EMULATE_FAIL; break; case 0x05: /* syscall */ if (c->modrm_mod != 0 || c->modrm_rm != 0) return EMULATE_FAIL; break; default: return EMULATE_FAIL; } if (!(c->modrm_reg == 0 || c->modrm_reg == 3)) return EMULATE_FAIL; } ++vcpu->stat.insn_emulation; if (r) { if (reexecute_instruction(vcpu, cr2)) return EMULATE_DONE; if (emulation_type & EMULTYPE_SKIP) return EMULATE_FAIL; return handle_emulation_failure(vcpu); } } if (emulation_type & EMULTYPE_SKIP) { kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.decode.eip); return EMULATE_DONE; } /* This is needed for the vmware backdoor interface to work since it changes register values during the IO operation */ memcpy(c->regs, vcpu->arch.regs, sizeof c->regs); restart: r = x86_emulate_insn(&vcpu->arch.emulate_ctxt); if (r == EMULATION_FAILED) { if (reexecute_instruction(vcpu, cr2)) return EMULATE_DONE; return handle_emulation_failure(vcpu); } done: if (vcpu->arch.emulate_ctxt.have_exception) { inject_emulated_exception(vcpu); r = EMULATE_DONE; } else if (vcpu->arch.pio.count) { if (!vcpu->arch.pio.in) vcpu->arch.pio.count = 0; r = EMULATE_DO_MMIO; } else if (vcpu->mmio_needed) { if (vcpu->mmio_is_write) vcpu->mmio_needed = 0; r = EMULATE_DO_MMIO; } else if (r == EMULATION_RESTART) goto restart; else r = EMULATE_DONE; toggle_interruptibility(vcpu, vcpu->arch.emulate_ctxt.interruptibility); kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags); kvm_make_request(KVM_REQ_EVENT, vcpu); memcpy(vcpu->arch.regs, c->regs, sizeof c->regs); kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.eip); return r; } EXPORT_SYMBOL_GPL(emulate_instruction); int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port) { unsigned long val = kvm_register_read(vcpu, VCPU_REGS_RAX); int ret = emulator_pio_out_emulated(size, port, &val, 1, vcpu); /* do not return to the emulator after returning from userspace */ vcpu->arch.pio.count = 0; return ret; } EXPORT_SYMBOL_GPL(kvm_fast_pio_out); static void tsc_bad(void *info) { __get_cpu_var(cpu_tsc_khz) = 0; } static void tsc_khz_changed(void *data) { struct cpufreq_freqs *freq = data; unsigned long khz = 0; if (data) khz = freq->new; else if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) khz = cpufreq_quick_get(raw_smp_processor_id()); if (!khz) khz = tsc_khz; __get_cpu_var(cpu_tsc_khz) = khz; } static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data) { struct cpufreq_freqs *freq = data; struct kvm *kvm; struct kvm_vcpu *vcpu; int i, send_ipi = 0; /* * We allow guests to temporarily run on slowing clocks, * provided we notify them after, or to run on accelerating * clocks, provided we notify them before. Thus time never * goes backwards. * * However, we have a problem.
We can't atomically update * the frequency of a given CPU from this function; it is * merely a notifier, which can be called from any CPU. * Changing the TSC frequency at arbitrary points in time * requires a recomputation of local variables related to * the TSC for each VCPU. We must flag these local variables * to be updated and be sure the update takes place with the * new frequency before any guests proceed. * * Unfortunately, the combination of hotplug CPU and frequency * change creates an intractable locking scenario; the order * of when these callouts happen is undefined with respect to * CPU hotplug, and they can race with each other. As such, * merely setting per_cpu(cpu_tsc_khz) = X during a hotadd is * undefined; you can actually have a CPU frequency change take * place in between the computation of X and the setting of the * variable. To protect against this problem, all updates of * the per_cpu tsc_khz variable are done in an interrupt * protected IPI, and all callers wishing to update the value * must wait for a synchronous IPI to complete (which is trivial * if the caller is on the CPU already). This establishes the * necessary total order on variable updates. * * Note that because a guest time update may take place * anytime after the setting of the VCPU's request bit, the * correct TSC value must be set before the request. However, * to ensure the update actually makes it to any guest which * starts running in hardware virtualization between the set * and the acquisition of the spinlock, we must also ping the * CPU after setting the request bit. * */ if (val == CPUFREQ_PRECHANGE && freq->old > freq->new) return 0; if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new) return 0; smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1); spin_lock(&kvm_lock); list_for_each_entry(kvm, &vm_list, vm_list) { kvm_for_each_vcpu(i, vcpu, kvm) { if (vcpu->cpu != freq->cpu) continue; kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); if (vcpu->cpu != smp_processor_id()) send_ipi = 1; } } spin_unlock(&kvm_lock); if (freq->old < freq->new && send_ipi) { /* * We are scaling the frequency up. We must make sure the guest * doesn't see old kvmclock values while running with * the new frequency, otherwise we risk that the guest sees * time go backwards. * * In case we update the frequency for another cpu * (which might be in guest context), send an interrupt * to kick the cpu out of guest context. The next time * guest context is entered, kvmclock will be updated, * so the guest will not see stale values.
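 * KVM_REQ_CLOCK_UPDATE has already been set for every affected VCPU
 * above; the IPI below simply forces a VM exit on that CPU so the
 * pending request is serviced before the guest runs at the new
 * frequency.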
*/ smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1); } return 0; } static struct notifier_block kvmclock_cpufreq_notifier_block = { .notifier_call = kvmclock_cpufreq_notifier }; static int kvmclock_cpu_notifier(struct notifier_block *nfb, unsigned long action, void *hcpu) { unsigned int cpu = (unsigned long)hcpu; switch (action) { case CPU_ONLINE: case CPU_DOWN_FAILED: smp_call_function_single(cpu, tsc_khz_changed, NULL, 1); break; case CPU_DOWN_PREPARE: smp_call_function_single(cpu, tsc_bad, NULL, 1); break; } return NOTIFY_OK; } static struct notifier_block kvmclock_cpu_notifier_block = { .notifier_call = kvmclock_cpu_notifier, .priority = -INT_MAX }; static void kvm_timer_init(void) { int cpu; max_tsc_khz = tsc_khz; register_hotcpu_notifier(&kvmclock_cpu_notifier_block); if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) { #ifdef CONFIG_CPU_FREQ struct cpufreq_policy policy; memset(&policy, 0, sizeof(policy)); cpu = get_cpu(); cpufreq_get_policy(&policy, cpu); if (policy.cpuinfo.max_freq) max_tsc_khz = policy.cpuinfo.max_freq; put_cpu(); #endif cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER); } pr_debug("kvm: max_tsc_khz = %ld\n", max_tsc_khz); for_each_online_cpu(cpu) smp_call_function_single(cpu, tsc_khz_changed, NULL, 1); } static DEFINE_PER_CPU(struct kvm_vcpu *, current_vcpu); static int kvm_is_in_guest(void) { return percpu_read(current_vcpu) != NULL; } static int kvm_is_user_mode(void) { int user_mode = 3; if (percpu_read(current_vcpu)) user_mode = kvm_x86_ops->get_cpl(percpu_read(current_vcpu)); return user_mode != 0; } static unsigned long kvm_get_guest_ip(void) { unsigned long ip = 0; if (percpu_read(current_vcpu)) ip = kvm_rip_read(percpu_read(current_vcpu)); return ip; } static struct perf_guest_info_callbacks kvm_guest_cbs = { .is_in_guest = kvm_is_in_guest, .is_user_mode = kvm_is_user_mode, .get_guest_ip = kvm_get_guest_ip, }; void kvm_before_handle_nmi(struct kvm_vcpu *vcpu) { percpu_write(current_vcpu, vcpu); } EXPORT_SYMBOL_GPL(kvm_before_handle_nmi); void kvm_after_handle_nmi(struct kvm_vcpu *vcpu) { percpu_write(current_vcpu, NULL); } EXPORT_SYMBOL_GPL(kvm_after_handle_nmi); int kvm_arch_init(void *opaque) { int r; struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque; if (kvm_x86_ops) { printk(KERN_ERR "kvm: already loaded the other module\n"); r = -EEXIST; goto out; } if (!ops->cpu_has_kvm_support()) { printk(KERN_ERR "kvm: no hardware support\n"); r = -EOPNOTSUPP; goto out; } if (ops->disabled_by_bios()) { printk(KERN_ERR "kvm: disabled by bios\n"); r = -EOPNOTSUPP; goto out; } r = kvm_mmu_module_init(); if (r) goto out; kvm_init_msr_list(); kvm_x86_ops = ops; kvm_mmu_set_nonpresent_ptes(0ull, 0ull); kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK, PT_DIRTY_MASK, PT64_NX_MASK, 0); kvm_timer_init(); perf_register_guest_info_callbacks(&kvm_guest_cbs); if (cpu_has_xsave) host_xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK); return 0; out: return r; } void kvm_arch_exit(void) { perf_unregister_guest_info_callbacks(&kvm_guest_cbs); if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER); unregister_hotcpu_notifier(&kvmclock_cpu_notifier_block); kvm_x86_ops = NULL; kvm_mmu_module_exit(); } int kvm_emulate_halt(struct kvm_vcpu *vcpu) { ++vcpu->stat.halt_exits; if (irqchip_in_kernel(vcpu->kvm)) { vcpu->arch.mp_state = KVM_MP_STATE_HALTED; return 1; } else { vcpu->run->exit_reason = KVM_EXIT_HLT; return 0; } } EXPORT_SYMBOL_GPL(kvm_emulate_halt); 
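/*
 * For a 32-bit guest a hypercall passes a guest physical address split
 * across two registers; hc_gpa() reassembles it (a0 holds the low 32
 * bits, a1 the high 32 bits). In long mode a0 already carries the full
 * address.
 */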
static inline gpa_t hc_gpa(struct kvm_vcpu *vcpu, unsigned long a0, unsigned long a1) { if (is_long_mode(vcpu)) return a0; else return a0 | ((gpa_t)a1 << 32); } int kvm_hv_hypercall(struct kvm_vcpu *vcpu) { u64 param, ingpa, outgpa, ret; uint16_t code, rep_idx, rep_cnt, res = HV_STATUS_SUCCESS, rep_done = 0; bool fast, longmode; int cs_db, cs_l; /* * hypercall generates UD from non zero cpl and real mode * per HYPER-V spec */ if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) { kvm_queue_exception(vcpu, UD_VECTOR); return 0; } kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l); longmode = is_long_mode(vcpu) && cs_l == 1; if (!longmode) { param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) | (kvm_register_read(vcpu, VCPU_REGS_RAX) & 0xffffffff); ingpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RBX) << 32) | (kvm_register_read(vcpu, VCPU_REGS_RCX) & 0xffffffff); outgpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDI) << 32) | (kvm_register_read(vcpu, VCPU_REGS_RSI) & 0xffffffff); } #ifdef CONFIG_X86_64 else { param = kvm_register_read(vcpu, VCPU_REGS_RCX); ingpa = kvm_register_read(vcpu, VCPU_REGS_RDX); outgpa = kvm_register_read(vcpu, VCPU_REGS_R8); } #endif code = param & 0xffff; fast = (param >> 16) & 0x1; rep_cnt = (param >> 32) & 0xfff; rep_idx = (param >> 48) & 0xfff; trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa); switch (code) { case HV_X64_HV_NOTIFY_LONG_SPIN_WAIT: kvm_vcpu_on_spin(vcpu); break; default: res = HV_STATUS_INVALID_HYPERCALL_CODE; break; } ret = res | (((u64)rep_done & 0xfff) << 32); if (longmode) { kvm_register_write(vcpu, VCPU_REGS_RAX, ret); } else { kvm_register_write(vcpu, VCPU_REGS_RDX, ret >> 32); kvm_register_write(vcpu, VCPU_REGS_RAX, ret & 0xffffffff); } return 1; } int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) { unsigned long nr, a0, a1, a2, a3, ret; int r = 1; if (kvm_hv_hypercall_enabled(vcpu->kvm)) return kvm_hv_hypercall(vcpu); nr = kvm_register_read(vcpu, VCPU_REGS_RAX); a0 = kvm_register_read(vcpu, VCPU_REGS_RBX); a1 = kvm_register_read(vcpu, VCPU_REGS_RCX); a2 = kvm_register_read(vcpu, VCPU_REGS_RDX); a3 = kvm_register_read(vcpu, VCPU_REGS_RSI); trace_kvm_hypercall(nr, a0, a1, a2, a3); if (!is_long_mode(vcpu)) { nr &= 0xFFFFFFFF; a0 &= 0xFFFFFFFF; a1 &= 0xFFFFFFFF; a2 &= 0xFFFFFFFF; a3 &= 0xFFFFFFFF; } if (kvm_x86_ops->get_cpl(vcpu) != 0) { ret = -KVM_EPERM; goto out; } switch (nr) { case KVM_HC_VAPIC_POLL_IRQ: ret = 0; break; case KVM_HC_MMU_OP: r = kvm_pv_mmu_op(vcpu, a0, hc_gpa(vcpu, a1, a2), &ret); break; default: ret = -KVM_ENOSYS; break; } out: kvm_register_write(vcpu, VCPU_REGS_RAX, ret); ++vcpu->stat.hypercalls; return r; } EXPORT_SYMBOL_GPL(kvm_emulate_hypercall); int kvm_fix_hypercall(struct kvm_vcpu *vcpu) { char instruction[3]; unsigned long rip = kvm_rip_read(vcpu); /* * Blow out the MMU to ensure that no other VCPU has an active mapping * to ensure that the updated hypercall appears atomically across all * VCPUs. 
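 * kvm_mmu_zap_all() below drops every shadow page for the VM, so any
 * stale translation of the patched page is rebuilt on the next fault.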
*/ kvm_mmu_zap_all(vcpu->kvm); kvm_x86_ops->patch_hypercall(vcpu, instruction); return emulator_write_emulated(rip, instruction, 3, NULL, vcpu); } void realmode_lgdt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base) { struct desc_ptr dt = { limit, base }; kvm_x86_ops->set_gdt(vcpu, &dt); } void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base) { struct desc_ptr dt = { limit, base }; kvm_x86_ops->set_idt(vcpu, &dt); } static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i) { struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i]; int j, nent = vcpu->arch.cpuid_nent; e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT; /* when no next entry is found, the current entry[i] is reselected */ for (j = i + 1; ; j = (j + 1) % nent) { struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j]; if (ej->function == e->function) { ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT; return j; } } return 0; /* silence gcc, even though control never reaches here */ } /* find an entry with matching function, matching index (if needed), and that * should be read next (if it's stateful) */ static int is_matching_cpuid_entry(struct kvm_cpuid_entry2 *e, u32 function, u32 index) { if (e->function != function) return 0; if ((e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) && e->index != index) return 0; if ((e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) && !(e->flags & KVM_CPUID_FLAG_STATE_READ_NEXT)) return 0; return 1; } struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu, u32 function, u32 index) { int i; struct kvm_cpuid_entry2 *best = NULL; for (i = 0; i < vcpu->arch.cpuid_nent; ++i) { struct kvm_cpuid_entry2 *e; e = &vcpu->arch.cpuid_entries[i]; if (is_matching_cpuid_entry(e, function, index)) { if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) move_to_next_stateful_cpuid_entry(vcpu, i); best = e; break; } /* * Both basic or both extended? */ if (((e->function ^ function) & 0x80000000) == 0) if (!best || e->function > best->function) best = e; } return best; } EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry); int cpuid_maxphyaddr(struct kvm_vcpu *vcpu) { struct kvm_cpuid_entry2 *best; best = kvm_find_cpuid_entry(vcpu, 0x80000000, 0); if (!best || best->eax < 0x80000008) goto not_found; best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0); if (best) return best->eax & 0xff; not_found: return 36; } void kvm_emulate_cpuid(struct kvm_vcpu *vcpu) { u32 function, index; struct kvm_cpuid_entry2 *best; function = kvm_register_read(vcpu, VCPU_REGS_RAX); index = kvm_register_read(vcpu, VCPU_REGS_RCX); kvm_register_write(vcpu, VCPU_REGS_RAX, 0); kvm_register_write(vcpu, VCPU_REGS_RBX, 0); kvm_register_write(vcpu, VCPU_REGS_RCX, 0); kvm_register_write(vcpu, VCPU_REGS_RDX, 0); best = kvm_find_cpuid_entry(vcpu, function, index); if (best) { kvm_register_write(vcpu, VCPU_REGS_RAX, best->eax); kvm_register_write(vcpu, VCPU_REGS_RBX, best->ebx); kvm_register_write(vcpu, VCPU_REGS_RCX, best->ecx); kvm_register_write(vcpu, VCPU_REGS_RDX, best->edx); } kvm_x86_ops->skip_emulated_instruction(vcpu); trace_kvm_cpuid(function, kvm_register_read(vcpu, VCPU_REGS_RAX), kvm_register_read(vcpu, VCPU_REGS_RBX), kvm_register_read(vcpu, VCPU_REGS_RCX), kvm_register_read(vcpu, VCPU_REGS_RDX)); } EXPORT_SYMBOL_GPL(kvm_emulate_cpuid); /* * Check if userspace requested an interrupt window, and that the * interrupt window is open. * * No need to exit to userspace if we already have an interrupt queued. 
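 * This path is only relevant with a userspace irqchip; with an
 * in-kernel irqchip, interrupt injection never needs a userspace exit.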
*/ static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu) { return (!irqchip_in_kernel(vcpu->kvm) && !kvm_cpu_has_interrupt(vcpu) && vcpu->run->request_interrupt_window && kvm_arch_interrupt_allowed(vcpu)); } static void post_kvm_run_save(struct kvm_vcpu *vcpu) { struct kvm_run *kvm_run = vcpu->run; kvm_run->if_flag = (kvm_get_rflags(vcpu) & X86_EFLAGS_IF) != 0; kvm_run->cr8 = kvm_get_cr8(vcpu); kvm_run->apic_base = kvm_get_apic_base(vcpu); if (irqchip_in_kernel(vcpu->kvm)) kvm_run->ready_for_interrupt_injection = 1; else kvm_run->ready_for_interrupt_injection = kvm_arch_interrupt_allowed(vcpu) && !kvm_cpu_has_interrupt(vcpu) && !kvm_event_needs_reinjection(vcpu); } static void vapic_enter(struct kvm_vcpu *vcpu) { struct kvm_lapic *apic = vcpu->arch.apic; struct page *page; if (!apic || !apic->vapic_addr) return; page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); vcpu->arch.apic->vapic_page = page; } static void vapic_exit(struct kvm_vcpu *vcpu) { struct kvm_lapic *apic = vcpu->arch.apic; int idx; if (!apic || !apic->vapic_addr) return; idx = srcu_read_lock(&vcpu->kvm->srcu); kvm_release_page_dirty(apic->vapic_page); mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); srcu_read_unlock(&vcpu->kvm->srcu, idx); } static void update_cr8_intercept(struct kvm_vcpu *vcpu) { int max_irr, tpr; if (!kvm_x86_ops->update_cr8_intercept) return; if (!vcpu->arch.apic) return; if (!vcpu->arch.apic->vapic_addr) max_irr = kvm_lapic_find_highest_irr(vcpu); else max_irr = -1; if (max_irr != -1) max_irr >>= 4; tpr = kvm_lapic_get_cr8(vcpu); kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr); } static void inject_pending_event(struct kvm_vcpu *vcpu) { /* try to reinject previous events if any */ if (vcpu->arch.exception.pending) { trace_kvm_inj_exception(vcpu->arch.exception.nr, vcpu->arch.exception.has_error_code, vcpu->arch.exception.error_code); kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr, vcpu->arch.exception.has_error_code, vcpu->arch.exception.error_code, vcpu->arch.exception.reinject); return; } if (vcpu->arch.nmi_injected) { kvm_x86_ops->set_nmi(vcpu); return; } if (vcpu->arch.interrupt.pending) { kvm_x86_ops->set_irq(vcpu); return; } /* try to inject new event if pending */ if (vcpu->arch.nmi_pending) { if (kvm_x86_ops->nmi_allowed(vcpu)) { vcpu->arch.nmi_pending = false; vcpu->arch.nmi_injected = true; kvm_x86_ops->set_nmi(vcpu); } } else if (kvm_cpu_has_interrupt(vcpu)) { if (kvm_x86_ops->interrupt_allowed(vcpu)) { kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu), false); kvm_x86_ops->set_irq(vcpu); } } } static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu) { if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) && !vcpu->guest_xcr0_loaded) { /* kvm_set_xcr() also depends on this */ xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0); vcpu->guest_xcr0_loaded = 1; } } static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu) { if (vcpu->guest_xcr0_loaded) { if (vcpu->arch.xcr0 != host_xcr0) xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0); vcpu->guest_xcr0_loaded = 0; } } static int vcpu_enter_guest(struct kvm_vcpu *vcpu) { int r; bool req_int_win = !irqchip_in_kernel(vcpu->kvm) && vcpu->run->request_interrupt_window; if (vcpu->requests) { if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) kvm_mmu_unload(vcpu); if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu)) __kvm_migrate_timers(vcpu); if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu)) { r = kvm_guest_time_update(vcpu); if (unlikely(r)) goto out; } if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu)) 
kvm_mmu_sync_roots(vcpu); if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) kvm_x86_ops->tlb_flush(vcpu); if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) { vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS; r = 0; goto out; } if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) { vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN; r = 0; goto out; } if (kvm_check_request(KVM_REQ_DEACTIVATE_FPU, vcpu)) { vcpu->fpu_active = 0; kvm_x86_ops->fpu_deactivate(vcpu); } if (kvm_check_request(KVM_REQ_APF_HALT, vcpu)) { /* Page is swapped out. Do synthetic halt */ vcpu->arch.apf.halted = true; r = 1; goto out; } } r = kvm_mmu_reload(vcpu); if (unlikely(r)) goto out; if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) { inject_pending_event(vcpu); /* enable NMI/IRQ window open exits if needed */ if (vcpu->arch.nmi_pending) kvm_x86_ops->enable_nmi_window(vcpu); else if (kvm_cpu_has_interrupt(vcpu) || req_int_win) kvm_x86_ops->enable_irq_window(vcpu); if (kvm_lapic_enabled(vcpu)) { update_cr8_intercept(vcpu); kvm_lapic_sync_to_vapic(vcpu); } } preempt_disable(); kvm_x86_ops->prepare_guest_switch(vcpu); if (vcpu->fpu_active) kvm_load_guest_fpu(vcpu); kvm_load_guest_xcr0(vcpu); atomic_set(&vcpu->guest_mode, 1); smp_wmb(); local_irq_disable(); if (!atomic_read(&vcpu->guest_mode) || vcpu->requests || need_resched() || signal_pending(current)) { atomic_set(&vcpu->guest_mode, 0); smp_wmb(); local_irq_enable(); preempt_enable(); kvm_x86_ops->cancel_injection(vcpu); r = 1; goto out; } srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); kvm_guest_enter(); if (unlikely(vcpu->arch.switch_db_regs)) { set_debugreg(0, 7); set_debugreg(vcpu->arch.eff_db[0], 0); set_debugreg(vcpu->arch.eff_db[1], 1); set_debugreg(vcpu->arch.eff_db[2], 2); set_debugreg(vcpu->arch.eff_db[3], 3); } trace_kvm_entry(vcpu->vcpu_id); kvm_x86_ops->run(vcpu); /* * If the guest has used debug registers, at least dr7 * will be disabled while returning to the host. * If we don't have active breakpoints in the host, we don't * care about the messed up debug address registers. But if * we have some of them active, restore the old state. */ if (hw_breakpoint_active()) hw_breakpoint_restore(); kvm_get_msr(vcpu, MSR_IA32_TSC, &vcpu->arch.last_guest_tsc); atomic_set(&vcpu->guest_mode, 0); smp_wmb(); local_irq_enable(); ++vcpu->stat.exits; /* * We must have an instruction between local_irq_enable() and * kvm_guest_exit(), so the timer interrupt isn't delayed by * the interrupt shadow. The stat.exits increment will do nicely. 
* But we need to prevent reordering, hence this barrier(): */ barrier(); kvm_guest_exit(); preempt_enable(); vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); /* * Profile KVM exit RIPs: */ if (unlikely(prof_on == KVM_PROFILING)) { unsigned long rip = kvm_rip_read(vcpu); profile_hit(KVM_PROFILING, (void *)rip); } kvm_lapic_sync_from_vapic(vcpu); r = kvm_x86_ops->handle_exit(vcpu); out: return r; } static int __vcpu_run(struct kvm_vcpu *vcpu) { int r; struct kvm *kvm = vcpu->kvm; if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED)) { pr_debug("vcpu %d received sipi with vector # %x\n", vcpu->vcpu_id, vcpu->arch.sipi_vector); kvm_lapic_reset(vcpu); r = kvm_arch_vcpu_reset(vcpu); if (r) return r; vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; } vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); vapic_enter(vcpu); r = 1; while (r > 0) { if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE && !vcpu->arch.apf.halted) r = vcpu_enter_guest(vcpu); else { srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); kvm_vcpu_block(vcpu); vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) { switch(vcpu->arch.mp_state) { case KVM_MP_STATE_HALTED: vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; case KVM_MP_STATE_RUNNABLE: vcpu->arch.apf.halted = false; break; case KVM_MP_STATE_SIPI_RECEIVED: default: r = -EINTR; break; } } } if (r <= 0) break; clear_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests); if (kvm_cpu_has_pending_timer(vcpu)) kvm_inject_pending_timer_irqs(vcpu); if (dm_request_for_irq_injection(vcpu)) { r = -EINTR; vcpu->run->exit_reason = KVM_EXIT_INTR; ++vcpu->stat.request_irq_exits; } kvm_check_async_pf_completion(vcpu); if (signal_pending(current)) { r = -EINTR; vcpu->run->exit_reason = KVM_EXIT_INTR; ++vcpu->stat.signal_exits; } if (need_resched()) { srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); kvm_resched(vcpu); vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); } } srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); vapic_exit(vcpu); return r; } int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { int r; sigset_t sigsaved; if (vcpu->sigset_active) sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) { kvm_vcpu_block(vcpu); clear_bit(KVM_REQ_UNHALT, &vcpu->requests); r = -EAGAIN; goto out; } /* re-sync apic's tpr */ if (!irqchip_in_kernel(vcpu->kvm)) kvm_set_cr8(vcpu, kvm_run->cr8); if (vcpu->arch.pio.count || vcpu->mmio_needed) { if (vcpu->mmio_needed) { memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8); vcpu->mmio_read_completed = 1; vcpu->mmio_needed = 0; } vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); r = emulate_instruction(vcpu, 0, 0, EMULTYPE_NO_DECODE); srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); if (r != EMULATE_DONE) { r = 0; goto out; } } if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL) kvm_register_write(vcpu, VCPU_REGS_RAX, kvm_run->hypercall.ret); r = __vcpu_run(vcpu); out: post_kvm_run_save(vcpu); if (vcpu->sigset_active) sigprocmask(SIG_SETMASK, &sigsaved, NULL); return r; } int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) { regs->rax = kvm_register_read(vcpu, VCPU_REGS_RAX); regs->rbx = kvm_register_read(vcpu, VCPU_REGS_RBX); regs->rcx = kvm_register_read(vcpu, VCPU_REGS_RCX); regs->rdx = kvm_register_read(vcpu, VCPU_REGS_RDX); regs->rsi = kvm_register_read(vcpu, VCPU_REGS_RSI); regs->rdi = kvm_register_read(vcpu, VCPU_REGS_RDI); regs->rsp = kvm_register_read(vcpu, VCPU_REGS_RSP); regs->rbp = kvm_register_read(vcpu, VCPU_REGS_RBP); #ifdef 
CONFIG_X86_64 regs->r8 = kvm_register_read(vcpu, VCPU_REGS_R8); regs->r9 = kvm_register_read(vcpu, VCPU_REGS_R9); regs->r10 = kvm_register_read(vcpu, VCPU_REGS_R10); regs->r11 = kvm_register_read(vcpu, VCPU_REGS_R11); regs->r12 = kvm_register_read(vcpu, VCPU_REGS_R12); regs->r13 = kvm_register_read(vcpu, VCPU_REGS_R13); regs->r14 = kvm_register_read(vcpu, VCPU_REGS_R14); regs->r15 = kvm_register_read(vcpu, VCPU_REGS_R15); #endif regs->rip = kvm_rip_read(vcpu); regs->rflags = kvm_get_rflags(vcpu); return 0; } int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) { kvm_register_write(vcpu, VCPU_REGS_RAX, regs->rax); kvm_register_write(vcpu, VCPU_REGS_RBX, regs->rbx); kvm_register_write(vcpu, VCPU_REGS_RCX, regs->rcx); kvm_register_write(vcpu, VCPU_REGS_RDX, regs->rdx); kvm_register_write(vcpu, VCPU_REGS_RSI, regs->rsi); kvm_register_write(vcpu, VCPU_REGS_RDI, regs->rdi); kvm_register_write(vcpu, VCPU_REGS_RSP, regs->rsp); kvm_register_write(vcpu, VCPU_REGS_RBP, regs->rbp); #ifdef CONFIG_X86_64 kvm_register_write(vcpu, VCPU_REGS_R8, regs->r8); kvm_register_write(vcpu, VCPU_REGS_R9, regs->r9); kvm_register_write(vcpu, VCPU_REGS_R10, regs->r10); kvm_register_write(vcpu, VCPU_REGS_R11, regs->r11); kvm_register_write(vcpu, VCPU_REGS_R12, regs->r12); kvm_register_write(vcpu, VCPU_REGS_R13, regs->r13); kvm_register_write(vcpu, VCPU_REGS_R14, regs->r14); kvm_register_write(vcpu, VCPU_REGS_R15, regs->r15); #endif kvm_rip_write(vcpu, regs->rip); kvm_set_rflags(vcpu, regs->rflags); vcpu->arch.exception.pending = false; kvm_make_request(KVM_REQ_EVENT, vcpu); return 0; } void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l) { struct kvm_segment cs; kvm_get_segment(vcpu, &cs, VCPU_SREG_CS); *db = cs.db; *l = cs.l; } EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits); int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { struct desc_ptr dt; kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS); kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS); kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES); kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS); kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS); kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS); kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR); kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR); kvm_x86_ops->get_idt(vcpu, &dt); sregs->idt.limit = dt.size; sregs->idt.base = dt.address; kvm_x86_ops->get_gdt(vcpu, &dt); sregs->gdt.limit = dt.size; sregs->gdt.base = dt.address; sregs->cr0 = kvm_read_cr0(vcpu); sregs->cr2 = vcpu->arch.cr2; sregs->cr3 = vcpu->arch.cr3; sregs->cr4 = kvm_read_cr4(vcpu); sregs->cr8 = kvm_get_cr8(vcpu); sregs->efer = vcpu->arch.efer; sregs->apic_base = kvm_get_apic_base(vcpu); memset(sregs->interrupt_bitmap, 0, sizeof sregs->interrupt_bitmap); if (vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft) set_bit(vcpu->arch.interrupt.nr, (unsigned long *)sregs->interrupt_bitmap); return 0; } int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, struct kvm_mp_state *mp_state) { mp_state->mp_state = vcpu->arch.mp_state; return 0; } int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, struct kvm_mp_state *mp_state) { vcpu->arch.mp_state = mp_state->mp_state; kvm_make_request(KVM_REQ_EVENT, vcpu); return 0; } int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason, bool has_error_code, u32 error_code) { struct decode_cache *c = &vcpu->arch.emulate_ctxt.decode; int ret; init_emulate_ctxt(vcpu); ret = emulator_task_switch(&vcpu->arch.emulate_ctxt, tss_selector, reason, 
has_error_code, error_code); if (ret) return EMULATE_FAIL; memcpy(vcpu->arch.regs, c->regs, sizeof c->regs); kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.eip); kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags); kvm_make_request(KVM_REQ_EVENT, vcpu); return EMULATE_DONE; } EXPORT_SYMBOL_GPL(kvm_task_switch); int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { int mmu_reset_needed = 0; int pending_vec, max_bits; struct desc_ptr dt; dt.size = sregs->idt.limit; dt.address = sregs->idt.base; kvm_x86_ops->set_idt(vcpu, &dt); dt.size = sregs->gdt.limit; dt.address = sregs->gdt.base; kvm_x86_ops->set_gdt(vcpu, &dt); vcpu->arch.cr2 = sregs->cr2; mmu_reset_needed |= vcpu->arch.cr3 != sregs->cr3; vcpu->arch.cr3 = sregs->cr3; kvm_set_cr8(vcpu, sregs->cr8); mmu_reset_needed |= vcpu->arch.efer != sregs->efer; kvm_x86_ops->set_efer(vcpu, sregs->efer); kvm_set_apic_base(vcpu, sregs->apic_base); mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0; kvm_x86_ops->set_cr0(vcpu, sregs->cr0); vcpu->arch.cr0 = sregs->cr0; mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4; kvm_x86_ops->set_cr4(vcpu, sregs->cr4); if (sregs->cr4 & X86_CR4_OSXSAVE) update_cpuid(vcpu); if (!is_long_mode(vcpu) && is_pae(vcpu)) { load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3); mmu_reset_needed = 1; } if (mmu_reset_needed) kvm_mmu_reset_context(vcpu); max_bits = (sizeof sregs->interrupt_bitmap) << 3; pending_vec = find_first_bit( (const unsigned long *)sregs->interrupt_bitmap, max_bits); if (pending_vec < max_bits) { kvm_queue_interrupt(vcpu, pending_vec, false); pr_debug("Set back pending irq %d\n", pending_vec); if (irqchip_in_kernel(vcpu->kvm)) kvm_pic_clear_isr_ack(vcpu->kvm); } kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS); kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS); kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES); kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS); kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS); kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS); kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR); kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR); update_cr8_intercept(vcpu); /* Older userspace won't unhalt the vcpu on reset. */ if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 && sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 && !is_protmode(vcpu)) vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; kvm_make_request(KVM_REQ_EVENT, vcpu); return 0; } int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg) { unsigned long rflags; int i, r; if (dbg->control & (KVM_GUESTDBG_INJECT_DB | KVM_GUESTDBG_INJECT_BP)) { r = -EBUSY; if (vcpu->arch.exception.pending) goto out; if (dbg->control & KVM_GUESTDBG_INJECT_DB) kvm_queue_exception(vcpu, DB_VECTOR); else kvm_queue_exception(vcpu, BP_VECTOR); } /* * Read rflags as long as potentially injected trace flags are still * filtered out. 
*/ rflags = kvm_get_rflags(vcpu); vcpu->guest_debug = dbg->control; if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE)) vcpu->guest_debug = 0; if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) { for (i = 0; i < KVM_NR_DB_REGS; ++i) vcpu->arch.eff_db[i] = dbg->arch.debugreg[i]; vcpu->arch.switch_db_regs = (dbg->arch.debugreg[7] & DR7_BP_EN_MASK); } else { for (i = 0; i < KVM_NR_DB_REGS; i++) vcpu->arch.eff_db[i] = vcpu->arch.db[i]; vcpu->arch.switch_db_regs = (vcpu->arch.dr7 & DR7_BP_EN_MASK); } if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) vcpu->arch.singlestep_rip = kvm_rip_read(vcpu) + get_segment_base(vcpu, VCPU_SREG_CS); /* * Trigger an rflags update that will inject or remove the trace * flags. */ kvm_set_rflags(vcpu, rflags); kvm_x86_ops->set_guest_debug(vcpu, dbg); r = 0; out: return r; } /* * Translate a guest virtual address to a guest physical address. */ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, struct kvm_translation *tr) { unsigned long vaddr = tr->linear_address; gpa_t gpa; int idx; idx = srcu_read_lock(&vcpu->kvm->srcu); gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL); srcu_read_unlock(&vcpu->kvm->srcu, idx); tr->physical_address = gpa; tr->valid = gpa != UNMAPPED_GVA; tr->writeable = 1; tr->usermode = 0; return 0; } int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) { struct i387_fxsave_struct *fxsave = &vcpu->arch.guest_fpu.state->fxsave; memcpy(fpu->fpr, fxsave->st_space, 128); fpu->fcw = fxsave->cwd; fpu->fsw = fxsave->swd; fpu->ftwx = fxsave->twd; fpu->last_opcode = fxsave->fop; fpu->last_ip = fxsave->rip; fpu->last_dp = fxsave->rdp; memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space); return 0; } int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) { struct i387_fxsave_struct *fxsave = &vcpu->arch.guest_fpu.state->fxsave; memcpy(fxsave->st_space, fpu->fpr, 128); fxsave->cwd = fpu->fcw; fxsave->swd = fpu->fsw; fxsave->twd = fpu->ftwx; fxsave->fop = fpu->last_opcode; fxsave->rip = fpu->last_ip; fxsave->rdp = fpu->last_dp; memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space); return 0; } int fx_init(struct kvm_vcpu *vcpu) { int err; err = fpu_alloc(&vcpu->arch.guest_fpu); if (err) return err; fpu_finit(&vcpu->arch.guest_fpu); /* * Ensure guest xcr0 is valid for loading */ vcpu->arch.xcr0 = XSTATE_FP; vcpu->arch.cr0 |= X86_CR0_ET; return 0; } EXPORT_SYMBOL_GPL(fx_init); static void fx_free(struct kvm_vcpu *vcpu) { fpu_free(&vcpu->arch.guest_fpu); } void kvm_load_guest_fpu(struct kvm_vcpu *vcpu) { if (vcpu->guest_fpu_loaded) return; /* * Restore all possible states in the guest, * and assume host would use all available bits. * Guest xcr0 would be loaded later. 
*/ kvm_put_guest_xcr0(vcpu); vcpu->guest_fpu_loaded = 1; unlazy_fpu(current); fpu_restore_checking(&vcpu->arch.guest_fpu); trace_kvm_fpu(1); } void kvm_put_guest_fpu(struct kvm_vcpu *vcpu) { kvm_put_guest_xcr0(vcpu); if (!vcpu->guest_fpu_loaded) return; vcpu->guest_fpu_loaded = 0; fpu_save_init(&vcpu->arch.guest_fpu); ++vcpu->stat.fpu_reload; kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu); trace_kvm_fpu(0); } void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) { if (vcpu->arch.time_page) { kvm_release_page_dirty(vcpu->arch.time_page); vcpu->arch.time_page = NULL; } free_cpumask_var(vcpu->arch.wbinvd_dirty_mask); fx_free(vcpu); kvm_x86_ops->vcpu_free(vcpu); } struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) { if (check_tsc_unstable() && atomic_read(&kvm->online_vcpus) != 0) printk_once(KERN_WARNING "kvm: SMP vm created on host with unstable TSC; " "guest TSC will not be reliable\n"); return kvm_x86_ops->vcpu_create(kvm, id); } int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) { int r; vcpu->arch.mtrr_state.have_fixed = 1; vcpu_load(vcpu); r = kvm_arch_vcpu_reset(vcpu); if (r == 0) r = kvm_mmu_setup(vcpu); vcpu_put(vcpu); if (r < 0) goto free_vcpu; return 0; free_vcpu: kvm_x86_ops->vcpu_free(vcpu); return r; } void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) { vcpu->arch.apf.msr_val = 0; vcpu_load(vcpu); kvm_mmu_unload(vcpu); vcpu_put(vcpu); fx_free(vcpu); kvm_x86_ops->vcpu_free(vcpu); } int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu) { vcpu->arch.nmi_pending = false; vcpu->arch.nmi_injected = false; vcpu->arch.switch_db_regs = 0; memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db)); vcpu->arch.dr6 = DR6_FIXED_1; vcpu->arch.dr7 = DR7_FIXED_1; kvm_make_request(KVM_REQ_EVENT, vcpu); vcpu->arch.apf.msr_val = 0; kvm_clear_async_pf_completion_queue(vcpu); kvm_async_pf_hash_reset(vcpu); vcpu->arch.apf.halted = false; return kvm_x86_ops->vcpu_reset(vcpu); } int kvm_arch_hardware_enable(void *garbage) { struct kvm *kvm; struct kvm_vcpu *vcpu; int i; kvm_shared_msr_cpu_online(); list_for_each_entry(kvm, &vm_list, vm_list) kvm_for_each_vcpu(i, vcpu, kvm) if (vcpu->cpu == smp_processor_id()) kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); return kvm_x86_ops->hardware_enable(garbage); } void kvm_arch_hardware_disable(void *garbage) { kvm_x86_ops->hardware_disable(garbage); drop_user_return_notifiers(garbage); } int kvm_arch_hardware_setup(void) { return kvm_x86_ops->hardware_setup(); } void kvm_arch_hardware_unsetup(void) { kvm_x86_ops->hardware_unsetup(); } void kvm_arch_check_processor_compat(void *rtn) { kvm_x86_ops->check_processor_compatibility(rtn); } int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) { struct page *page; struct kvm *kvm; int r; BUG_ON(vcpu->kvm == NULL); kvm = vcpu->kvm; vcpu->arch.emulate_ctxt.ops = &emulate_ops; vcpu->arch.walk_mmu = &vcpu->arch.mmu; vcpu->arch.mmu.root_hpa = INVALID_PAGE; vcpu->arch.mmu.translate_gpa = translate_gpa; vcpu->arch.nested_mmu.translate_gpa = translate_nested_gpa; if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_bsp(vcpu)) vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; else vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED; page = alloc_page(GFP_KERNEL | __GFP_ZERO); if (!page) { r = -ENOMEM; goto fail; } vcpu->arch.pio_data = page_address(page); if (!kvm->arch.virtual_tsc_khz) kvm_arch_set_tsc_khz(kvm, max_tsc_khz); r = kvm_mmu_create(vcpu); if (r < 0) goto fail_free_pio_data; if (irqchip_in_kernel(kvm)) { r = kvm_create_lapic(vcpu); if (r < 0) goto fail_mmu_destroy; } vcpu->arch.mce_banks = kzalloc(KVM_MAX_MCE_BANKS * sizeof(u64) * 4, GFP_KERNEL); if 
(!vcpu->arch.mce_banks) { r = -ENOMEM; goto fail_free_lapic; } vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS; if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, GFP_KERNEL)) goto fail_free_mce_banks; kvm_async_pf_hash_reset(vcpu); return 0; fail_free_mce_banks: kfree(vcpu->arch.mce_banks); fail_free_lapic: kvm_free_lapic(vcpu); fail_mmu_destroy: kvm_mmu_destroy(vcpu); fail_free_pio_data: free_page((unsigned long)vcpu->arch.pio_data); fail: return r; } void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) { int idx; kfree(vcpu->arch.mce_banks); kvm_free_lapic(vcpu); idx = srcu_read_lock(&vcpu->kvm->srcu); kvm_mmu_destroy(vcpu); srcu_read_unlock(&vcpu->kvm->srcu, idx); free_page((unsigned long)vcpu->arch.pio_data); } int kvm_arch_init_vm(struct kvm *kvm) { INIT_LIST_HEAD(&kvm->arch.active_mmu_pages); INIT_LIST_HEAD(&kvm->arch.assigned_dev_head); /* Reserve bit 0 of irq_sources_bitmap for userspace irq source */ set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap); spin_lock_init(&kvm->arch.tsc_write_lock); return 0; } static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu) { vcpu_load(vcpu); kvm_mmu_unload(vcpu); vcpu_put(vcpu); } static void kvm_free_vcpus(struct kvm *kvm) { unsigned int i; struct kvm_vcpu *vcpu; /* * Unpin any mmu pages first. */ kvm_for_each_vcpu(i, vcpu, kvm) { kvm_clear_async_pf_completion_queue(vcpu); kvm_unload_vcpu_mmu(vcpu); } kvm_for_each_vcpu(i, vcpu, kvm) kvm_arch_vcpu_free(vcpu); mutex_lock(&kvm->lock); for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) kvm->vcpus[i] = NULL; atomic_set(&kvm->online_vcpus, 0); mutex_unlock(&kvm->lock); } void kvm_arch_sync_events(struct kvm *kvm) { kvm_free_all_assigned_devices(kvm); kvm_free_pit(kvm); } void kvm_arch_destroy_vm(struct kvm *kvm) { kvm_iommu_unmap_guest(kvm); kfree(kvm->arch.vpic); kfree(kvm->arch.vioapic); kvm_free_vcpus(kvm); if (kvm->arch.apic_access_page) put_page(kvm->arch.apic_access_page); if (kvm->arch.ept_identity_pagetable) put_page(kvm->arch.ept_identity_pagetable); } int kvm_arch_prepare_memory_region(struct kvm *kvm, struct kvm_memory_slot *memslot, struct kvm_memory_slot old, struct kvm_userspace_memory_region *mem, int user_alloc) { int npages = memslot->npages; int map_flags = MAP_PRIVATE | MAP_ANONYMOUS; /* Prevent internal slot pages from being moved by fork()/COW. */ if (memslot->id >= KVM_MEMORY_SLOTS) map_flags = MAP_SHARED | MAP_ANONYMOUS; /*To keep backward compatibility with older userspace, *x86 needs to hanlde !user_alloc case. 
*/ if (!user_alloc) { if (npages && !old.rmap) { unsigned long userspace_addr; down_write(&current->mm->mmap_sem); userspace_addr = do_mmap(NULL, 0, npages * PAGE_SIZE, PROT_READ | PROT_WRITE, map_flags, 0); up_write(&current->mm->mmap_sem); if (IS_ERR((void *)userspace_addr)) return PTR_ERR((void *)userspace_addr); memslot->userspace_addr = userspace_addr; } } return 0; } void kvm_arch_commit_memory_region(struct kvm *kvm, struct kvm_userspace_memory_region *mem, struct kvm_memory_slot old, int user_alloc) { int npages = mem->memory_size >> PAGE_SHIFT; if (!user_alloc && !old.user_alloc && old.rmap && !npages) { int ret; down_write(&current->mm->mmap_sem); ret = do_munmap(current->mm, old.userspace_addr, old.npages * PAGE_SIZE); up_write(&current->mm->mmap_sem); if (ret < 0) printk(KERN_WARNING "kvm_vm_ioctl_set_memory_region: " "failed to munmap memory\n"); } spin_lock(&kvm->mmu_lock); if (!kvm->arch.n_requested_mmu_pages) { unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm); kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages); } kvm_mmu_slot_remove_write_access(kvm, mem->slot); spin_unlock(&kvm->mmu_lock); } void kvm_arch_flush_shadow(struct kvm *kvm) { kvm_mmu_zap_all(kvm); kvm_reload_remote_mmus(kvm); } int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) { return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE && !vcpu->arch.apf.halted) || !list_empty_careful(&vcpu->async_pf.done) || vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED || vcpu->arch.nmi_pending || (kvm_arch_interrupt_allowed(vcpu) && kvm_cpu_has_interrupt(vcpu)); } void kvm_vcpu_kick(struct kvm_vcpu *vcpu) { int me; int cpu = vcpu->cpu; if (waitqueue_active(&vcpu->wq)) { wake_up_interruptible(&vcpu->wq); ++vcpu->stat.halt_wakeup; } me = get_cpu(); if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) if (atomic_xchg(&vcpu->guest_mode, 0)) smp_send_reschedule(cpu); put_cpu(); } int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu) { return kvm_x86_ops->interrupt_allowed(vcpu); } bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip) { unsigned long current_rip = kvm_rip_read(vcpu) + get_segment_base(vcpu, VCPU_SREG_CS); return current_rip == linear_rip; } EXPORT_SYMBOL_GPL(kvm_is_linear_rip); unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu) { unsigned long rflags; rflags = kvm_x86_ops->get_rflags(vcpu); if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) rflags &= ~X86_EFLAGS_TF; return rflags; } EXPORT_SYMBOL_GPL(kvm_get_rflags); void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) { if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP && kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip)) rflags |= X86_EFLAGS_TF; kvm_x86_ops->set_rflags(vcpu, rflags); kvm_make_request(KVM_REQ_EVENT, vcpu); } EXPORT_SYMBOL_GPL(kvm_set_rflags); void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work) { int r; if (!vcpu->arch.mmu.direct_map || !work->arch.direct_map || is_error_page(work->page)) return; r = kvm_mmu_reload(vcpu); if (unlikely(r)) return; vcpu->arch.mmu.page_fault(vcpu, work->gva, 0, true); } static inline u32 kvm_async_pf_hash_fn(gfn_t gfn) { return hash_32(gfn & 0xffffffff, order_base_2(ASYNC_PF_PER_VCPU)); } static inline u32 kvm_async_pf_next_probe(u32 key) { return (key + 1) & (roundup_pow_of_two(ASYNC_PF_PER_VCPU) - 1); } static void kvm_add_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) { u32 key = kvm_async_pf_hash_fn(gfn); while (vcpu->arch.apf.gfns[key] != ~0) key = kvm_async_pf_next_probe(key); vcpu->arch.apf.gfns[key] = gfn; } static u32 
kvm_async_pf_gfn_slot(struct kvm_vcpu *vcpu, gfn_t gfn) { int i; u32 key = kvm_async_pf_hash_fn(gfn); for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU) && (vcpu->arch.apf.gfns[key] != gfn && vcpu->arch.apf.gfns[key] != ~0); i++) key = kvm_async_pf_next_probe(key); return key; } bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) { return vcpu->arch.apf.gfns[kvm_async_pf_gfn_slot(vcpu, gfn)] == gfn; } static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) { u32 i, j, k; i = j = kvm_async_pf_gfn_slot(vcpu, gfn); while (true) { vcpu->arch.apf.gfns[i] = ~0; do { j = kvm_async_pf_next_probe(j); if (vcpu->arch.apf.gfns[j] == ~0) return; k = kvm_async_pf_hash_fn(vcpu->arch.apf.gfns[j]); /* * k lies cyclically in ]i,j] * | i.k.j | * |....j i.k.| or |.k..j i...| */ } while ((i <= j) ? (i < k && k <= j) : (i < k || k <= j)); vcpu->arch.apf.gfns[i] = vcpu->arch.apf.gfns[j]; i = j; } } static int apf_put_user(struct kvm_vcpu *vcpu, u32 val) { return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &val, sizeof(val)); } void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu, struct kvm_async_pf *work) { struct x86_exception fault; trace_kvm_async_pf_not_present(work->arch.token, work->gva); kvm_add_async_pf_gfn(vcpu, work->arch.gfn); if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) || (vcpu->arch.apf.send_user_only && kvm_x86_ops->get_cpl(vcpu) == 0)) kvm_make_request(KVM_REQ_APF_HALT, vcpu); else if (!apf_put_user(vcpu, KVM_PV_REASON_PAGE_NOT_PRESENT)) { fault.vector = PF_VECTOR; fault.error_code_valid = true; fault.error_code = 0; fault.nested_page_fault = false; fault.address = work->arch.token; kvm_inject_page_fault(vcpu, &fault); } } void kvm_arch_async_page_present(struct kvm_vcpu *vcpu, struct kvm_async_pf *work) { struct x86_exception fault; trace_kvm_async_pf_ready(work->arch.token, work->gva); if (is_error_page(work->page)) work->arch.token = ~0; /* broadcast wakeup */ else kvm_del_async_pf_gfn(vcpu, work->arch.gfn); if ((vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) && !apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) { fault.vector = PF_VECTOR; fault.error_code_valid = true; fault.error_code = 0; fault.nested_page_fault = false; fault.address = work->arch.token; kvm_inject_page_fault(vcpu, &fault); } vcpu->arch.apf.halted = false; } bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu) { if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED)) return true; else return !kvm_event_needs_reinjection(vcpu) && kvm_x86_ops->interrupt_allowed(vcpu); } EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_msr); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_cr); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmrun); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit_inject); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intr_vmexit); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_invlpga); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_skinit); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intercepts);
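/*
 * Illustrative userspace sketch (not part of the kernel sources collected
 * here): the vcpu handlers above -- kvm_arch_vcpu_ioctl_get_regs(),
 * kvm_arch_vcpu_ioctl_set_regs() and kvm_arch_vcpu_ioctl_run() -- are the
 * kernel side of the KVM_GET_REGS, KVM_SET_REGS and KVM_RUN vcpu ioctls.
 * Those ioctl names come from the standard KVM userspace API (linux/kvm.h),
 * not from the excerpt itself. The minimal flow below shows how a VMM would
 * reach these handlers; error handling is abbreviated and the guest memory /
 * segment setup a real VMM needs (KVM_SET_USER_MEMORY_REGION, sregs) is
 * deliberately omitted, so KVM_RUN here is only expected to bounce straight
 * back to userspace.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
        int kvm, vm, vcpu;
        struct kvm_regs regs;

        kvm  = open("/dev/kvm", O_RDWR | O_CLOEXEC);
        vm   = ioctl(kvm, KVM_CREATE_VM, 0);    /* new VM fd */
        vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);   /* vcpu id 0 */

        /* Serviced by kvm_arch_vcpu_ioctl_get_regs() above. */
        ioctl(vcpu, KVM_GET_REGS, &regs);
        printf("reset rip=0x%llx rflags=0x%llx\n",
               (unsigned long long)regs.rip, (unsigned long long)regs.rflags);

        /* Change guest-visible registers and push them back in via
         * kvm_arch_vcpu_ioctl_set_regs(); bit 1 of rflags is reserved and
         * must stay set. */
        regs.rax    = 0;
        regs.rflags = 0x2;
        ioctl(vcpu, KVM_SET_REGS, &regs);

        /* Enters kvm_arch_vcpu_ioctl_run() -> __vcpu_run() until the guest
         * exits back to userspace (or the call fails, as it will without
         * guest memory set up). */
        ioctl(vcpu, KVM_RUN, 0);
        return 0;
}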
/* sample boundary: ./CrossVul/dataset_final_sorted/CWE-362/c/bad_4726_0 */
/* sample boundary: crossvul-cpp_data_good_2116_2 */
/* * Copyright 2002-2005, Instant802 Networks, Inc. * Copyright 2005-2006, Devicescape Software, Inc. * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> * Copyright 2007 Johannes Berg <johannes@sipsolutions.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * * Transmit and frame generation functions. */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/skbuff.h> #include <linux/etherdevice.h> #include <linux/bitmap.h> #include <linux/rcupdate.h> #include <linux/export.h> #include <linux/time.h> #include <net/net_namespace.h> #include <net/ieee80211_radiotap.h> #include <net/cfg80211.h> #include <net/mac80211.h> #include <asm/unaligned.h> #include "ieee80211_i.h" #include "driver-ops.h" #include "led.h" #include "mesh.h" #include "wep.h" #include "wpa.h" #include "wme.h" #include "rate.h" /* misc utils */ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, struct sk_buff *skb, int group_addr, int next_frag_len) { int rate, mrate, erp, dur, i, shift = 0; struct ieee80211_rate *txrate; struct ieee80211_local *local = tx->local; struct ieee80211_supported_band *sband; struct ieee80211_hdr *hdr; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_chanctx_conf *chanctx_conf; u32 rate_flags = 0; rcu_read_lock(); chanctx_conf = rcu_dereference(tx->sdata->vif.chanctx_conf); if (chanctx_conf) { shift = ieee80211_chandef_get_shift(&chanctx_conf->def); rate_flags = ieee80211_chandef_rate_flags(&chanctx_conf->def); } rcu_read_unlock(); /* assume HW handles this */ if (tx->rate.flags & IEEE80211_TX_RC_MCS) return 0; /* uh huh? */ if (WARN_ON_ONCE(tx->rate.idx < 0)) return 0; sband = local->hw.wiphy->bands[info->band]; txrate = &sband->bitrates[tx->rate.idx]; erp = txrate->flags & IEEE80211_RATE_ERP_G; /* * data and mgmt (except PS Poll): * - during CFP: 32768 * - during contention period: * if addr1 is group address: 0 * if more fragments = 0 and addr1 is individual address: time to * transmit one ACK plus SIFS * if more fragments = 1 and addr1 is individual address: time to * transmit next fragment plus 2 x ACK plus 3 x SIFS * * IEEE 802.11, 9.6: * - control response frame (CTS or ACK) shall be transmitted using the * same rate as the immediately previous frame in the frame exchange * sequence, if this rate belongs to the PHY mandatory rates, or else * at the highest possible rate belonging to the PHY rates in the * BSSBasicRateSet */ hdr = (struct ieee80211_hdr *)skb->data; if (ieee80211_is_ctl(hdr->frame_control)) { /* TODO: These control frames are not currently sent by * mac80211, but should they be implemented, this function * needs to be updated to support duration field calculation. * * RTS: time needed to transmit pending data/mgmt frame plus * one CTS frame plus one ACK frame plus 3 x SIFS * CTS: duration of immediately previous RTS minus time * required to transmit CTS and its SIFS * ACK: 0 if immediately previous directed data/mgmt had * more=0, with more=1 duration in ACK frame is duration * from previous frame minus time needed to transmit ACK * and its SIFS * PS Poll: BIT(15) | BIT(14) | aid */ return 0; } /* data/mgmt */ if (0 /* FIX: data/mgmt during CFP */) return cpu_to_le16(32768); if (group_addr) /* Group address as the destination - no ACK */ return 0; /* Individual destination address: * IEEE 802.11, Ch. 
9.6 (after IEEE 802.11g changes) * CTS and ACK frames shall be transmitted using the highest rate in * basic rate set that is less than or equal to the rate of the * immediately previous frame and that is using the same modulation * (CCK or OFDM). If no basic rate set matches with these requirements, * the highest mandatory rate of the PHY that is less than or equal to * the rate of the previous frame is used. * Mandatory rates for IEEE 802.11g PHY: 1, 2, 5.5, 11, 6, 12, 24 Mbps */ rate = -1; /* use lowest available if everything fails */ mrate = sband->bitrates[0].bitrate; for (i = 0; i < sband->n_bitrates; i++) { struct ieee80211_rate *r = &sband->bitrates[i]; if (r->bitrate > txrate->bitrate) break; if ((rate_flags & r->flags) != rate_flags) continue; if (tx->sdata->vif.bss_conf.basic_rates & BIT(i)) rate = DIV_ROUND_UP(r->bitrate, 1 << shift); switch (sband->band) { case IEEE80211_BAND_2GHZ: { u32 flag; if (tx->sdata->flags & IEEE80211_SDATA_OPERATING_GMODE) flag = IEEE80211_RATE_MANDATORY_G; else flag = IEEE80211_RATE_MANDATORY_B; if (r->flags & flag) mrate = r->bitrate; break; } case IEEE80211_BAND_5GHZ: if (r->flags & IEEE80211_RATE_MANDATORY_A) mrate = r->bitrate; break; case IEEE80211_BAND_60GHZ: /* TODO, for now fall through */ case IEEE80211_NUM_BANDS: WARN_ON(1); break; } } if (rate == -1) { /* No matching basic rate found; use highest suitable mandatory * PHY rate */ rate = DIV_ROUND_UP(mrate, 1 << shift); } /* Don't calculate ACKs for QoS Frames with NoAck Policy set */ if (ieee80211_is_data_qos(hdr->frame_control) && *(ieee80211_get_qos_ctl(hdr)) & IEEE80211_QOS_CTL_ACK_POLICY_NOACK) dur = 0; else /* Time needed to transmit ACK * (10 bytes + 4-byte FCS = 112 bits) plus SIFS; rounded up * to closest integer */ dur = ieee80211_frame_duration(sband->band, 10, rate, erp, tx->sdata->vif.bss_conf.use_short_preamble, shift); if (next_frag_len) { /* Frame is fragmented: duration increases with time needed to * transmit next fragment plus ACK and 2 x SIFS. */ dur *= 2; /* ACK + SIFS */ /* next fragment */ dur += ieee80211_frame_duration(sband->band, next_frag_len, txrate->bitrate, erp, tx->sdata->vif.bss_conf.use_short_preamble, shift); } return cpu_to_le16(dur); } /* tx handlers */ static ieee80211_tx_result debug_noinline ieee80211_tx_h_dynamic_ps(struct ieee80211_tx_data *tx) { struct ieee80211_local *local = tx->local; struct ieee80211_if_managed *ifmgd; /* driver doesn't support power save */ if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_PS)) return TX_CONTINUE; /* hardware does dynamic power save */ if (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS) return TX_CONTINUE; /* dynamic power save disabled */ if (local->hw.conf.dynamic_ps_timeout <= 0) return TX_CONTINUE; /* we are scanning, don't enable power save */ if (local->scanning) return TX_CONTINUE; if (!local->ps_sdata) return TX_CONTINUE; /* No point if we're going to suspend */ if (local->quiescing) return TX_CONTINUE; /* dynamic ps is supported only in managed mode */ if (tx->sdata->vif.type != NL80211_IFTYPE_STATION) return TX_CONTINUE; ifmgd = &tx->sdata->u.mgd; /* * Don't wakeup from power save if u-apsd is enabled, voip ac has * u-apsd enabled and the frame is in voip class. This effectively * means that even if all access categories have u-apsd enabled, in * practise u-apsd is only used with the voip ac. This is a * workaround for the case when received voip class packets do not * have correct qos tag for some reason, due the network or the * peer application. * * Note: ifmgd->uapsd_queues access is racy here. 
If the value is * changed via debugfs, user needs to reassociate manually to have * everything in sync. */ if ((ifmgd->flags & IEEE80211_STA_UAPSD_ENABLED) && (ifmgd->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) && skb_get_queue_mapping(tx->skb) == IEEE80211_AC_VO) return TX_CONTINUE; if (local->hw.conf.flags & IEEE80211_CONF_PS) { ieee80211_stop_queues_by_reason(&local->hw, IEEE80211_MAX_QUEUE_MAP, IEEE80211_QUEUE_STOP_REASON_PS); ifmgd->flags &= ~IEEE80211_STA_NULLFUNC_ACKED; ieee80211_queue_work(&local->hw, &local->dynamic_ps_disable_work); } /* Don't restart the timer if we're not disassociated */ if (!ifmgd->associated) return TX_CONTINUE; mod_timer(&local->dynamic_ps_timer, jiffies + msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout)); return TX_CONTINUE; } static ieee80211_tx_result debug_noinline ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); bool assoc = false; if (unlikely(info->flags & IEEE80211_TX_CTL_INJECTED)) return TX_CONTINUE; if (unlikely(test_bit(SCAN_SW_SCANNING, &tx->local->scanning)) && test_bit(SDATA_STATE_OFFCHANNEL, &tx->sdata->state) && !ieee80211_is_probe_req(hdr->frame_control) && !ieee80211_is_nullfunc(hdr->frame_control)) /* * When software scanning only nullfunc frames (to notify * the sleep state to the AP) and probe requests (for the * active scan) are allowed, all other frames should not be * sent and we should not get here, but if we do * nonetheless, drop them to avoid sending them * off-channel. See the link below and * ieee80211_start_scan() for more. * * http://article.gmane.org/gmane.linux.kernel.wireless.general/30089 */ return TX_DROP; if (tx->sdata->vif.type == NL80211_IFTYPE_WDS) return TX_CONTINUE; if (tx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT) return TX_CONTINUE; if (tx->flags & IEEE80211_TX_PS_BUFFERED) return TX_CONTINUE; if (tx->sta) assoc = test_sta_flag(tx->sta, WLAN_STA_ASSOC); if (likely(tx->flags & IEEE80211_TX_UNICAST)) { if (unlikely(!assoc && ieee80211_is_data(hdr->frame_control))) { #ifdef CONFIG_MAC80211_VERBOSE_DEBUG sdata_info(tx->sdata, "dropped data frame to not associated station %pM\n", hdr->addr1); #endif I802_DEBUG_INC(tx->local->tx_handlers_drop_not_assoc); return TX_DROP; } } else if (unlikely(tx->sdata->vif.type == NL80211_IFTYPE_AP && ieee80211_is_data(hdr->frame_control) && !atomic_read(&tx->sdata->u.ap.num_mcast_sta))) { /* * No associated STAs - no need to send multicast * frames. */ return TX_DROP; } return TX_CONTINUE; } /* This function is called whenever the AP is about to exceed the maximum limit * of buffered frames for power saving STAs. This situation should not really * happen often during normal operation, so dropping the oldest buffered packet * from each queue should be OK to make some room for new frames. */ static void purge_old_ps_buffers(struct ieee80211_local *local) { int total = 0, purged = 0; struct sk_buff *skb; struct ieee80211_sub_if_data *sdata; struct sta_info *sta; list_for_each_entry_rcu(sdata, &local->interfaces, list) { struct ps_data *ps; if (sdata->vif.type == NL80211_IFTYPE_AP) ps = &sdata->u.ap.ps; else if (ieee80211_vif_is_mesh(&sdata->vif)) ps = &sdata->u.mesh.ps; else continue; skb = skb_dequeue(&ps->bc_buf); if (skb) { purged++; dev_kfree_skb(skb); } total += skb_queue_len(&ps->bc_buf); } /* * Drop one frame from each station from the lowest-priority * AC that has frames at all. 
*/ list_for_each_entry_rcu(sta, &local->sta_list, list) { int ac; for (ac = IEEE80211_AC_BK; ac >= IEEE80211_AC_VO; ac--) { skb = skb_dequeue(&sta->ps_tx_buf[ac]); total += skb_queue_len(&sta->ps_tx_buf[ac]); if (skb) { purged++; ieee80211_free_txskb(&local->hw, skb); break; } } } local->total_ps_buffered = total; ps_dbg_hw(&local->hw, "PS buffers full - purged %d frames\n", purged); } static ieee80211_tx_result ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; struct ps_data *ps; /* * broadcast/multicast frame * * If any of the associated/peer stations is in power save mode, * the frame is buffered to be sent after DTIM beacon frame. * This is done either by the hardware or us. */ /* powersaving STAs currently only in AP/VLAN/mesh mode */ if (tx->sdata->vif.type == NL80211_IFTYPE_AP || tx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) { if (!tx->sdata->bss) return TX_CONTINUE; ps = &tx->sdata->bss->ps; } else if (ieee80211_vif_is_mesh(&tx->sdata->vif)) { ps = &tx->sdata->u.mesh.ps; } else { return TX_CONTINUE; } /* no buffering for ordered frames */ if (ieee80211_has_order(hdr->frame_control)) return TX_CONTINUE; if (tx->local->hw.flags & IEEE80211_HW_QUEUE_CONTROL) info->hw_queue = tx->sdata->vif.cab_queue; /* no stations in PS mode */ if (!atomic_read(&ps->num_sta_ps)) return TX_CONTINUE; info->flags |= IEEE80211_TX_CTL_SEND_AFTER_DTIM; /* device releases frame after DTIM beacon */ if (!(tx->local->hw.flags & IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING)) return TX_CONTINUE; /* buffered in mac80211 */ if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER) purge_old_ps_buffers(tx->local); if (skb_queue_len(&ps->bc_buf) >= AP_MAX_BC_BUFFER) { ps_dbg(tx->sdata, "BC TX buffer full - dropping the oldest frame\n"); dev_kfree_skb(skb_dequeue(&ps->bc_buf)); } else tx->local->total_ps_buffered++; skb_queue_tail(&ps->bc_buf, tx->skb); return TX_QUEUED; } static int ieee80211_use_mfp(__le16 fc, struct sta_info *sta, struct sk_buff *skb) { if (!ieee80211_is_mgmt(fc)) return 0; if (sta == NULL || !test_sta_flag(sta, WLAN_STA_MFP)) return 0; if (!ieee80211_is_robust_mgmt_frame((struct ieee80211_hdr *) skb->data)) return 0; return 1; } static ieee80211_tx_result ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx) { struct sta_info *sta = tx->sta; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); struct ieee80211_local *local = tx->local; if (unlikely(!sta)) return TX_CONTINUE; if (unlikely((test_sta_flag(sta, WLAN_STA_PS_STA) || test_sta_flag(sta, WLAN_STA_PS_DRIVER)) && !(info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER))) { int ac = skb_get_queue_mapping(tx->skb); ps_dbg(sta->sdata, "STA %pM aid %d: PS buffer for AC %d\n", sta->sta.addr, sta->sta.aid, ac); if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER) purge_old_ps_buffers(tx->local); /* sync with ieee80211_sta_ps_deliver_wakeup */ spin_lock(&sta->ps_lock); /* * STA woke up the meantime and all the frames on ps_tx_buf have * been queued to pending queue. No reordering can happen, go * ahead and Tx the packet. 
*/ if (!test_sta_flag(sta, WLAN_STA_PS_STA) && !test_sta_flag(sta, WLAN_STA_PS_DRIVER)) { spin_unlock(&sta->ps_lock); return TX_CONTINUE; } if (skb_queue_len(&sta->ps_tx_buf[ac]) >= STA_MAX_TX_BUFFER) { struct sk_buff *old = skb_dequeue(&sta->ps_tx_buf[ac]); ps_dbg(tx->sdata, "STA %pM TX buffer for AC %d full - dropping oldest frame\n", sta->sta.addr, ac); ieee80211_free_txskb(&local->hw, old); } else tx->local->total_ps_buffered++; info->control.jiffies = jiffies; info->control.vif = &tx->sdata->vif; info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS; skb_queue_tail(&sta->ps_tx_buf[ac], tx->skb); spin_unlock(&sta->ps_lock); if (!timer_pending(&local->sta_cleanup)) mod_timer(&local->sta_cleanup, round_jiffies(jiffies + STA_INFO_CLEANUP_INTERVAL)); /* * We queued up some frames, so the TIM bit might * need to be set, recalculate it. */ sta_info_recalc_tim(sta); return TX_QUEUED; } else if (unlikely(test_sta_flag(sta, WLAN_STA_PS_STA))) { ps_dbg(tx->sdata, "STA %pM in PS mode, but polling/in SP -> send frame\n", sta->sta.addr); } return TX_CONTINUE; } static ieee80211_tx_result debug_noinline ieee80211_tx_h_ps_buf(struct ieee80211_tx_data *tx) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; if (unlikely(tx->flags & IEEE80211_TX_PS_BUFFERED)) return TX_CONTINUE; /* only deauth, disassoc and action are bufferable MMPDUs */ if (ieee80211_is_mgmt(hdr->frame_control) && !ieee80211_is_deauth(hdr->frame_control) && !ieee80211_is_disassoc(hdr->frame_control) && !ieee80211_is_action(hdr->frame_control)) { if (tx->flags & IEEE80211_TX_UNICAST) info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER; return TX_CONTINUE; } if (tx->flags & IEEE80211_TX_UNICAST) return ieee80211_tx_h_unicast_ps_buf(tx); else return ieee80211_tx_h_multicast_ps_buf(tx); } static ieee80211_tx_result debug_noinline ieee80211_tx_h_check_control_port_protocol(struct ieee80211_tx_data *tx) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); if (unlikely(tx->sdata->control_port_protocol == tx->skb->protocol)) { if (tx->sdata->control_port_no_encrypt) info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; info->control.flags |= IEEE80211_TX_CTRL_PORT_CTRL_PROTO; } return TX_CONTINUE; } static ieee80211_tx_result debug_noinline ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx) { struct ieee80211_key *key; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; if (unlikely(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT)) tx->key = NULL; else if (tx->sta && (key = rcu_dereference(tx->sta->ptk[tx->sta->ptk_idx]))) tx->key = key; else if (ieee80211_is_mgmt(hdr->frame_control) && is_multicast_ether_addr(hdr->addr1) && ieee80211_is_robust_mgmt_frame(hdr) && (key = rcu_dereference(tx->sdata->default_mgmt_key))) tx->key = key; else if (is_multicast_ether_addr(hdr->addr1) && (key = rcu_dereference(tx->sdata->default_multicast_key))) tx->key = key; else if (!is_multicast_ether_addr(hdr->addr1) && (key = rcu_dereference(tx->sdata->default_unicast_key))) tx->key = key; else if (info->flags & IEEE80211_TX_CTL_INJECTED) tx->key = NULL; else if (!tx->sdata->drop_unencrypted) tx->key = NULL; else if (tx->skb->protocol == tx->sdata->control_port_protocol) tx->key = NULL; else if (ieee80211_is_robust_mgmt_frame(hdr) && !(ieee80211_is_action(hdr->frame_control) && tx->sta && test_sta_flag(tx->sta, WLAN_STA_MFP))) tx->key = NULL; else if 
(ieee80211_is_mgmt(hdr->frame_control) && !ieee80211_is_robust_mgmt_frame(hdr)) tx->key = NULL; else { I802_DEBUG_INC(tx->local->tx_handlers_drop_unencrypted); return TX_DROP; } if (tx->key) { bool skip_hw = false; tx->key->tx_rx_count++; /* TODO: add threshold stuff again */ switch (tx->key->conf.cipher) { case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: case WLAN_CIPHER_SUITE_TKIP: if (!ieee80211_is_data_present(hdr->frame_control)) tx->key = NULL; break; case WLAN_CIPHER_SUITE_CCMP: if (!ieee80211_is_data_present(hdr->frame_control) && !ieee80211_use_mfp(hdr->frame_control, tx->sta, tx->skb)) tx->key = NULL; else skip_hw = (tx->key->conf.flags & IEEE80211_KEY_FLAG_SW_MGMT_TX) && ieee80211_is_mgmt(hdr->frame_control); break; case WLAN_CIPHER_SUITE_AES_CMAC: if (!ieee80211_is_mgmt(hdr->frame_control)) tx->key = NULL; break; } if (unlikely(tx->key && tx->key->flags & KEY_FLAG_TAINTED && !ieee80211_is_deauth(hdr->frame_control))) return TX_DROP; if (!skip_hw && tx->key && tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) info->control.hw_key = &tx->key->conf; } return TX_CONTINUE; } static ieee80211_tx_result debug_noinline ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); struct ieee80211_hdr *hdr = (void *)tx->skb->data; struct ieee80211_supported_band *sband; u32 len; struct ieee80211_tx_rate_control txrc; struct ieee80211_sta_rates *ratetbl = NULL; bool assoc = false; memset(&txrc, 0, sizeof(txrc)); sband = tx->local->hw.wiphy->bands[info->band]; len = min_t(u32, tx->skb->len + FCS_LEN, tx->local->hw.wiphy->frag_threshold); /* set up the tx rate control struct we give the RC algo */ txrc.hw = &tx->local->hw; txrc.sband = sband; txrc.bss_conf = &tx->sdata->vif.bss_conf; txrc.skb = tx->skb; txrc.reported_rate.idx = -1; txrc.rate_idx_mask = tx->sdata->rc_rateidx_mask[info->band]; if (txrc.rate_idx_mask == (1 << sband->n_bitrates) - 1) txrc.max_rate_idx = -1; else txrc.max_rate_idx = fls(txrc.rate_idx_mask) - 1; if (tx->sdata->rc_has_mcs_mask[info->band]) txrc.rate_idx_mcs_mask = tx->sdata->rc_rateidx_mcs_mask[info->band]; txrc.bss = (tx->sdata->vif.type == NL80211_IFTYPE_AP || tx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT || tx->sdata->vif.type == NL80211_IFTYPE_ADHOC); /* set up RTS protection if desired */ if (len > tx->local->hw.wiphy->rts_threshold) { txrc.rts = true; } info->control.use_rts = txrc.rts; info->control.use_cts_prot = tx->sdata->vif.bss_conf.use_cts_prot; /* * Use short preamble if the BSS can handle it, but not for * management frames unless we know the receiver can handle * that -- the management frame might be to a station that * just wants a probe response. */ if (tx->sdata->vif.bss_conf.use_short_preamble && (ieee80211_is_data(hdr->frame_control) || (tx->sta && test_sta_flag(tx->sta, WLAN_STA_SHORT_PREAMBLE)))) txrc.short_preamble = true; info->control.short_preamble = txrc.short_preamble; if (tx->sta) assoc = test_sta_flag(tx->sta, WLAN_STA_ASSOC); /* * Lets not bother rate control if we're associated and cannot * talk to the sta. This should not happen. */ if (WARN(test_bit(SCAN_SW_SCANNING, &tx->local->scanning) && assoc && !rate_usable_index_exists(sband, &tx->sta->sta), "%s: Dropped data frame as no usable bitrate found while " "scanning and associated. Target station: " "%pM on %d GHz band\n", tx->sdata->name, hdr->addr1, info->band ? 5 : 2)) return TX_DROP; /* * If we're associated with the sta at this point we know we can at * least send the frame at the lowest bit rate. 
*/ rate_control_get_rate(tx->sdata, tx->sta, &txrc); if (tx->sta && !info->control.skip_table) ratetbl = rcu_dereference(tx->sta->sta.rates); if (unlikely(info->control.rates[0].idx < 0)) { if (ratetbl) { struct ieee80211_tx_rate rate = { .idx = ratetbl->rate[0].idx, .flags = ratetbl->rate[0].flags, .count = ratetbl->rate[0].count }; if (ratetbl->rate[0].idx < 0) return TX_DROP; tx->rate = rate; } else { return TX_DROP; } } else { tx->rate = info->control.rates[0]; } if (txrc.reported_rate.idx < 0) { txrc.reported_rate = tx->rate; if (tx->sta && ieee80211_is_data(hdr->frame_control)) tx->sta->last_tx_rate = txrc.reported_rate; } else if (tx->sta) tx->sta->last_tx_rate = txrc.reported_rate; if (ratetbl) return TX_CONTINUE; if (unlikely(!info->control.rates[0].count)) info->control.rates[0].count = 1; if (WARN_ON_ONCE((info->control.rates[0].count > 1) && (info->flags & IEEE80211_TX_CTL_NO_ACK))) info->control.rates[0].count = 1; return TX_CONTINUE; } static ieee80211_tx_result debug_noinline ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; u16 *seq; u8 *qc; int tid; /* * Packet injection may want to control the sequence * number, if we have no matching interface then we * neither assign one ourselves nor ask the driver to. */ if (unlikely(info->control.vif->type == NL80211_IFTYPE_MONITOR)) return TX_CONTINUE; if (unlikely(ieee80211_is_ctl(hdr->frame_control))) return TX_CONTINUE; if (ieee80211_hdrlen(hdr->frame_control) < 24) return TX_CONTINUE; if (ieee80211_is_qos_nullfunc(hdr->frame_control)) return TX_CONTINUE; /* * Anything but QoS data that has a sequence number field * (is long enough) gets a sequence number from the global * counter. QoS data frames with a multicast destination * also use the global counter (802.11-2012 9.3.2.10). */ if (!ieee80211_is_data_qos(hdr->frame_control) || is_multicast_ether_addr(hdr->addr1)) { /* driver should assign sequence number */ info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ; /* for pure STA mode without beacons, we can do it */ hdr->seq_ctrl = cpu_to_le16(tx->sdata->sequence_number); tx->sdata->sequence_number += 0x10; return TX_CONTINUE; } /* * This should be true for injected/management frames only, for * management frames we have set the IEEE80211_TX_CTL_ASSIGN_SEQ * above since they are not QoS-data frames. */ if (!tx->sta) return TX_CONTINUE; /* include per-STA, per-TID sequence counter */ qc = ieee80211_get_qos_ctl(hdr); tid = *qc & IEEE80211_QOS_CTL_TID_MASK; seq = &tx->sta->tid_seq[tid]; hdr->seq_ctrl = cpu_to_le16(*seq); /* Increase the sequence number. 
*/ *seq = (*seq + 0x10) & IEEE80211_SCTL_SEQ; return TX_CONTINUE; } static int ieee80211_fragment(struct ieee80211_tx_data *tx, struct sk_buff *skb, int hdrlen, int frag_threshold) { struct ieee80211_local *local = tx->local; struct ieee80211_tx_info *info; struct sk_buff *tmp; int per_fragm = frag_threshold - hdrlen - FCS_LEN; int pos = hdrlen + per_fragm; int rem = skb->len - hdrlen - per_fragm; if (WARN_ON(rem < 0)) return -EINVAL; /* first fragment was already added to queue by caller */ while (rem) { int fraglen = per_fragm; if (fraglen > rem) fraglen = rem; rem -= fraglen; tmp = dev_alloc_skb(local->tx_headroom + frag_threshold + tx->sdata->encrypt_headroom + IEEE80211_ENCRYPT_TAILROOM); if (!tmp) return -ENOMEM; __skb_queue_tail(&tx->skbs, tmp); skb_reserve(tmp, local->tx_headroom + tx->sdata->encrypt_headroom); /* copy control information */ memcpy(tmp->cb, skb->cb, sizeof(tmp->cb)); info = IEEE80211_SKB_CB(tmp); info->flags &= ~(IEEE80211_TX_CTL_CLEAR_PS_FILT | IEEE80211_TX_CTL_FIRST_FRAGMENT); if (rem) info->flags |= IEEE80211_TX_CTL_MORE_FRAMES; skb_copy_queue_mapping(tmp, skb); tmp->priority = skb->priority; tmp->dev = skb->dev; /* copy header and data */ memcpy(skb_put(tmp, hdrlen), skb->data, hdrlen); memcpy(skb_put(tmp, fraglen), skb->data + pos, fraglen); pos += fraglen; } /* adjust first fragment's length */ skb_trim(skb, hdrlen + per_fragm); return 0; } static ieee80211_tx_result debug_noinline ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx) { struct sk_buff *skb = tx->skb; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_hdr *hdr = (void *)skb->data; int frag_threshold = tx->local->hw.wiphy->frag_threshold; int hdrlen; int fragnum; /* no matter what happens, tx->skb moves to tx->skbs */ __skb_queue_tail(&tx->skbs, skb); tx->skb = NULL; if (info->flags & IEEE80211_TX_CTL_DONTFRAG) return TX_CONTINUE; if (tx->local->ops->set_frag_threshold) return TX_CONTINUE; /* * Warn when submitting a fragmented A-MPDU frame and drop it. * This scenario is handled in ieee80211_tx_prepare but extra * caution taken here as fragmented ampdu may cause Tx stop. */ if (WARN_ON(info->flags & IEEE80211_TX_CTL_AMPDU)) return TX_DROP; hdrlen = ieee80211_hdrlen(hdr->frame_control); /* internal error, why isn't DONTFRAG set? */ if (WARN_ON(skb->len + FCS_LEN <= frag_threshold)) return TX_DROP; /* * Now fragment the frame. This will allocate all the fragments and * chain them (using skb as the first fragment) to skb->next. * During transmission, we will remove the successfully transmitted * fragments from this list. When the low-level driver rejects one * of the fragments then we will simply pretend to accept the skb * but store it away as pending. */ if (ieee80211_fragment(tx, skb, hdrlen, frag_threshold)) return TX_DROP; /* update duration/seq/flags of fragments */ fragnum = 0; skb_queue_walk(&tx->skbs, skb) { const __le16 morefrags = cpu_to_le16(IEEE80211_FCTL_MOREFRAGS); hdr = (void *)skb->data; info = IEEE80211_SKB_CB(skb); if (!skb_queue_is_last(&tx->skbs, skb)) { hdr->frame_control |= morefrags; /* * No multi-rate retries for fragmented frames, that * would completely throw off the NAV at other STAs. 
*/ info->control.rates[1].idx = -1; info->control.rates[2].idx = -1; info->control.rates[3].idx = -1; BUILD_BUG_ON(IEEE80211_TX_MAX_RATES != 4); info->flags &= ~IEEE80211_TX_CTL_RATE_CTRL_PROBE; } else { hdr->frame_control &= ~morefrags; } hdr->seq_ctrl |= cpu_to_le16(fragnum & IEEE80211_SCTL_FRAG); fragnum++; } return TX_CONTINUE; } static ieee80211_tx_result debug_noinline ieee80211_tx_h_stats(struct ieee80211_tx_data *tx) { struct sk_buff *skb; int ac = -1; if (!tx->sta) return TX_CONTINUE; skb_queue_walk(&tx->skbs, skb) { ac = skb_get_queue_mapping(skb); tx->sta->tx_fragments++; tx->sta->tx_bytes[ac] += skb->len; } if (ac >= 0) tx->sta->tx_packets[ac]++; return TX_CONTINUE; } static ieee80211_tx_result debug_noinline ieee80211_tx_h_encrypt(struct ieee80211_tx_data *tx) { if (!tx->key) return TX_CONTINUE; switch (tx->key->conf.cipher) { case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: return ieee80211_crypto_wep_encrypt(tx); case WLAN_CIPHER_SUITE_TKIP: return ieee80211_crypto_tkip_encrypt(tx); case WLAN_CIPHER_SUITE_CCMP: return ieee80211_crypto_ccmp_encrypt(tx); case WLAN_CIPHER_SUITE_AES_CMAC: return ieee80211_crypto_aes_cmac_encrypt(tx); default: return ieee80211_crypto_hw_encrypt(tx); } return TX_DROP; } static ieee80211_tx_result debug_noinline ieee80211_tx_h_calculate_duration(struct ieee80211_tx_data *tx) { struct sk_buff *skb; struct ieee80211_hdr *hdr; int next_len; bool group_addr; skb_queue_walk(&tx->skbs, skb) { hdr = (void *) skb->data; if (unlikely(ieee80211_is_pspoll(hdr->frame_control))) break; /* must not overwrite AID */ if (!skb_queue_is_last(&tx->skbs, skb)) { struct sk_buff *next = skb_queue_next(&tx->skbs, skb); next_len = next->len; } else next_len = 0; group_addr = is_multicast_ether_addr(hdr->addr1); hdr->duration_id = ieee80211_duration(tx, skb, group_addr, next_len); } return TX_CONTINUE; } /* actual transmit path */ static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx, struct sk_buff *skb, struct ieee80211_tx_info *info, struct tid_ampdu_tx *tid_tx, int tid) { bool queued = false; bool reset_agg_timer = false; struct sk_buff *purge_skb = NULL; if (test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) { info->flags |= IEEE80211_TX_CTL_AMPDU; reset_agg_timer = true; } else if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) { /* * nothing -- this aggregation session is being started * but that might still fail with the driver */ } else { spin_lock(&tx->sta->lock); /* * Need to re-check now, because we may get here * * 1) in the window during which the setup is actually * already done, but not marked yet because not all * packets are spliced over to the driver pending * queue yet -- if this happened we acquire the lock * either before or after the splice happens, but * need to recheck which of these cases happened. * * 2) during session teardown, if the OPERATIONAL bit * was cleared due to the teardown but the pointer * hasn't been assigned NULL yet (or we loaded it * before it was assigned) -- in this case it may * now be NULL which means we should just let the * packet pass through because splicing the frames * back is already done. 
*/ tid_tx = rcu_dereference_protected_tid_tx(tx->sta, tid); if (!tid_tx) { /* do nothing, let packet pass through */ } else if (test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) { info->flags |= IEEE80211_TX_CTL_AMPDU; reset_agg_timer = true; } else { queued = true; info->control.vif = &tx->sdata->vif; info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS; __skb_queue_tail(&tid_tx->pending, skb); if (skb_queue_len(&tid_tx->pending) > STA_MAX_TX_BUFFER) purge_skb = __skb_dequeue(&tid_tx->pending); } spin_unlock(&tx->sta->lock); if (purge_skb) ieee80211_free_txskb(&tx->local->hw, purge_skb); } /* reset session timer */ if (reset_agg_timer && tid_tx->timeout) tid_tx->last_tx = jiffies; return queued; } /* * initialises @tx */ static ieee80211_tx_result ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata, struct ieee80211_tx_data *tx, struct sk_buff *skb) { struct ieee80211_local *local = sdata->local; struct ieee80211_hdr *hdr; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); int tid; u8 *qc; memset(tx, 0, sizeof(*tx)); tx->skb = skb; tx->local = local; tx->sdata = sdata; __skb_queue_head_init(&tx->skbs); /* * If this flag is set to true anywhere, and we get here, * we are doing the needed processing, so remove the flag * now. */ info->flags &= ~IEEE80211_TX_INTFL_NEED_TXPROCESSING; hdr = (struct ieee80211_hdr *) skb->data; if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) { tx->sta = rcu_dereference(sdata->u.vlan.sta); if (!tx->sta && sdata->dev->ieee80211_ptr->use_4addr) return TX_DROP; } else if (info->flags & (IEEE80211_TX_CTL_INJECTED | IEEE80211_TX_INTFL_NL80211_FRAME_TX) || tx->sdata->control_port_protocol == tx->skb->protocol) { tx->sta = sta_info_get_bss(sdata, hdr->addr1); } if (!tx->sta) tx->sta = sta_info_get(sdata, hdr->addr1); if (tx->sta && ieee80211_is_data_qos(hdr->frame_control) && !ieee80211_is_qos_nullfunc(hdr->frame_control) && (local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION) && !(local->hw.flags & IEEE80211_HW_TX_AMPDU_SETUP_IN_HW)) { struct tid_ampdu_tx *tid_tx; qc = ieee80211_get_qos_ctl(hdr); tid = *qc & IEEE80211_QOS_CTL_TID_MASK; tid_tx = rcu_dereference(tx->sta->ampdu_mlme.tid_tx[tid]); if (tid_tx) { bool queued; queued = ieee80211_tx_prep_agg(tx, skb, info, tid_tx, tid); if (unlikely(queued)) return TX_QUEUED; } } if (is_multicast_ether_addr(hdr->addr1)) { tx->flags &= ~IEEE80211_TX_UNICAST; info->flags |= IEEE80211_TX_CTL_NO_ACK; } else tx->flags |= IEEE80211_TX_UNICAST; if (!(info->flags & IEEE80211_TX_CTL_DONTFRAG)) { if (!(tx->flags & IEEE80211_TX_UNICAST) || skb->len + FCS_LEN <= local->hw.wiphy->frag_threshold || info->flags & IEEE80211_TX_CTL_AMPDU) info->flags |= IEEE80211_TX_CTL_DONTFRAG; } if (!tx->sta) info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT; else if (test_and_clear_sta_flag(tx->sta, WLAN_STA_CLEAR_PS_FILT)) info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT; info->flags |= IEEE80211_TX_CTL_FIRST_FRAGMENT; return TX_CONTINUE; } static bool ieee80211_tx_frags(struct ieee80211_local *local, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct sk_buff_head *skbs, bool txpending) { struct ieee80211_tx_control control; struct sk_buff *skb, *tmp; unsigned long flags; skb_queue_walk_safe(skbs, skb, tmp) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); int q = info->hw_queue; #ifdef CONFIG_MAC80211_VERBOSE_DEBUG if (WARN_ON_ONCE(q >= local->hw.queues)) { __skb_unlink(skb, skbs); ieee80211_free_txskb(&local->hw, skb); continue; } #endif spin_lock_irqsave(&local->queue_stop_reason_lock, 
flags); if (local->queue_stop_reasons[q] || (!txpending && !skb_queue_empty(&local->pending[q]))) { if (unlikely(info->flags & IEEE80211_TX_INTFL_OFFCHAN_TX_OK)) { if (local->queue_stop_reasons[q] & ~BIT(IEEE80211_QUEUE_STOP_REASON_OFFCHANNEL)) { /* * Drop off-channel frames if queues * are stopped for any reason other * than off-channel operation. Never * queue them. */ spin_unlock_irqrestore( &local->queue_stop_reason_lock, flags); ieee80211_purge_tx_queue(&local->hw, skbs); return true; } } else { /* * Since queue is stopped, queue up frames for * later transmission from the tx-pending * tasklet when the queue is woken again. */ if (txpending) skb_queue_splice_init(skbs, &local->pending[q]); else skb_queue_splice_tail_init(skbs, &local->pending[q]); spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); return false; } } spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); info->control.vif = vif; control.sta = sta; __skb_unlink(skb, skbs); drv_tx(local, &control, skb); } return true; } /* * Returns false if the frame couldn't be transmitted but was queued instead. */ static bool __ieee80211_tx(struct ieee80211_local *local, struct sk_buff_head *skbs, int led_len, struct sta_info *sta, bool txpending) { struct ieee80211_tx_info *info; struct ieee80211_sub_if_data *sdata; struct ieee80211_vif *vif; struct ieee80211_sta *pubsta; struct sk_buff *skb; bool result = true; __le16 fc; if (WARN_ON(skb_queue_empty(skbs))) return true; skb = skb_peek(skbs); fc = ((struct ieee80211_hdr *)skb->data)->frame_control; info = IEEE80211_SKB_CB(skb); sdata = vif_to_sdata(info->control.vif); if (sta && !sta->uploaded) sta = NULL; if (sta) pubsta = &sta->sta; else pubsta = NULL; switch (sdata->vif.type) { case NL80211_IFTYPE_MONITOR: if (sdata->u.mntr_flags & MONITOR_FLAG_ACTIVE) { vif = &sdata->vif; break; } sdata = rcu_dereference(local->monitor_sdata); if (sdata) { vif = &sdata->vif; info->hw_queue = vif->hw_queue[skb_get_queue_mapping(skb)]; } else if (local->hw.flags & IEEE80211_HW_QUEUE_CONTROL) { dev_kfree_skb(skb); return true; } else vif = NULL; break; case NL80211_IFTYPE_AP_VLAN: sdata = container_of(sdata->bss, struct ieee80211_sub_if_data, u.ap); /* fall through */ default: vif = &sdata->vif; break; } result = ieee80211_tx_frags(local, vif, pubsta, skbs, txpending); ieee80211_tpt_led_trig_tx(local, fc, led_len); WARN_ON_ONCE(!skb_queue_empty(skbs)); return result; } /* * Invoke TX handlers, return 0 on success and non-zero if the * frame was dropped or queued. */ static int invoke_tx_handlers(struct ieee80211_tx_data *tx) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); ieee80211_tx_result res = TX_DROP; #define CALL_TXH(txh) \ do { \ res = txh(tx); \ if (res != TX_CONTINUE) \ goto txh_done; \ } while (0) CALL_TXH(ieee80211_tx_h_dynamic_ps); CALL_TXH(ieee80211_tx_h_check_assoc); CALL_TXH(ieee80211_tx_h_ps_buf); CALL_TXH(ieee80211_tx_h_check_control_port_protocol); CALL_TXH(ieee80211_tx_h_select_key); if (!(tx->local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL)) CALL_TXH(ieee80211_tx_h_rate_ctrl); if (unlikely(info->flags & IEEE80211_TX_INTFL_RETRANSMISSION)) { __skb_queue_tail(&tx->skbs, tx->skb); tx->skb = NULL; goto txh_done; } CALL_TXH(ieee80211_tx_h_michael_mic_add); CALL_TXH(ieee80211_tx_h_sequence); CALL_TXH(ieee80211_tx_h_fragment); /* handlers after fragment must be aware of tx info fragmentation! 
*/ CALL_TXH(ieee80211_tx_h_stats); CALL_TXH(ieee80211_tx_h_encrypt); if (!(tx->local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL)) CALL_TXH(ieee80211_tx_h_calculate_duration); #undef CALL_TXH txh_done: if (unlikely(res == TX_DROP)) { I802_DEBUG_INC(tx->local->tx_handlers_drop); if (tx->skb) ieee80211_free_txskb(&tx->local->hw, tx->skb); else ieee80211_purge_tx_queue(&tx->local->hw, &tx->skbs); return -1; } else if (unlikely(res == TX_QUEUED)) { I802_DEBUG_INC(tx->local->tx_handlers_queued); return -1; } return 0; } bool ieee80211_tx_prepare_skb(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct sk_buff *skb, int band, struct ieee80211_sta **sta) { struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_tx_data tx; if (ieee80211_tx_prepare(sdata, &tx, skb) == TX_DROP) return false; info->band = band; info->control.vif = vif; info->hw_queue = vif->hw_queue[skb_get_queue_mapping(skb)]; if (invoke_tx_handlers(&tx)) return false; if (sta) { if (tx.sta) *sta = &tx.sta->sta; else *sta = NULL; } return true; } EXPORT_SYMBOL(ieee80211_tx_prepare_skb); /* * Returns false if the frame couldn't be transmitted but was queued instead. */ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, bool txpending, enum ieee80211_band band) { struct ieee80211_local *local = sdata->local; struct ieee80211_tx_data tx; ieee80211_tx_result res_prepare; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); bool result = true; int led_len; if (unlikely(skb->len < 10)) { dev_kfree_skb(skb); return true; } /* initialises tx */ led_len = skb->len; res_prepare = ieee80211_tx_prepare(sdata, &tx, skb); if (unlikely(res_prepare == TX_DROP)) { ieee80211_free_txskb(&local->hw, skb); return true; } else if (unlikely(res_prepare == TX_QUEUED)) { return true; } info->band = band; /* set up hw_queue value early */ if (!(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) || !(local->hw.flags & IEEE80211_HW_QUEUE_CONTROL)) info->hw_queue = sdata->vif.hw_queue[skb_get_queue_mapping(skb)]; if (!invoke_tx_handlers(&tx)) result = __ieee80211_tx(local, &tx.skbs, led_len, tx.sta, txpending); return result; } /* device xmit handlers */ static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, int head_need, bool may_encrypt) { struct ieee80211_local *local = sdata->local; int tail_need = 0; if (may_encrypt && sdata->crypto_tx_tailroom_needed_cnt) { tail_need = IEEE80211_ENCRYPT_TAILROOM; tail_need -= skb_tailroom(skb); tail_need = max_t(int, tail_need, 0); } if (skb_cloned(skb)) I802_DEBUG_INC(local->tx_expand_skb_head_cloned); else if (head_need || tail_need) I802_DEBUG_INC(local->tx_expand_skb_head); else return 0; if (pskb_expand_head(skb, head_need, tail_need, GFP_ATOMIC)) { wiphy_debug(local->hw.wiphy, "failed to reallocate TX buffer\n"); return -ENOMEM; } return 0; } void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, enum ieee80211_band band) { struct ieee80211_local *local = sdata->local; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; int headroom; bool may_encrypt; may_encrypt = !(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT); headroom = local->tx_headroom; if (may_encrypt) headroom += sdata->encrypt_headroom; headroom -= skb_headroom(skb); headroom = max_t(int, 0, headroom); if (ieee80211_skb_resize(sdata, skb, headroom, may_encrypt)) { ieee80211_free_txskb(&local->hw, skb); return; } hdr = (struct 
ieee80211_hdr *) skb->data; info->control.vif = &sdata->vif; if (ieee80211_vif_is_mesh(&sdata->vif)) { if (ieee80211_is_data(hdr->frame_control) && is_unicast_ether_addr(hdr->addr1)) { if (mesh_nexthop_resolve(sdata, skb)) return; /* skb queued: don't free */ } else { ieee80211_mps_set_frame_flags(sdata, NULL, hdr); } } ieee80211_set_qos_hdr(sdata, skb); ieee80211_tx(sdata, skb, false, band); } static bool ieee80211_parse_tx_radiotap(struct sk_buff *skb) { struct ieee80211_radiotap_iterator iterator; struct ieee80211_radiotap_header *rthdr = (struct ieee80211_radiotap_header *) skb->data; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); int ret = ieee80211_radiotap_iterator_init(&iterator, rthdr, skb->len, NULL); u16 txflags; info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT | IEEE80211_TX_CTL_DONTFRAG; /* * for every radiotap entry that is present * (ieee80211_radiotap_iterator_next returns -ENOENT when no more * entries present, or -EINVAL on error) */ while (!ret) { ret = ieee80211_radiotap_iterator_next(&iterator); if (ret) continue; /* see if this argument is something we can use */ switch (iterator.this_arg_index) { /* * You must take care when dereferencing iterator.this_arg * for multibyte types... the pointer is not aligned. Use * get_unaligned((type *)iterator.this_arg) to dereference * iterator.this_arg for type "type" safely on all arches. */ case IEEE80211_RADIOTAP_FLAGS: if (*iterator.this_arg & IEEE80211_RADIOTAP_F_FCS) { /* * this indicates that the skb we have been * handed has the 32-bit FCS CRC at the end... * we should react to that by snipping it off * because it will be recomputed and added * on transmission */ if (skb->len < (iterator._max_length + FCS_LEN)) return false; skb_trim(skb, skb->len - FCS_LEN); } if (*iterator.this_arg & IEEE80211_RADIOTAP_F_WEP) info->flags &= ~IEEE80211_TX_INTFL_DONT_ENCRYPT; if (*iterator.this_arg & IEEE80211_RADIOTAP_F_FRAG) info->flags &= ~IEEE80211_TX_CTL_DONTFRAG; break; case IEEE80211_RADIOTAP_TX_FLAGS: txflags = get_unaligned_le16(iterator.this_arg); if (txflags & IEEE80211_RADIOTAP_F_TX_NOACK) info->flags |= IEEE80211_TX_CTL_NO_ACK; break; /* * Please update the file * Documentation/networking/mac80211-injection.txt * when parsing new fields here. */ default: break; } } if (ret != -ENOENT) /* ie, if we didn't simply run out of fields */ return false; /* * remove the radiotap header * iterator->_max_length was sanity-checked against * skb->len by iterator init */ skb_pull(skb, iterator._max_length); return true; } netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); struct ieee80211_chanctx_conf *chanctx_conf; struct ieee80211_channel *chan; struct ieee80211_radiotap_header *prthdr = (struct ieee80211_radiotap_header *)skb->data; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_hdr *hdr; struct ieee80211_sub_if_data *tmp_sdata, *sdata; u16 len_rthdr; int hdrlen; /* check for not even having the fixed radiotap header part */ if (unlikely(skb->len < sizeof(struct ieee80211_radiotap_header))) goto fail; /* too short to be possibly valid */ /* is it a header version we can trust to find length from? */ if (unlikely(prthdr->it_version)) goto fail; /* only version 0 is supported */ /* then there must be a radiotap header with a length we can use */ len_rthdr = ieee80211_get_radiotap_len(skb->data); /* does the skb contain enough to deliver on the alleged length? 
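* Note: ieee80211_get_radiotap_len() simply reads the little-endian it_len field from the fixed radiotap header, so len_rthdr is the total length of the injection header that must be stripped before the 802.11 frame begins.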
*/ if (unlikely(skb->len < len_rthdr)) goto fail; /* skb too short for claimed rt header extent */ /* * fix up the pointers accounting for the radiotap * header still being in there. We are being given * a precooked IEEE80211 header so no need for * normal processing */ skb_set_mac_header(skb, len_rthdr); /* * these are just fixed to the end of the rt area since we * don't have any better information and at this point, nobody cares */ skb_set_network_header(skb, len_rthdr); skb_set_transport_header(skb, len_rthdr); if (skb->len < len_rthdr + 2) goto fail; hdr = (struct ieee80211_hdr *)(skb->data + len_rthdr); hdrlen = ieee80211_hdrlen(hdr->frame_control); if (skb->len < len_rthdr + hdrlen) goto fail; /* * Initialize skb->protocol if the injected frame is a data frame * carrying a rfc1042 header */ if (ieee80211_is_data(hdr->frame_control) && skb->len >= len_rthdr + hdrlen + sizeof(rfc1042_header) + 2) { u8 *payload = (u8 *)hdr + hdrlen; if (ether_addr_equal(payload, rfc1042_header)) skb->protocol = cpu_to_be16((payload[6] << 8) | payload[7]); } memset(info, 0, sizeof(*info)); info->flags = IEEE80211_TX_CTL_REQ_TX_STATUS | IEEE80211_TX_CTL_INJECTED; /* process and remove the injection radiotap header */ if (!ieee80211_parse_tx_radiotap(skb)) goto fail; rcu_read_lock(); /* * We process outgoing injected frames that have a local address * we handle as though they are non-injected frames. * This code here isn't entirely correct, the local MAC address * isn't always enough to find the interface to use; for proper * VLAN/WDS support we will need a different mechanism (which * likely isn't going to be monitor interfaces). */ sdata = IEEE80211_DEV_TO_SUB_IF(dev); list_for_each_entry_rcu(tmp_sdata, &local->interfaces, list) { if (!ieee80211_sdata_running(tmp_sdata)) continue; if (tmp_sdata->vif.type == NL80211_IFTYPE_MONITOR || tmp_sdata->vif.type == NL80211_IFTYPE_AP_VLAN || tmp_sdata->vif.type == NL80211_IFTYPE_WDS) continue; if (ether_addr_equal(tmp_sdata->vif.addr, hdr->addr2)) { sdata = tmp_sdata; break; } } chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); if (!chanctx_conf) { tmp_sdata = rcu_dereference(local->monitor_sdata); if (tmp_sdata) chanctx_conf = rcu_dereference(tmp_sdata->vif.chanctx_conf); } if (chanctx_conf) chan = chanctx_conf->def.chan; else if (!local->use_chanctx) chan = local->_oper_chandef.chan; else goto fail_rcu; /* * Frame injection is not allowed if beaconing is not allowed * or if we need radar detection. Beaconing is usually not allowed when * the mode or operation (Adhoc, AP, Mesh) does not support DFS. * Passive scan is also used in world regulatory domains where * your country is not known and as such it should be treated as * NO TX unless the channel is explicitly allowed in which case * your current regulatory domain would not have the passive scan * flag. * * Since AP mode uses monitor interfaces to inject/TX management * frames we can make AP mode the exception to this rule once it * supports radar detection as its implementation can deal with * radar detection by itself. We can do that later by adding a * monitor flag interfaces used for AP support. 
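* (IEEE80211_CHAN_NO_IR is the combined "no initiating radiation" flag that replaced the old passive-scan/no-ibss flags, so injection is refused on any channel where we may not transmit first.)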
*/ if ((chan->flags & (IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_RADAR))) goto fail_rcu; ieee80211_xmit(sdata, skb, chan->band); rcu_read_unlock(); return NETDEV_TX_OK; fail_rcu: rcu_read_unlock(); fail: dev_kfree_skb(skb); return NETDEV_TX_OK; /* meaning, we dealt with the skb */ } /* * Measure Tx frame arrival time for Tx latency statistics calculation * A single Tx frame latency should be measured from when it is entering the * Kernel until we receive Tx complete confirmation indication and the skb is * freed. */ static void ieee80211_tx_latency_start_msrmnt(struct ieee80211_local *local, struct sk_buff *skb) { struct timespec skb_arv; struct ieee80211_tx_latency_bin_ranges *tx_latency; tx_latency = rcu_dereference(local->tx_latency); if (!tx_latency) return; ktime_get_ts(&skb_arv); skb->tstamp = ktime_set(skb_arv.tv_sec, skb_arv.tv_nsec); } /** * ieee80211_subif_start_xmit - netif start_xmit function for Ethernet-type * subinterfaces (wlan#, WDS, and VLAN interfaces) * @skb: packet to be sent * @dev: incoming interface * * Returns: 0 on success (and frees skb in this case) or 1 on failure (skb will * not be freed, and caller is responsible for either retrying later or freeing * skb). * * This function takes in an Ethernet header and encapsulates it with suitable * IEEE 802.11 header based on which interface the packet is coming in. The * encapsulated packet will then be passed to master interface, wlan#.11, for * transmission (through low-level driver). */ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_local *local = sdata->local; struct ieee80211_tx_info *info; int head_need; u16 ethertype, hdrlen, meshhdrlen = 0; __le16 fc; struct ieee80211_hdr hdr; struct ieee80211s_hdr mesh_hdr __maybe_unused; struct mesh_path __maybe_unused *mppath = NULL, *mpath = NULL; const u8 *encaps_data; int encaps_len, skip_header_bytes; int nh_pos, h_pos; struct sta_info *sta = NULL; bool wme_sta = false, authorized = false, tdls_auth = false; bool tdls_direct = false; bool multicast; u32 info_flags = 0; u16 info_id = 0; struct ieee80211_chanctx_conf *chanctx_conf; struct ieee80211_sub_if_data *ap_sdata; enum ieee80211_band band; if (unlikely(skb->len < ETH_HLEN)) goto fail; /* convert Ethernet header to proper 802.11 header (based on * operation mode) */ ethertype = (skb->data[12] << 8) | skb->data[13]; fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA); rcu_read_lock(); /* Measure frame arrival for Tx latency statistics calculation */ ieee80211_tx_latency_start_msrmnt(local, skb); switch (sdata->vif.type) { case NL80211_IFTYPE_AP_VLAN: sta = rcu_dereference(sdata->u.vlan.sta); if (sta) { fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS); /* RA TA DA SA */ memcpy(hdr.addr1, sta->sta.addr, ETH_ALEN); memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN); memcpy(hdr.addr3, skb->data, ETH_ALEN); memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN); hdrlen = 30; authorized = test_sta_flag(sta, WLAN_STA_AUTHORIZED); wme_sta = test_sta_flag(sta, WLAN_STA_WME); } ap_sdata = container_of(sdata->bss, struct ieee80211_sub_if_data, u.ap); chanctx_conf = rcu_dereference(ap_sdata->vif.chanctx_conf); if (!chanctx_conf) goto fail_rcu; band = chanctx_conf->def.chan->band; if (sta) break; /* fall through */ case NL80211_IFTYPE_AP: if (sdata->vif.type == NL80211_IFTYPE_AP) chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); if (!chanctx_conf) goto fail_rcu; fc |= 
cpu_to_le16(IEEE80211_FCTL_FROMDS); /* DA BSSID SA */ memcpy(hdr.addr1, skb->data, ETH_ALEN); memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN); memcpy(hdr.addr3, skb->data + ETH_ALEN, ETH_ALEN); hdrlen = 24; band = chanctx_conf->def.chan->band; break; case NL80211_IFTYPE_WDS: fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS); /* RA TA DA SA */ memcpy(hdr.addr1, sdata->u.wds.remote_addr, ETH_ALEN); memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN); memcpy(hdr.addr3, skb->data, ETH_ALEN); memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN); hdrlen = 30; /* * This is the exception! WDS style interfaces are prohibited * when channel contexts are in used so this must be valid */ band = local->hw.conf.chandef.chan->band; break; #ifdef CONFIG_MAC80211_MESH case NL80211_IFTYPE_MESH_POINT: if (!is_multicast_ether_addr(skb->data)) { struct sta_info *next_hop; bool mpp_lookup = true; mpath = mesh_path_lookup(sdata, skb->data); if (mpath) { mpp_lookup = false; next_hop = rcu_dereference(mpath->next_hop); if (!next_hop || !(mpath->flags & (MESH_PATH_ACTIVE | MESH_PATH_RESOLVING))) mpp_lookup = true; } if (mpp_lookup) mppath = mpp_path_lookup(sdata, skb->data); if (mppath && mpath) mesh_path_del(mpath->sdata, mpath->dst); } /* * Use address extension if it is a packet from * another interface or if we know the destination * is being proxied by a portal (i.e. portal address * differs from proxied address) */ if (ether_addr_equal(sdata->vif.addr, skb->data + ETH_ALEN) && !(mppath && !ether_addr_equal(mppath->mpp, skb->data))) { hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc, skb->data, skb->data + ETH_ALEN); meshhdrlen = ieee80211_new_mesh_header(sdata, &mesh_hdr, NULL, NULL); } else { /* DS -> MBSS (802.11-2012 13.11.3.3). * For unicast with unknown forwarding information, * destination might be in the MBSS or if that fails * forwarded to another mesh gate. In either case * resolution will be handled in ieee80211_xmit(), so * leave the original DA. This also works for mcast */ const u8 *mesh_da = skb->data; if (mppath) mesh_da = mppath->mpp; else if (mpath) mesh_da = mpath->dst; hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc, mesh_da, sdata->vif.addr); if (is_multicast_ether_addr(mesh_da)) /* DA TA mSA AE:SA */ meshhdrlen = ieee80211_new_mesh_header( sdata, &mesh_hdr, skb->data + ETH_ALEN, NULL); else /* RA TA mDA mSA AE:DA SA */ meshhdrlen = ieee80211_new_mesh_header( sdata, &mesh_hdr, skb->data, skb->data + ETH_ALEN); } chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); if (!chanctx_conf) goto fail_rcu; band = chanctx_conf->def.chan->band; break; #endif case NL80211_IFTYPE_STATION: if (sdata->wdev.wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS) { bool tdls_peer = false; sta = sta_info_get(sdata, skb->data); if (sta) { authorized = test_sta_flag(sta, WLAN_STA_AUTHORIZED); wme_sta = test_sta_flag(sta, WLAN_STA_WME); tdls_peer = test_sta_flag(sta, WLAN_STA_TDLS_PEER); tdls_auth = test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH); } /* * If the TDLS link is enabled, send everything * directly. Otherwise, allow TDLS setup frames * to be transmitted indirectly. 
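* Setup frames are identified by the ETH_P_TDLS ethertype together with the TDLS payload-type byte immediately after the Ethernet header; those still go through the AP until the peer link is authorized.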
*/ tdls_direct = tdls_peer && (tdls_auth || !(ethertype == ETH_P_TDLS && skb->len > 14 && skb->data[14] == WLAN_TDLS_SNAP_RFTYPE)); } if (tdls_direct) { /* link during setup - throw out frames to peer */ if (!tdls_auth) goto fail_rcu; /* DA SA BSSID */ memcpy(hdr.addr1, skb->data, ETH_ALEN); memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN); memcpy(hdr.addr3, sdata->u.mgd.bssid, ETH_ALEN); hdrlen = 24; } else if (sdata->u.mgd.use_4addr && cpu_to_be16(ethertype) != sdata->control_port_protocol) { fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS); /* RA TA DA SA */ memcpy(hdr.addr1, sdata->u.mgd.bssid, ETH_ALEN); memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN); memcpy(hdr.addr3, skb->data, ETH_ALEN); memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN); hdrlen = 30; } else { fc |= cpu_to_le16(IEEE80211_FCTL_TODS); /* BSSID SA DA */ memcpy(hdr.addr1, sdata->u.mgd.bssid, ETH_ALEN); memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN); memcpy(hdr.addr3, skb->data, ETH_ALEN); hdrlen = 24; } chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); if (!chanctx_conf) goto fail_rcu; band = chanctx_conf->def.chan->band; break; case NL80211_IFTYPE_ADHOC: /* DA SA BSSID */ memcpy(hdr.addr1, skb->data, ETH_ALEN); memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN); memcpy(hdr.addr3, sdata->u.ibss.bssid, ETH_ALEN); hdrlen = 24; chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); if (!chanctx_conf) goto fail_rcu; band = chanctx_conf->def.chan->band; break; default: goto fail_rcu; } /* * There's no need to try to look up the destination * if it is a multicast address (which can only happen * in AP mode) */ multicast = is_multicast_ether_addr(hdr.addr1); if (!multicast) { sta = sta_info_get(sdata, hdr.addr1); if (sta) { authorized = test_sta_flag(sta, WLAN_STA_AUTHORIZED); wme_sta = test_sta_flag(sta, WLAN_STA_WME); } } /* For mesh, the use of the QoS header is mandatory */ if (ieee80211_vif_is_mesh(&sdata->vif)) wme_sta = true; /* receiver and we are QoS enabled, use a QoS type frame */ if (wme_sta && local->hw.queues >= IEEE80211_NUM_ACS) { fc |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA); hdrlen += 2; } /* * Drop unicast frames to unauthorised stations unless they are * EAPOL frames from the local station. */ if (unlikely(!ieee80211_vif_is_mesh(&sdata->vif) && !multicast && !authorized && (cpu_to_be16(ethertype) != sdata->control_port_protocol || !ether_addr_equal(sdata->vif.addr, skb->data + ETH_ALEN)))) { #ifdef CONFIG_MAC80211_VERBOSE_DEBUG net_info_ratelimited("%s: dropped frame to %pM (unauthorized port)\n", dev->name, hdr.addr1); #endif I802_DEBUG_INC(local->tx_handlers_drop_unauth_port); goto fail_rcu; } if (unlikely(!multicast && skb->sk && skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS)) { struct sk_buff *orig_skb = skb; skb = skb_clone(skb, GFP_ATOMIC); if (skb) { unsigned long flags; int id; spin_lock_irqsave(&local->ack_status_lock, flags); id = idr_alloc(&local->ack_status_frames, orig_skb, 1, 0x10000, GFP_ATOMIC); spin_unlock_irqrestore(&local->ack_status_lock, flags); if (id >= 0) { info_id = id; info_flags |= IEEE80211_TX_CTL_REQ_TX_STATUS; } else if (skb_shared(skb)) { kfree_skb(orig_skb); } else { kfree_skb(skb); skb = orig_skb; } } else { /* couldn't clone -- lose tx status ... */ skb = orig_skb; } } /* * If the skb is shared we need to obtain our own copy. 
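* (skb_shared() means other users still hold references to this sk_buff, so it must not be modified in place; take a private clone and drop our reference to the original.)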
*/ if (skb_shared(skb)) { struct sk_buff *tmp_skb = skb; /* can't happen -- skb is a clone if info_id != 0 */ WARN_ON(info_id); skb = skb_clone(skb, GFP_ATOMIC); kfree_skb(tmp_skb); if (!skb) goto fail_rcu; } hdr.frame_control = fc; hdr.duration_id = 0; hdr.seq_ctrl = 0; skip_header_bytes = ETH_HLEN; if (ethertype == ETH_P_AARP || ethertype == ETH_P_IPX) { encaps_data = bridge_tunnel_header; encaps_len = sizeof(bridge_tunnel_header); skip_header_bytes -= 2; } else if (ethertype >= ETH_P_802_3_MIN) { encaps_data = rfc1042_header; encaps_len = sizeof(rfc1042_header); skip_header_bytes -= 2; } else { encaps_data = NULL; encaps_len = 0; } nh_pos = skb_network_header(skb) - skb->data; h_pos = skb_transport_header(skb) - skb->data; skb_pull(skb, skip_header_bytes); nh_pos -= skip_header_bytes; h_pos -= skip_header_bytes; head_need = hdrlen + encaps_len + meshhdrlen - skb_headroom(skb); /* * So we need to modify the skb header and hence need a copy of * that. The head_need variable above doesn't, so far, include * the needed header space that we don't need right away. If we * can, then we don't reallocate right now but only after the * frame arrives at the master device (if it does...) * * If we cannot, however, then we will reallocate to include all * the ever needed space. Also, if we need to reallocate it anyway, * make it big enough for everything we may ever need. */ if (head_need > 0 || skb_cloned(skb)) { head_need += sdata->encrypt_headroom; head_need += local->tx_headroom; head_need = max_t(int, 0, head_need); if (ieee80211_skb_resize(sdata, skb, head_need, true)) { ieee80211_free_txskb(&local->hw, skb); skb = NULL; goto fail_rcu; } } if (encaps_data) { memcpy(skb_push(skb, encaps_len), encaps_data, encaps_len); nh_pos += encaps_len; h_pos += encaps_len; } #ifdef CONFIG_MAC80211_MESH if (meshhdrlen > 0) { memcpy(skb_push(skb, meshhdrlen), &mesh_hdr, meshhdrlen); nh_pos += meshhdrlen; h_pos += meshhdrlen; } #endif if (ieee80211_is_data_qos(fc)) { __le16 *qos_control; qos_control = (__le16 *) skb_push(skb, 2); memcpy(skb_push(skb, hdrlen - 2), &hdr, hdrlen - 2); /* * Maybe we could actually set some fields here, for now just * initialise to zero to indicate no special operation. */ *qos_control = 0; } else memcpy(skb_push(skb, hdrlen), &hdr, hdrlen); nh_pos += hdrlen; h_pos += hdrlen; dev->stats.tx_packets++; dev->stats.tx_bytes += skb->len; /* Update skb pointers to various headers since this modified frame * is going to go through Linux networking code that may potentially * need things like pointer to IP header. */ skb_set_mac_header(skb, 0); skb_set_network_header(skb, nh_pos); skb_set_transport_header(skb, h_pos); info = IEEE80211_SKB_CB(skb); memset(info, 0, sizeof(*info)); dev->trans_start = jiffies; info->flags = info_flags; info->ack_frame_id = info_id; ieee80211_xmit(sdata, skb, band); rcu_read_unlock(); return NETDEV_TX_OK; fail_rcu: rcu_read_unlock(); fail: dev_kfree_skb(skb); return NETDEV_TX_OK; } /* * ieee80211_clear_tx_pending may not be called in a context where * it is possible that it packets could come in again. */ void ieee80211_clear_tx_pending(struct ieee80211_local *local) { struct sk_buff *skb; int i; for (i = 0; i < local->hw.queues; i++) { while ((skb = skb_dequeue(&local->pending[i])) != NULL) ieee80211_free_txskb(&local->hw, skb); } } /* * Returns false if the frame couldn't be transmitted but was queued instead, * which in this case means re-queued -- take as an indication to stop sending * more pending frames. 
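* The caller below holds rcu_read_lock() but drops the queue_stop_reason spinlock around this call, so the sta and chanctx lookups here rely on RCU rather than that lock.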
*/ static bool ieee80211_tx_pending_skb(struct ieee80211_local *local, struct sk_buff *skb) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_sub_if_data *sdata; struct sta_info *sta; struct ieee80211_hdr *hdr; bool result; struct ieee80211_chanctx_conf *chanctx_conf; sdata = vif_to_sdata(info->control.vif); if (info->flags & IEEE80211_TX_INTFL_NEED_TXPROCESSING) { chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); if (unlikely(!chanctx_conf)) { dev_kfree_skb(skb); return true; } result = ieee80211_tx(sdata, skb, true, chanctx_conf->def.chan->band); } else { struct sk_buff_head skbs; __skb_queue_head_init(&skbs); __skb_queue_tail(&skbs, skb); hdr = (struct ieee80211_hdr *)skb->data; sta = sta_info_get(sdata, hdr->addr1); result = __ieee80211_tx(local, &skbs, skb->len, sta, true); } return result; } /* * Transmit all pending packets. Called from tasklet. */ void ieee80211_tx_pending(unsigned long data) { struct ieee80211_local *local = (struct ieee80211_local *)data; unsigned long flags; int i; bool txok; rcu_read_lock(); spin_lock_irqsave(&local->queue_stop_reason_lock, flags); for (i = 0; i < local->hw.queues; i++) { /* * If queue is stopped by something other than due to pending * frames, or we have no pending frames, proceed to next queue. */ if (local->queue_stop_reasons[i] || skb_queue_empty(&local->pending[i])) continue; while (!skb_queue_empty(&local->pending[i])) { struct sk_buff *skb = __skb_dequeue(&local->pending[i]); struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); if (WARN_ON(!info->control.vif)) { ieee80211_free_txskb(&local->hw, skb); continue; } spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); txok = ieee80211_tx_pending_skb(local, skb); spin_lock_irqsave(&local->queue_stop_reason_lock, flags); if (!txok) break; } if (skb_queue_empty(&local->pending[i])) ieee80211_propagate_queue_wake(local, i); } spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); rcu_read_unlock(); } /* functions for drivers to get certain frames */ static void __ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata, struct ps_data *ps, struct sk_buff *skb) { u8 *pos, *tim; int aid0 = 0; int i, have_bits = 0, n1, n2; /* Generate bitmap for TIM only if there are any STAs in power save * mode. */ if (atomic_read(&ps->num_sta_ps) > 0) /* in the hope that this is faster than * checking byte-for-byte */ have_bits = !bitmap_empty((unsigned long *)ps->tim, IEEE80211_MAX_AID+1); if (ps->dtim_count == 0) ps->dtim_count = sdata->vif.bss_conf.dtim_period - 1; else ps->dtim_count--; tim = pos = (u8 *) skb_put(skb, 6); *pos++ = WLAN_EID_TIM; *pos++ = 4; *pos++ = ps->dtim_count; *pos++ = sdata->vif.bss_conf.dtim_period; if (ps->dtim_count == 0 && !skb_queue_empty(&ps->bc_buf)) aid0 = 1; ps->dtim_bc_mc = aid0 == 1; if (have_bits) { /* Find largest even number N1 so that bits numbered 1 through * (N1 x 8) - 1 in the bitmap are 0 and number N2 so that bits * (N2 + 1) x 8 through 2007 are 0. 
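* Octets [N1, N2] of ps->tim then form the partial virtual bitmap: the bitmap control octet is n1 (kept even so bit 0 is free for the aid0/multicast bit) and the TIM element length works out to (N2 - N1) + 4.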
*/ n1 = 0; for (i = 0; i < IEEE80211_MAX_TIM_LEN; i++) { if (ps->tim[i]) { n1 = i & 0xfe; break; } } n2 = n1; for (i = IEEE80211_MAX_TIM_LEN - 1; i >= n1; i--) { if (ps->tim[i]) { n2 = i; break; } } /* Bitmap control */ *pos++ = n1 | aid0; /* Part Virt Bitmap */ skb_put(skb, n2 - n1); memcpy(pos, ps->tim + n1, n2 - n1 + 1); tim[1] = n2 - n1 + 4; } else { *pos++ = aid0; /* Bitmap control */ *pos++ = 0; /* Part Virt Bitmap */ } } static int ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata, struct ps_data *ps, struct sk_buff *skb) { struct ieee80211_local *local = sdata->local; /* * Not very nice, but we want to allow the driver to call * ieee80211_beacon_get() as a response to the set_tim() * callback. That, however, is already invoked under the * sta_lock to guarantee consistent and race-free update * of the tim bitmap in mac80211 and the driver. */ if (local->tim_in_locked_section) { __ieee80211_beacon_add_tim(sdata, ps, skb); } else { spin_lock_bh(&local->tim_lock); __ieee80211_beacon_add_tim(sdata, ps, skb); spin_unlock_bh(&local->tim_lock); } return 0; } void ieee80211_csa_finish(struct ieee80211_vif *vif) { struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); ieee80211_queue_work(&sdata->local->hw, &sdata->csa_finalize_work); } EXPORT_SYMBOL(ieee80211_csa_finish); static void ieee80211_update_csa(struct ieee80211_sub_if_data *sdata, struct beacon_data *beacon) { struct probe_resp *resp; int counter_offset_beacon = sdata->csa_counter_offset_beacon; int counter_offset_presp = sdata->csa_counter_offset_presp; u8 *beacon_data; size_t beacon_data_len; switch (sdata->vif.type) { case NL80211_IFTYPE_AP: beacon_data = beacon->tail; beacon_data_len = beacon->tail_len; break; case NL80211_IFTYPE_ADHOC: beacon_data = beacon->head; beacon_data_len = beacon->head_len; break; case NL80211_IFTYPE_MESH_POINT: beacon_data = beacon->head; beacon_data_len = beacon->head_len; break; default: return; } if (WARN_ON(counter_offset_beacon >= beacon_data_len)) return; /* warn if the driver did not check for/react to csa completeness */ if (WARN_ON(beacon_data[counter_offset_beacon] == 0)) return; beacon_data[counter_offset_beacon]--; if (sdata->vif.type == NL80211_IFTYPE_AP && counter_offset_presp) { rcu_read_lock(); resp = rcu_dereference(sdata->u.ap.probe_resp); /* if nl80211 accepted the offset, this should not happen. 
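* (The presp counter offset was presumably validated against the probe response template when the channel-switch settings were accepted, so losing the template here would be an internal inconsistency.)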
*/ if (WARN_ON(!resp)) { rcu_read_unlock(); return; } resp->data[counter_offset_presp]--; rcu_read_unlock(); } } bool ieee80211_csa_is_complete(struct ieee80211_vif *vif) { struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); struct beacon_data *beacon = NULL; u8 *beacon_data; size_t beacon_data_len; int counter_beacon = sdata->csa_counter_offset_beacon; int ret = false; if (!ieee80211_sdata_running(sdata)) return false; rcu_read_lock(); if (vif->type == NL80211_IFTYPE_AP) { struct ieee80211_if_ap *ap = &sdata->u.ap; beacon = rcu_dereference(ap->beacon); if (WARN_ON(!beacon || !beacon->tail)) goto out; beacon_data = beacon->tail; beacon_data_len = beacon->tail_len; } else if (vif->type == NL80211_IFTYPE_ADHOC) { struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; beacon = rcu_dereference(ifibss->presp); if (!beacon) goto out; beacon_data = beacon->head; beacon_data_len = beacon->head_len; } else if (vif->type == NL80211_IFTYPE_MESH_POINT) { struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; beacon = rcu_dereference(ifmsh->beacon); if (!beacon) goto out; beacon_data = beacon->head; beacon_data_len = beacon->head_len; } else { WARN_ON(1); goto out; } if (WARN_ON(counter_beacon > beacon_data_len)) goto out; if (beacon_data[counter_beacon] == 0) ret = true; out: rcu_read_unlock(); return ret; } EXPORT_SYMBOL(ieee80211_csa_is_complete); struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 *tim_offset, u16 *tim_length) { struct ieee80211_local *local = hw_to_local(hw); struct sk_buff *skb = NULL; struct ieee80211_tx_info *info; struct ieee80211_sub_if_data *sdata = NULL; enum ieee80211_band band; struct ieee80211_tx_rate_control txrc; struct ieee80211_chanctx_conf *chanctx_conf; rcu_read_lock(); sdata = vif_to_sdata(vif); chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); if (!ieee80211_sdata_running(sdata) || !chanctx_conf) goto out; if (tim_offset) *tim_offset = 0; if (tim_length) *tim_length = 0; if (sdata->vif.type == NL80211_IFTYPE_AP) { struct ieee80211_if_ap *ap = &sdata->u.ap; struct beacon_data *beacon = rcu_dereference(ap->beacon); if (beacon) { if (sdata->vif.csa_active) ieee80211_update_csa(sdata, beacon); /* * headroom, head length, * tail length and maximum TIM length */ skb = dev_alloc_skb(local->tx_headroom + beacon->head_len + beacon->tail_len + 256 + local->hw.extra_beacon_tailroom); if (!skb) goto out; skb_reserve(skb, local->tx_headroom); memcpy(skb_put(skb, beacon->head_len), beacon->head, beacon->head_len); ieee80211_beacon_add_tim(sdata, &ap->ps, skb); if (tim_offset) *tim_offset = beacon->head_len; if (tim_length) *tim_length = skb->len - beacon->head_len; if (beacon->tail) memcpy(skb_put(skb, beacon->tail_len), beacon->tail, beacon->tail_len); } else goto out; } else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) { struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; struct ieee80211_hdr *hdr; struct beacon_data *presp = rcu_dereference(ifibss->presp); if (!presp) goto out; if (sdata->vif.csa_active) ieee80211_update_csa(sdata, presp); skb = dev_alloc_skb(local->tx_headroom + presp->head_len + local->hw.extra_beacon_tailroom); if (!skb) goto out; skb_reserve(skb, local->tx_headroom); memcpy(skb_put(skb, presp->head_len), presp->head, presp->head_len); hdr = (struct ieee80211_hdr *) skb->data; hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_BEACON); } else if (ieee80211_vif_is_mesh(&sdata->vif)) { struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; struct beacon_data *bcn = 
rcu_dereference(ifmsh->beacon); if (!bcn) goto out; if (sdata->vif.csa_active) ieee80211_update_csa(sdata, bcn); if (ifmsh->sync_ops) ifmsh->sync_ops->adjust_tbtt(sdata, bcn); skb = dev_alloc_skb(local->tx_headroom + bcn->head_len + 256 + /* TIM IE */ bcn->tail_len + local->hw.extra_beacon_tailroom); if (!skb) goto out; skb_reserve(skb, local->tx_headroom); memcpy(skb_put(skb, bcn->head_len), bcn->head, bcn->head_len); ieee80211_beacon_add_tim(sdata, &ifmsh->ps, skb); memcpy(skb_put(skb, bcn->tail_len), bcn->tail, bcn->tail_len); } else { WARN_ON(1); goto out; } band = chanctx_conf->def.chan->band; info = IEEE80211_SKB_CB(skb); info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; info->flags |= IEEE80211_TX_CTL_NO_ACK; info->band = band; memset(&txrc, 0, sizeof(txrc)); txrc.hw = hw; txrc.sband = local->hw.wiphy->bands[band]; txrc.bss_conf = &sdata->vif.bss_conf; txrc.skb = skb; txrc.reported_rate.idx = -1; txrc.rate_idx_mask = sdata->rc_rateidx_mask[band]; if (txrc.rate_idx_mask == (1 << txrc.sband->n_bitrates) - 1) txrc.max_rate_idx = -1; else txrc.max_rate_idx = fls(txrc.rate_idx_mask) - 1; txrc.bss = true; rate_control_get_rate(sdata, NULL, &txrc); info->control.vif = vif; info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT | IEEE80211_TX_CTL_ASSIGN_SEQ | IEEE80211_TX_CTL_FIRST_FRAGMENT; out: rcu_read_unlock(); return skb; } EXPORT_SYMBOL(ieee80211_beacon_get_tim); struct sk_buff *ieee80211_proberesp_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct ieee80211_if_ap *ap = NULL; struct sk_buff *skb = NULL; struct probe_resp *presp = NULL; struct ieee80211_hdr *hdr; struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); if (sdata->vif.type != NL80211_IFTYPE_AP) return NULL; rcu_read_lock(); ap = &sdata->u.ap; presp = rcu_dereference(ap->probe_resp); if (!presp) goto out; skb = dev_alloc_skb(presp->len); if (!skb) goto out; memcpy(skb_put(skb, presp->len), presp->data, presp->len); hdr = (struct ieee80211_hdr *) skb->data; memset(hdr->addr1, 0, sizeof(hdr->addr1)); out: rcu_read_unlock(); return skb; } EXPORT_SYMBOL(ieee80211_proberesp_get); struct sk_buff *ieee80211_pspoll_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct ieee80211_sub_if_data *sdata; struct ieee80211_if_managed *ifmgd; struct ieee80211_pspoll *pspoll; struct ieee80211_local *local; struct sk_buff *skb; if (WARN_ON(vif->type != NL80211_IFTYPE_STATION)) return NULL; sdata = vif_to_sdata(vif); ifmgd = &sdata->u.mgd; local = sdata->local; skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*pspoll)); if (!skb) return NULL; skb_reserve(skb, local->hw.extra_tx_headroom); pspoll = (struct ieee80211_pspoll *) skb_put(skb, sizeof(*pspoll)); memset(pspoll, 0, sizeof(*pspoll)); pspoll->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_PSPOLL); pspoll->aid = cpu_to_le16(ifmgd->aid); /* aid in PS-Poll has its two MSBs each set to 1 */ pspoll->aid |= cpu_to_le16(1 << 15 | 1 << 14); memcpy(pspoll->bssid, ifmgd->bssid, ETH_ALEN); memcpy(pspoll->ta, vif->addr, ETH_ALEN); return skb; } EXPORT_SYMBOL(ieee80211_pspoll_get); struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct ieee80211_hdr_3addr *nullfunc; struct ieee80211_sub_if_data *sdata; struct ieee80211_if_managed *ifmgd; struct ieee80211_local *local; struct sk_buff *skb; if (WARN_ON(vif->type != NL80211_IFTYPE_STATION)) return NULL; sdata = vif_to_sdata(vif); ifmgd = &sdata->u.mgd; local = sdata->local; skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*nullfunc)); if (!skb) return NULL; 
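/* reserve the driver's extra headroom, then build a 3-address nullfunc with ToDS set: addr1/addr3 carry the AP's BSSID and addr2 our own address */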
skb_reserve(skb, local->hw.extra_tx_headroom); nullfunc = (struct ieee80211_hdr_3addr *) skb_put(skb, sizeof(*nullfunc)); memset(nullfunc, 0, sizeof(*nullfunc)); nullfunc->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC | IEEE80211_FCTL_TODS); memcpy(nullfunc->addr1, ifmgd->bssid, ETH_ALEN); memcpy(nullfunc->addr2, vif->addr, ETH_ALEN); memcpy(nullfunc->addr3, ifmgd->bssid, ETH_ALEN); return skb; } EXPORT_SYMBOL(ieee80211_nullfunc_get); struct sk_buff *ieee80211_probereq_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif, const u8 *ssid, size_t ssid_len, size_t tailroom) { struct ieee80211_sub_if_data *sdata; struct ieee80211_local *local; struct ieee80211_hdr_3addr *hdr; struct sk_buff *skb; size_t ie_ssid_len; u8 *pos; sdata = vif_to_sdata(vif); local = sdata->local; ie_ssid_len = 2 + ssid_len; skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*hdr) + ie_ssid_len + tailroom); if (!skb) return NULL; skb_reserve(skb, local->hw.extra_tx_headroom); hdr = (struct ieee80211_hdr_3addr *) skb_put(skb, sizeof(*hdr)); memset(hdr, 0, sizeof(*hdr)); hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_REQ); eth_broadcast_addr(hdr->addr1); memcpy(hdr->addr2, vif->addr, ETH_ALEN); eth_broadcast_addr(hdr->addr3); pos = skb_put(skb, ie_ssid_len); *pos++ = WLAN_EID_SSID; *pos++ = ssid_len; if (ssid_len) memcpy(pos, ssid, ssid_len); pos += ssid_len; return skb; } EXPORT_SYMBOL(ieee80211_probereq_get); void ieee80211_rts_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif, const void *frame, size_t frame_len, const struct ieee80211_tx_info *frame_txctl, struct ieee80211_rts *rts) { const struct ieee80211_hdr *hdr = frame; rts->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_RTS); rts->duration = ieee80211_rts_duration(hw, vif, frame_len, frame_txctl); memcpy(rts->ra, hdr->addr1, sizeof(rts->ra)); memcpy(rts->ta, hdr->addr2, sizeof(rts->ta)); } EXPORT_SYMBOL(ieee80211_rts_get); void ieee80211_ctstoself_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif, const void *frame, size_t frame_len, const struct ieee80211_tx_info *frame_txctl, struct ieee80211_cts *cts) { const struct ieee80211_hdr *hdr = frame; cts->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CTS); cts->duration = ieee80211_ctstoself_duration(hw, vif, frame_len, frame_txctl); memcpy(cts->ra, hdr->addr1, sizeof(cts->ra)); } EXPORT_SYMBOL(ieee80211_ctstoself_get); struct sk_buff * ieee80211_get_buffered_bc(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct ieee80211_local *local = hw_to_local(hw); struct sk_buff *skb = NULL; struct ieee80211_tx_data tx; struct ieee80211_sub_if_data *sdata; struct ps_data *ps; struct ieee80211_tx_info *info; struct ieee80211_chanctx_conf *chanctx_conf; sdata = vif_to_sdata(vif); rcu_read_lock(); chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); if (!chanctx_conf) goto out; if (sdata->vif.type == NL80211_IFTYPE_AP) { struct beacon_data *beacon = rcu_dereference(sdata->u.ap.beacon); if (!beacon || !beacon->head) goto out; ps = &sdata->u.ap.ps; } else if (ieee80211_vif_is_mesh(&sdata->vif)) { ps = &sdata->u.mesh.ps; } else { goto out; } if (ps->dtim_count != 0 || !ps->dtim_bc_mc) goto out; /* send buffered bc/mc only after DTIM beacon */ while (1) { skb = skb_dequeue(&ps->bc_buf); if (!skb) goto out; local->total_ps_buffered--; if (!skb_queue_empty(&ps->bc_buf) && skb->len >= 2) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; /* more buffered multicast/broadcast frames ==> set * 
MoreData flag in IEEE 802.11 header to inform PS * STAs */ hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA); } if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) sdata = IEEE80211_DEV_TO_SUB_IF(skb->dev); if (!ieee80211_tx_prepare(sdata, &tx, skb)) break; dev_kfree_skb_any(skb); } info = IEEE80211_SKB_CB(skb); tx.flags |= IEEE80211_TX_PS_BUFFERED; info->band = chanctx_conf->def.chan->band; if (invoke_tx_handlers(&tx)) skb = NULL; out: rcu_read_unlock(); return skb; } EXPORT_SYMBOL(ieee80211_get_buffered_bc); void __ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, int tid, enum ieee80211_band band) { int ac = ieee802_1d_to_ac[tid & 7]; skb_set_mac_header(skb, 0); skb_set_network_header(skb, 0); skb_set_transport_header(skb, 0); skb_set_queue_mapping(skb, ac); skb->priority = tid; skb->dev = sdata->dev; /* * The other path calling ieee80211_xmit is from the tasklet, * and while we can handle concurrent transmissions locking * requirements are that we do not come into tx with bhs on. */ local_bh_disable(); ieee80211_xmit(sdata, skb, band); local_bh_enable(); }
./CrossVul/dataset_final_sorted/CWE-362/c/good_2116_2
crossvul-cpp_data_bad_1819_1
/* * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com * Written by Alex Tomas <alex@clusterfs.com> * * Architecture independence: * Copyright (c) 2005, Bull S.A. * Written by Pierre Peiffer <pierre.peiffer@bull.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public Licens * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111- */ /* * Extents support for EXT4 * * TODO: * - ext4*_error() should be used in some situations * - analyze all BUG()/BUG_ON(), use -EIO where appropriate * - smart tree reduction */ #include <linux/fs.h> #include <linux/time.h> #include <linux/jbd2.h> #include <linux/highuid.h> #include <linux/pagemap.h> #include <linux/quotaops.h> #include <linux/string.h> #include <linux/slab.h> #include <asm/uaccess.h> #include <linux/fiemap.h> #include <linux/backing-dev.h> #include "ext4_jbd2.h" #include "ext4_extents.h" #include "xattr.h" #include <trace/events/ext4.h> /* * used by extent splitting. */ #define EXT4_EXT_MAY_ZEROOUT 0x1 /* safe to zeroout if split fails \ due to ENOSPC */ #define EXT4_EXT_MARK_UNWRIT1 0x2 /* mark first half unwritten */ #define EXT4_EXT_MARK_UNWRIT2 0x4 /* mark second half unwritten */ #define EXT4_EXT_DATA_VALID1 0x8 /* first half contains valid data */ #define EXT4_EXT_DATA_VALID2 0x10 /* second half contains valid data */ static __le32 ext4_extent_block_csum(struct inode *inode, struct ext4_extent_header *eh) { struct ext4_inode_info *ei = EXT4_I(inode); struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); __u32 csum; csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)eh, EXT4_EXTENT_TAIL_OFFSET(eh)); return cpu_to_le32(csum); } static int ext4_extent_block_csum_verify(struct inode *inode, struct ext4_extent_header *eh) { struct ext4_extent_tail *et; if (!ext4_has_metadata_csum(inode->i_sb)) return 1; et = find_ext4_extent_tail(eh); if (et->et_checksum != ext4_extent_block_csum(inode, eh)) return 0; return 1; } static void ext4_extent_block_csum_set(struct inode *inode, struct ext4_extent_header *eh) { struct ext4_extent_tail *et; if (!ext4_has_metadata_csum(inode->i_sb)) return; et = find_ext4_extent_tail(eh); et->et_checksum = ext4_extent_block_csum(inode, eh); } static int ext4_split_extent(handle_t *handle, struct inode *inode, struct ext4_ext_path **ppath, struct ext4_map_blocks *map, int split_flag, int flags); static int ext4_split_extent_at(handle_t *handle, struct inode *inode, struct ext4_ext_path **ppath, ext4_lblk_t split, int split_flag, int flags); static int ext4_find_delayed_extent(struct inode *inode, struct extent_status *newes); static int ext4_ext_truncate_extend_restart(handle_t *handle, struct inode *inode, int needed) { int err; if (!ext4_handle_valid(handle)) return 0; if (handle->h_buffer_credits > needed) return 0; err = ext4_journal_extend(handle, needed); if (err <= 0) return err; err = ext4_truncate_restart_trans(handle, inode, needed); if (err == 0) err = -EAGAIN; return err; } /* * could return: * - EROFS * - ENOMEM */ static int ext4_ext_get_access(handle_t *handle, struct inode *inode, struct 
ext4_ext_path *path) { if (path->p_bh) { /* path points to block */ BUFFER_TRACE(path->p_bh, "get_write_access"); return ext4_journal_get_write_access(handle, path->p_bh); } /* path points to leaf/index in inode body */ /* we use in-core data, no need to protect them */ return 0; } /* * could return: * - EROFS * - ENOMEM * - EIO */ int __ext4_ext_dirty(const char *where, unsigned int line, handle_t *handle, struct inode *inode, struct ext4_ext_path *path) { int err; WARN_ON(!rwsem_is_locked(&EXT4_I(inode)->i_data_sem)); if (path->p_bh) { ext4_extent_block_csum_set(inode, ext_block_hdr(path->p_bh)); /* path points to block */ err = __ext4_handle_dirty_metadata(where, line, handle, inode, path->p_bh); } else { /* path points to leaf/index in inode body */ err = ext4_mark_inode_dirty(handle, inode); } return err; } static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode, struct ext4_ext_path *path, ext4_lblk_t block) { if (path) { int depth = path->p_depth; struct ext4_extent *ex; /* * Try to predict block placement assuming that we are * filling in a file which will eventually be * non-sparse --- i.e., in the case of libbfd writing * an ELF object sections out-of-order but in a way * the eventually results in a contiguous object or * executable file, or some database extending a table * space file. However, this is actually somewhat * non-ideal if we are writing a sparse file such as * qemu or KVM writing a raw image file that is going * to stay fairly sparse, since it will end up * fragmenting the file system's free space. Maybe we * should have some hueristics or some way to allow * userspace to pass a hint to file system, * especially if the latter case turns out to be * common. */ ex = path[depth].p_ext; if (ex) { ext4_fsblk_t ext_pblk = ext4_ext_pblock(ex); ext4_lblk_t ext_block = le32_to_cpu(ex->ee_block); if (block > ext_block) return ext_pblk + (block - ext_block); else return ext_pblk - (ext_block - block); } /* it looks like index is empty; * try to find starting block from index itself */ if (path[depth].p_bh) return path[depth].p_bh->b_blocknr; } /* OK. 
use inode's group */ return ext4_inode_to_goal_block(inode); } /* * Allocation for a meta data block */ static ext4_fsblk_t ext4_ext_new_meta_block(handle_t *handle, struct inode *inode, struct ext4_ext_path *path, struct ext4_extent *ex, int *err, unsigned int flags) { ext4_fsblk_t goal, newblock; goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block)); newblock = ext4_new_meta_blocks(handle, inode, goal, flags, NULL, err); return newblock; } static inline int ext4_ext_space_block(struct inode *inode, int check) { int size; size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header)) / sizeof(struct ext4_extent); #ifdef AGGRESSIVE_TEST if (!check && size > 6) size = 6; #endif return size; } static inline int ext4_ext_space_block_idx(struct inode *inode, int check) { int size; size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header)) / sizeof(struct ext4_extent_idx); #ifdef AGGRESSIVE_TEST if (!check && size > 5) size = 5; #endif return size; } static inline int ext4_ext_space_root(struct inode *inode, int check) { int size; size = sizeof(EXT4_I(inode)->i_data); size -= sizeof(struct ext4_extent_header); size /= sizeof(struct ext4_extent); #ifdef AGGRESSIVE_TEST if (!check && size > 3) size = 3; #endif return size; } static inline int ext4_ext_space_root_idx(struct inode *inode, int check) { int size; size = sizeof(EXT4_I(inode)->i_data); size -= sizeof(struct ext4_extent_header); size /= sizeof(struct ext4_extent_idx); #ifdef AGGRESSIVE_TEST if (!check && size > 4) size = 4; #endif return size; } static inline int ext4_force_split_extent_at(handle_t *handle, struct inode *inode, struct ext4_ext_path **ppath, ext4_lblk_t lblk, int nofail) { struct ext4_ext_path *path = *ppath; int unwritten = ext4_ext_is_unwritten(path[path->p_depth].p_ext); return ext4_split_extent_at(handle, inode, ppath, lblk, unwritten ? EXT4_EXT_MARK_UNWRIT1|EXT4_EXT_MARK_UNWRIT2 : 0, EXT4_EX_NOCACHE | EXT4_GET_BLOCKS_PRE_IO | (nofail ? EXT4_GET_BLOCKS_METADATA_NOFAIL:0)); } /* * Calculate the number of metadata blocks needed * to allocate @blocks * Worse case is one block per extent */ int ext4_ext_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock) { struct ext4_inode_info *ei = EXT4_I(inode); int idxs; idxs = ((inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header)) / sizeof(struct ext4_extent_idx)); /* * If the new delayed allocation block is contiguous with the * previous da block, it can share index blocks with the * previous block, so we only need to allocate a new index * block every idxs leaf blocks. At ldxs**2 blocks, we need * an additional index block, and at ldxs**3 blocks, yet * another index blocks. */ if (ei->i_da_metadata_calc_len && ei->i_da_metadata_calc_last_lblock+1 == lblock) { int num = 0; if ((ei->i_da_metadata_calc_len % idxs) == 0) num++; if ((ei->i_da_metadata_calc_len % (idxs*idxs)) == 0) num++; if ((ei->i_da_metadata_calc_len % (idxs*idxs*idxs)) == 0) { num++; ei->i_da_metadata_calc_len = 0; } else ei->i_da_metadata_calc_len++; ei->i_da_metadata_calc_last_lblock++; return num; } /* * In the worst case we need a new set of index blocks at * every level of the inode's extent tree. 
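* Start a new accounting window at this logical block and charge the worst case of one new metadata block per tree level, i.e. ext_depth() + 1.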
*/ ei->i_da_metadata_calc_len = 1; ei->i_da_metadata_calc_last_lblock = lblock; return ext_depth(inode) + 1; } static int ext4_ext_max_entries(struct inode *inode, int depth) { int max; if (depth == ext_depth(inode)) { if (depth == 0) max = ext4_ext_space_root(inode, 1); else max = ext4_ext_space_root_idx(inode, 1); } else { if (depth == 0) max = ext4_ext_space_block(inode, 1); else max = ext4_ext_space_block_idx(inode, 1); } return max; } static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext) { ext4_fsblk_t block = ext4_ext_pblock(ext); int len = ext4_ext_get_actual_len(ext); ext4_lblk_t lblock = le32_to_cpu(ext->ee_block); ext4_lblk_t last = lblock + len - 1; if (len == 0 || lblock > last) return 0; return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len); } static int ext4_valid_extent_idx(struct inode *inode, struct ext4_extent_idx *ext_idx) { ext4_fsblk_t block = ext4_idx_pblock(ext_idx); return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, 1); } static int ext4_valid_extent_entries(struct inode *inode, struct ext4_extent_header *eh, int depth) { unsigned short entries; if (eh->eh_entries == 0) return 1; entries = le16_to_cpu(eh->eh_entries); if (depth == 0) { /* leaf entries */ struct ext4_extent *ext = EXT_FIRST_EXTENT(eh); struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es; ext4_fsblk_t pblock = 0; ext4_lblk_t lblock = 0; ext4_lblk_t prev = 0; int len = 0; while (entries) { if (!ext4_valid_extent(inode, ext)) return 0; /* Check for overlapping extents */ lblock = le32_to_cpu(ext->ee_block); len = ext4_ext_get_actual_len(ext); if ((lblock <= prev) && prev) { pblock = ext4_ext_pblock(ext); es->s_last_error_block = cpu_to_le64(pblock); return 0; } ext++; entries--; prev = lblock + len - 1; } } else { struct ext4_extent_idx *ext_idx = EXT_FIRST_INDEX(eh); while (entries) { if (!ext4_valid_extent_idx(inode, ext_idx)) return 0; ext_idx++; entries--; } } return 1; } static int __ext4_ext_check(const char *function, unsigned int line, struct inode *inode, struct ext4_extent_header *eh, int depth, ext4_fsblk_t pblk) { const char *error_msg; int max = 0, err = -EFSCORRUPTED; if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) { error_msg = "invalid magic"; goto corrupted; } if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) { error_msg = "unexpected eh_depth"; goto corrupted; } if (unlikely(eh->eh_max == 0)) { error_msg = "invalid eh_max"; goto corrupted; } max = ext4_ext_max_entries(inode, depth); if (unlikely(le16_to_cpu(eh->eh_max) > max)) { error_msg = "too large eh_max"; goto corrupted; } if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) { error_msg = "invalid eh_entries"; goto corrupted; } if (!ext4_valid_extent_entries(inode, eh, depth)) { error_msg = "invalid extent entries"; goto corrupted; } /* Verify checksum on non-root extent tree nodes */ if (ext_depth(inode) != depth && !ext4_extent_block_csum_verify(inode, eh)) { error_msg = "extent tree corrupted"; err = -EFSBADCRC; goto corrupted; } return 0; corrupted: ext4_error_inode(inode, function, line, 0, "pblk %llu bad header/extent: %s - magic %x, " "entries %u, max %u(%u), depth %u(%u)", (unsigned long long) pblk, error_msg, le16_to_cpu(eh->eh_magic), le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max), max, le16_to_cpu(eh->eh_depth), depth); return err; } #define ext4_ext_check(inode, eh, depth, pblk) \ __ext4_ext_check(__func__, __LINE__, (inode), (eh), (depth), (pblk)) int ext4_ext_check_inode(struct inode *inode) { return ext4_ext_check(inode, ext_inode_hdr(inode), 
ext_depth(inode), 0); } static struct buffer_head * __read_extent_tree_block(const char *function, unsigned int line, struct inode *inode, ext4_fsblk_t pblk, int depth, int flags) { struct buffer_head *bh; int err; bh = sb_getblk_gfp(inode->i_sb, pblk, __GFP_MOVABLE | GFP_NOFS); if (unlikely(!bh)) return ERR_PTR(-ENOMEM); if (!bh_uptodate_or_lock(bh)) { trace_ext4_ext_load_extent(inode, pblk, _RET_IP_); err = bh_submit_read(bh); if (err < 0) goto errout; } if (buffer_verified(bh) && !(flags & EXT4_EX_FORCE_CACHE)) return bh; err = __ext4_ext_check(function, line, inode, ext_block_hdr(bh), depth, pblk); if (err) goto errout; set_buffer_verified(bh); /* * If this is a leaf block, cache all of its entries */ if (!(flags & EXT4_EX_NOCACHE) && depth == 0) { struct ext4_extent_header *eh = ext_block_hdr(bh); struct ext4_extent *ex = EXT_FIRST_EXTENT(eh); ext4_lblk_t prev = 0; int i; for (i = le16_to_cpu(eh->eh_entries); i > 0; i--, ex++) { unsigned int status = EXTENT_STATUS_WRITTEN; ext4_lblk_t lblk = le32_to_cpu(ex->ee_block); int len = ext4_ext_get_actual_len(ex); if (prev && (prev != lblk)) ext4_es_cache_extent(inode, prev, lblk - prev, ~0, EXTENT_STATUS_HOLE); if (ext4_ext_is_unwritten(ex)) status = EXTENT_STATUS_UNWRITTEN; ext4_es_cache_extent(inode, lblk, len, ext4_ext_pblock(ex), status); prev = lblk + len; } } return bh; errout: put_bh(bh); return ERR_PTR(err); } #define read_extent_tree_block(inode, pblk, depth, flags) \ __read_extent_tree_block(__func__, __LINE__, (inode), (pblk), \ (depth), (flags)) /* * This function is called to cache a file's extent information in the * extent status tree */ int ext4_ext_precache(struct inode *inode) { struct ext4_inode_info *ei = EXT4_I(inode); struct ext4_ext_path *path = NULL; struct buffer_head *bh; int i = 0, depth, ret = 0; if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) return 0; /* not an extent-mapped inode */ down_read(&ei->i_data_sem); depth = ext_depth(inode); path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1), GFP_NOFS); if (path == NULL) { up_read(&ei->i_data_sem); return -ENOMEM; } /* Don't cache anything if there are no external extent blocks */ if (depth == 0) goto out; path[0].p_hdr = ext_inode_hdr(inode); ret = ext4_ext_check(inode, path[0].p_hdr, depth, 0); if (ret) goto out; path[0].p_idx = EXT_FIRST_INDEX(path[0].p_hdr); while (i >= 0) { /* * If this is a leaf block or we've reached the end of * the index block, go up */ if ((i == depth) || path[i].p_idx > EXT_LAST_INDEX(path[i].p_hdr)) { brelse(path[i].p_bh); path[i].p_bh = NULL; i--; continue; } bh = read_extent_tree_block(inode, ext4_idx_pblock(path[i].p_idx++), depth - i - 1, EXT4_EX_FORCE_CACHE); if (IS_ERR(bh)) { ret = PTR_ERR(bh); break; } i++; path[i].p_bh = bh; path[i].p_hdr = ext_block_hdr(bh); path[i].p_idx = EXT_FIRST_INDEX(path[i].p_hdr); } ext4_set_inode_state(inode, EXT4_STATE_EXT_PRECACHED); out: up_read(&ei->i_data_sem); ext4_ext_drop_refs(path); kfree(path); return ret; } #ifdef EXT_DEBUG static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path) { int k, l = path->p_depth; ext_debug("path:"); for (k = 0; k <= l; k++, path++) { if (path->p_idx) { ext_debug(" %d->%llu", le32_to_cpu(path->p_idx->ei_block), ext4_idx_pblock(path->p_idx)); } else if (path->p_ext) { ext_debug(" %d:[%d]%d:%llu ", le32_to_cpu(path->p_ext->ee_block), ext4_ext_is_unwritten(path->p_ext), ext4_ext_get_actual_len(path->p_ext), ext4_ext_pblock(path->p_ext)); } else ext_debug(" []"); } ext_debug("\n"); } static void ext4_ext_show_leaf(struct inode *inode, 
struct ext4_ext_path *path) { int depth = ext_depth(inode); struct ext4_extent_header *eh; struct ext4_extent *ex; int i; if (!path) return; eh = path[depth].p_hdr; ex = EXT_FIRST_EXTENT(eh); ext_debug("Displaying leaf extents for inode %lu\n", inode->i_ino); for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) { ext_debug("%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block), ext4_ext_is_unwritten(ex), ext4_ext_get_actual_len(ex), ext4_ext_pblock(ex)); } ext_debug("\n"); } static void ext4_ext_show_move(struct inode *inode, struct ext4_ext_path *path, ext4_fsblk_t newblock, int level) { int depth = ext_depth(inode); struct ext4_extent *ex; if (depth != level) { struct ext4_extent_idx *idx; idx = path[level].p_idx; while (idx <= EXT_MAX_INDEX(path[level].p_hdr)) { ext_debug("%d: move %d:%llu in new index %llu\n", level, le32_to_cpu(idx->ei_block), ext4_idx_pblock(idx), newblock); idx++; } return; } ex = path[depth].p_ext; while (ex <= EXT_MAX_EXTENT(path[depth].p_hdr)) { ext_debug("move %d:%llu:[%d]%d in new leaf %llu\n", le32_to_cpu(ex->ee_block), ext4_ext_pblock(ex), ext4_ext_is_unwritten(ex), ext4_ext_get_actual_len(ex), newblock); ex++; } } #else #define ext4_ext_show_path(inode, path) #define ext4_ext_show_leaf(inode, path) #define ext4_ext_show_move(inode, path, newblock, level) #endif void ext4_ext_drop_refs(struct ext4_ext_path *path) { int depth, i; if (!path) return; depth = path->p_depth; for (i = 0; i <= depth; i++, path++) if (path->p_bh) { brelse(path->p_bh); path->p_bh = NULL; } } /* * ext4_ext_binsearch_idx: * binary search for the closest index of the given block * the header must be checked before calling this */ static void ext4_ext_binsearch_idx(struct inode *inode, struct ext4_ext_path *path, ext4_lblk_t block) { struct ext4_extent_header *eh = path->p_hdr; struct ext4_extent_idx *r, *l, *m; ext_debug("binsearch for %u(idx): ", block); l = EXT_FIRST_INDEX(eh) + 1; r = EXT_LAST_INDEX(eh); while (l <= r) { m = l + (r - l) / 2; if (block < le32_to_cpu(m->ei_block)) r = m - 1; else l = m + 1; ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ei_block), m, le32_to_cpu(m->ei_block), r, le32_to_cpu(r->ei_block)); } path->p_idx = l - 1; ext_debug(" -> %u->%lld ", le32_to_cpu(path->p_idx->ei_block), ext4_idx_pblock(path->p_idx)); #ifdef CHECK_BINSEARCH { struct ext4_extent_idx *chix, *ix; int k; chix = ix = EXT_FIRST_INDEX(eh); for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) { if (k != 0 && le32_to_cpu(ix->ei_block) <= le32_to_cpu(ix[-1].ei_block)) { printk(KERN_DEBUG "k=%d, ix=0x%p, " "first=0x%p\n", k, ix, EXT_FIRST_INDEX(eh)); printk(KERN_DEBUG "%u <= %u\n", le32_to_cpu(ix->ei_block), le32_to_cpu(ix[-1].ei_block)); } BUG_ON(k && le32_to_cpu(ix->ei_block) <= le32_to_cpu(ix[-1].ei_block)); if (block < le32_to_cpu(ix->ei_block)) break; chix = ix; } BUG_ON(chix != path->p_idx); } #endif } /* * ext4_ext_binsearch: * binary search for closest extent of the given block * the header must be checked before calling this */ static void ext4_ext_binsearch(struct inode *inode, struct ext4_ext_path *path, ext4_lblk_t block) { struct ext4_extent_header *eh = path->p_hdr; struct ext4_extent *r, *l, *m; if (eh->eh_entries == 0) { /* * this leaf is empty: * we get such a leaf in split/add case */ return; } ext_debug("binsearch for %u: ", block); l = EXT_FIRST_EXTENT(eh) + 1; r = EXT_LAST_EXTENT(eh); while (l <= r) { m = l + (r - l) / 2; if (block < le32_to_cpu(m->ee_block)) r = m - 1; else l = m + 1; ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ee_block), m, 
le32_to_cpu(m->ee_block), r, le32_to_cpu(r->ee_block)); } path->p_ext = l - 1; ext_debug(" -> %d:%llu:[%d]%d ", le32_to_cpu(path->p_ext->ee_block), ext4_ext_pblock(path->p_ext), ext4_ext_is_unwritten(path->p_ext), ext4_ext_get_actual_len(path->p_ext)); #ifdef CHECK_BINSEARCH { struct ext4_extent *chex, *ex; int k; chex = ex = EXT_FIRST_EXTENT(eh); for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) { BUG_ON(k && le32_to_cpu(ex->ee_block) <= le32_to_cpu(ex[-1].ee_block)); if (block < le32_to_cpu(ex->ee_block)) break; chex = ex; } BUG_ON(chex != path->p_ext); } #endif } int ext4_ext_tree_init(handle_t *handle, struct inode *inode) { struct ext4_extent_header *eh; eh = ext_inode_hdr(inode); eh->eh_depth = 0; eh->eh_entries = 0; eh->eh_magic = EXT4_EXT_MAGIC; eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0)); ext4_mark_inode_dirty(handle, inode); return 0; } struct ext4_ext_path * ext4_find_extent(struct inode *inode, ext4_lblk_t block, struct ext4_ext_path **orig_path, int flags) { struct ext4_extent_header *eh; struct buffer_head *bh; struct ext4_ext_path *path = orig_path ? *orig_path : NULL; short int depth, i, ppos = 0; int ret; eh = ext_inode_hdr(inode); depth = ext_depth(inode); if (path) { ext4_ext_drop_refs(path); if (depth > path[0].p_maxdepth) { kfree(path); *orig_path = path = NULL; } } if (!path) { /* account possible depth increase */ path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 2), GFP_NOFS); if (unlikely(!path)) return ERR_PTR(-ENOMEM); path[0].p_maxdepth = depth + 1; } path[0].p_hdr = eh; path[0].p_bh = NULL; i = depth; /* walk through the tree */ while (i) { ext_debug("depth %d: num %d, max %d\n", ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max)); ext4_ext_binsearch_idx(inode, path + ppos, block); path[ppos].p_block = ext4_idx_pblock(path[ppos].p_idx); path[ppos].p_depth = i; path[ppos].p_ext = NULL; bh = read_extent_tree_block(inode, path[ppos].p_block, --i, flags); if (IS_ERR(bh)) { ret = PTR_ERR(bh); goto err; } eh = ext_block_hdr(bh); ppos++; if (unlikely(ppos > depth)) { put_bh(bh); EXT4_ERROR_INODE(inode, "ppos %d > depth %d", ppos, depth); ret = -EFSCORRUPTED; goto err; } path[ppos].p_bh = bh; path[ppos].p_hdr = eh; } path[ppos].p_depth = i; path[ppos].p_ext = NULL; path[ppos].p_idx = NULL; /* find extent */ ext4_ext_binsearch(inode, path + ppos, block); /* if not an empty leaf */ if (path[ppos].p_ext) path[ppos].p_block = ext4_ext_pblock(path[ppos].p_ext); ext4_ext_show_path(inode, path); return path; err: ext4_ext_drop_refs(path); kfree(path); if (orig_path) *orig_path = NULL; return ERR_PTR(ret); } /* * ext4_ext_insert_index: * insert new index [@logical;@ptr] into the block at @curp; * check where to insert: before @curp or after @curp */ static int ext4_ext_insert_index(handle_t *handle, struct inode *inode, struct ext4_ext_path *curp, int logical, ext4_fsblk_t ptr) { struct ext4_extent_idx *ix; int len, err; err = ext4_ext_get_access(handle, inode, curp); if (err) return err; if (unlikely(logical == le32_to_cpu(curp->p_idx->ei_block))) { EXT4_ERROR_INODE(inode, "logical %d == ei_block %d!", logical, le32_to_cpu(curp->p_idx->ei_block)); return -EFSCORRUPTED; } if (unlikely(le16_to_cpu(curp->p_hdr->eh_entries) >= le16_to_cpu(curp->p_hdr->eh_max))) { EXT4_ERROR_INODE(inode, "eh_entries %d >= eh_max %d!", le16_to_cpu(curp->p_hdr->eh_entries), le16_to_cpu(curp->p_hdr->eh_max)); return -EFSCORRUPTED; } if (logical > le32_to_cpu(curp->p_idx->ei_block)) { /* insert after */ ext_debug("insert new index %d after: %llu\n", logical, ptr); ix = 
curp->p_idx + 1; } else { /* insert before */ ext_debug("insert new index %d before: %llu\n", logical, ptr); ix = curp->p_idx; } len = EXT_LAST_INDEX(curp->p_hdr) - ix + 1; BUG_ON(len < 0); if (len > 0) { ext_debug("insert new index %d: " "move %d indices from 0x%p to 0x%p\n", logical, len, ix, ix + 1); memmove(ix + 1, ix, len * sizeof(struct ext4_extent_idx)); } if (unlikely(ix > EXT_MAX_INDEX(curp->p_hdr))) { EXT4_ERROR_INODE(inode, "ix > EXT_MAX_INDEX!"); return -EFSCORRUPTED; } ix->ei_block = cpu_to_le32(logical); ext4_idx_store_pblock(ix, ptr); le16_add_cpu(&curp->p_hdr->eh_entries, 1); if (unlikely(ix > EXT_LAST_INDEX(curp->p_hdr))) { EXT4_ERROR_INODE(inode, "ix > EXT_LAST_INDEX!"); return -EFSCORRUPTED; } err = ext4_ext_dirty(handle, inode, curp); ext4_std_error(inode->i_sb, err); return err; } /* * ext4_ext_split: * inserts new subtree into the path, using free index entry * at depth @at: * - allocates all needed blocks (new leaf and all intermediate index blocks) * - makes decision where to split * - moves remaining extents and index entries (right to the split point) * into the newly allocated blocks * - initializes subtree */ static int ext4_ext_split(handle_t *handle, struct inode *inode, unsigned int flags, struct ext4_ext_path *path, struct ext4_extent *newext, int at) { struct buffer_head *bh = NULL; int depth = ext_depth(inode); struct ext4_extent_header *neh; struct ext4_extent_idx *fidx; int i = at, k, m, a; ext4_fsblk_t newblock, oldblock; __le32 border; ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */ int err = 0; /* make decision: where to split? */ /* FIXME: now decision is simplest: at current extent */ /* if current leaf will be split, then we should use * border from split point */ if (unlikely(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr))) { EXT4_ERROR_INODE(inode, "p_ext > EXT_MAX_EXTENT!"); return -EFSCORRUPTED; } if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) { border = path[depth].p_ext[1].ee_block; ext_debug("leaf will be split." " next leaf starts at %d\n", le32_to_cpu(border)); } else { border = newext->ee_block; ext_debug("leaf will be added." " next leaf starts at %d\n", le32_to_cpu(border)); } /* * If error occurs, then we break processing * and mark filesystem read-only. index won't * be inserted and tree will be in consistent * state. Next mount will repair buffers too. */ /* * Get array to track all allocated blocks. * We need this to handle errors and free blocks * upon them. 
*/ ablocks = kzalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS); if (!ablocks) return -ENOMEM; /* allocate all needed blocks */ ext_debug("allocate %d blocks for indexes/leaf\n", depth - at); for (a = 0; a < depth - at; a++) { newblock = ext4_ext_new_meta_block(handle, inode, path, newext, &err, flags); if (newblock == 0) goto cleanup; ablocks[a] = newblock; } /* initialize new leaf */ newblock = ablocks[--a]; if (unlikely(newblock == 0)) { EXT4_ERROR_INODE(inode, "newblock == 0!"); err = -EFSCORRUPTED; goto cleanup; } bh = sb_getblk_gfp(inode->i_sb, newblock, __GFP_MOVABLE | GFP_NOFS); if (unlikely(!bh)) { err = -ENOMEM; goto cleanup; } lock_buffer(bh); err = ext4_journal_get_create_access(handle, bh); if (err) goto cleanup; neh = ext_block_hdr(bh); neh->eh_entries = 0; neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0)); neh->eh_magic = EXT4_EXT_MAGIC; neh->eh_depth = 0; /* move remainder of path[depth] to the new leaf */ if (unlikely(path[depth].p_hdr->eh_entries != path[depth].p_hdr->eh_max)) { EXT4_ERROR_INODE(inode, "eh_entries %d != eh_max %d!", path[depth].p_hdr->eh_entries, path[depth].p_hdr->eh_max); err = -EFSCORRUPTED; goto cleanup; } /* start copy from next extent */ m = EXT_MAX_EXTENT(path[depth].p_hdr) - path[depth].p_ext++; ext4_ext_show_move(inode, path, newblock, depth); if (m) { struct ext4_extent *ex; ex = EXT_FIRST_EXTENT(neh); memmove(ex, path[depth].p_ext, sizeof(struct ext4_extent) * m); le16_add_cpu(&neh->eh_entries, m); } ext4_extent_block_csum_set(inode, neh); set_buffer_uptodate(bh); unlock_buffer(bh); err = ext4_handle_dirty_metadata(handle, inode, bh); if (err) goto cleanup; brelse(bh); bh = NULL; /* correct old leaf */ if (m) { err = ext4_ext_get_access(handle, inode, path + depth); if (err) goto cleanup; le16_add_cpu(&path[depth].p_hdr->eh_entries, -m); err = ext4_ext_dirty(handle, inode, path + depth); if (err) goto cleanup; } /* create intermediate indexes */ k = depth - at - 1; if (unlikely(k < 0)) { EXT4_ERROR_INODE(inode, "k %d < 0!", k); err = -EFSCORRUPTED; goto cleanup; } if (k) ext_debug("create %d intermediate indices\n", k); /* insert new index into current index block */ /* current depth stored in i var */ i = depth - 1; while (k--) { oldblock = newblock; newblock = ablocks[--a]; bh = sb_getblk(inode->i_sb, newblock); if (unlikely(!bh)) { err = -ENOMEM; goto cleanup; } lock_buffer(bh); err = ext4_journal_get_create_access(handle, bh); if (err) goto cleanup; neh = ext_block_hdr(bh); neh->eh_entries = cpu_to_le16(1); neh->eh_magic = EXT4_EXT_MAGIC; neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0)); neh->eh_depth = cpu_to_le16(depth - i); fidx = EXT_FIRST_INDEX(neh); fidx->ei_block = border; ext4_idx_store_pblock(fidx, oldblock); ext_debug("int.index at %d (block %llu): %u -> %llu\n", i, newblock, le32_to_cpu(border), oldblock); /* move remainder of path[i] to the new index block */ if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) != EXT_LAST_INDEX(path[i].p_hdr))) { EXT4_ERROR_INODE(inode, "EXT_MAX_INDEX != EXT_LAST_INDEX ee_block %d!", le32_to_cpu(path[i].p_ext->ee_block)); err = -EFSCORRUPTED; goto cleanup; } /* start copy indexes */ m = EXT_MAX_INDEX(path[i].p_hdr) - path[i].p_idx++; ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx, EXT_MAX_INDEX(path[i].p_hdr)); ext4_ext_show_move(inode, path, newblock, i); if (m) { memmove(++fidx, path[i].p_idx, sizeof(struct ext4_extent_idx) * m); le16_add_cpu(&neh->eh_entries, m); } ext4_extent_block_csum_set(inode, neh); set_buffer_uptodate(bh); unlock_buffer(bh); err = 
ext4_handle_dirty_metadata(handle, inode, bh); if (err) goto cleanup; brelse(bh); bh = NULL; /* correct old index */ if (m) { err = ext4_ext_get_access(handle, inode, path + i); if (err) goto cleanup; le16_add_cpu(&path[i].p_hdr->eh_entries, -m); err = ext4_ext_dirty(handle, inode, path + i); if (err) goto cleanup; } i--; } /* insert new index */ err = ext4_ext_insert_index(handle, inode, path + at, le32_to_cpu(border), newblock); cleanup: if (bh) { if (buffer_locked(bh)) unlock_buffer(bh); brelse(bh); } if (err) { /* free all allocated blocks in error case */ for (i = 0; i < depth; i++) { if (!ablocks[i]) continue; ext4_free_blocks(handle, inode, NULL, ablocks[i], 1, EXT4_FREE_BLOCKS_METADATA); } } kfree(ablocks); return err; } /* * ext4_ext_grow_indepth: * implements tree growing procedure: * - allocates new block * - moves top-level data (index block or leaf) into the new block * - initializes new top-level, creating index that points to the * just created block */ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode, unsigned int flags) { struct ext4_extent_header *neh; struct buffer_head *bh; ext4_fsblk_t newblock, goal = 0; struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es; int err = 0; /* Try to prepend new index to old one */ if (ext_depth(inode)) goal = ext4_idx_pblock(EXT_FIRST_INDEX(ext_inode_hdr(inode))); if (goal > le32_to_cpu(es->s_first_data_block)) { flags |= EXT4_MB_HINT_TRY_GOAL; goal--; } else goal = ext4_inode_to_goal_block(inode); newblock = ext4_new_meta_blocks(handle, inode, goal, flags, NULL, &err); if (newblock == 0) return err; bh = sb_getblk_gfp(inode->i_sb, newblock, __GFP_MOVABLE | GFP_NOFS); if (unlikely(!bh)) return -ENOMEM; lock_buffer(bh); err = ext4_journal_get_create_access(handle, bh); if (err) { unlock_buffer(bh); goto out; } /* move top-level index/leaf into new block */ memmove(bh->b_data, EXT4_I(inode)->i_data, sizeof(EXT4_I(inode)->i_data)); /* set size of new block */ neh = ext_block_hdr(bh); /* old root could have indexes or leaves * so calculate e_max right way */ if (ext_depth(inode)) neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0)); else neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0)); neh->eh_magic = EXT4_EXT_MAGIC; ext4_extent_block_csum_set(inode, neh); set_buffer_uptodate(bh); unlock_buffer(bh); err = ext4_handle_dirty_metadata(handle, inode, bh); if (err) goto out; /* Update top-level index: num,max,pointer */ neh = ext_inode_hdr(inode); neh->eh_entries = cpu_to_le16(1); ext4_idx_store_pblock(EXT_FIRST_INDEX(neh), newblock); if (neh->eh_depth == 0) { /* Root extent block becomes index block */ neh->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode, 0)); EXT_FIRST_INDEX(neh)->ei_block = EXT_FIRST_EXTENT(neh)->ee_block; } ext_debug("new root: num %d(%d), lblock %d, ptr %llu\n", le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max), le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block), ext4_idx_pblock(EXT_FIRST_INDEX(neh))); le16_add_cpu(&neh->eh_depth, 1); ext4_mark_inode_dirty(handle, inode); out: brelse(bh); return err; } /* * ext4_ext_create_new_leaf: * finds empty index and adds new leaf. * if no free index is found, then it requests in-depth growing. 
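 *
 * For illustration (hypothetical depths, not a real tree): with a tree of
 * depth 2 whose leaf and level-1 index block are both full but whose root
 * still has a free slot, the walk below stops at path[0] and
 * ext4_ext_split() is called with at == 0; only when every level is full
 * does the function fall back to ext4_ext_grow_indepth() and retry.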
*/ static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode, unsigned int mb_flags, unsigned int gb_flags, struct ext4_ext_path **ppath, struct ext4_extent *newext) { struct ext4_ext_path *path = *ppath; struct ext4_ext_path *curp; int depth, i, err = 0; repeat: i = depth = ext_depth(inode); /* walk up to the tree and look for free index entry */ curp = path + depth; while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) { i--; curp--; } /* we use already allocated block for index block, * so subsequent data blocks should be contiguous */ if (EXT_HAS_FREE_INDEX(curp)) { /* if we found index with free entry, then use that * entry: create all needed subtree and add new leaf */ err = ext4_ext_split(handle, inode, mb_flags, path, newext, i); if (err) goto out; /* refill path */ path = ext4_find_extent(inode, (ext4_lblk_t)le32_to_cpu(newext->ee_block), ppath, gb_flags); if (IS_ERR(path)) err = PTR_ERR(path); } else { /* tree is full, time to grow in depth */ err = ext4_ext_grow_indepth(handle, inode, mb_flags); if (err) goto out; /* refill path */ path = ext4_find_extent(inode, (ext4_lblk_t)le32_to_cpu(newext->ee_block), ppath, gb_flags); if (IS_ERR(path)) { err = PTR_ERR(path); goto out; } /* * only first (depth 0 -> 1) produces free space; * in all other cases we have to split the grown tree */ depth = ext_depth(inode); if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) { /* now we need to split */ goto repeat; } } out: return err; } /* * search the closest allocated block to the left for *logical * and returns it at @logical + it's physical address at @phys * if *logical is the smallest allocated block, the function * returns 0 at @phys * return value contains 0 (success) or error code */ static int ext4_ext_search_left(struct inode *inode, struct ext4_ext_path *path, ext4_lblk_t *logical, ext4_fsblk_t *phys) { struct ext4_extent_idx *ix; struct ext4_extent *ex; int depth, ee_len; if (unlikely(path == NULL)) { EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical); return -EFSCORRUPTED; } depth = path->p_depth; *phys = 0; if (depth == 0 && path->p_ext == NULL) return 0; /* usually extent in the path covers blocks smaller * then *logical, but it can be that extent is the * first one in the file */ ex = path[depth].p_ext; ee_len = ext4_ext_get_actual_len(ex); if (*logical < le32_to_cpu(ex->ee_block)) { if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) { EXT4_ERROR_INODE(inode, "EXT_FIRST_EXTENT != ex *logical %d ee_block %d!", *logical, le32_to_cpu(ex->ee_block)); return -EFSCORRUPTED; } while (--depth >= 0) { ix = path[depth].p_idx; if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) { EXT4_ERROR_INODE(inode, "ix (%d) != EXT_FIRST_INDEX (%d) (depth %d)!", ix != NULL ? le32_to_cpu(ix->ei_block) : 0, EXT_FIRST_INDEX(path[depth].p_hdr) != NULL ? 
le32_to_cpu(EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block) : 0, depth); return -EFSCORRUPTED; } } return 0; } if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) { EXT4_ERROR_INODE(inode, "logical %d < ee_block %d + ee_len %d!", *logical, le32_to_cpu(ex->ee_block), ee_len); return -EFSCORRUPTED; } *logical = le32_to_cpu(ex->ee_block) + ee_len - 1; *phys = ext4_ext_pblock(ex) + ee_len - 1; return 0; } /* * search the closest allocated block to the right for *logical * and returns it at @logical + it's physical address at @phys * if *logical is the largest allocated block, the function * returns 0 at @phys * return value contains 0 (success) or error code */ static int ext4_ext_search_right(struct inode *inode, struct ext4_ext_path *path, ext4_lblk_t *logical, ext4_fsblk_t *phys, struct ext4_extent **ret_ex) { struct buffer_head *bh = NULL; struct ext4_extent_header *eh; struct ext4_extent_idx *ix; struct ext4_extent *ex; ext4_fsblk_t block; int depth; /* Note, NOT eh_depth; depth from top of tree */ int ee_len; if (unlikely(path == NULL)) { EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical); return -EFSCORRUPTED; } depth = path->p_depth; *phys = 0; if (depth == 0 && path->p_ext == NULL) return 0; /* usually extent in the path covers blocks smaller * then *logical, but it can be that extent is the * first one in the file */ ex = path[depth].p_ext; ee_len = ext4_ext_get_actual_len(ex); if (*logical < le32_to_cpu(ex->ee_block)) { if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) { EXT4_ERROR_INODE(inode, "first_extent(path[%d].p_hdr) != ex", depth); return -EFSCORRUPTED; } while (--depth >= 0) { ix = path[depth].p_idx; if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) { EXT4_ERROR_INODE(inode, "ix != EXT_FIRST_INDEX *logical %d!", *logical); return -EFSCORRUPTED; } } goto found_extent; } if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) { EXT4_ERROR_INODE(inode, "logical %d < ee_block %d + ee_len %d!", *logical, le32_to_cpu(ex->ee_block), ee_len); return -EFSCORRUPTED; } if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) { /* next allocated block in this leaf */ ex++; goto found_extent; } /* go up and search for index to the right */ while (--depth >= 0) { ix = path[depth].p_idx; if (ix != EXT_LAST_INDEX(path[depth].p_hdr)) goto got_index; } /* we've gone up to the root and found no index to the right */ return 0; got_index: /* we've found index to the right, let's * follow it and find the closest allocated * block to the right */ ix++; block = ext4_idx_pblock(ix); while (++depth < path->p_depth) { /* subtract from p_depth to get proper eh_depth */ bh = read_extent_tree_block(inode, block, path->p_depth - depth, 0); if (IS_ERR(bh)) return PTR_ERR(bh); eh = ext_block_hdr(bh); ix = EXT_FIRST_INDEX(eh); block = ext4_idx_pblock(ix); put_bh(bh); } bh = read_extent_tree_block(inode, block, path->p_depth - depth, 0); if (IS_ERR(bh)) return PTR_ERR(bh); eh = ext_block_hdr(bh); ex = EXT_FIRST_EXTENT(eh); found_extent: *logical = le32_to_cpu(ex->ee_block); *phys = ext4_ext_pblock(ex); *ret_ex = ex; if (bh) put_bh(bh); return 0; } /* * ext4_ext_next_allocated_block: * returns allocated block in subsequent extent or EXT_MAX_BLOCKS. * NOTE: it considers block number from index entry as * allocated block. Thus, index entries have to be consistent * with leaves. 
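 *
 * For illustration (hypothetical tree of depth 1): if path[1].p_ext is the
 * last extent in its leaf but path[0].p_idx is not the last index in the
 * root, the leaf level yields nothing and the function returns the
 * ei_block of path[0].p_idx[1], i.e. the first logical block covered by
 * the next leaf.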
*/ ext4_lblk_t ext4_ext_next_allocated_block(struct ext4_ext_path *path) { int depth; BUG_ON(path == NULL); depth = path->p_depth; if (depth == 0 && path->p_ext == NULL) return EXT_MAX_BLOCKS; while (depth >= 0) { if (depth == path->p_depth) { /* leaf */ if (path[depth].p_ext && path[depth].p_ext != EXT_LAST_EXTENT(path[depth].p_hdr)) return le32_to_cpu(path[depth].p_ext[1].ee_block); } else { /* index */ if (path[depth].p_idx != EXT_LAST_INDEX(path[depth].p_hdr)) return le32_to_cpu(path[depth].p_idx[1].ei_block); } depth--; } return EXT_MAX_BLOCKS; } /* * ext4_ext_next_leaf_block: * returns first allocated block from next leaf or EXT_MAX_BLOCKS */ static ext4_lblk_t ext4_ext_next_leaf_block(struct ext4_ext_path *path) { int depth; BUG_ON(path == NULL); depth = path->p_depth; /* zero-tree has no leaf blocks at all */ if (depth == 0) return EXT_MAX_BLOCKS; /* go to index block */ depth--; while (depth >= 0) { if (path[depth].p_idx != EXT_LAST_INDEX(path[depth].p_hdr)) return (ext4_lblk_t) le32_to_cpu(path[depth].p_idx[1].ei_block); depth--; } return EXT_MAX_BLOCKS; } /* * ext4_ext_correct_indexes: * if leaf gets modified and modified extent is first in the leaf, * then we have to correct all indexes above. * TODO: do we need to correct tree in all cases? */ static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode, struct ext4_ext_path *path) { struct ext4_extent_header *eh; int depth = ext_depth(inode); struct ext4_extent *ex; __le32 border; int k, err = 0; eh = path[depth].p_hdr; ex = path[depth].p_ext; if (unlikely(ex == NULL || eh == NULL)) { EXT4_ERROR_INODE(inode, "ex %p == NULL or eh %p == NULL", ex, eh); return -EFSCORRUPTED; } if (depth == 0) { /* there is no tree at all */ return 0; } if (ex != EXT_FIRST_EXTENT(eh)) { /* we correct tree if first leaf got modified only */ return 0; } /* * TODO: we need correction if border is smaller than current one */ k = depth - 1; border = path[depth].p_ext->ee_block; err = ext4_ext_get_access(handle, inode, path + k); if (err) return err; path[k].p_idx->ei_block = border; err = ext4_ext_dirty(handle, inode, path + k); if (err) return err; while (k--) { /* change all left-side indexes */ if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr)) break; err = ext4_ext_get_access(handle, inode, path + k); if (err) break; path[k].p_idx->ei_block = border; err = ext4_ext_dirty(handle, inode, path + k); if (err) break; } return err; } int ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1, struct ext4_extent *ex2) { unsigned short ext1_ee_len, ext2_ee_len; if (ext4_ext_is_unwritten(ex1) != ext4_ext_is_unwritten(ex2)) return 0; ext1_ee_len = ext4_ext_get_actual_len(ex1); ext2_ee_len = ext4_ext_get_actual_len(ex2); if (le32_to_cpu(ex1->ee_block) + ext1_ee_len != le32_to_cpu(ex2->ee_block)) return 0; /* * To allow future support for preallocated extents to be added * as an RO_COMPAT feature, refuse to merge to extents if * this can result in the top bit of ee_len being set. */ if (ext1_ee_len + ext2_ee_len > EXT_INIT_MAX_LEN) return 0; if (ext4_ext_is_unwritten(ex1) && (ext4_test_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN) || atomic_read(&EXT4_I(inode)->i_unwritten) || (ext1_ee_len + ext2_ee_len > EXT_UNWRITTEN_MAX_LEN))) return 0; #ifdef AGGRESSIVE_TEST if (ext1_ee_len >= 4) return 0; #endif if (ext4_ext_pblock(ex1) + ext1_ee_len == ext4_ext_pblock(ex2)) return 1; return 0; } /* * This function tries to merge the "ex" extent to the next extent in the tree. * It always tries to merge towards right. 
If you want to merge towards * left, pass "ex - 1" as argument instead of "ex". * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns * 1 if they got merged. */ static int ext4_ext_try_to_merge_right(struct inode *inode, struct ext4_ext_path *path, struct ext4_extent *ex) { struct ext4_extent_header *eh; unsigned int depth, len; int merge_done = 0, unwritten; depth = ext_depth(inode); BUG_ON(path[depth].p_hdr == NULL); eh = path[depth].p_hdr; while (ex < EXT_LAST_EXTENT(eh)) { if (!ext4_can_extents_be_merged(inode, ex, ex + 1)) break; /* merge with next extent! */ unwritten = ext4_ext_is_unwritten(ex); ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex) + ext4_ext_get_actual_len(ex + 1)); if (unwritten) ext4_ext_mark_unwritten(ex); if (ex + 1 < EXT_LAST_EXTENT(eh)) { len = (EXT_LAST_EXTENT(eh) - ex - 1) * sizeof(struct ext4_extent); memmove(ex + 1, ex + 2, len); } le16_add_cpu(&eh->eh_entries, -1); merge_done = 1; WARN_ON(eh->eh_entries == 0); if (!eh->eh_entries) EXT4_ERROR_INODE(inode, "eh->eh_entries = 0!"); } return merge_done; } /* * This function does a very simple check to see if we can collapse * an extent tree with a single extent tree leaf block into the inode. */ static void ext4_ext_try_to_merge_up(handle_t *handle, struct inode *inode, struct ext4_ext_path *path) { size_t s; unsigned max_root = ext4_ext_space_root(inode, 0); ext4_fsblk_t blk; if ((path[0].p_depth != 1) || (le16_to_cpu(path[0].p_hdr->eh_entries) != 1) || (le16_to_cpu(path[1].p_hdr->eh_entries) > max_root)) return; /* * We need to modify the block allocation bitmap and the block * group descriptor to release the extent tree block. If we * can't get the journal credits, give up. */ if (ext4_journal_extend(handle, 2)) return; /* * Copy the extent data up to the inode */ blk = ext4_idx_pblock(path[0].p_idx); s = le16_to_cpu(path[1].p_hdr->eh_entries) * sizeof(struct ext4_extent_idx); s += sizeof(struct ext4_extent_header); path[1].p_maxdepth = path[0].p_maxdepth; memcpy(path[0].p_hdr, path[1].p_hdr, s); path[0].p_depth = 0; path[0].p_ext = EXT_FIRST_EXTENT(path[0].p_hdr) + (path[1].p_ext - EXT_FIRST_EXTENT(path[1].p_hdr)); path[0].p_hdr->eh_max = cpu_to_le16(max_root); brelse(path[1].p_bh); ext4_free_blocks(handle, inode, NULL, blk, 1, EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET); } /* * This function tries to merge the @ex extent to neighbours in the tree. * return 1 if merge left else 0. */ static void ext4_ext_try_to_merge(handle_t *handle, struct inode *inode, struct ext4_ext_path *path, struct ext4_extent *ex) { struct ext4_extent_header *eh; unsigned int depth; int merge_done = 0; depth = ext_depth(inode); BUG_ON(path[depth].p_hdr == NULL); eh = path[depth].p_hdr; if (ex > EXT_FIRST_EXTENT(eh)) merge_done = ext4_ext_try_to_merge_right(inode, path, ex - 1); if (!merge_done) (void) ext4_ext_try_to_merge_right(inode, path, ex); ext4_ext_try_to_merge_up(handle, inode, path); } /* * check if a portion of the "newext" extent overlaps with an * existing extent. * * If there is an overlap discovered, it updates the length of the newext * such that there will be no overlap, and then returns 1. * If there is no overlap found, it returns 0. 
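 *
 * For illustration (hypothetical numbers): if newext covers logical blocks
 * 100..119 (b1 = 100, len1 = 20) and the next allocated block found in the
 * tree is b2 = 110, the length is clipped so that newext covers 100..109
 * (ee_len = b2 - b1 = 10) and 1 is returned.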
*/ static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi, struct inode *inode, struct ext4_extent *newext, struct ext4_ext_path *path) { ext4_lblk_t b1, b2; unsigned int depth, len1; unsigned int ret = 0; b1 = le32_to_cpu(newext->ee_block); len1 = ext4_ext_get_actual_len(newext); depth = ext_depth(inode); if (!path[depth].p_ext) goto out; b2 = EXT4_LBLK_CMASK(sbi, le32_to_cpu(path[depth].p_ext->ee_block)); /* * get the next allocated block if the extent in the path * is before the requested block(s) */ if (b2 < b1) { b2 = ext4_ext_next_allocated_block(path); if (b2 == EXT_MAX_BLOCKS) goto out; b2 = EXT4_LBLK_CMASK(sbi, b2); } /* check for wrap through zero on extent logical start block*/ if (b1 + len1 < b1) { len1 = EXT_MAX_BLOCKS - b1; newext->ee_len = cpu_to_le16(len1); ret = 1; } /* check for overlap */ if (b1 + len1 > b2) { newext->ee_len = cpu_to_le16(b2 - b1); ret = 1; } out: return ret; } /* * ext4_ext_insert_extent: * tries to merge requsted extent into the existing extent or * inserts requested extent as new one into the tree, * creating new leaf in the no-space case. */ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode, struct ext4_ext_path **ppath, struct ext4_extent *newext, int gb_flags) { struct ext4_ext_path *path = *ppath; struct ext4_extent_header *eh; struct ext4_extent *ex, *fex; struct ext4_extent *nearex; /* nearest extent */ struct ext4_ext_path *npath = NULL; int depth, len, err; ext4_lblk_t next; int mb_flags = 0, unwritten; if (gb_flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) mb_flags |= EXT4_MB_DELALLOC_RESERVED; if (unlikely(ext4_ext_get_actual_len(newext) == 0)) { EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0"); return -EFSCORRUPTED; } depth = ext_depth(inode); ex = path[depth].p_ext; eh = path[depth].p_hdr; if (unlikely(path[depth].p_hdr == NULL)) { EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth); return -EFSCORRUPTED; } /* try to insert block into found extent and return */ if (ex && !(gb_flags & EXT4_GET_BLOCKS_PRE_IO)) { /* * Try to see whether we should rather test the extent on * right from ex, or from the left of ex. This is because * ext4_find_extent() can return either extent on the * left, or on the right from the searched position. This * will make merging more effective. 
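	 *
	 * For illustration (hypothetical layout): if ex covers logical
	 * blocks 0..9 at physical blocks 1000..1009 and newext covers
	 * 10..14 at 1010..1014 with the same unwritten state, the append
	 * branch below simply grows ex->ee_len to 15 instead of inserting
	 * a second extent and merging it afterwards.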
*/ if (ex < EXT_LAST_EXTENT(eh) && (le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex) < le32_to_cpu(newext->ee_block))) { ex += 1; goto prepend; } else if ((ex > EXT_FIRST_EXTENT(eh)) && (le32_to_cpu(newext->ee_block) + ext4_ext_get_actual_len(newext) < le32_to_cpu(ex->ee_block))) ex -= 1; /* Try to append newex to the ex */ if (ext4_can_extents_be_merged(inode, ex, newext)) { ext_debug("append [%d]%d block to %u:[%d]%d" "(from %llu)\n", ext4_ext_is_unwritten(newext), ext4_ext_get_actual_len(newext), le32_to_cpu(ex->ee_block), ext4_ext_is_unwritten(ex), ext4_ext_get_actual_len(ex), ext4_ext_pblock(ex)); err = ext4_ext_get_access(handle, inode, path + depth); if (err) return err; unwritten = ext4_ext_is_unwritten(ex); ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex) + ext4_ext_get_actual_len(newext)); if (unwritten) ext4_ext_mark_unwritten(ex); eh = path[depth].p_hdr; nearex = ex; goto merge; } prepend: /* Try to prepend newex to the ex */ if (ext4_can_extents_be_merged(inode, newext, ex)) { ext_debug("prepend %u[%d]%d block to %u:[%d]%d" "(from %llu)\n", le32_to_cpu(newext->ee_block), ext4_ext_is_unwritten(newext), ext4_ext_get_actual_len(newext), le32_to_cpu(ex->ee_block), ext4_ext_is_unwritten(ex), ext4_ext_get_actual_len(ex), ext4_ext_pblock(ex)); err = ext4_ext_get_access(handle, inode, path + depth); if (err) return err; unwritten = ext4_ext_is_unwritten(ex); ex->ee_block = newext->ee_block; ext4_ext_store_pblock(ex, ext4_ext_pblock(newext)); ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex) + ext4_ext_get_actual_len(newext)); if (unwritten) ext4_ext_mark_unwritten(ex); eh = path[depth].p_hdr; nearex = ex; goto merge; } } depth = ext_depth(inode); eh = path[depth].p_hdr; if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) goto has_space; /* probably next leaf has space for us? */ fex = EXT_LAST_EXTENT(eh); next = EXT_MAX_BLOCKS; if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block)) next = ext4_ext_next_leaf_block(path); if (next != EXT_MAX_BLOCKS) { ext_debug("next leaf block - %u\n", next); BUG_ON(npath != NULL); npath = ext4_find_extent(inode, next, NULL, 0); if (IS_ERR(npath)) return PTR_ERR(npath); BUG_ON(npath->p_depth != path->p_depth); eh = npath[depth].p_hdr; if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) { ext_debug("next leaf isn't full(%d)\n", le16_to_cpu(eh->eh_entries)); path = npath; goto has_space; } ext_debug("next leaf has no free space(%d,%d)\n", le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max)); } /* * There is no free space in the found leaf. * We're gonna add a new leaf in the tree. 
*/ if (gb_flags & EXT4_GET_BLOCKS_METADATA_NOFAIL) mb_flags |= EXT4_MB_USE_RESERVED; err = ext4_ext_create_new_leaf(handle, inode, mb_flags, gb_flags, ppath, newext); if (err) goto cleanup; depth = ext_depth(inode); eh = path[depth].p_hdr; has_space: nearex = path[depth].p_ext; err = ext4_ext_get_access(handle, inode, path + depth); if (err) goto cleanup; if (!nearex) { /* there is no extent in this leaf, create first one */ ext_debug("first extent in the leaf: %u:%llu:[%d]%d\n", le32_to_cpu(newext->ee_block), ext4_ext_pblock(newext), ext4_ext_is_unwritten(newext), ext4_ext_get_actual_len(newext)); nearex = EXT_FIRST_EXTENT(eh); } else { if (le32_to_cpu(newext->ee_block) > le32_to_cpu(nearex->ee_block)) { /* Insert after */ ext_debug("insert %u:%llu:[%d]%d before: " "nearest %p\n", le32_to_cpu(newext->ee_block), ext4_ext_pblock(newext), ext4_ext_is_unwritten(newext), ext4_ext_get_actual_len(newext), nearex); nearex++; } else { /* Insert before */ BUG_ON(newext->ee_block == nearex->ee_block); ext_debug("insert %u:%llu:[%d]%d after: " "nearest %p\n", le32_to_cpu(newext->ee_block), ext4_ext_pblock(newext), ext4_ext_is_unwritten(newext), ext4_ext_get_actual_len(newext), nearex); } len = EXT_LAST_EXTENT(eh) - nearex + 1; if (len > 0) { ext_debug("insert %u:%llu:[%d]%d: " "move %d extents from 0x%p to 0x%p\n", le32_to_cpu(newext->ee_block), ext4_ext_pblock(newext), ext4_ext_is_unwritten(newext), ext4_ext_get_actual_len(newext), len, nearex, nearex + 1); memmove(nearex + 1, nearex, len * sizeof(struct ext4_extent)); } } le16_add_cpu(&eh->eh_entries, 1); path[depth].p_ext = nearex; nearex->ee_block = newext->ee_block; ext4_ext_store_pblock(nearex, ext4_ext_pblock(newext)); nearex->ee_len = newext->ee_len; merge: /* try to merge extents */ if (!(gb_flags & EXT4_GET_BLOCKS_PRE_IO)) ext4_ext_try_to_merge(handle, inode, path, nearex); /* time to correct all indexes above */ err = ext4_ext_correct_indexes(handle, inode, path); if (err) goto cleanup; err = ext4_ext_dirty(handle, inode, path + path->p_depth); cleanup: ext4_ext_drop_refs(npath); kfree(npath); return err; } static int ext4_fill_fiemap_extents(struct inode *inode, ext4_lblk_t block, ext4_lblk_t num, struct fiemap_extent_info *fieinfo) { struct ext4_ext_path *path = NULL; struct ext4_extent *ex; struct extent_status es; ext4_lblk_t next, next_del, start = 0, end = 0; ext4_lblk_t last = block + num; int exists, depth = 0, err = 0; unsigned int flags = 0; unsigned char blksize_bits = inode->i_sb->s_blocksize_bits; while (block < last && block != EXT_MAX_BLOCKS) { num = last - block; /* find extent for this block */ down_read(&EXT4_I(inode)->i_data_sem); path = ext4_find_extent(inode, block, &path, 0); if (IS_ERR(path)) { up_read(&EXT4_I(inode)->i_data_sem); err = PTR_ERR(path); path = NULL; break; } depth = ext_depth(inode); if (unlikely(path[depth].p_hdr == NULL)) { up_read(&EXT4_I(inode)->i_data_sem); EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth); err = -EFSCORRUPTED; break; } ex = path[depth].p_ext; next = ext4_ext_next_allocated_block(path); flags = 0; exists = 0; if (!ex) { /* there is no extent yet, so try to allocate * all requested space */ start = block; end = block + num; } else if (le32_to_cpu(ex->ee_block) > block) { /* need to allocate space before found extent */ start = block; end = le32_to_cpu(ex->ee_block); if (block + num < end) end = block + num; } else if (block >= le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex)) { /* need to allocate space after found extent */ start = block; end = block + num; if (end 
>= next) end = next; } else if (block >= le32_to_cpu(ex->ee_block)) { /* * some part of requested space is covered * by found extent */ start = block; end = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex); if (block + num < end) end = block + num; exists = 1; } else { BUG(); } BUG_ON(end <= start); if (!exists) { es.es_lblk = start; es.es_len = end - start; es.es_pblk = 0; } else { es.es_lblk = le32_to_cpu(ex->ee_block); es.es_len = ext4_ext_get_actual_len(ex); es.es_pblk = ext4_ext_pblock(ex); if (ext4_ext_is_unwritten(ex)) flags |= FIEMAP_EXTENT_UNWRITTEN; } /* * Find delayed extent and update es accordingly. We call * it even in !exists case to find out whether es is the * last existing extent or not. */ next_del = ext4_find_delayed_extent(inode, &es); if (!exists && next_del) { exists = 1; flags |= (FIEMAP_EXTENT_DELALLOC | FIEMAP_EXTENT_UNKNOWN); } up_read(&EXT4_I(inode)->i_data_sem); if (unlikely(es.es_len == 0)) { EXT4_ERROR_INODE(inode, "es.es_len == 0"); err = -EFSCORRUPTED; break; } /* * This is possible iff next == next_del == EXT_MAX_BLOCKS. * we need to check next == EXT_MAX_BLOCKS because it is * possible that an extent is with unwritten and delayed * status due to when an extent is delayed allocated and * is allocated by fallocate status tree will track both of * them in a extent. * * So we could return a unwritten and delayed extent, and * its block is equal to 'next'. */ if (next == next_del && next == EXT_MAX_BLOCKS) { flags |= FIEMAP_EXTENT_LAST; if (unlikely(next_del != EXT_MAX_BLOCKS || next != EXT_MAX_BLOCKS)) { EXT4_ERROR_INODE(inode, "next extent == %u, next " "delalloc extent = %u", next, next_del); err = -EFSCORRUPTED; break; } } if (exists) { err = fiemap_fill_next_extent(fieinfo, (__u64)es.es_lblk << blksize_bits, (__u64)es.es_pblk << blksize_bits, (__u64)es.es_len << blksize_bits, flags); if (err < 0) break; if (err == 1) { err = 0; break; } } block = es.es_lblk + es.es_len; } ext4_ext_drop_refs(path); kfree(path); return err; } /* * ext4_ext_put_gap_in_cache: * calculate boundaries of the gap that the requested block fits into * and cache this gap */ static void ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path, ext4_lblk_t block) { int depth = ext_depth(inode); ext4_lblk_t len; ext4_lblk_t lblock; struct ext4_extent *ex; struct extent_status es; ex = path[depth].p_ext; if (ex == NULL) { /* there is no extent yet, so gap is [0;-] */ lblock = 0; len = EXT_MAX_BLOCKS; ext_debug("cache gap(whole file):"); } else if (block < le32_to_cpu(ex->ee_block)) { lblock = block; len = le32_to_cpu(ex->ee_block) - block; ext_debug("cache gap(before): %u [%u:%u]", block, le32_to_cpu(ex->ee_block), ext4_ext_get_actual_len(ex)); } else if (block >= le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex)) { ext4_lblk_t next; lblock = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex); next = ext4_ext_next_allocated_block(path); ext_debug("cache gap(after): [%u:%u] %u", le32_to_cpu(ex->ee_block), ext4_ext_get_actual_len(ex), block); BUG_ON(next == lblock); len = next - lblock; } else { BUG(); } ext4_es_find_delayed_extent_range(inode, lblock, lblock + len - 1, &es); if (es.es_len) { /* There's delayed extent containing lblock? */ if (es.es_lblk <= lblock) return; len = min(es.es_lblk - lblock, len); } ext_debug(" -> %u:%u\n", lblock, len); ext4_es_insert_extent(inode, lblock, len, ~0, EXTENT_STATUS_HOLE); } /* * ext4_ext_rm_idx: * removes index from the index block. 
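 *
 * For illustration (hypothetical index block): if the index being removed
 * is followed by two more entries in the same block, the memmove() below
 * shifts those two entries left by one slot, eh_entries drops by one, and
 * the now unreferenced leaf block is returned to the allocator with
 * EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET.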
*/ static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode, struct ext4_ext_path *path, int depth) { int err; ext4_fsblk_t leaf; /* free index block */ depth--; path = path + depth; leaf = ext4_idx_pblock(path->p_idx); if (unlikely(path->p_hdr->eh_entries == 0)) { EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0"); return -EFSCORRUPTED; } err = ext4_ext_get_access(handle, inode, path); if (err) return err; if (path->p_idx != EXT_LAST_INDEX(path->p_hdr)) { int len = EXT_LAST_INDEX(path->p_hdr) - path->p_idx; len *= sizeof(struct ext4_extent_idx); memmove(path->p_idx, path->p_idx + 1, len); } le16_add_cpu(&path->p_hdr->eh_entries, -1); err = ext4_ext_dirty(handle, inode, path); if (err) return err; ext_debug("index is empty, remove it, free block %llu\n", leaf); trace_ext4_ext_rm_idx(inode, leaf); ext4_free_blocks(handle, inode, NULL, leaf, 1, EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET); while (--depth >= 0) { if (path->p_idx != EXT_FIRST_INDEX(path->p_hdr)) break; path--; err = ext4_ext_get_access(handle, inode, path); if (err) break; path->p_idx->ei_block = (path+1)->p_idx->ei_block; err = ext4_ext_dirty(handle, inode, path); if (err) break; } return err; } /* * ext4_ext_calc_credits_for_single_extent: * This routine returns max. credits that needed to insert an extent * to the extent tree. * When pass the actual path, the caller should calculate credits * under i_data_sem. */ int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks, struct ext4_ext_path *path) { if (path) { int depth = ext_depth(inode); int ret = 0; /* probably there is space in leaf? */ if (le16_to_cpu(path[depth].p_hdr->eh_entries) < le16_to_cpu(path[depth].p_hdr->eh_max)) { /* * There are some space in the leaf tree, no * need to account for leaf block credit * * bitmaps and block group descriptor blocks * and other metadata blocks still need to be * accounted. */ /* 1 bitmap, 1 block group descriptor */ ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb); return ret; } } return ext4_chunk_trans_blocks(inode, nrblocks); } /* * How many index/leaf blocks need to change/allocate to add @extents extents? * * If we add a single extent, then in the worse case, each tree level * index/leaf need to be changed in case of the tree split. * * If more extents are inserted, they could cause the whole tree split more * than once, but this is really rare. */ int ext4_ext_index_trans_blocks(struct inode *inode, int extents) { int index; int depth; /* If we are converting the inline data, only one is needed here. */ if (ext4_has_inline_data(inode)) return 1; depth = ext_depth(inode); if (extents <= 1) index = depth * 2; else index = depth * 3; return index; } static inline int get_default_free_blocks_flags(struct inode *inode) { if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) return EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET; else if (ext4_should_journal_data(inode)) return EXT4_FREE_BLOCKS_FORGET; return 0; } static int ext4_remove_blocks(handle_t *handle, struct inode *inode, struct ext4_extent *ex, long long *partial_cluster, ext4_lblk_t from, ext4_lblk_t to) { struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); unsigned short ee_len = ext4_ext_get_actual_len(ex); ext4_fsblk_t pblk; int flags = get_default_free_blocks_flags(inode); /* * For bigalloc file systems, we never free a partial cluster * at the beginning of the extent. 
Instead, we make a note * that we tried freeing the cluster, and check to see if we * need to free it on a subsequent call to ext4_remove_blocks, * or at the end of ext4_ext_rm_leaf or ext4_ext_remove_space. */ flags |= EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER; trace_ext4_remove_blocks(inode, ex, from, to, *partial_cluster); /* * If we have a partial cluster, and it's different from the * cluster of the last block, we need to explicitly free the * partial cluster here. */ pblk = ext4_ext_pblock(ex) + ee_len - 1; if (*partial_cluster > 0 && *partial_cluster != (long long) EXT4_B2C(sbi, pblk)) { ext4_free_blocks(handle, inode, NULL, EXT4_C2B(sbi, *partial_cluster), sbi->s_cluster_ratio, flags); *partial_cluster = 0; } #ifdef EXTENTS_STATS { struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); spin_lock(&sbi->s_ext_stats_lock); sbi->s_ext_blocks += ee_len; sbi->s_ext_extents++; if (ee_len < sbi->s_ext_min) sbi->s_ext_min = ee_len; if (ee_len > sbi->s_ext_max) sbi->s_ext_max = ee_len; if (ext_depth(inode) > sbi->s_depth_max) sbi->s_depth_max = ext_depth(inode); spin_unlock(&sbi->s_ext_stats_lock); } #endif if (from >= le32_to_cpu(ex->ee_block) && to == le32_to_cpu(ex->ee_block) + ee_len - 1) { /* tail removal */ ext4_lblk_t num; long long first_cluster; num = le32_to_cpu(ex->ee_block) + ee_len - from; pblk = ext4_ext_pblock(ex) + ee_len - num; /* * Usually we want to free partial cluster at the end of the * extent, except for the situation when the cluster is still * used by any other extent (partial_cluster is negative). */ if (*partial_cluster < 0 && *partial_cluster == -(long long) EXT4_B2C(sbi, pblk+num-1)) flags |= EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER; ext_debug("free last %u blocks starting %llu partial %lld\n", num, pblk, *partial_cluster); ext4_free_blocks(handle, inode, NULL, pblk, num, flags); /* * If the block range to be freed didn't start at the * beginning of a cluster, and we removed the entire * extent and the cluster is not used by any other extent, * save the partial cluster here, since we might need to * delete if we determine that the truncate or punch hole * operation has removed all of the blocks in the cluster. * If that cluster is used by another extent, preserve its * negative value so it isn't freed later on. * * If the whole extent wasn't freed, we've reached the * start of the truncated/punched region and have finished * removing blocks. If there's a partial cluster here it's * shared with the remainder of the extent and is no longer * a candidate for removal. */ if (EXT4_PBLK_COFF(sbi, pblk) && ee_len == num) { first_cluster = (long long) EXT4_B2C(sbi, pblk); if (first_cluster != -*partial_cluster) *partial_cluster = first_cluster; } else { *partial_cluster = 0; } } else ext4_error(sbi->s_sb, "strange request: removal(2) " "%u-%u from %u:%u\n", from, to, le32_to_cpu(ex->ee_block), ee_len); return 0; } /* * ext4_ext_rm_leaf() Removes the extents associated with the * blocks appearing between "start" and "end". Both "start" * and "end" must appear in the same extent or EIO is returned. * * @handle: The journal handle * @inode: The files inode * @path: The path to the leaf * @partial_cluster: The cluster which we'll have to free if all extents * has been released from it. However, if this value is * negative, it's a cluster just to the right of the * punched region and it must not be freed. 
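 *			For illustration (hypothetical numbers): with a
 *			cluster ratio of 4, a removal whose last freed block
 *			falls in cluster 25 leaves *partial_cluster == 25 if
 *			that cluster may still have to be freed later, and
 *			-25 if the cluster is shared with blocks that must
 *			survive the punch and therefore must be kept.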
* @start: The first block to remove * @end: The last block to remove */ static int ext4_ext_rm_leaf(handle_t *handle, struct inode *inode, struct ext4_ext_path *path, long long *partial_cluster, ext4_lblk_t start, ext4_lblk_t end) { struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); int err = 0, correct_index = 0; int depth = ext_depth(inode), credits; struct ext4_extent_header *eh; ext4_lblk_t a, b; unsigned num; ext4_lblk_t ex_ee_block; unsigned short ex_ee_len; unsigned unwritten = 0; struct ext4_extent *ex; ext4_fsblk_t pblk; /* the header must be checked already in ext4_ext_remove_space() */ ext_debug("truncate since %u in leaf to %u\n", start, end); if (!path[depth].p_hdr) path[depth].p_hdr = ext_block_hdr(path[depth].p_bh); eh = path[depth].p_hdr; if (unlikely(path[depth].p_hdr == NULL)) { EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth); return -EFSCORRUPTED; } /* find where to start removing */ ex = path[depth].p_ext; if (!ex) ex = EXT_LAST_EXTENT(eh); ex_ee_block = le32_to_cpu(ex->ee_block); ex_ee_len = ext4_ext_get_actual_len(ex); trace_ext4_ext_rm_leaf(inode, start, ex, *partial_cluster); while (ex >= EXT_FIRST_EXTENT(eh) && ex_ee_block + ex_ee_len > start) { if (ext4_ext_is_unwritten(ex)) unwritten = 1; else unwritten = 0; ext_debug("remove ext %u:[%d]%d\n", ex_ee_block, unwritten, ex_ee_len); path[depth].p_ext = ex; a = ex_ee_block > start ? ex_ee_block : start; b = ex_ee_block+ex_ee_len - 1 < end ? ex_ee_block+ex_ee_len - 1 : end; ext_debug(" border %u:%u\n", a, b); /* If this extent is beyond the end of the hole, skip it */ if (end < ex_ee_block) { /* * We're going to skip this extent and move to another, * so note that its first cluster is in use to avoid * freeing it when removing blocks. Eventually, the * right edge of the truncated/punched region will * be just to the left. */ if (sbi->s_cluster_ratio > 1) { pblk = ext4_ext_pblock(ex); *partial_cluster = -(long long) EXT4_B2C(sbi, pblk); } ex--; ex_ee_block = le32_to_cpu(ex->ee_block); ex_ee_len = ext4_ext_get_actual_len(ex); continue; } else if (b != ex_ee_block + ex_ee_len - 1) { EXT4_ERROR_INODE(inode, "can not handle truncate %u:%u " "on extent %u:%u", start, end, ex_ee_block, ex_ee_block + ex_ee_len - 1); err = -EFSCORRUPTED; goto out; } else if (a != ex_ee_block) { /* remove tail of the extent */ num = a - ex_ee_block; } else { /* remove whole extent: excellent! */ num = 0; } /* * 3 for leaf, sb, and inode plus 2 (bmap and group * descriptor) for each block group; assume two block * groups plus ex_ee_len/blocks_per_block_group for * the worst case */ credits = 7 + 2*(ex_ee_len/EXT4_BLOCKS_PER_GROUP(inode->i_sb)); if (ex == EXT_FIRST_EXTENT(eh)) { correct_index = 1; credits += (ext_depth(inode)) + 1; } credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb); err = ext4_ext_truncate_extend_restart(handle, inode, credits); if (err) goto out; err = ext4_ext_get_access(handle, inode, path + depth); if (err) goto out; err = ext4_remove_blocks(handle, inode, ex, partial_cluster, a, b); if (err) goto out; if (num == 0) /* this extent is removed; mark slot entirely unused */ ext4_ext_store_pblock(ex, 0); ex->ee_len = cpu_to_le16(num); /* * Do not mark unwritten if all the blocks in the * extent have been removed. 
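		 *
		 * For illustration (hypothetical extent): removing blocks
		 * 30..39 from an unwritten extent 20..39 leaves num = 10,
		 * so the shortened extent 20..29 is marked unwritten again
		 * below; removing 20..39 leaves num == 0 and the slot is
		 * reclaimed from the leaf instead.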
*/ if (unwritten && num) ext4_ext_mark_unwritten(ex); /* * If the extent was completely released, * we need to remove it from the leaf */ if (num == 0) { if (end != EXT_MAX_BLOCKS - 1) { /* * For hole punching, we need to scoot all the * extents up when an extent is removed so that * we dont have blank extents in the middle */ memmove(ex, ex+1, (EXT_LAST_EXTENT(eh) - ex) * sizeof(struct ext4_extent)); /* Now get rid of the one at the end */ memset(EXT_LAST_EXTENT(eh), 0, sizeof(struct ext4_extent)); } le16_add_cpu(&eh->eh_entries, -1); } err = ext4_ext_dirty(handle, inode, path + depth); if (err) goto out; ext_debug("new extent: %u:%u:%llu\n", ex_ee_block, num, ext4_ext_pblock(ex)); ex--; ex_ee_block = le32_to_cpu(ex->ee_block); ex_ee_len = ext4_ext_get_actual_len(ex); } if (correct_index && eh->eh_entries) err = ext4_ext_correct_indexes(handle, inode, path); /* * If there's a partial cluster and at least one extent remains in * the leaf, free the partial cluster if it isn't shared with the * current extent. If it is shared with the current extent * we zero partial_cluster because we've reached the start of the * truncated/punched region and we're done removing blocks. */ if (*partial_cluster > 0 && ex >= EXT_FIRST_EXTENT(eh)) { pblk = ext4_ext_pblock(ex) + ex_ee_len - 1; if (*partial_cluster != (long long) EXT4_B2C(sbi, pblk)) { ext4_free_blocks(handle, inode, NULL, EXT4_C2B(sbi, *partial_cluster), sbi->s_cluster_ratio, get_default_free_blocks_flags(inode)); } *partial_cluster = 0; } /* if this leaf is free, then we should * remove it from index block above */ if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL) err = ext4_ext_rm_idx(handle, inode, path, depth); out: return err; } /* * ext4_ext_more_to_rm: * returns 1 if current index has to be freed (even partial) */ static int ext4_ext_more_to_rm(struct ext4_ext_path *path) { BUG_ON(path->p_idx == NULL); if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr)) return 0; /* * if truncate on deeper level happened, it wasn't partial, * so we have to consider current index for truncation */ if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block) return 0; return 1; } int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start, ext4_lblk_t end) { struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); int depth = ext_depth(inode); struct ext4_ext_path *path = NULL; long long partial_cluster = 0; handle_t *handle; int i = 0, err = 0; ext_debug("truncate since %u to %u\n", start, end); /* probably first extent we're gonna free will be last in block */ handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, depth + 1); if (IS_ERR(handle)) return PTR_ERR(handle); again: trace_ext4_ext_remove_space(inode, start, end, depth); /* * Check if we are removing extents inside the extent tree. If that * is the case, we are going to punch a hole inside the extent tree * so we have to check whether we need to split the extent covering * the last block to remove so we can easily remove the part of it * in ext4_ext_rm_leaf(). 
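	 *
	 * For illustration (hypothetical hole): punching blocks 100..199
	 * out of an extent covering 50..299 first splits it at block 200
	 * into 50..199 and 200..299, so ext4_ext_rm_leaf() only has to
	 * trim the tail of the first piece rather than carve a hole out
	 * of the middle of a single extent.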
*/ if (end < EXT_MAX_BLOCKS - 1) { struct ext4_extent *ex; ext4_lblk_t ee_block, ex_end, lblk; ext4_fsblk_t pblk; /* find extent for or closest extent to this block */ path = ext4_find_extent(inode, end, NULL, EXT4_EX_NOCACHE); if (IS_ERR(path)) { ext4_journal_stop(handle); return PTR_ERR(path); } depth = ext_depth(inode); /* Leaf not may not exist only if inode has no blocks at all */ ex = path[depth].p_ext; if (!ex) { if (depth) { EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth); err = -EFSCORRUPTED; } goto out; } ee_block = le32_to_cpu(ex->ee_block); ex_end = ee_block + ext4_ext_get_actual_len(ex) - 1; /* * See if the last block is inside the extent, if so split * the extent at 'end' block so we can easily remove the * tail of the first part of the split extent in * ext4_ext_rm_leaf(). */ if (end >= ee_block && end < ex_end) { /* * If we're going to split the extent, note that * the cluster containing the block after 'end' is * in use to avoid freeing it when removing blocks. */ if (sbi->s_cluster_ratio > 1) { pblk = ext4_ext_pblock(ex) + end - ee_block + 2; partial_cluster = -(long long) EXT4_B2C(sbi, pblk); } /* * Split the extent in two so that 'end' is the last * block in the first new extent. Also we should not * fail removing space due to ENOSPC so try to use * reserved block if that happens. */ err = ext4_force_split_extent_at(handle, inode, &path, end + 1, 1); if (err < 0) goto out; } else if (sbi->s_cluster_ratio > 1 && end >= ex_end) { /* * If there's an extent to the right its first cluster * contains the immediate right boundary of the * truncated/punched region. Set partial_cluster to * its negative value so it won't be freed if shared * with the current extent. The end < ee_block case * is handled in ext4_ext_rm_leaf(). */ lblk = ex_end + 1; err = ext4_ext_search_right(inode, path, &lblk, &pblk, &ex); if (err) goto out; if (pblk) partial_cluster = -(long long) EXT4_B2C(sbi, pblk); } } /* * We start scanning from right side, freeing all the blocks * after i_size and walking into the tree depth-wise. 
*/ depth = ext_depth(inode); if (path) { int k = i = depth; while (--k > 0) path[k].p_block = le16_to_cpu(path[k].p_hdr->eh_entries)+1; } else { path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1), GFP_NOFS); if (path == NULL) { ext4_journal_stop(handle); return -ENOMEM; } path[0].p_maxdepth = path[0].p_depth = depth; path[0].p_hdr = ext_inode_hdr(inode); i = 0; if (ext4_ext_check(inode, path[0].p_hdr, depth, 0)) { err = -EFSCORRUPTED; goto out; } } err = 0; while (i >= 0 && err == 0) { if (i == depth) { /* this is leaf block */ err = ext4_ext_rm_leaf(handle, inode, path, &partial_cluster, start, end); /* root level has p_bh == NULL, brelse() eats this */ brelse(path[i].p_bh); path[i].p_bh = NULL; i--; continue; } /* this is index block */ if (!path[i].p_hdr) { ext_debug("initialize header\n"); path[i].p_hdr = ext_block_hdr(path[i].p_bh); } if (!path[i].p_idx) { /* this level hasn't been touched yet */ path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr); path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1; ext_debug("init index ptr: hdr 0x%p, num %d\n", path[i].p_hdr, le16_to_cpu(path[i].p_hdr->eh_entries)); } else { /* we were already here, see at next index */ path[i].p_idx--; } ext_debug("level %d - index, first 0x%p, cur 0x%p\n", i, EXT_FIRST_INDEX(path[i].p_hdr), path[i].p_idx); if (ext4_ext_more_to_rm(path + i)) { struct buffer_head *bh; /* go to the next level */ ext_debug("move to level %d (block %llu)\n", i + 1, ext4_idx_pblock(path[i].p_idx)); memset(path + i + 1, 0, sizeof(*path)); bh = read_extent_tree_block(inode, ext4_idx_pblock(path[i].p_idx), depth - i - 1, EXT4_EX_NOCACHE); if (IS_ERR(bh)) { /* should we reset i_size? */ err = PTR_ERR(bh); break; } /* Yield here to deal with large extent trees. * Should be a no-op if we did IO above. */ cond_resched(); if (WARN_ON(i + 1 > depth)) { err = -EFSCORRUPTED; break; } path[i + 1].p_bh = bh; /* save actual number of indexes since this * number is changed at the next iteration */ path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries); i++; } else { /* we finished processing this index, go up */ if (path[i].p_hdr->eh_entries == 0 && i > 0) { /* index is empty, remove it; * handle must be already prepared by the * truncatei_leaf() */ err = ext4_ext_rm_idx(handle, inode, path, i); } /* root level has p_bh == NULL, brelse() eats this */ brelse(path[i].p_bh); path[i].p_bh = NULL; i--; ext_debug("return to level %d\n", i); } } trace_ext4_ext_remove_space_done(inode, start, end, depth, partial_cluster, path->p_hdr->eh_entries); /* * If we still have something in the partial cluster and we have removed * even the first extent, then we should free the blocks in the partial * cluster as well. (This code will only run when there are no leaves * to the immediate left of the truncated/punched region.) 
 */
	if (partial_cluster > 0 && err == 0) {
		/* don't zero partial_cluster since it's not used afterwards */
		ext4_free_blocks(handle, inode, NULL,
				 EXT4_C2B(sbi, partial_cluster),
				 sbi->s_cluster_ratio,
				 get_default_free_blocks_flags(inode));
	}

	/* TODO: flexible tree reduction should be here */
	if (path->p_hdr->eh_entries == 0) {
		/*
		 * truncate to zero freed all the tree,
		 * so we need to correct eh_depth
		 */
		err = ext4_ext_get_access(handle, inode, path);
		if (err == 0) {
			ext_inode_hdr(inode)->eh_depth = 0;
			ext_inode_hdr(inode)->eh_max =
				cpu_to_le16(ext4_ext_space_root(inode, 0));
			err = ext4_ext_dirty(handle, inode, path);
		}
	}
out:
	ext4_ext_drop_refs(path);
	kfree(path);
	path = NULL;
	if (err == -EAGAIN)
		goto again;
	ext4_journal_stop(handle);

	return err;
}

/*
 * called at mount time
 */
void ext4_ext_init(struct super_block *sb)
{
	/*
	 * possible initialization would be here
	 */

	if (ext4_has_feature_extents(sb)) {
#if defined(AGGRESSIVE_TEST) || defined(CHECK_BINSEARCH) || defined(EXTENTS_STATS)
		printk(KERN_INFO "EXT4-fs: file extents enabled"
#ifdef AGGRESSIVE_TEST
		       ", aggressive tests"
#endif
#ifdef CHECK_BINSEARCH
		       ", check binsearch"
#endif
#ifdef EXTENTS_STATS
		       ", stats"
#endif
		       "\n");
#endif
#ifdef EXTENTS_STATS
		spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock);
		EXT4_SB(sb)->s_ext_min = 1 << 30;
		EXT4_SB(sb)->s_ext_max = 0;
#endif
	}
}

/*
 * called at umount time
 */
void ext4_ext_release(struct super_block *sb)
{
	if (!ext4_has_feature_extents(sb))
		return;

#ifdef EXTENTS_STATS
	if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) {
		struct ext4_sb_info *sbi = EXT4_SB(sb);
		printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n",
			sbi->s_ext_blocks, sbi->s_ext_extents,
			sbi->s_ext_blocks / sbi->s_ext_extents);
		printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n",
			sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max);
	}
#endif
}

static int ext4_zeroout_es(struct inode *inode, struct ext4_extent *ex)
{
	ext4_lblk_t  ee_block;
	ext4_fsblk_t ee_pblock;
	unsigned int ee_len;

	ee_block  = le32_to_cpu(ex->ee_block);
	ee_len    = ext4_ext_get_actual_len(ex);
	ee_pblock = ext4_ext_pblock(ex);

	if (ee_len == 0)
		return 0;

	return ext4_es_insert_extent(inode, ee_block, ee_len, ee_pblock,
				     EXTENT_STATUS_WRITTEN);
}

/* FIXME!! we need to try to merge to left or right after zero-out  */
static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
{
	ext4_fsblk_t ee_pblock;
	unsigned int ee_len;
	int ret;

	ee_len    = ext4_ext_get_actual_len(ex);
	ee_pblock = ext4_ext_pblock(ex);

	if (ext4_encrypted_inode(inode))
		return ext4_encrypted_zeroout(inode, ex);

	ret = sb_issue_zeroout(inode->i_sb, ee_pblock, ee_len, GFP_NOFS);
	if (ret > 0)
		ret = 0;

	return ret;
}

/*
 * ext4_split_extent_at() splits an extent at given block.
 *
 * @handle: the journal handle
 * @inode: the file inode
 * @path: the path to the extent
 * @split: the logical block where the extent is split.
 * @split_flags: indicates if the extent could be zeroed out if the split
 *		 fails, and the states (initialized or unwritten) of the new
 *		 extents.
 * @flags: flags used to insert the new extent into the extent tree.
 *
 *
 * Splits extent [a, b] into two extents [a, @split) and [@split, b], states
 * of which are determined by split_flag.
 *
 * There are two cases:
 *  a> the extent is split into two extents.
 *  b> split is not needed, and just mark the extent.
 *
 * return 0 on success.
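 *
 * For illustration (hypothetical extent): splitting an unwritten extent
 * 100..149 at @split == 120 shrinks the original to 100..119 and inserts
 * a second extent 120..149 whose unwritten bit follows
 * EXT4_EXT_MARK_UNWRIT2; calling it with @split == 100 instead only
 * changes the state of the existing extent (case b above).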
*/ static int ext4_split_extent_at(handle_t *handle, struct inode *inode, struct ext4_ext_path **ppath, ext4_lblk_t split, int split_flag, int flags) { struct ext4_ext_path *path = *ppath; ext4_fsblk_t newblock; ext4_lblk_t ee_block; struct ext4_extent *ex, newex, orig_ex, zero_ex; struct ext4_extent *ex2 = NULL; unsigned int ee_len, depth; int err = 0; BUG_ON((split_flag & (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2)) == (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2)); ext_debug("ext4_split_extents_at: inode %lu, logical" "block %llu\n", inode->i_ino, (unsigned long long)split); ext4_ext_show_leaf(inode, path); depth = ext_depth(inode); ex = path[depth].p_ext; ee_block = le32_to_cpu(ex->ee_block); ee_len = ext4_ext_get_actual_len(ex); newblock = split - ee_block + ext4_ext_pblock(ex); BUG_ON(split < ee_block || split >= (ee_block + ee_len)); BUG_ON(!ext4_ext_is_unwritten(ex) && split_flag & (EXT4_EXT_MAY_ZEROOUT | EXT4_EXT_MARK_UNWRIT1 | EXT4_EXT_MARK_UNWRIT2)); err = ext4_ext_get_access(handle, inode, path + depth); if (err) goto out; if (split == ee_block) { /* * case b: block @split is the block that the extent begins with * then we just change the state of the extent, and splitting * is not needed. */ if (split_flag & EXT4_EXT_MARK_UNWRIT2) ext4_ext_mark_unwritten(ex); else ext4_ext_mark_initialized(ex); if (!(flags & EXT4_GET_BLOCKS_PRE_IO)) ext4_ext_try_to_merge(handle, inode, path, ex); err = ext4_ext_dirty(handle, inode, path + path->p_depth); goto out; } /* case a */ memcpy(&orig_ex, ex, sizeof(orig_ex)); ex->ee_len = cpu_to_le16(split - ee_block); if (split_flag & EXT4_EXT_MARK_UNWRIT1) ext4_ext_mark_unwritten(ex); /* * path may lead to new leaf, not to original leaf any more * after ext4_ext_insert_extent() returns, */ err = ext4_ext_dirty(handle, inode, path + depth); if (err) goto fix_extent_len; ex2 = &newex; ex2->ee_block = cpu_to_le32(split); ex2->ee_len = cpu_to_le16(ee_len - (split - ee_block)); ext4_ext_store_pblock(ex2, newblock); if (split_flag & EXT4_EXT_MARK_UNWRIT2) ext4_ext_mark_unwritten(ex2); err = ext4_ext_insert_extent(handle, inode, ppath, &newex, flags); if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) { if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) { if (split_flag & EXT4_EXT_DATA_VALID1) { err = ext4_ext_zeroout(inode, ex2); zero_ex.ee_block = ex2->ee_block; zero_ex.ee_len = cpu_to_le16( ext4_ext_get_actual_len(ex2)); ext4_ext_store_pblock(&zero_ex, ext4_ext_pblock(ex2)); } else { err = ext4_ext_zeroout(inode, ex); zero_ex.ee_block = ex->ee_block; zero_ex.ee_len = cpu_to_le16( ext4_ext_get_actual_len(ex)); ext4_ext_store_pblock(&zero_ex, ext4_ext_pblock(ex)); } } else { err = ext4_ext_zeroout(inode, &orig_ex); zero_ex.ee_block = orig_ex.ee_block; zero_ex.ee_len = cpu_to_le16( ext4_ext_get_actual_len(&orig_ex)); ext4_ext_store_pblock(&zero_ex, ext4_ext_pblock(&orig_ex)); } if (err) goto fix_extent_len; /* update the extent length and mark as initialized */ ex->ee_len = cpu_to_le16(ee_len); ext4_ext_try_to_merge(handle, inode, path, ex); err = ext4_ext_dirty(handle, inode, path + path->p_depth); if (err) goto fix_extent_len; /* update extent status tree */ err = ext4_zeroout_es(inode, &zero_ex); goto out; } else if (err) goto fix_extent_len; out: ext4_ext_show_leaf(inode, path); return err; fix_extent_len: ex->ee_len = orig_ex.ee_len; ext4_ext_dirty(handle, inode, path + path->p_depth); return err; } /* * ext4_split_extents() splits an extent and mark extent which is covered * by @map as split_flags indicates * * It may result in 
splitting the extent into multiple extents (up to three)
 * There are three possibilities:
 *   a> There is no split required
 *   b> Splits in two extents: Split is happening at either end of the extent
 *   c> Splits in three extents: Someone is splitting in the middle of the
 *      extent
 *
 */
static int ext4_split_extent(handle_t *handle,
			      struct inode *inode,
			      struct ext4_ext_path **ppath,
			      struct ext4_map_blocks *map,
			      int split_flag,
			      int flags)
{
	struct ext4_ext_path *path = *ppath;
	ext4_lblk_t ee_block;
	struct ext4_extent *ex;
	unsigned int ee_len, depth;
	int err = 0;
	int unwritten;
	int split_flag1, flags1;
	int allocated = map->m_len;

	depth = ext_depth(inode);
	ex = path[depth].p_ext;
	ee_block = le32_to_cpu(ex->ee_block);
	ee_len = ext4_ext_get_actual_len(ex);
	unwritten = ext4_ext_is_unwritten(ex);

	if (map->m_lblk + map->m_len < ee_block + ee_len) {
		split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT;
		flags1 = flags | EXT4_GET_BLOCKS_PRE_IO;
		if (unwritten)
			split_flag1 |= EXT4_EXT_MARK_UNWRIT1 |
				       EXT4_EXT_MARK_UNWRIT2;
		if (split_flag & EXT4_EXT_DATA_VALID2)
			split_flag1 |= EXT4_EXT_DATA_VALID1;
		err = ext4_split_extent_at(handle, inode, ppath,
				map->m_lblk + map->m_len, split_flag1, flags1);
		if (err)
			goto out;
	} else {
		allocated = ee_len - (map->m_lblk - ee_block);
	}
	/*
	 * Updating the path is required because the previous
	 * ext4_split_extent_at() may have resulted in a split of the
	 * original leaf or in extent zeroout.
	 */
	path = ext4_find_extent(inode, map->m_lblk, ppath, 0);
	if (IS_ERR(path))
		return PTR_ERR(path);
	depth = ext_depth(inode);
	ex = path[depth].p_ext;
	if (!ex) {
		EXT4_ERROR_INODE(inode, "unexpected hole at %lu",
				 (unsigned long) map->m_lblk);
		return -EFSCORRUPTED;
	}
	unwritten = ext4_ext_is_unwritten(ex);
	split_flag1 = 0;

	if (map->m_lblk >= ee_block) {
		split_flag1 = split_flag & EXT4_EXT_DATA_VALID2;
		if (unwritten) {
			split_flag1 |= EXT4_EXT_MARK_UNWRIT1;
			split_flag1 |= split_flag & (EXT4_EXT_MAY_ZEROOUT |
						     EXT4_EXT_MARK_UNWRIT2);
		}
		err = ext4_split_extent_at(handle, inode, ppath,
				map->m_lblk, split_flag1, flags);
		if (err)
			goto out;
	}

	ext4_ext_show_leaf(inode, path);
out:
	return err ? err : allocated;
}

/*
 * This function is called by ext4_ext_map_blocks() if someone tries to write
 * to an unwritten extent. It may result in splitting the unwritten
 * extent into multiple extents (up to three - one initialized and two
 * unwritten).
 * There are three possibilities:
 *   a> There is no split required: Entire extent should be initialized
 *   b> Splits in two extents: Write is happening at either end of the extent
 *   c> Splits in three extents: Someone is writing in the middle of the extent
 *
 * Pre-conditions:
 *  - The extent pointed to by 'path' is unwritten.
 *  - The extent pointed to by 'path' contains a superset
 *    of the logical span [map->m_lblk, map->m_lblk + map->m_len).
 *
 * Post-conditions on success:
 *  - the returned value is the number of blocks beyond map->m_lblk
 *    that are allocated and initialized.
 *    It is guaranteed to be >= map->m_len.
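 *
 *  Example (illustrative numbers): a write of map->m_lblk = 16,
 *  map->m_len = 16 into an unwritten extent covering [0, 64) typically
 *  leaves unwritten [0, 16), initialized [16, 32) and unwritten [32, 64);
 *  the returned count of initialized blocks starting at map->m_lblk is
 *  then at least 16.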
*/ static int ext4_ext_convert_to_initialized(handle_t *handle, struct inode *inode, struct ext4_map_blocks *map, struct ext4_ext_path **ppath, int flags) { struct ext4_ext_path *path = *ppath; struct ext4_sb_info *sbi; struct ext4_extent_header *eh; struct ext4_map_blocks split_map; struct ext4_extent zero_ex; struct ext4_extent *ex, *abut_ex; ext4_lblk_t ee_block, eof_block; unsigned int ee_len, depth, map_len = map->m_len; int allocated = 0, max_zeroout = 0; int err = 0; int split_flag = 0; ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical" "block %llu, max_blocks %u\n", inode->i_ino, (unsigned long long)map->m_lblk, map_len); sbi = EXT4_SB(inode->i_sb); eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits; if (eof_block < map->m_lblk + map_len) eof_block = map->m_lblk + map_len; depth = ext_depth(inode); eh = path[depth].p_hdr; ex = path[depth].p_ext; ee_block = le32_to_cpu(ex->ee_block); ee_len = ext4_ext_get_actual_len(ex); zero_ex.ee_len = 0; trace_ext4_ext_convert_to_initialized_enter(inode, map, ex); /* Pre-conditions */ BUG_ON(!ext4_ext_is_unwritten(ex)); BUG_ON(!in_range(map->m_lblk, ee_block, ee_len)); /* * Attempt to transfer newly initialized blocks from the currently * unwritten extent to its neighbor. This is much cheaper * than an insertion followed by a merge as those involve costly * memmove() calls. Transferring to the left is the common case in * steady state for workloads doing fallocate(FALLOC_FL_KEEP_SIZE) * followed by append writes. * * Limitations of the current logic: * - L1: we do not deal with writes covering the whole extent. * This would require removing the extent if the transfer * is possible. * - L2: we only attempt to merge with an extent stored in the * same extent tree node. */ if ((map->m_lblk == ee_block) && /* See if we can merge left */ (map_len < ee_len) && /*L1*/ (ex > EXT_FIRST_EXTENT(eh))) { /*L2*/ ext4_lblk_t prev_lblk; ext4_fsblk_t prev_pblk, ee_pblk; unsigned int prev_len; abut_ex = ex - 1; prev_lblk = le32_to_cpu(abut_ex->ee_block); prev_len = ext4_ext_get_actual_len(abut_ex); prev_pblk = ext4_ext_pblock(abut_ex); ee_pblk = ext4_ext_pblock(ex); /* * A transfer of blocks from 'ex' to 'abut_ex' is allowed * upon those conditions: * - C1: abut_ex is initialized, * - C2: abut_ex is logically abutting ex, * - C3: abut_ex is physically abutting ex, * - C4: abut_ex can receive the additional blocks without * overflowing the (initialized) length limit. 
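		 *
		 * Example (illustrative numbers): abut_ex is an initialized
		 * extent covering logical blocks 90..99 at physical blocks
		 * 1090..1099, and ex is unwritten starting at logical 100 /
		 * physical 1100.  A write starting at block 100 satisfies
		 * C1-C4, so map_len blocks are transferred by advancing the
		 * start of ex and growing abut_ex, with no split or insert.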
*/ if ((!ext4_ext_is_unwritten(abut_ex)) && /*C1*/ ((prev_lblk + prev_len) == ee_block) && /*C2*/ ((prev_pblk + prev_len) == ee_pblk) && /*C3*/ (prev_len < (EXT_INIT_MAX_LEN - map_len))) { /*C4*/ err = ext4_ext_get_access(handle, inode, path + depth); if (err) goto out; trace_ext4_ext_convert_to_initialized_fastpath(inode, map, ex, abut_ex); /* Shift the start of ex by 'map_len' blocks */ ex->ee_block = cpu_to_le32(ee_block + map_len); ext4_ext_store_pblock(ex, ee_pblk + map_len); ex->ee_len = cpu_to_le16(ee_len - map_len); ext4_ext_mark_unwritten(ex); /* Restore the flag */ /* Extend abut_ex by 'map_len' blocks */ abut_ex->ee_len = cpu_to_le16(prev_len + map_len); /* Result: number of initialized blocks past m_lblk */ allocated = map_len; } } else if (((map->m_lblk + map_len) == (ee_block + ee_len)) && (map_len < ee_len) && /*L1*/ ex < EXT_LAST_EXTENT(eh)) { /*L2*/ /* See if we can merge right */ ext4_lblk_t next_lblk; ext4_fsblk_t next_pblk, ee_pblk; unsigned int next_len; abut_ex = ex + 1; next_lblk = le32_to_cpu(abut_ex->ee_block); next_len = ext4_ext_get_actual_len(abut_ex); next_pblk = ext4_ext_pblock(abut_ex); ee_pblk = ext4_ext_pblock(ex); /* * A transfer of blocks from 'ex' to 'abut_ex' is allowed * upon those conditions: * - C1: abut_ex is initialized, * - C2: abut_ex is logically abutting ex, * - C3: abut_ex is physically abutting ex, * - C4: abut_ex can receive the additional blocks without * overflowing the (initialized) length limit. */ if ((!ext4_ext_is_unwritten(abut_ex)) && /*C1*/ ((map->m_lblk + map_len) == next_lblk) && /*C2*/ ((ee_pblk + ee_len) == next_pblk) && /*C3*/ (next_len < (EXT_INIT_MAX_LEN - map_len))) { /*C4*/ err = ext4_ext_get_access(handle, inode, path + depth); if (err) goto out; trace_ext4_ext_convert_to_initialized_fastpath(inode, map, ex, abut_ex); /* Shift the start of abut_ex by 'map_len' blocks */ abut_ex->ee_block = cpu_to_le32(next_lblk - map_len); ext4_ext_store_pblock(abut_ex, next_pblk - map_len); ex->ee_len = cpu_to_le16(ee_len - map_len); ext4_ext_mark_unwritten(ex); /* Restore the flag */ /* Extend abut_ex by 'map_len' blocks */ abut_ex->ee_len = cpu_to_le16(next_len + map_len); /* Result: number of initialized blocks past m_lblk */ allocated = map_len; } } if (allocated) { /* Mark the block containing both extents as dirty */ ext4_ext_dirty(handle, inode, path + depth); /* Update path to point to the right extent */ path[depth].p_ext = abut_ex; goto out; } else allocated = ee_len - (map->m_lblk - ee_block); WARN_ON(map->m_lblk < ee_block); /* * It is safe to convert extent to initialized via explicit * zeroout only if extent is fully inside i_size or new_size. */ split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0; if (EXT4_EXT_MAY_ZEROOUT & split_flag) max_zeroout = sbi->s_extent_max_zeroout_kb >> (inode->i_sb->s_blocksize_bits - 10); if (ext4_encrypted_inode(inode)) max_zeroout = 0; /* If extent is less than s_max_zeroout_kb, zeroout directly */ if (max_zeroout && (ee_len <= max_zeroout)) { err = ext4_ext_zeroout(inode, ex); if (err) goto out; zero_ex.ee_block = ex->ee_block; zero_ex.ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)); ext4_ext_store_pblock(&zero_ex, ext4_ext_pblock(ex)); err = ext4_ext_get_access(handle, inode, path + depth); if (err) goto out; ext4_ext_mark_initialized(ex); ext4_ext_try_to_merge(handle, inode, path, ex); err = ext4_ext_dirty(handle, inode, path + path->p_depth); goto out; } /* * four cases: * 1. split the extent into three extents. * 2. 
split the extent into two extents, zeroout the first half.
	 * 3. split the extent into two extents, zeroout the second half.
	 * 4. split the extent into two extents without zeroout.
	 */
	split_map.m_lblk = map->m_lblk;
	split_map.m_len = map->m_len;

	if (max_zeroout && (allocated > map->m_len)) {
		if (allocated <= max_zeroout) {
			/* case 3 */
			zero_ex.ee_block = cpu_to_le32(map->m_lblk);
			zero_ex.ee_len = cpu_to_le16(allocated);
			ext4_ext_store_pblock(&zero_ex,
				ext4_ext_pblock(ex) + map->m_lblk - ee_block);
			err = ext4_ext_zeroout(inode, &zero_ex);
			if (err)
				goto out;
			split_map.m_lblk = map->m_lblk;
			split_map.m_len = allocated;
		} else if (map->m_lblk - ee_block + map->m_len <
							max_zeroout) {
			/* case 2 */
			if (map->m_lblk != ee_block) {
				zero_ex.ee_block = ex->ee_block;
				zero_ex.ee_len = cpu_to_le16(map->m_lblk -
							ee_block);
				ext4_ext_store_pblock(&zero_ex,
						      ext4_ext_pblock(ex));
				err = ext4_ext_zeroout(inode, &zero_ex);
				if (err)
					goto out;
			}

			split_map.m_lblk = ee_block;
			split_map.m_len = map->m_lblk - ee_block + map->m_len;
			allocated = map->m_len;
		}
	}

	err = ext4_split_extent(handle, inode, ppath, &split_map, split_flag,
				flags);
	if (err > 0)
		err = 0;
out:
	/* If we have gotten a failure, don't zero out status tree */
	if (!err)
		err = ext4_zeroout_es(inode, &zero_ex);
	return err ? err : allocated;
}

/*
 * This function is called by ext4_ext_map_blocks() from
 * ext4_get_blocks_dio_write() when DIO needs to write
 * to an unwritten extent.
 *
 * Writing to an unwritten extent may result in splitting the unwritten
 * extent into multiple initialized/unwritten extents (up to three).
 * There are three possibilities:
 *   a> There is no split required: Entire extent should be unwritten
 *   b> Splits in two extents: Write is happening at either end of the extent
 *   c> Splits in three extents: Someone is writing in the middle of the extent
 *
 * This works the same way in the case of initialized -> unwritten conversion.
 *
 * One or more index blocks may be needed if the extent tree grows after
 * the unwritten extent split. To prevent ENOSPC from occurring at IO
 * completion time, we need to split the unwritten extent before the DIO
 * is submitted. The unwritten extent will be split into (at most) three
 * unwritten extents. After the IO completes, the part that was filled
 * will be converted to initialized by the end_io callback, via
 * ext4_convert_unwritten_extents().
 *
 * Returns the size of the unwritten extent to be written on success.
 */
static int ext4_split_convert_extents(handle_t *handle,
					struct inode *inode,
					struct ext4_map_blocks *map,
					struct ext4_ext_path **ppath,
					int flags)
{
	struct ext4_ext_path *path = *ppath;
	ext4_lblk_t eof_block;
	ext4_lblk_t ee_block;
	struct ext4_extent *ex;
	unsigned int ee_len;
	int split_flag = 0, depth;

	ext_debug("%s: inode %lu, logical block %llu, max_blocks %u\n",
		  __func__, inode->i_ino,
		  (unsigned long long)map->m_lblk, map->m_len);

	eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
		inode->i_sb->s_blocksize_bits;
	if (eof_block < map->m_lblk + map->m_len)
		eof_block = map->m_lblk + map->m_len;
	/*
	 * It is safe to convert an extent to initialized via explicit
	 * zeroout only if the extent is fully inside i_size or new_size.
	 */
	depth = ext_depth(inode);
	ex = path[depth].p_ext;
	ee_block = le32_to_cpu(ex->ee_block);
	ee_len = ext4_ext_get_actual_len(ex);

	/* Convert to unwritten */
	if (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN) {
		split_flag |= EXT4_EXT_DATA_VALID1;
	/* Convert to initialized */
	} else if (flags & EXT4_GET_BLOCKS_CONVERT) {
		split_flag |= ee_block + ee_len <= eof_block ?
EXT4_EXT_MAY_ZEROOUT : 0; split_flag |= (EXT4_EXT_MARK_UNWRIT2 | EXT4_EXT_DATA_VALID2); } flags |= EXT4_GET_BLOCKS_PRE_IO; return ext4_split_extent(handle, inode, ppath, map, split_flag, flags); } static int ext4_convert_unwritten_extents_endio(handle_t *handle, struct inode *inode, struct ext4_map_blocks *map, struct ext4_ext_path **ppath) { struct ext4_ext_path *path = *ppath; struct ext4_extent *ex; ext4_lblk_t ee_block; unsigned int ee_len; int depth; int err = 0; depth = ext_depth(inode); ex = path[depth].p_ext; ee_block = le32_to_cpu(ex->ee_block); ee_len = ext4_ext_get_actual_len(ex); ext_debug("ext4_convert_unwritten_extents_endio: inode %lu, logical" "block %llu, max_blocks %u\n", inode->i_ino, (unsigned long long)ee_block, ee_len); /* If extent is larger than requested it is a clear sign that we still * have some extent state machine issues left. So extent_split is still * required. * TODO: Once all related issues will be fixed this situation should be * illegal. */ if (ee_block != map->m_lblk || ee_len > map->m_len) { #ifdef EXT4_DEBUG ext4_warning("Inode (%ld) finished: extent logical block %llu," " len %u; IO logical block %llu, len %u\n", inode->i_ino, (unsigned long long)ee_block, ee_len, (unsigned long long)map->m_lblk, map->m_len); #endif err = ext4_split_convert_extents(handle, inode, map, ppath, EXT4_GET_BLOCKS_CONVERT); if (err < 0) return err; path = ext4_find_extent(inode, map->m_lblk, ppath, 0); if (IS_ERR(path)) return PTR_ERR(path); depth = ext_depth(inode); ex = path[depth].p_ext; } err = ext4_ext_get_access(handle, inode, path + depth); if (err) goto out; /* first mark the extent as initialized */ ext4_ext_mark_initialized(ex); /* note: ext4_ext_correct_indexes() isn't needed here because * borders are not changed */ ext4_ext_try_to_merge(handle, inode, path, ex); /* Mark modified extent as dirty */ err = ext4_ext_dirty(handle, inode, path + path->p_depth); out: ext4_ext_show_leaf(inode, path); return err; } static void unmap_underlying_metadata_blocks(struct block_device *bdev, sector_t block, int count) { int i; for (i = 0; i < count; i++) unmap_underlying_metadata(bdev, block + i); } /* * Handle EOFBLOCKS_FL flag, clearing it if necessary */ static int check_eofblocks_fl(handle_t *handle, struct inode *inode, ext4_lblk_t lblk, struct ext4_ext_path *path, unsigned int len) { int i, depth; struct ext4_extent_header *eh; struct ext4_extent *last_ex; if (!ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS)) return 0; depth = ext_depth(inode); eh = path[depth].p_hdr; /* * We're going to remove EOFBLOCKS_FL entirely in future so we * do not care for this case anymore. Simply remove the flag * if there are no extents. */ if (unlikely(!eh->eh_entries)) goto out; last_ex = EXT_LAST_EXTENT(eh); /* * We should clear the EOFBLOCKS_FL flag if we are writing the * last block in the last extent in the file. We test this by * first checking to see if the caller to * ext4_ext_get_blocks() was interested in the last block (or * a block beyond the last block) in the current extent. If * this turns out to be false, we can bail out from this * function immediately. */ if (lblk + len < le32_to_cpu(last_ex->ee_block) + ext4_ext_get_actual_len(last_ex)) return 0; /* * If the caller does appear to be planning to write at or * beyond the end of the current extent, we then test to see * if the current extent is the last extent in the file, by * checking to make sure it was reached via the rightmost node * at each level of the tree. 
*/ for (i = depth-1; i >= 0; i--) if (path[i].p_idx != EXT_LAST_INDEX(path[i].p_hdr)) return 0; out: ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS); return ext4_mark_inode_dirty(handle, inode); } /** * ext4_find_delalloc_range: find delayed allocated block in the given range. * * Return 1 if there is a delalloc block in the range, otherwise 0. */ int ext4_find_delalloc_range(struct inode *inode, ext4_lblk_t lblk_start, ext4_lblk_t lblk_end) { struct extent_status es; ext4_es_find_delayed_extent_range(inode, lblk_start, lblk_end, &es); if (es.es_len == 0) return 0; /* there is no delay extent in this tree */ else if (es.es_lblk <= lblk_start && lblk_start < es.es_lblk + es.es_len) return 1; else if (lblk_start <= es.es_lblk && es.es_lblk <= lblk_end) return 1; else return 0; } int ext4_find_delalloc_cluster(struct inode *inode, ext4_lblk_t lblk) { struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); ext4_lblk_t lblk_start, lblk_end; lblk_start = EXT4_LBLK_CMASK(sbi, lblk); lblk_end = lblk_start + sbi->s_cluster_ratio - 1; return ext4_find_delalloc_range(inode, lblk_start, lblk_end); } /** * Determines how many complete clusters (out of those specified by the 'map') * are under delalloc and were reserved quota for. * This function is called when we are writing out the blocks that were * originally written with their allocation delayed, but then the space was * allocated using fallocate() before the delayed allocation could be resolved. * The cases to look for are: * ('=' indicated delayed allocated blocks * '-' indicates non-delayed allocated blocks) * (a) partial clusters towards beginning and/or end outside of allocated range * are not delalloc'ed. * Ex: * |----c---=|====c====|====c====|===-c----| * |++++++ allocated ++++++| * ==> 4 complete clusters in above example * * (b) partial cluster (outside of allocated range) towards either end is * marked for delayed allocation. In this case, we will exclude that * cluster. * Ex: * |----====c========|========c========| * |++++++ allocated ++++++| * ==> 1 complete clusters in above example * * Ex: * |================c================| * |++++++ allocated ++++++| * ==> 0 complete clusters in above example * * The ext4_da_update_reserve_space will be called only if we * determine here that there were some "entire" clusters that span * this 'allocated' range. * In the non-bigalloc case, this function will just end up returning num_blks * without ever calling ext4_find_delalloc_range. */ static unsigned int get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start, unsigned int num_blks) { struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); ext4_lblk_t alloc_cluster_start, alloc_cluster_end; ext4_lblk_t lblk_from, lblk_to, c_offset; unsigned int allocated_clusters = 0; alloc_cluster_start = EXT4_B2C(sbi, lblk_start); alloc_cluster_end = EXT4_B2C(sbi, lblk_start + num_blks - 1); /* max possible clusters for this allocation */ allocated_clusters = alloc_cluster_end - alloc_cluster_start + 1; trace_ext4_get_reserved_cluster_alloc(inode, lblk_start, num_blks); /* Check towards left side */ c_offset = EXT4_LBLK_COFF(sbi, lblk_start); if (c_offset) { lblk_from = EXT4_LBLK_CMASK(sbi, lblk_start); lblk_to = lblk_from + c_offset - 1; if (ext4_find_delalloc_range(inode, lblk_from, lblk_to)) allocated_clusters--; } /* Now check towards right. 
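	 * Example (illustrative numbers): with s_cluster_ratio = 4,
	 * lblk_start = 2 and num_blks = 4 cover blocks [2, 5]; the check
	 * above looked at blocks [0, 1] and the check below looks at
	 * blocks [6, 7].  A delayed block in either partial range means
	 * the corresponding cluster is excluded from the returned count.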
*/ c_offset = EXT4_LBLK_COFF(sbi, lblk_start + num_blks); if (allocated_clusters && c_offset) { lblk_from = lblk_start + num_blks; lblk_to = lblk_from + (sbi->s_cluster_ratio - c_offset) - 1; if (ext4_find_delalloc_range(inode, lblk_from, lblk_to)) allocated_clusters--; } return allocated_clusters; } static int convert_initialized_extent(handle_t *handle, struct inode *inode, struct ext4_map_blocks *map, struct ext4_ext_path **ppath, int flags, unsigned int allocated, ext4_fsblk_t newblock) { struct ext4_ext_path *path = *ppath; struct ext4_extent *ex; ext4_lblk_t ee_block; unsigned int ee_len; int depth; int err = 0; /* * Make sure that the extent is no bigger than we support with * unwritten extent */ if (map->m_len > EXT_UNWRITTEN_MAX_LEN) map->m_len = EXT_UNWRITTEN_MAX_LEN / 2; depth = ext_depth(inode); ex = path[depth].p_ext; ee_block = le32_to_cpu(ex->ee_block); ee_len = ext4_ext_get_actual_len(ex); ext_debug("%s: inode %lu, logical" "block %llu, max_blocks %u\n", __func__, inode->i_ino, (unsigned long long)ee_block, ee_len); if (ee_block != map->m_lblk || ee_len > map->m_len) { err = ext4_split_convert_extents(handle, inode, map, ppath, EXT4_GET_BLOCKS_CONVERT_UNWRITTEN); if (err < 0) return err; path = ext4_find_extent(inode, map->m_lblk, ppath, 0); if (IS_ERR(path)) return PTR_ERR(path); depth = ext_depth(inode); ex = path[depth].p_ext; if (!ex) { EXT4_ERROR_INODE(inode, "unexpected hole at %lu", (unsigned long) map->m_lblk); return -EFSCORRUPTED; } } err = ext4_ext_get_access(handle, inode, path + depth); if (err) return err; /* first mark the extent as unwritten */ ext4_ext_mark_unwritten(ex); /* note: ext4_ext_correct_indexes() isn't needed here because * borders are not changed */ ext4_ext_try_to_merge(handle, inode, path, ex); /* Mark modified extent as dirty */ err = ext4_ext_dirty(handle, inode, path + path->p_depth); if (err) return err; ext4_ext_show_leaf(inode, path); ext4_update_inode_fsync_trans(handle, inode, 1); err = check_eofblocks_fl(handle, inode, map->m_lblk, path, map->m_len); if (err) return err; map->m_flags |= EXT4_MAP_UNWRITTEN; if (allocated > map->m_len) allocated = map->m_len; map->m_len = allocated; return allocated; } static int ext4_ext_handle_unwritten_extents(handle_t *handle, struct inode *inode, struct ext4_map_blocks *map, struct ext4_ext_path **ppath, int flags, unsigned int allocated, ext4_fsblk_t newblock) { struct ext4_ext_path *path = *ppath; int ret = 0; int err = 0; ext4_io_end_t *io = ext4_inode_aio(inode); ext_debug("ext4_ext_handle_unwritten_extents: inode %lu, logical " "block %llu, max_blocks %u, flags %x, allocated %u\n", inode->i_ino, (unsigned long long)map->m_lblk, map->m_len, flags, allocated); ext4_ext_show_leaf(inode, path); /* * When writing into unwritten space, we should not fail to * allocate metadata blocks for the new extent block if needed. 
*/ flags |= EXT4_GET_BLOCKS_METADATA_NOFAIL; trace_ext4_ext_handle_unwritten_extents(inode, map, flags, allocated, newblock); /* get_block() before submit the IO, split the extent */ if (flags & EXT4_GET_BLOCKS_PRE_IO) { ret = ext4_split_convert_extents(handle, inode, map, ppath, flags | EXT4_GET_BLOCKS_CONVERT); if (ret <= 0) goto out; /* * Flag the inode(non aio case) or end_io struct (aio case) * that this IO needs to conversion to written when IO is * completed */ if (io) ext4_set_io_unwritten_flag(inode, io); else ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN); map->m_flags |= EXT4_MAP_UNWRITTEN; goto out; } /* IO end_io complete, convert the filled extent to written */ if (flags & EXT4_GET_BLOCKS_CONVERT) { ret = ext4_convert_unwritten_extents_endio(handle, inode, map, ppath); if (ret >= 0) { ext4_update_inode_fsync_trans(handle, inode, 1); err = check_eofblocks_fl(handle, inode, map->m_lblk, path, map->m_len); } else err = ret; map->m_flags |= EXT4_MAP_MAPPED; map->m_pblk = newblock; if (allocated > map->m_len) allocated = map->m_len; map->m_len = allocated; goto out2; } /* buffered IO case */ /* * repeat fallocate creation request * we already have an unwritten extent */ if (flags & EXT4_GET_BLOCKS_UNWRIT_EXT) { map->m_flags |= EXT4_MAP_UNWRITTEN; goto map_out; } /* buffered READ or buffered write_begin() lookup */ if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) { /* * We have blocks reserved already. We * return allocated blocks so that delalloc * won't do block reservation for us. But * the buffer head will be unmapped so that * a read from the block returns 0s. */ map->m_flags |= EXT4_MAP_UNWRITTEN; goto out1; } /* buffered write, writepage time, convert*/ ret = ext4_ext_convert_to_initialized(handle, inode, map, ppath, flags); if (ret >= 0) ext4_update_inode_fsync_trans(handle, inode, 1); out: if (ret <= 0) { err = ret; goto out2; } else allocated = ret; map->m_flags |= EXT4_MAP_NEW; /* * if we allocated more blocks than requested * we need to make sure we unmap the extra block * allocated. The actual needed block will get * unmapped later when we find the buffer_head marked * new. */ if (allocated > map->m_len) { unmap_underlying_metadata_blocks(inode->i_sb->s_bdev, newblock + map->m_len, allocated - map->m_len); allocated = map->m_len; } map->m_len = allocated; /* * If we have done fallocate with the offset that is already * delayed allocated, we would have block reservation * and quota reservation done in the delayed write path. * But fallocate would have already updated quota and block * count for this offset. So cancel these reservation */ if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) { unsigned int reserved_clusters; reserved_clusters = get_reserved_cluster_alloc(inode, map->m_lblk, map->m_len); if (reserved_clusters) ext4_da_update_reserve_space(inode, reserved_clusters, 0); } map_out: map->m_flags |= EXT4_MAP_MAPPED; if ((flags & EXT4_GET_BLOCKS_KEEP_SIZE) == 0) { err = check_eofblocks_fl(handle, inode, map->m_lblk, path, map->m_len); if (err < 0) goto out2; } out1: if (allocated > map->m_len) allocated = map->m_len; ext4_ext_show_leaf(inode, path); map->m_pblk = newblock; map->m_len = allocated; out2: return err ? err : allocated; } /* * get_implied_cluster_alloc - check to see if the requested * allocation (in the map structure) overlaps with a cluster already * allocated in an extent. 
* @sb The filesystem superblock structure * @map The requested lblk->pblk mapping * @ex The extent structure which might contain an implied * cluster allocation * * This function is called by ext4_ext_map_blocks() after we failed to * find blocks that were already in the inode's extent tree. Hence, * we know that the beginning of the requested region cannot overlap * the extent from the inode's extent tree. There are three cases we * want to catch. The first is this case: * * |--- cluster # N--| * |--- extent ---| |---- requested region ---| * |==========| * * The second case that we need to test for is this one: * * |--------- cluster # N ----------------| * |--- requested region --| |------- extent ----| * |=======================| * * The third case is when the requested region lies between two extents * within the same cluster: * |------------- cluster # N-------------| * |----- ex -----| |---- ex_right ----| * |------ requested region ------| * |================| * * In each of the above cases, we need to set the map->m_pblk and * map->m_len so it corresponds to the return the extent labelled as * "|====|" from cluster #N, since it is already in use for data in * cluster EXT4_B2C(sbi, map->m_lblk). We will then return 1 to * signal to ext4_ext_map_blocks() that map->m_pblk should be treated * as a new "allocated" block region. Otherwise, we will return 0 and * ext4_ext_map_blocks() will then allocate one or more new clusters * by calling ext4_mb_new_blocks(). */ static int get_implied_cluster_alloc(struct super_block *sb, struct ext4_map_blocks *map, struct ext4_extent *ex, struct ext4_ext_path *path) { struct ext4_sb_info *sbi = EXT4_SB(sb); ext4_lblk_t c_offset = EXT4_LBLK_COFF(sbi, map->m_lblk); ext4_lblk_t ex_cluster_start, ex_cluster_end; ext4_lblk_t rr_cluster_start; ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block); ext4_fsblk_t ee_start = ext4_ext_pblock(ex); unsigned short ee_len = ext4_ext_get_actual_len(ex); /* The extent passed in that we are trying to match */ ex_cluster_start = EXT4_B2C(sbi, ee_block); ex_cluster_end = EXT4_B2C(sbi, ee_block + ee_len - 1); /* The requested region passed into ext4_map_blocks() */ rr_cluster_start = EXT4_B2C(sbi, map->m_lblk); if ((rr_cluster_start == ex_cluster_end) || (rr_cluster_start == ex_cluster_start)) { if (rr_cluster_start == ex_cluster_end) ee_start += ee_len - 1; map->m_pblk = EXT4_PBLK_CMASK(sbi, ee_start) + c_offset; map->m_len = min(map->m_len, (unsigned) sbi->s_cluster_ratio - c_offset); /* * Check for and handle this case: * * |--------- cluster # N-------------| * |------- extent ----| * |--- requested region ---| * |===========| */ if (map->m_lblk < ee_block) map->m_len = min(map->m_len, ee_block - map->m_lblk); /* * Check for the case where there is already another allocated * block to the right of 'ex' but before the end of the cluster. * * |------------- cluster # N-------------| * |----- ex -----| |---- ex_right ----| * |------ requested region ------| * |================| */ if (map->m_lblk > ee_block) { ext4_lblk_t next = ext4_ext_next_allocated_block(path); map->m_len = min(map->m_len, next - map->m_lblk); } trace_ext4_get_implied_cluster_alloc_exit(sb, map, 1); return 1; } trace_ext4_get_implied_cluster_alloc_exit(sb, map, 0); return 0; } /* * Block allocation/map/preallocation routine for extents based files * * * Need to be called with * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system block * (ie, create is zero). 
Otherwise down_write(&EXT4_I(inode)->i_data_sem) * * return > 0, number of of blocks already mapped/allocated * if create == 0 and these are pre-allocated blocks * buffer head is unmapped * otherwise blocks are mapped * * return = 0, if plain look up failed (blocks have not been allocated) * buffer head is unmapped * * return < 0, error case. */ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, struct ext4_map_blocks *map, int flags) { struct ext4_ext_path *path = NULL; struct ext4_extent newex, *ex, *ex2; struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); ext4_fsblk_t newblock = 0; int free_on_err = 0, err = 0, depth, ret; unsigned int allocated = 0, offset = 0; unsigned int allocated_clusters = 0; struct ext4_allocation_request ar; ext4_io_end_t *io = ext4_inode_aio(inode); ext4_lblk_t cluster_offset; int set_unwritten = 0; bool map_from_cluster = false; ext_debug("blocks %u/%u requested for inode %lu\n", map->m_lblk, map->m_len, inode->i_ino); trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags); /* find extent for this block */ path = ext4_find_extent(inode, map->m_lblk, NULL, 0); if (IS_ERR(path)) { err = PTR_ERR(path); path = NULL; goto out2; } depth = ext_depth(inode); /* * consistent leaf must not be empty; * this situation is possible, though, _during_ tree modification; * this is why assert can't be put in ext4_find_extent() */ if (unlikely(path[depth].p_ext == NULL && depth != 0)) { EXT4_ERROR_INODE(inode, "bad extent address " "lblock: %lu, depth: %d pblock %lld", (unsigned long) map->m_lblk, depth, path[depth].p_block); err = -EFSCORRUPTED; goto out2; } ex = path[depth].p_ext; if (ex) { ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block); ext4_fsblk_t ee_start = ext4_ext_pblock(ex); unsigned short ee_len; /* * unwritten extents are treated as holes, except that * we split out initialized portions during a write. */ ee_len = ext4_ext_get_actual_len(ex); trace_ext4_ext_show_extent(inode, ee_block, ee_start, ee_len); /* if found extent covers block, simply return it */ if (in_range(map->m_lblk, ee_block, ee_len)) { newblock = map->m_lblk - ee_block + ee_start; /* number of remaining blocks in the extent */ allocated = ee_len - (map->m_lblk - ee_block); ext_debug("%u fit into %u:%d -> %llu\n", map->m_lblk, ee_block, ee_len, newblock); /* * If the extent is initialized check whether the * caller wants to convert it to unwritten. */ if ((!ext4_ext_is_unwritten(ex)) && (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN)) { allocated = convert_initialized_extent( handle, inode, map, &path, flags, allocated, newblock); goto out2; } else if (!ext4_ext_is_unwritten(ex)) goto out; ret = ext4_ext_handle_unwritten_extents( handle, inode, map, &path, flags, allocated, newblock); if (ret < 0) err = ret; else allocated = ret; goto out2; } } /* * requested block isn't allocated yet; * we couldn't try to create block if create flag is zero */ if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) { /* * put just found gap into cache to speed up * subsequent requests */ ext4_ext_put_gap_in_cache(inode, path, map->m_lblk); goto out2; } /* * Okay, we need to do block allocation. */ newex.ee_block = cpu_to_le32(map->m_lblk); cluster_offset = EXT4_LBLK_COFF(sbi, map->m_lblk); /* * If we are doing bigalloc, check to see if the extent returned * by ext4_find_extent() implies a cluster we can use. 
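	 * Example (illustrative numbers): with a cluster ratio of 4, a
	 * request at logical block 10 falls in cluster #2 (blocks 8..11).
	 * If the extent found ends at block 9, that cluster is already
	 * backed by allocated space, so block 10 can be mapped from it
	 * instead of allocating a new cluster.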
*/ if (cluster_offset && ex && get_implied_cluster_alloc(inode->i_sb, map, ex, path)) { ar.len = allocated = map->m_len; newblock = map->m_pblk; map_from_cluster = true; goto got_allocated_blocks; } /* find neighbour allocated blocks */ ar.lleft = map->m_lblk; err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft); if (err) goto out2; ar.lright = map->m_lblk; ex2 = NULL; err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright, &ex2); if (err) goto out2; /* Check if the extent after searching to the right implies a * cluster we can use. */ if ((sbi->s_cluster_ratio > 1) && ex2 && get_implied_cluster_alloc(inode->i_sb, map, ex2, path)) { ar.len = allocated = map->m_len; newblock = map->m_pblk; map_from_cluster = true; goto got_allocated_blocks; } /* * See if request is beyond maximum number of blocks we can have in * a single extent. For an initialized extent this limit is * EXT_INIT_MAX_LEN and for an unwritten extent this limit is * EXT_UNWRITTEN_MAX_LEN. */ if (map->m_len > EXT_INIT_MAX_LEN && !(flags & EXT4_GET_BLOCKS_UNWRIT_EXT)) map->m_len = EXT_INIT_MAX_LEN; else if (map->m_len > EXT_UNWRITTEN_MAX_LEN && (flags & EXT4_GET_BLOCKS_UNWRIT_EXT)) map->m_len = EXT_UNWRITTEN_MAX_LEN; /* Check if we can really insert (m_lblk)::(m_lblk + m_len) extent */ newex.ee_len = cpu_to_le16(map->m_len); err = ext4_ext_check_overlap(sbi, inode, &newex, path); if (err) allocated = ext4_ext_get_actual_len(&newex); else allocated = map->m_len; /* allocate new block */ ar.inode = inode; ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk); ar.logical = map->m_lblk; /* * We calculate the offset from the beginning of the cluster * for the logical block number, since when we allocate a * physical cluster, the physical block should start at the * same offset from the beginning of the cluster. This is * needed so that future calls to get_implied_cluster_alloc() * work correctly. */ offset = EXT4_LBLK_COFF(sbi, map->m_lblk); ar.len = EXT4_NUM_B2C(sbi, offset+allocated); ar.goal -= offset; ar.logical -= offset; if (S_ISREG(inode->i_mode)) ar.flags = EXT4_MB_HINT_DATA; else /* disable in-core preallocation for non-regular files */ ar.flags = 0; if (flags & EXT4_GET_BLOCKS_NO_NORMALIZE) ar.flags |= EXT4_MB_HINT_NOPREALLOC; if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) ar.flags |= EXT4_MB_DELALLOC_RESERVED; if (flags & EXT4_GET_BLOCKS_METADATA_NOFAIL) ar.flags |= EXT4_MB_USE_RESERVED; newblock = ext4_mb_new_blocks(handle, &ar, &err); if (!newblock) goto out2; ext_debug("allocate new block: goal %llu, found %llu/%u\n", ar.goal, newblock, allocated); free_on_err = 1; allocated_clusters = ar.len; ar.len = EXT4_C2B(sbi, ar.len) - offset; if (ar.len > allocated) ar.len = allocated; got_allocated_blocks: /* try to insert new extent into found leaf and return */ ext4_ext_store_pblock(&newex, newblock + offset); newex.ee_len = cpu_to_le16(ar.len); /* Mark unwritten */ if (flags & EXT4_GET_BLOCKS_UNWRIT_EXT){ ext4_ext_mark_unwritten(&newex); map->m_flags |= EXT4_MAP_UNWRITTEN; /* * io_end structure was created for every IO write to an * unwritten extent. To avoid unnecessary conversion, * here we flag the IO that really needs the conversion. * For non asycn direct IO case, flag the inode state * that we need to perform conversion when IO is done. 
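	 * Only the pre-IO path (EXT4_GET_BLOCKS_PRE_IO) sets set_unwritten
	 * below, and the flag is applied to the io_end or to the inode
	 * state only after the new extent has been inserted successfully.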
*/ if (flags & EXT4_GET_BLOCKS_PRE_IO) set_unwritten = 1; } err = 0; if ((flags & EXT4_GET_BLOCKS_KEEP_SIZE) == 0) err = check_eofblocks_fl(handle, inode, map->m_lblk, path, ar.len); if (!err) err = ext4_ext_insert_extent(handle, inode, &path, &newex, flags); if (!err && set_unwritten) { if (io) ext4_set_io_unwritten_flag(inode, io); else ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN); } if (err && free_on_err) { int fb_flags = flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE ? EXT4_FREE_BLOCKS_NO_QUOT_UPDATE : 0; /* free data blocks we just allocated */ /* not a good idea to call discard here directly, * but otherwise we'd need to call it every free() */ ext4_discard_preallocations(inode); ext4_free_blocks(handle, inode, NULL, newblock, EXT4_C2B(sbi, allocated_clusters), fb_flags); goto out2; } /* previous routine could use block we allocated */ newblock = ext4_ext_pblock(&newex); allocated = ext4_ext_get_actual_len(&newex); if (allocated > map->m_len) allocated = map->m_len; map->m_flags |= EXT4_MAP_NEW; /* * Update reserved blocks/metadata blocks after successful * block allocation which had been deferred till now. */ if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) { unsigned int reserved_clusters; /* * Check how many clusters we had reserved this allocated range */ reserved_clusters = get_reserved_cluster_alloc(inode, map->m_lblk, allocated); if (!map_from_cluster) { BUG_ON(allocated_clusters < reserved_clusters); if (reserved_clusters < allocated_clusters) { struct ext4_inode_info *ei = EXT4_I(inode); int reservation = allocated_clusters - reserved_clusters; /* * It seems we claimed few clusters outside of * the range of this allocation. We should give * it back to the reservation pool. This can * happen in the following case: * * * Suppose s_cluster_ratio is 4 (i.e., each * cluster has 4 blocks. Thus, the clusters * are [0-3],[4-7],[8-11]... * * First comes delayed allocation write for * logical blocks 10 & 11. Since there were no * previous delayed allocated blocks in the * range [8-11], we would reserve 1 cluster * for this write. * * Next comes write for logical blocks 3 to 8. * In this case, we will reserve 2 clusters * (for [0-3] and [4-7]; and not for [8-11] as * that range has a delayed allocated blocks. * Thus total reserved clusters now becomes 3. * * Now, during the delayed allocation writeout * time, we will first write blocks [3-8] and * allocate 3 clusters for writing these * blocks. Also, we would claim all these * three clusters above. * * Now when we come here to writeout the * blocks [10-11], we would expect to claim * the reservation of 1 cluster we had made * (and we would claim it since there are no * more delayed allocated blocks in the range * [8-11]. But our reserved cluster count had * already gone to 0. * * Thus, at the step 4 above when we determine * that there are still some unwritten delayed * allocated blocks outside of our current * block range, we should increment the * reserved clusters count so that when the * remaining blocks finally gets written, we * could claim them. */ dquot_reserve_block(inode, EXT4_C2B(sbi, reservation)); spin_lock(&ei->i_block_reservation_lock); ei->i_reserved_data_blocks += reservation; spin_unlock(&ei->i_block_reservation_lock); } /* * We will claim quota for all newly allocated blocks. * We're updating the reserved space *after* the * correction above so we do not accidentally free * all the metadata reservation because we might * actually need it later on. 
*/ ext4_da_update_reserve_space(inode, allocated_clusters, 1); } } /* * Cache the extent and update transaction to commit on fdatasync only * when it is _not_ an unwritten extent. */ if ((flags & EXT4_GET_BLOCKS_UNWRIT_EXT) == 0) ext4_update_inode_fsync_trans(handle, inode, 1); else ext4_update_inode_fsync_trans(handle, inode, 0); out: if (allocated > map->m_len) allocated = map->m_len; ext4_ext_show_leaf(inode, path); map->m_flags |= EXT4_MAP_MAPPED; map->m_pblk = newblock; map->m_len = allocated; out2: ext4_ext_drop_refs(path); kfree(path); trace_ext4_ext_map_blocks_exit(inode, flags, map, err ? err : allocated); return err ? err : allocated; } void ext4_ext_truncate(handle_t *handle, struct inode *inode) { struct super_block *sb = inode->i_sb; ext4_lblk_t last_block; int err = 0; /* * TODO: optimization is possible here. * Probably we need not scan at all, * because page truncation is enough. */ /* we have to know where to truncate from in crash case */ EXT4_I(inode)->i_disksize = inode->i_size; ext4_mark_inode_dirty(handle, inode); last_block = (inode->i_size + sb->s_blocksize - 1) >> EXT4_BLOCK_SIZE_BITS(sb); retry: err = ext4_es_remove_extent(inode, last_block, EXT_MAX_BLOCKS - last_block); if (err == -ENOMEM) { cond_resched(); congestion_wait(BLK_RW_ASYNC, HZ/50); goto retry; } if (err) { ext4_std_error(inode->i_sb, err); return; } err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1); ext4_std_error(inode->i_sb, err); } static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset, ext4_lblk_t len, loff_t new_size, int flags, int mode) { struct inode *inode = file_inode(file); handle_t *handle; int ret = 0; int ret2 = 0; int retries = 0; int depth = 0; struct ext4_map_blocks map; unsigned int credits; loff_t epos; map.m_lblk = offset; map.m_len = len; /* * Don't normalize the request if it can fit in one extent so * that it doesn't get unnecessarily split into multiple * extents. */ if (len <= EXT_UNWRITTEN_MAX_LEN) flags |= EXT4_GET_BLOCKS_NO_NORMALIZE; /* Wait all existing dio workers, newcomers will block on i_mutex */ ext4_inode_block_unlocked_dio(inode); inode_dio_wait(inode); /* * credits to insert 1 extent into extent tree */ credits = ext4_chunk_trans_blocks(inode, len); /* * We can only call ext_depth() on extent based inodes */ if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) depth = ext_depth(inode); else depth = -1; retry: while (ret >= 0 && len) { /* * Recalculate credits when extent tree depth changes. 
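		 * A previous iteration's allocation may have added a level
		 * to the extent tree, in which case a handle sized with the
		 * old ext4_chunk_trans_blocks() estimate could run short of
		 * credits, so the estimate is refreshed before starting the
		 * next handle.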
*/ if (depth >= 0 && depth != ext_depth(inode)) { credits = ext4_chunk_trans_blocks(inode, len); depth = ext_depth(inode); } handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, credits); if (IS_ERR(handle)) { ret = PTR_ERR(handle); break; } ret = ext4_map_blocks(handle, inode, &map, flags); if (ret <= 0) { ext4_debug("inode #%lu: block %u: len %u: " "ext4_ext_map_blocks returned %d", inode->i_ino, map.m_lblk, map.m_len, ret); ext4_mark_inode_dirty(handle, inode); ret2 = ext4_journal_stop(handle); break; } map.m_lblk += ret; map.m_len = len = len - ret; epos = (loff_t)map.m_lblk << inode->i_blkbits; inode->i_ctime = ext4_current_time(inode); if (new_size) { if (epos > new_size) epos = new_size; if (ext4_update_inode_size(inode, epos) & 0x1) inode->i_mtime = inode->i_ctime; } else { if (epos > inode->i_size) ext4_set_inode_flag(inode, EXT4_INODE_EOFBLOCKS); } ext4_mark_inode_dirty(handle, inode); ret2 = ext4_journal_stop(handle); if (ret2) break; } if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) { ret = 0; goto retry; } ext4_inode_resume_unlocked_dio(inode); return ret > 0 ? ret2 : ret; } static long ext4_zero_range(struct file *file, loff_t offset, loff_t len, int mode) { struct inode *inode = file_inode(file); handle_t *handle = NULL; unsigned int max_blocks; loff_t new_size = 0; int ret = 0; int flags; int credits; int partial_begin, partial_end; loff_t start, end; ext4_lblk_t lblk; struct address_space *mapping = inode->i_mapping; unsigned int blkbits = inode->i_blkbits; trace_ext4_zero_range(inode, offset, len, mode); if (!S_ISREG(inode->i_mode)) return -EINVAL; /* Call ext4_force_commit to flush all data in case of data=journal. */ if (ext4_should_journal_data(inode)) { ret = ext4_force_commit(inode->i_sb); if (ret) return ret; } /* * Write out all dirty pages to avoid race conditions * Then release them. */ if (mapping->nrpages && mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) { ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1); if (ret) return ret; } /* * Round up offset. This is not fallocate, we neet to zero out * blocks, so convert interior block aligned part of the range to * unwritten and possibly manually zero out unaligned parts of the * range. 
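	 * Example (illustrative numbers): with a 4096-byte block size,
	 * offset = 3000 and len = 10000 give start = 4096 and end = 12288,
	 * so logical blocks 1 and 2 are converted to unwritten while the
	 * partial head [3000, 4096) and tail [12288, 13000) are zeroed
	 * explicitly via ext4_zero_partial_blocks().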
*/ start = round_up(offset, 1 << blkbits); end = round_down((offset + len), 1 << blkbits); if (start < offset || end > offset + len) return -EINVAL; partial_begin = offset & ((1 << blkbits) - 1); partial_end = (offset + len) & ((1 << blkbits) - 1); lblk = start >> blkbits; max_blocks = (end >> blkbits); if (max_blocks < lblk) max_blocks = 0; else max_blocks -= lblk; mutex_lock(&inode->i_mutex); /* * Indirect files do not support unwritten extnets */ if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) { ret = -EOPNOTSUPP; goto out_mutex; } if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > i_size_read(inode)) { new_size = offset + len; ret = inode_newsize_ok(inode, new_size); if (ret) goto out_mutex; } flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT; if (mode & FALLOC_FL_KEEP_SIZE) flags |= EXT4_GET_BLOCKS_KEEP_SIZE; /* Preallocate the range including the unaligned edges */ if (partial_begin || partial_end) { ret = ext4_alloc_file_blocks(file, round_down(offset, 1 << blkbits) >> blkbits, (round_up((offset + len), 1 << blkbits) - round_down(offset, 1 << blkbits)) >> blkbits, new_size, flags, mode); if (ret) goto out_mutex; } /* Zero range excluding the unaligned edges */ if (max_blocks > 0) { flags |= (EXT4_GET_BLOCKS_CONVERT_UNWRITTEN | EXT4_EX_NOCACHE); /* Now release the pages and zero block aligned part of pages*/ truncate_pagecache_range(inode, start, end - 1); inode->i_mtime = inode->i_ctime = ext4_current_time(inode); /* Wait all existing dio workers, newcomers will block on i_mutex */ ext4_inode_block_unlocked_dio(inode); inode_dio_wait(inode); ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size, flags, mode); if (ret) goto out_dio; } if (!partial_begin && !partial_end) goto out_dio; /* * In worst case we have to writeout two nonadjacent unwritten * blocks and update the inode */ credits = (2 * ext4_ext_index_trans_blocks(inode, 2)) + 1; if (ext4_should_journal_data(inode)) credits += 2; handle = ext4_journal_start(inode, EXT4_HT_MISC, credits); if (IS_ERR(handle)) { ret = PTR_ERR(handle); ext4_std_error(inode->i_sb, ret); goto out_dio; } inode->i_mtime = inode->i_ctime = ext4_current_time(inode); if (new_size) { ext4_update_inode_size(inode, new_size); } else { /* * Mark that we allocate beyond EOF so the subsequent truncate * can proceed even if the new size is the same as i_size. */ if ((offset + len) > i_size_read(inode)) ext4_set_inode_flag(inode, EXT4_INODE_EOFBLOCKS); } ext4_mark_inode_dirty(handle, inode); /* Zero out partial block at the edges of the range */ ret = ext4_zero_partial_blocks(handle, inode, offset, len); if (file->f_flags & O_SYNC) ext4_handle_sync(handle); ext4_journal_stop(handle); out_dio: ext4_inode_resume_unlocked_dio(inode); out_mutex: mutex_unlock(&inode->i_mutex); return ret; } /* * preallocate space for a file. This implements ext4's fallocate file * operation, which gets called from sys_fallocate system call. * For block-mapped files, posix_fallocate should fall back to the method * of writing zeroes to the required new blocks (the same behavior which is * expected for file systems which do not support fallocate() system call). 
*/ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len) { struct inode *inode = file_inode(file); loff_t new_size = 0; unsigned int max_blocks; int ret = 0; int flags; ext4_lblk_t lblk; unsigned int blkbits = inode->i_blkbits; /* * Encrypted inodes can't handle collapse range or insert * range since we would need to re-encrypt blocks with a * different IV or XTS tweak (which are based on the logical * block number). * * XXX It's not clear why zero range isn't working, but we'll * leave it disabled for encrypted inodes for now. This is a * bug we should fix.... */ if (ext4_encrypted_inode(inode) && (mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE | FALLOC_FL_ZERO_RANGE))) return -EOPNOTSUPP; /* Return error if mode is not supported */ if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE | FALLOC_FL_INSERT_RANGE)) return -EOPNOTSUPP; if (mode & FALLOC_FL_PUNCH_HOLE) return ext4_punch_hole(inode, offset, len); ret = ext4_convert_inline_data(inode); if (ret) return ret; if (mode & FALLOC_FL_COLLAPSE_RANGE) return ext4_collapse_range(inode, offset, len); if (mode & FALLOC_FL_INSERT_RANGE) return ext4_insert_range(inode, offset, len); if (mode & FALLOC_FL_ZERO_RANGE) return ext4_zero_range(file, offset, len, mode); trace_ext4_fallocate_enter(inode, offset, len, mode); lblk = offset >> blkbits; /* * We can't just convert len to max_blocks because * If blocksize = 4096 offset = 3072 and len = 2048 */ max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits) - lblk; flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT; if (mode & FALLOC_FL_KEEP_SIZE) flags |= EXT4_GET_BLOCKS_KEEP_SIZE; mutex_lock(&inode->i_mutex); /* * We only support preallocation for extent-based files only */ if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) { ret = -EOPNOTSUPP; goto out; } if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > i_size_read(inode)) { new_size = offset + len; ret = inode_newsize_ok(inode, new_size); if (ret) goto out; } ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size, flags, mode); if (ret) goto out; if (file->f_flags & O_SYNC && EXT4_SB(inode->i_sb)->s_journal) { ret = jbd2_complete_transaction(EXT4_SB(inode->i_sb)->s_journal, EXT4_I(inode)->i_sync_tid); } out: mutex_unlock(&inode->i_mutex); trace_ext4_fallocate_exit(inode, offset, max_blocks, ret); return ret; } /* * This function convert a range of blocks to written extents * The caller of this function will pass the start offset and the size. * all unwritten extents within this range will be converted to * written extents. * * This function is called from the direct IO end io call back * function, to convert the fallocated extents after IO is completed. * Returns 0 on success. */ int ext4_convert_unwritten_extents(handle_t *handle, struct inode *inode, loff_t offset, ssize_t len) { unsigned int max_blocks; int ret = 0; int ret2 = 0; struct ext4_map_blocks map; unsigned int credits, blkbits = inode->i_blkbits; map.m_lblk = offset >> blkbits; /* * We can't just convert len to max_blocks because * If blocksize = 4096 offset = 3072 and len = 2048 */ max_blocks = ((EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits) - map.m_lblk); /* * This is somewhat ugly but the idea is clear: When transaction is * reserved, everything goes into it. Otherwise we rather start several * smaller transactions for conversion of each extent separately. 
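	 * When a reserved handle is passed in, credits stays 0 and the loop
	 * below reuses that single handle; otherwise each iteration starts
	 * and stops its own handle with credits from
	 * ext4_chunk_trans_blocks().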
*/ if (handle) { handle = ext4_journal_start_reserved(handle, EXT4_HT_EXT_CONVERT); if (IS_ERR(handle)) return PTR_ERR(handle); credits = 0; } else { /* * credits to insert 1 extent into extent tree */ credits = ext4_chunk_trans_blocks(inode, max_blocks); } while (ret >= 0 && ret < max_blocks) { map.m_lblk += ret; map.m_len = (max_blocks -= ret); if (credits) { handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, credits); if (IS_ERR(handle)) { ret = PTR_ERR(handle); break; } } ret = ext4_map_blocks(handle, inode, &map, EXT4_GET_BLOCKS_IO_CONVERT_EXT); if (ret <= 0) ext4_warning(inode->i_sb, "inode #%lu: block %u: len %u: " "ext4_ext_map_blocks returned %d", inode->i_ino, map.m_lblk, map.m_len, ret); ext4_mark_inode_dirty(handle, inode); if (credits) ret2 = ext4_journal_stop(handle); if (ret <= 0 || ret2) break; } if (!credits) ret2 = ext4_journal_stop(handle); return ret > 0 ? ret2 : ret; } /* * If newes is not existing extent (newes->ec_pblk equals zero) find * delayed extent at start of newes and update newes accordingly and * return start of the next delayed extent. * * If newes is existing extent (newes->ec_pblk is not equal zero) * return start of next delayed extent or EXT_MAX_BLOCKS if no delayed * extent found. Leave newes unmodified. */ static int ext4_find_delayed_extent(struct inode *inode, struct extent_status *newes) { struct extent_status es; ext4_lblk_t block, next_del; if (newes->es_pblk == 0) { ext4_es_find_delayed_extent_range(inode, newes->es_lblk, newes->es_lblk + newes->es_len - 1, &es); /* * No extent in extent-tree contains block @newes->es_pblk, * then the block may stay in 1)a hole or 2)delayed-extent. */ if (es.es_len == 0) /* A hole found. */ return 0; if (es.es_lblk > newes->es_lblk) { /* A hole found. */ newes->es_len = min(es.es_lblk - newes->es_lblk, newes->es_len); return 0; } newes->es_len = es.es_lblk + es.es_len - newes->es_lblk; } block = newes->es_lblk + newes->es_len; ext4_es_find_delayed_extent_range(inode, block, EXT_MAX_BLOCKS, &es); if (es.es_len == 0) next_del = EXT_MAX_BLOCKS; else next_del = es.es_lblk; return next_del; } /* fiemap flags we can handle specified here */ #define EXT4_FIEMAP_FLAGS (FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR) static int ext4_xattr_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo) { __u64 physical = 0; __u64 length; __u32 flags = FIEMAP_EXTENT_LAST; int blockbits = inode->i_sb->s_blocksize_bits; int error = 0; /* in-inode? */ if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) { struct ext4_iloc iloc; int offset; /* offset of xattr in inode */ error = ext4_get_inode_loc(inode, &iloc); if (error) return error; physical = (__u64)iloc.bh->b_blocknr << blockbits; offset = EXT4_GOOD_OLD_INODE_SIZE + EXT4_I(inode)->i_extra_isize; physical += offset; length = EXT4_SB(inode->i_sb)->s_inode_size - offset; flags |= FIEMAP_EXTENT_DATA_INLINE; brelse(iloc.bh); } else { /* external block */ physical = (__u64)EXT4_I(inode)->i_file_acl << blockbits; length = inode->i_sb->s_blocksize; } if (physical) error = fiemap_fill_next_extent(fieinfo, 0, physical, length, flags); return (error < 0 ? 
error : 0); } int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, __u64 start, __u64 len) { ext4_lblk_t start_blk; int error = 0; if (ext4_has_inline_data(inode)) { int has_inline = 1; error = ext4_inline_data_fiemap(inode, fieinfo, &has_inline, start, len); if (has_inline) return error; } if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) { error = ext4_ext_precache(inode); if (error) return error; } /* fallback to generic here if not in extents fmt */ if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) return generic_block_fiemap(inode, fieinfo, start, len, ext4_get_block); if (fiemap_check_flags(fieinfo, EXT4_FIEMAP_FLAGS)) return -EBADR; if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) { error = ext4_xattr_fiemap(inode, fieinfo); } else { ext4_lblk_t len_blks; __u64 last_blk; start_blk = start >> inode->i_sb->s_blocksize_bits; last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits; if (last_blk >= EXT_MAX_BLOCKS) last_blk = EXT_MAX_BLOCKS-1; len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1; /* * Walk the extent tree gathering extent information * and pushing extents back to the user. */ error = ext4_fill_fiemap_extents(inode, start_blk, len_blks, fieinfo); } return error; } /* * ext4_access_path: * Function to access the path buffer for marking it dirty. * It also checks if there are sufficient credits left in the journal handle * to update path. */ static int ext4_access_path(handle_t *handle, struct inode *inode, struct ext4_ext_path *path) { int credits, err; if (!ext4_handle_valid(handle)) return 0; /* * Check if need to extend journal credits * 3 for leaf, sb, and inode plus 2 (bmap and group * descriptor) for each block group; assume two block * groups */ if (handle->h_buffer_credits < 7) { credits = ext4_writepage_trans_blocks(inode); err = ext4_ext_truncate_extend_restart(handle, inode, credits); /* EAGAIN is success */ if (err && err != -EAGAIN) return err; } err = ext4_ext_get_access(handle, inode, path); return err; } /* * ext4_ext_shift_path_extents: * Shift the extents of a path structure lying between path[depth].p_ext * and EXT_LAST_EXTENT(path[depth].p_hdr), by @shift blocks. @SHIFT tells * if it is right shift or left shift operation. */ static int ext4_ext_shift_path_extents(struct ext4_ext_path *path, ext4_lblk_t shift, struct inode *inode, handle_t *handle, enum SHIFT_DIRECTION SHIFT) { int depth, err = 0; struct ext4_extent *ex_start, *ex_last; bool update = 0; depth = path->p_depth; while (depth >= 0) { if (depth == path->p_depth) { ex_start = path[depth].p_ext; if (!ex_start) return -EFSCORRUPTED; ex_last = EXT_LAST_EXTENT(path[depth].p_hdr); err = ext4_access_path(handle, inode, path + depth); if (err) goto out; if (ex_start == EXT_FIRST_EXTENT(path[depth].p_hdr)) update = 1; while (ex_start <= ex_last) { if (SHIFT == SHIFT_LEFT) { le32_add_cpu(&ex_start->ee_block, -shift); /* Try to merge to the left. 
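				 * After ee_block is reduced by @shift, this
				 * extent may have become contiguous with the
				 * one before it; if
				 * ext4_ext_try_to_merge_right() absorbs it,
				 * the leaf holds one fewer entry, so ex_last
				 * is decremented instead of advancing
				 * ex_start.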
*/ if ((ex_start > EXT_FIRST_EXTENT(path[depth].p_hdr)) && ext4_ext_try_to_merge_right(inode, path, ex_start - 1)) ex_last--; else ex_start++; } else { le32_add_cpu(&ex_last->ee_block, shift); ext4_ext_try_to_merge_right(inode, path, ex_last); ex_last--; } } err = ext4_ext_dirty(handle, inode, path + depth); if (err) goto out; if (--depth < 0 || !update) break; } /* Update index too */ err = ext4_access_path(handle, inode, path + depth); if (err) goto out; if (SHIFT == SHIFT_LEFT) le32_add_cpu(&path[depth].p_idx->ei_block, -shift); else le32_add_cpu(&path[depth].p_idx->ei_block, shift); err = ext4_ext_dirty(handle, inode, path + depth); if (err) goto out; /* we are done if current index is not a starting index */ if (path[depth].p_idx != EXT_FIRST_INDEX(path[depth].p_hdr)) break; depth--; } out: return err; } /* * ext4_ext_shift_extents: * All the extents which lies in the range from @start to the last allocated * block for the @inode are shifted either towards left or right (depending * upon @SHIFT) by @shift blocks. * On success, 0 is returned, error otherwise. */ static int ext4_ext_shift_extents(struct inode *inode, handle_t *handle, ext4_lblk_t start, ext4_lblk_t shift, enum SHIFT_DIRECTION SHIFT) { struct ext4_ext_path *path; int ret = 0, depth; struct ext4_extent *extent; ext4_lblk_t stop, *iterator, ex_start, ex_end; /* Let path point to the last extent */ path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL, 0); if (IS_ERR(path)) return PTR_ERR(path); depth = path->p_depth; extent = path[depth].p_ext; if (!extent) goto out; stop = le32_to_cpu(extent->ee_block) + ext4_ext_get_actual_len(extent); /* * In case of left shift, Don't start shifting extents until we make * sure the hole is big enough to accommodate the shift. */ if (SHIFT == SHIFT_LEFT) { path = ext4_find_extent(inode, start - 1, &path, 0); if (IS_ERR(path)) return PTR_ERR(path); depth = path->p_depth; extent = path[depth].p_ext; if (extent) { ex_start = le32_to_cpu(extent->ee_block); ex_end = le32_to_cpu(extent->ee_block) + ext4_ext_get_actual_len(extent); } else { ex_start = 0; ex_end = 0; } if ((start == ex_start && shift > ex_start) || (shift > start - ex_end)) { ext4_ext_drop_refs(path); kfree(path); return -EINVAL; } } /* * In case of left shift, iterator points to start and it is increased * till we reach stop. In case of right shift, iterator points to stop * and it is decreased till we reach start. */ if (SHIFT == SHIFT_LEFT) iterator = &start; else iterator = &stop; /* Its safe to start updating extents */ while (start < stop) { path = ext4_find_extent(inode, *iterator, &path, 0); if (IS_ERR(path)) return PTR_ERR(path); depth = path->p_depth; extent = path[depth].p_ext; if (!extent) { EXT4_ERROR_INODE(inode, "unexpected hole at %lu", (unsigned long) *iterator); return -EFSCORRUPTED; } if (SHIFT == SHIFT_LEFT && *iterator > le32_to_cpu(extent->ee_block)) { /* Hole, move to the next extent */ if (extent < EXT_LAST_EXTENT(path[depth].p_hdr)) { path[depth].p_ext++; } else { *iterator = ext4_ext_next_allocated_block(path); continue; } } if (SHIFT == SHIFT_LEFT) { extent = EXT_LAST_EXTENT(path[depth].p_hdr); *iterator = le32_to_cpu(extent->ee_block) + ext4_ext_get_actual_len(extent); } else { extent = EXT_FIRST_EXTENT(path[depth].p_hdr); *iterator = le32_to_cpu(extent->ee_block) > 0 ? 
le32_to_cpu(extent->ee_block) - 1 : 0; /* Update path extent in case we need to stop */ while (le32_to_cpu(extent->ee_block) < start) extent++; path[depth].p_ext = extent; } ret = ext4_ext_shift_path_extents(path, shift, inode, handle, SHIFT); if (ret) break; } out: ext4_ext_drop_refs(path); kfree(path); return ret; } /* * ext4_collapse_range: * This implements the fallocate's collapse range functionality for ext4 * Returns: 0 and non-zero on error. */ int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len) { struct super_block *sb = inode->i_sb; ext4_lblk_t punch_start, punch_stop; handle_t *handle; unsigned int credits; loff_t new_size, ioffset; int ret; /* * We need to test this early because xfstests assumes that a * collapse range of (0, 1) will return EOPNOTSUPP if the file * system does not support collapse range. */ if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) return -EOPNOTSUPP; /* Collapse range works only on fs block size aligned offsets. */ if (offset & (EXT4_CLUSTER_SIZE(sb) - 1) || len & (EXT4_CLUSTER_SIZE(sb) - 1)) return -EINVAL; if (!S_ISREG(inode->i_mode)) return -EINVAL; trace_ext4_collapse_range(inode, offset, len); punch_start = offset >> EXT4_BLOCK_SIZE_BITS(sb); punch_stop = (offset + len) >> EXT4_BLOCK_SIZE_BITS(sb); /* Call ext4_force_commit to flush all data in case of data=journal. */ if (ext4_should_journal_data(inode)) { ret = ext4_force_commit(inode->i_sb); if (ret) return ret; } /* * Need to round down offset to be aligned with page size boundary * for page size > block size. */ ioffset = round_down(offset, PAGE_SIZE); /* Write out all dirty pages */ ret = filemap_write_and_wait_range(inode->i_mapping, ioffset, LLONG_MAX); if (ret) return ret; /* Take mutex lock */ mutex_lock(&inode->i_mutex); /* * There is no need to overlap collapse range with EOF, in which case * it is effectively a truncate operation */ if (offset + len >= i_size_read(inode)) { ret = -EINVAL; goto out_mutex; } /* Currently just for extent based files */ if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) { ret = -EOPNOTSUPP; goto out_mutex; } truncate_pagecache(inode, ioffset); /* Wait for existing dio to complete */ ext4_inode_block_unlocked_dio(inode); inode_dio_wait(inode); credits = ext4_writepage_trans_blocks(inode); handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits); if (IS_ERR(handle)) { ret = PTR_ERR(handle); goto out_dio; } down_write(&EXT4_I(inode)->i_data_sem); ext4_discard_preallocations(inode); ret = ext4_es_remove_extent(inode, punch_start, EXT_MAX_BLOCKS - punch_start); if (ret) { up_write(&EXT4_I(inode)->i_data_sem); goto out_stop; } ret = ext4_ext_remove_space(inode, punch_start, punch_stop - 1); if (ret) { up_write(&EXT4_I(inode)->i_data_sem); goto out_stop; } ext4_discard_preallocations(inode); ret = ext4_ext_shift_extents(inode, handle, punch_stop, punch_stop - punch_start, SHIFT_LEFT); if (ret) { up_write(&EXT4_I(inode)->i_data_sem); goto out_stop; } new_size = i_size_read(inode) - len; i_size_write(inode, new_size); EXT4_I(inode)->i_disksize = new_size; up_write(&EXT4_I(inode)->i_data_sem); if (IS_SYNC(inode)) ext4_handle_sync(handle); inode->i_mtime = inode->i_ctime = ext4_current_time(inode); ext4_mark_inode_dirty(handle, inode); out_stop: ext4_journal_stop(handle); out_dio: ext4_inode_resume_unlocked_dio(inode); out_mutex: mutex_unlock(&inode->i_mutex); return ret; } /* * ext4_insert_range: * This function implements the FALLOC_FL_INSERT_RANGE flag of fallocate. 
* The data blocks starting from @offset to the EOF are shifted by @len * towards right to create a hole in the @inode. Inode size is increased * by len bytes. * Returns 0 on success, error otherwise. */ int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len) { struct super_block *sb = inode->i_sb; handle_t *handle; struct ext4_ext_path *path; struct ext4_extent *extent; ext4_lblk_t offset_lblk, len_lblk, ee_start_lblk = 0; unsigned int credits, ee_len; int ret = 0, depth, split_flag = 0; loff_t ioffset; /* * We need to test this early because xfstests assumes that an * insert range of (0, 1) will return EOPNOTSUPP if the file * system does not support insert range. */ if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) return -EOPNOTSUPP; /* Insert range works only on fs block size aligned offsets. */ if (offset & (EXT4_CLUSTER_SIZE(sb) - 1) || len & (EXT4_CLUSTER_SIZE(sb) - 1)) return -EINVAL; if (!S_ISREG(inode->i_mode)) return -EOPNOTSUPP; trace_ext4_insert_range(inode, offset, len); offset_lblk = offset >> EXT4_BLOCK_SIZE_BITS(sb); len_lblk = len >> EXT4_BLOCK_SIZE_BITS(sb); /* Call ext4_force_commit to flush all data in case of data=journal */ if (ext4_should_journal_data(inode)) { ret = ext4_force_commit(inode->i_sb); if (ret) return ret; } /* * Need to round down to align start offset to page size boundary * for page size > block size. */ ioffset = round_down(offset, PAGE_SIZE); /* Write out all dirty pages */ ret = filemap_write_and_wait_range(inode->i_mapping, ioffset, LLONG_MAX); if (ret) return ret; /* Take mutex lock */ mutex_lock(&inode->i_mutex); /* Currently just for extent based files */ if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) { ret = -EOPNOTSUPP; goto out_mutex; } /* Check for wrap through zero */ if (inode->i_size + len > inode->i_sb->s_maxbytes) { ret = -EFBIG; goto out_mutex; } /* Offset should be less than i_size */ if (offset >= i_size_read(inode)) { ret = -EINVAL; goto out_mutex; } truncate_pagecache(inode, ioffset); /* Wait for existing dio to complete */ ext4_inode_block_unlocked_dio(inode); inode_dio_wait(inode); credits = ext4_writepage_trans_blocks(inode); handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits); if (IS_ERR(handle)) { ret = PTR_ERR(handle); goto out_dio; } /* Expand file to avoid data loss if there is error while shifting */ inode->i_size += len; EXT4_I(inode)->i_disksize += len; inode->i_mtime = inode->i_ctime = ext4_current_time(inode); ret = ext4_mark_inode_dirty(handle, inode); if (ret) goto out_stop; down_write(&EXT4_I(inode)->i_data_sem); ext4_discard_preallocations(inode); path = ext4_find_extent(inode, offset_lblk, NULL, 0); if (IS_ERR(path)) { up_write(&EXT4_I(inode)->i_data_sem); goto out_stop; } depth = ext_depth(inode); extent = path[depth].p_ext; if (extent) { ee_start_lblk = le32_to_cpu(extent->ee_block); ee_len = ext4_ext_get_actual_len(extent); /* * If offset_lblk is not the starting block of extent, split * the extent @offset_lblk */ if ((offset_lblk > ee_start_lblk) && (offset_lblk < (ee_start_lblk + ee_len))) { if (ext4_ext_is_unwritten(extent)) split_flag = EXT4_EXT_MARK_UNWRIT1 | EXT4_EXT_MARK_UNWRIT2; ret = ext4_split_extent_at(handle, inode, &path, offset_lblk, split_flag, EXT4_EX_NOCACHE | EXT4_GET_BLOCKS_PRE_IO | EXT4_GET_BLOCKS_METADATA_NOFAIL); } ext4_ext_drop_refs(path); kfree(path); if (ret < 0) { up_write(&EXT4_I(inode)->i_data_sem); goto out_stop; } } ret = ext4_es_remove_extent(inode, offset_lblk, EXT_MAX_BLOCKS - offset_lblk); if (ret) { up_write(&EXT4_I(inode)->i_data_sem); 
goto out_stop; } /* * if offset_lblk lies in a hole at the start of the file, use * ee_start_lblk to shift extents */ ret = ext4_ext_shift_extents(inode, handle, ee_start_lblk > offset_lblk ? ee_start_lblk : offset_lblk, len_lblk, SHIFT_RIGHT); up_write(&EXT4_I(inode)->i_data_sem); if (IS_SYNC(inode)) ext4_handle_sync(handle); out_stop: ext4_journal_stop(handle); out_dio: ext4_inode_resume_unlocked_dio(inode); out_mutex: mutex_unlock(&inode->i_mutex); return ret; } /** * ext4_swap_extents - Swap extents between two inodes * * @inode1: First inode * @inode2: Second inode * @lblk1: Start block for first inode * @lblk2: Start block for second inode * @count: Number of blocks to swap * @unwritten: Mark second inode's extents as unwritten after swap * @erp: Pointer to save error value * * This helper routine does exactly what it promises: "swap extents". All other * stuff such as page-cache locking consistency, bh mapping consistency or * extent's data copying must be performed by the caller. * Locking: * i_mutex is held for both inodes * i_data_sem is locked for write for both inodes * Assumptions: * All pages from requested range are locked for both inodes */ int ext4_swap_extents(handle_t *handle, struct inode *inode1, struct inode *inode2, ext4_lblk_t lblk1, ext4_lblk_t lblk2, ext4_lblk_t count, int unwritten, int *erp) { struct ext4_ext_path *path1 = NULL; struct ext4_ext_path *path2 = NULL; int replaced_count = 0; BUG_ON(!rwsem_is_locked(&EXT4_I(inode1)->i_data_sem)); BUG_ON(!rwsem_is_locked(&EXT4_I(inode2)->i_data_sem)); BUG_ON(!mutex_is_locked(&inode1->i_mutex)); BUG_ON(!mutex_is_locked(&inode2->i_mutex)); *erp = ext4_es_remove_extent(inode1, lblk1, count); if (unlikely(*erp)) return 0; *erp = ext4_es_remove_extent(inode2, lblk2, count); if (unlikely(*erp)) return 0; while (count) { struct ext4_extent *ex1, *ex2, tmp_ex; ext4_lblk_t e1_blk, e2_blk; int e1_len, e2_len, len; int split = 0; path1 = ext4_find_extent(inode1, lblk1, NULL, EXT4_EX_NOCACHE); if (IS_ERR(path1)) { *erp = PTR_ERR(path1); path1 = NULL; finish: count = 0; goto repeat; } path2 = ext4_find_extent(inode2, lblk2, NULL, EXT4_EX_NOCACHE); if (IS_ERR(path2)) { *erp = PTR_ERR(path2); path2 = NULL; goto finish; } ex1 = path1[path1->p_depth].p_ext; ex2 = path2[path2->p_depth].p_ext; /* Do we have something to swap ?
*/ if (unlikely(!ex2 || !ex1)) goto finish; e1_blk = le32_to_cpu(ex1->ee_block); e2_blk = le32_to_cpu(ex2->ee_block); e1_len = ext4_ext_get_actual_len(ex1); e2_len = ext4_ext_get_actual_len(ex2); /* Hole handling */ if (!in_range(lblk1, e1_blk, e1_len) || !in_range(lblk2, e2_blk, e2_len)) { ext4_lblk_t next1, next2; /* if hole after extent, then go to next extent */ next1 = ext4_ext_next_allocated_block(path1); next2 = ext4_ext_next_allocated_block(path2); /* If hole before extent, then shift to that extent */ if (e1_blk > lblk1) next1 = e1_blk; if (e2_blk > lblk2) next2 = e1_blk; /* Do we have something to swap */ if (next1 == EXT_MAX_BLOCKS || next2 == EXT_MAX_BLOCKS) goto finish; /* Move to the rightest boundary */ len = next1 - lblk1; if (len < next2 - lblk2) len = next2 - lblk2; if (len > count) len = count; lblk1 += len; lblk2 += len; count -= len; goto repeat; } /* Prepare left boundary */ if (e1_blk < lblk1) { split = 1; *erp = ext4_force_split_extent_at(handle, inode1, &path1, lblk1, 0); if (unlikely(*erp)) goto finish; } if (e2_blk < lblk2) { split = 1; *erp = ext4_force_split_extent_at(handle, inode2, &path2, lblk2, 0); if (unlikely(*erp)) goto finish; } /* ext4_split_extent_at() may result in leaf extent split, * path must to be revalidated. */ if (split) goto repeat; /* Prepare right boundary */ len = count; if (len > e1_blk + e1_len - lblk1) len = e1_blk + e1_len - lblk1; if (len > e2_blk + e2_len - lblk2) len = e2_blk + e2_len - lblk2; if (len != e1_len) { split = 1; *erp = ext4_force_split_extent_at(handle, inode1, &path1, lblk1 + len, 0); if (unlikely(*erp)) goto finish; } if (len != e2_len) { split = 1; *erp = ext4_force_split_extent_at(handle, inode2, &path2, lblk2 + len, 0); if (*erp) goto finish; } /* ext4_split_extent_at() may result in leaf extent split, * path must to be revalidated. */ if (split) goto repeat; BUG_ON(e2_len != e1_len); *erp = ext4_ext_get_access(handle, inode1, path1 + path1->p_depth); if (unlikely(*erp)) goto finish; *erp = ext4_ext_get_access(handle, inode2, path2 + path2->p_depth); if (unlikely(*erp)) goto finish; /* Both extents are fully inside boundaries. Swap it now */ tmp_ex = *ex1; ext4_ext_store_pblock(ex1, ext4_ext_pblock(ex2)); ext4_ext_store_pblock(ex2, ext4_ext_pblock(&tmp_ex)); ex1->ee_len = cpu_to_le16(e2_len); ex2->ee_len = cpu_to_le16(e1_len); if (unwritten) ext4_ext_mark_unwritten(ex2); if (ext4_ext_is_unwritten(&tmp_ex)) ext4_ext_mark_unwritten(ex1); ext4_ext_try_to_merge(handle, inode2, path2, ex2); ext4_ext_try_to_merge(handle, inode1, path1, ex1); *erp = ext4_ext_dirty(handle, inode2, path2 + path2->p_depth); if (unlikely(*erp)) goto finish; *erp = ext4_ext_dirty(handle, inode1, path1 + path1->p_depth); /* * Looks scarry ah..? second inode already points to new blocks, * and it was successfully dirtied. But luckily error may happen * only due to journal error, so full transaction will be * aborted anyway. */ if (unlikely(*erp)) goto finish; lblk1 += len; lblk2 += len; replaced_count += len; count -= len; repeat: ext4_ext_drop_refs(path1); kfree(path1); ext4_ext_drop_refs(path2); kfree(path2); path1 = path2 = NULL; } return replaced_count; }
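The collapse-range and insert-range entry points above are reached from userspace through the fallocate(2) system call. The following is a minimal, editorial sketch (not part of the kernel file above), assuming an ext4 file named "testfile" of at least a few blocks and a 4096-byte filesystem block size; offsets and lengths must be block-aligned, per the alignment checks in ext4_collapse_range() and ext4_insert_range(), or the call fails with EINVAL.

#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

int main(void)
{
	const off_t blk = 4096;			/* assumed filesystem block size */
	int fd = open("testfile", O_RDWR);	/* hypothetical test file */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Drop one block at offset blk and shift the tail of the file left
	 * (ext4_collapse_range() path); the file shrinks by one block. */
	if (fallocate(fd, FALLOC_FL_COLLAPSE_RANGE, blk, blk) < 0)
		perror("collapse");

	/* Open a one-block hole at the same offset, shifting data right
	 * (ext4_insert_range() path); the file grows by one block. */
	if (fallocate(fd, FALLOC_FL_INSERT_RANGE, blk, blk) < 0)
		perror("insert");

	close(fd);
	return 0;
}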
./CrossVul/dataset_final_sorted/CWE-362/c/bad_1819_1
crossvul-cpp_data_good_1496_1
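The next file is OpenSSL's client-side SSL/TLS handshake implementation (ssl/s3_clnt.c). For orientation, here is a minimal, editorial sketch (not part of the file below) of how application code drives the ssl3_connect() state machine through the public API. It assumes an already-connected TCP socket sockfd, abbreviates error handling, and uses OpenSSL 1.0.x-era calls.

#include <stdio.h>
#include <openssl/ssl.h>
#include <openssl/err.h>

int do_tls_handshake(int sockfd)
{
	SSL_CTX *ctx;
	SSL *ssl = NULL;
	int ret = -1;

	SSL_library_init();
	SSL_load_error_strings();

	/* SSLv23_client_method() negotiates the highest mutually supported
	 * protocol; the SSL_OP_NO_* options (consulted in ssl_set_version())
	 * trim the allowed range. */
	ctx = SSL_CTX_new(SSLv23_client_method());
	if (ctx == NULL)
		return -1;
	SSL_CTX_set_options(ctx, SSL_OP_NO_SSLv2 | SSL_OP_NO_SSLv3);

	ssl = SSL_new(ctx);
	if (ssl != NULL && SSL_set_fd(ssl, sockfd) == 1) {
		/* SSL_connect() iterates the states in ssl3_connect(): send
		 * ClientHello; read ServerHello, Certificate, optional
		 * ServerKeyExchange and ServerHelloDone; send
		 * ClientKeyExchange, ChangeCipherSpec and Finished; then read
		 * the server's Finished. */
		if (SSL_connect(ssl) == 1)
			ret = 0;
		else
			ERR_print_errors_fp(stderr);
	}

	SSL_free(ssl);
	SSL_CTX_free(ctx);
	return ret;
}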
/* ssl/s3_clnt.c */ /* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) * All rights reserved. * * This package is an SSL implementation written * by Eric Young (eay@cryptsoft.com). * The implementation was written so as to conform with Netscapes SSL. * * This library is free for commercial and non-commercial use as long as * the following conditions are aheared to. The following conditions * apply to all code found in this distribution, be it the RC4, RSA, * lhash, DES, etc., code; not just the SSL code. The SSL documentation * included with this distribution is covered by the same copyright terms * except that the holder is Tim Hudson (tjh@cryptsoft.com). * * Copyright remains Eric Young's, and as such any Copyright notices in * the code are not to be removed. * If this package is used in a product, Eric Young should be given attribution * as the author of the parts of the library used. * This can be in the form of a textual message at program startup or * in documentation (online or textual) provided with the package. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * "This product includes cryptographic software written by * Eric Young (eay@cryptsoft.com)" * The word 'cryptographic' can be left out if the rouines from the library * being used are not cryptographic related :-). * 4. If you include any Windows specific code (or a derivative thereof) from * the apps directory (application code) you must include an acknowledgement: * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)" * * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * The licence and distribution terms for any publically available version or * derivative of this code cannot be changed. i.e. this code cannot simply be * copied and put under another distribution licence * [including the GNU Public Licence.] */ /* ==================================================================== * Copyright (c) 1998-2007 The OpenSSL Project. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * 3. All advertising materials mentioning features or use of this * software must display the following acknowledgment: * "This product includes software developed by the OpenSSL Project * for use in the OpenSSL Toolkit. (http://www.openssl.org/)" * * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to * endorse or promote products derived from this software without * prior written permission. For written permission, please contact * openssl-core@openssl.org. * * 5. Products derived from this software may not be called "OpenSSL" * nor may "OpenSSL" appear in their names without prior written * permission of the OpenSSL Project. * * 6. Redistributions of any form whatsoever must retain the following * acknowledgment: * "This product includes software developed by the OpenSSL Project * for use in the OpenSSL Toolkit (http://www.openssl.org/)" * * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. * ==================================================================== * * This product includes cryptographic software written by Eric Young * (eay@cryptsoft.com). This product includes software written by Tim * Hudson (tjh@cryptsoft.com). * */ /* ==================================================================== * Copyright 2002 Sun Microsystems, Inc. ALL RIGHTS RESERVED. * * Portions of the attached software ("Contribution") are developed by * SUN MICROSYSTEMS, INC., and are contributed to the OpenSSL project. * * The Contribution is licensed pursuant to the OpenSSL open source * license provided above. * * ECC cipher suite support in OpenSSL originally written by * Vipul Gupta and Sumit Gupta of Sun Microsystems Laboratories. * */ /* ==================================================================== * Copyright 2005 Nokia. All rights reserved. * * The portions of the attached software ("Contribution") is developed by * Nokia Corporation and is licensed pursuant to the OpenSSL open source * license. * * The Contribution, originally written by Mika Kousa and Pasi Eronen of * Nokia Corporation, consists of the "PSK" (Pre-Shared Key) ciphersuites * support (see RFC 4279) to OpenSSL. * * No patent licenses or other rights except those expressly stated in * the OpenSSL open source license shall be deemed granted or received * expressly, by implication, estoppel, or otherwise. * * No assurances are provided by Nokia that the Contribution does not * infringe the patent or other intellectual property rights of any third * party or that the license provides you with all the necessary rights * to make use of the Contribution. 
* * THE SOFTWARE IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND. IN * ADDITION TO THE DISCLAIMERS INCLUDED IN THE LICENSE, NOKIA * SPECIFICALLY DISCLAIMS ANY LIABILITY FOR CLAIMS BROUGHT BY YOU OR ANY * OTHER ENTITY BASED ON INFRINGEMENT OF INTELLECTUAL PROPERTY RIGHTS OR * OTHERWISE. */ #include <stdio.h> #include "ssl_locl.h" #include <openssl/buffer.h> #include <openssl/rand.h> #include <openssl/objects.h> #include <openssl/evp.h> #include <openssl/md5.h> #ifndef OPENSSL_NO_DH # include <openssl/dh.h> #endif #include <openssl/bn.h> #ifndef OPENSSL_NO_ENGINE # include <openssl/engine.h> #endif static int ssl_set_version(SSL *s); static int ca_dn_cmp(const X509_NAME *const *a, const X509_NAME *const *b); static int ssl3_check_finished(SSL *s); static int ssl_cipher_list_to_bytes(SSL *s, STACK_OF(SSL_CIPHER) *sk, unsigned char *p, int (*put_cb) (const SSL_CIPHER *, unsigned char *)); int ssl3_connect(SSL *s) { BUF_MEM *buf = NULL; unsigned long Time = (unsigned long)time(NULL); void (*cb) (const SSL *ssl, int type, int val) = NULL; int ret = -1; int new_state, state, skip = 0; RAND_add(&Time, sizeof(Time), 0); ERR_clear_error(); clear_sys_error(); if (s->info_callback != NULL) cb = s->info_callback; else if (s->ctx->info_callback != NULL) cb = s->ctx->info_callback; s->in_handshake++; if (!SSL_in_init(s) || SSL_in_before(s)) { if (!SSL_clear(s)) return -1; } #ifndef OPENSSL_NO_HEARTBEATS /* * If we're awaiting a HeartbeatResponse, pretend we already got and * don't await it anymore, because Heartbeats don't make sense during * handshakes anyway. */ if (s->tlsext_hb_pending) { s->tlsext_hb_pending = 0; s->tlsext_hb_seq++; } #endif for (;;) { state = s->state; switch (s->state) { case SSL_ST_RENEGOTIATE: s->renegotiate = 1; s->state = SSL_ST_CONNECT; s->ctx->stats.sess_connect_renegotiate++; /* break */ case SSL_ST_BEFORE: case SSL_ST_CONNECT: case SSL_ST_BEFORE | SSL_ST_CONNECT: case SSL_ST_OK | SSL_ST_CONNECT: s->server = 0; if (cb != NULL) cb(s, SSL_CB_HANDSHAKE_START, 1); if ((s->version >> 8) != SSL3_VERSION_MAJOR && s->version != TLS_ANY_VERSION) { SSLerr(SSL_F_SSL3_CONNECT, ERR_R_INTERNAL_ERROR); s->state = SSL_ST_ERR; ret = -1; goto end; } if (s->version != TLS_ANY_VERSION && !ssl_security(s, SSL_SECOP_VERSION, 0, s->version, NULL)) { SSLerr(SSL_F_SSL3_CONNECT, SSL_R_VERSION_TOO_LOW); return -1; } /* s->version=SSL3_VERSION; */ s->type = SSL_ST_CONNECT; if (s->init_buf == NULL) { if ((buf = BUF_MEM_new()) == NULL) { ret = -1; s->state = SSL_ST_ERR; goto end; } if (!BUF_MEM_grow(buf, SSL3_RT_MAX_PLAIN_LENGTH)) { ret = -1; s->state = SSL_ST_ERR; goto end; } s->init_buf = buf; buf = NULL; } if (!ssl3_setup_buffers(s)) { ret = -1; goto end; } /* setup buffing BIO */ if (!ssl_init_wbio_buffer(s, 0)) { ret = -1; s->state = SSL_ST_ERR; goto end; } /* don't push the buffering BIO quite yet */ ssl3_init_finished_mac(s); s->state = SSL3_ST_CW_CLNT_HELLO_A; s->ctx->stats.sess_connect++; s->init_num = 0; s->s3->flags &= ~SSL3_FLAGS_CCS_OK; /* * Should have been reset by ssl3_get_finished, too. 
*/ s->s3->change_cipher_spec = 0; break; case SSL3_ST_CW_CLNT_HELLO_A: case SSL3_ST_CW_CLNT_HELLO_B: s->shutdown = 0; ret = ssl3_client_hello(s); if (ret <= 0) goto end; s->state = SSL3_ST_CR_SRVR_HELLO_A; s->init_num = 0; /* turn on buffering for the next lot of output */ if (s->bbio != s->wbio) s->wbio = BIO_push(s->bbio, s->wbio); break; case SSL3_ST_CR_SRVR_HELLO_A: case SSL3_ST_CR_SRVR_HELLO_B: ret = ssl3_get_server_hello(s); if (ret <= 0) goto end; if (s->hit) { s->state = SSL3_ST_CR_FINISHED_A; if (s->tlsext_ticket_expected) { /* receive renewed session ticket */ s->state = SSL3_ST_CR_SESSION_TICKET_A; } } else { s->state = SSL3_ST_CR_CERT_A; } s->init_num = 0; break; case SSL3_ST_CR_CERT_A: case SSL3_ST_CR_CERT_B: /* Noop (ret = 0) for everything but EAP-FAST. */ ret = ssl3_check_finished(s); if (ret < 0) goto end; if (ret == 1) { s->hit = 1; s->state = SSL3_ST_CR_FINISHED_A; s->init_num = 0; break; } /* Check if it is anon DH/ECDH, SRP auth */ /* or PSK */ if (! (s->s3->tmp. new_cipher->algorithm_auth & (SSL_aNULL | SSL_aSRP)) && !(s->s3->tmp.new_cipher->algorithm_mkey & SSL_kPSK)) { ret = ssl3_get_server_certificate(s); if (ret <= 0) goto end; if (s->tlsext_status_expected) s->state = SSL3_ST_CR_CERT_STATUS_A; else s->state = SSL3_ST_CR_KEY_EXCH_A; } else { skip = 1; s->state = SSL3_ST_CR_KEY_EXCH_A; } s->init_num = 0; break; case SSL3_ST_CR_KEY_EXCH_A: case SSL3_ST_CR_KEY_EXCH_B: ret = ssl3_get_key_exchange(s); if (ret <= 0) goto end; s->state = SSL3_ST_CR_CERT_REQ_A; s->init_num = 0; /* * at this point we check that we have the required stuff from * the server */ if (!ssl3_check_cert_and_algorithm(s)) { ret = -1; s->state = SSL_ST_ERR; goto end; } break; case SSL3_ST_CR_CERT_REQ_A: case SSL3_ST_CR_CERT_REQ_B: ret = ssl3_get_certificate_request(s); if (ret <= 0) goto end; s->state = SSL3_ST_CR_SRVR_DONE_A; s->init_num = 0; break; case SSL3_ST_CR_SRVR_DONE_A: case SSL3_ST_CR_SRVR_DONE_B: ret = ssl3_get_server_done(s); if (ret <= 0) goto end; #ifndef OPENSSL_NO_SRP if (s->s3->tmp.new_cipher->algorithm_mkey & SSL_kSRP) { if ((ret = SRP_Calc_A_param(s)) <= 0) { SSLerr(SSL_F_SSL3_CONNECT, SSL_R_SRP_A_CALC); ssl3_send_alert(s, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR); s->state = SSL_ST_ERR; goto end; } } #endif if (s->s3->tmp.cert_req) s->state = SSL3_ST_CW_CERT_A; else s->state = SSL3_ST_CW_KEY_EXCH_A; s->init_num = 0; break; case SSL3_ST_CW_CERT_A: case SSL3_ST_CW_CERT_B: case SSL3_ST_CW_CERT_C: case SSL3_ST_CW_CERT_D: ret = ssl3_send_client_certificate(s); if (ret <= 0) goto end; s->state = SSL3_ST_CW_KEY_EXCH_A; s->init_num = 0; break; case SSL3_ST_CW_KEY_EXCH_A: case SSL3_ST_CW_KEY_EXCH_B: ret = ssl3_send_client_key_exchange(s); if (ret <= 0) goto end; /* * EAY EAY EAY need to check for DH fix cert sent back */ /* * For TLS, cert_req is set to 2, so a cert chain of nothing is * sent, but no verify packet is sent */ /* * XXX: For now, we do not support client authentication in ECDH * cipher suites with ECDH (rather than ECDSA) certificates. We * need to skip the certificate verify message when client's * ECDH public key is sent inside the client certificate. 
*/ if (s->s3->tmp.cert_req == 1) { s->state = SSL3_ST_CW_CERT_VRFY_A; } else { s->state = SSL3_ST_CW_CHANGE_A; } if (s->s3->flags & TLS1_FLAGS_SKIP_CERT_VERIFY) { s->state = SSL3_ST_CW_CHANGE_A; } s->init_num = 0; break; case SSL3_ST_CW_CERT_VRFY_A: case SSL3_ST_CW_CERT_VRFY_B: ret = ssl3_send_client_verify(s); if (ret <= 0) goto end; s->state = SSL3_ST_CW_CHANGE_A; s->init_num = 0; break; case SSL3_ST_CW_CHANGE_A: case SSL3_ST_CW_CHANGE_B: ret = ssl3_send_change_cipher_spec(s, SSL3_ST_CW_CHANGE_A, SSL3_ST_CW_CHANGE_B); if (ret <= 0) goto end; #if defined(OPENSSL_NO_NEXTPROTONEG) s->state = SSL3_ST_CW_FINISHED_A; #else if (s->s3->next_proto_neg_seen) s->state = SSL3_ST_CW_NEXT_PROTO_A; else s->state = SSL3_ST_CW_FINISHED_A; #endif s->init_num = 0; s->session->cipher = s->s3->tmp.new_cipher; #ifdef OPENSSL_NO_COMP s->session->compress_meth = 0; #else if (s->s3->tmp.new_compression == NULL) s->session->compress_meth = 0; else s->session->compress_meth = s->s3->tmp.new_compression->id; #endif if (!s->method->ssl3_enc->setup_key_block(s)) { ret = -1; s->state = SSL_ST_ERR; goto end; } if (!s->method->ssl3_enc->change_cipher_state(s, SSL3_CHANGE_CIPHER_CLIENT_WRITE)) { ret = -1; s->state = SSL_ST_ERR; goto end; } break; #if !defined(OPENSSL_NO_NEXTPROTONEG) case SSL3_ST_CW_NEXT_PROTO_A: case SSL3_ST_CW_NEXT_PROTO_B: ret = ssl3_send_next_proto(s); if (ret <= 0) goto end; s->state = SSL3_ST_CW_FINISHED_A; break; #endif case SSL3_ST_CW_FINISHED_A: case SSL3_ST_CW_FINISHED_B: ret = ssl3_send_finished(s, SSL3_ST_CW_FINISHED_A, SSL3_ST_CW_FINISHED_B, s->method-> ssl3_enc->client_finished_label, s->method-> ssl3_enc->client_finished_label_len); if (ret <= 0) goto end; s->state = SSL3_ST_CW_FLUSH; /* clear flags */ s->s3->flags &= ~SSL3_FLAGS_POP_BUFFER; if (s->hit) { s->s3->tmp.next_state = SSL_ST_OK; if (s->s3->flags & SSL3_FLAGS_DELAY_CLIENT_FINISHED) { s->state = SSL_ST_OK; s->s3->flags |= SSL3_FLAGS_POP_BUFFER; s->s3->delay_buf_pop_ret = 0; } } else { /* * Allow NewSessionTicket if ticket expected */ if (s->tlsext_ticket_expected) s->s3->tmp.next_state = SSL3_ST_CR_SESSION_TICKET_A; else s->s3->tmp.next_state = SSL3_ST_CR_FINISHED_A; } s->init_num = 0; break; case SSL3_ST_CR_SESSION_TICKET_A: case SSL3_ST_CR_SESSION_TICKET_B: ret = ssl3_get_new_session_ticket(s); if (ret <= 0) goto end; s->state = SSL3_ST_CR_FINISHED_A; s->init_num = 0; break; case SSL3_ST_CR_CERT_STATUS_A: case SSL3_ST_CR_CERT_STATUS_B: ret = ssl3_get_cert_status(s); if (ret <= 0) goto end; s->state = SSL3_ST_CR_KEY_EXCH_A; s->init_num = 0; break; case SSL3_ST_CR_FINISHED_A: case SSL3_ST_CR_FINISHED_B: if (!s->s3->change_cipher_spec) s->s3->flags |= SSL3_FLAGS_CCS_OK; ret = ssl3_get_finished(s, SSL3_ST_CR_FINISHED_A, SSL3_ST_CR_FINISHED_B); if (ret <= 0) goto end; if (s->hit) s->state = SSL3_ST_CW_CHANGE_A; else s->state = SSL_ST_OK; s->init_num = 0; break; case SSL3_ST_CW_FLUSH: s->rwstate = SSL_WRITING; if (BIO_flush(s->wbio) <= 0) { ret = -1; goto end; } s->rwstate = SSL_NOTHING; s->state = s->s3->tmp.next_state; break; case SSL_ST_OK: /* clean a few things up */ ssl3_cleanup_key_block(s); BUF_MEM_free(s->init_buf); s->init_buf = NULL; /* * If we are not 'joining' the last two packets, remove the * buffering now */ if (!(s->s3->flags & SSL3_FLAGS_POP_BUFFER)) ssl_free_wbio_buffer(s); /* else do it later in ssl3_write */ s->init_num = 0; s->renegotiate = 0; s->new_session = 0; ssl_update_cache(s, SSL_SESS_CACHE_CLIENT); if (s->hit) s->ctx->stats.sess_hit++; ret = 1; /* s->server=0; */ s->handshake_func = ssl3_connect; 
s->ctx->stats.sess_connect_good++; if (cb != NULL) cb(s, SSL_CB_HANDSHAKE_DONE, 1); goto end; /* break; */ case SSL_ST_ERR: default: SSLerr(SSL_F_SSL3_CONNECT, SSL_R_UNKNOWN_STATE); ret = -1; goto end; /* break; */ } /* did we do anything */ if (!s->s3->tmp.reuse_message && !skip) { if (s->debug) { if ((ret = BIO_flush(s->wbio)) <= 0) goto end; } if ((cb != NULL) && (s->state != state)) { new_state = s->state; s->state = state; cb(s, SSL_CB_CONNECT_LOOP, 1); s->state = new_state; } } skip = 0; } end: s->in_handshake--; BUF_MEM_free(buf); if (cb != NULL) cb(s, SSL_CB_CONNECT_EXIT, ret); return (ret); } /* * Work out what version we should be using for the initial ClientHello if * the version is currently set to (D)TLS_ANY_VERSION. * Returns 1 on success * Returns 0 on error */ static int ssl_set_version(SSL *s) { unsigned long mask, options = s->options; if (s->method->version == TLS_ANY_VERSION) { /* * SSL_OP_NO_X disables all protocols above X *if* there are * some protocols below X enabled. This is required in order * to maintain "version capability" vector contiguous. So * that if application wants to disable TLS1.0 in favour of * TLS1>=1, it would be insufficient to pass SSL_NO_TLSv1, the * answer is SSL_OP_NO_TLSv1|SSL_OP_NO_SSLv3. */ mask = SSL_OP_NO_TLSv1_1 | SSL_OP_NO_TLSv1 #if !defined(OPENSSL_NO_SSL3) | SSL_OP_NO_SSLv3 #endif ; #if !defined(OPENSSL_NO_TLS1_2_CLIENT) if (options & SSL_OP_NO_TLSv1_2) { if ((options & mask) != mask) { s->version = TLS1_1_VERSION; } else { SSLerr(SSL_F_SSL_SET_VERSION, SSL_R_NO_PROTOCOLS_AVAILABLE); return 0; } } else { s->version = TLS1_2_VERSION; } #else if ((options & mask) == mask) { SSLerr(SSL_F_SSL_SET_VERSION, SSL_R_NO_PROTOCOLS_AVAILABLE); return 0; } s->version = TLS1_1_VERSION; #endif mask &= ~SSL_OP_NO_TLSv1_1; if ((options & SSL_OP_NO_TLSv1_1) && (options & mask) != mask) s->version = TLS1_VERSION; mask &= ~SSL_OP_NO_TLSv1; #if !defined(OPENSSL_NO_SSL3) if ((options & SSL_OP_NO_TLSv1) && (options & mask) != mask) s->version = SSL3_VERSION; #endif if (s->version != TLS1_2_VERSION && tls1_suiteb(s)) { SSLerr(SSL_F_SSL_SET_VERSION, SSL_R_ONLY_TLS_1_2_ALLOWED_IN_SUITEB_MODE); return 0; } if (s->version == SSL3_VERSION && FIPS_mode()) { SSLerr(SSL_F_SSL_SET_VERSION, SSL_R_ONLY_TLS_ALLOWED_IN_FIPS_MODE); return 0; } } else if (s->method->version == DTLS_ANY_VERSION) { /* Determine which DTLS version to use */ /* If DTLS 1.2 disabled correct the version number */ if (options & SSL_OP_NO_DTLSv1_2) { if (tls1_suiteb(s)) { SSLerr(SSL_F_SSL_SET_VERSION, SSL_R_ONLY_DTLS_1_2_ALLOWED_IN_SUITEB_MODE); return 0; } /* * Disabling all versions is silly: return an error. */ if (options & SSL_OP_NO_DTLSv1) { SSLerr(SSL_F_SSL_SET_VERSION, SSL_R_WRONG_SSL_VERSION); return 0; } /* * Update method so we don't use any DTLS 1.2 features. 
*/ s->method = DTLSv1_client_method(); s->version = DTLS1_VERSION; } else { /* * We only support one version: update method */ if (options & SSL_OP_NO_DTLSv1) s->method = DTLSv1_2_client_method(); s->version = DTLS1_2_VERSION; } } s->client_version = s->version; return 1; } int ssl3_client_hello(SSL *s) { unsigned char *buf; unsigned char *p, *d; int i; unsigned long l; int al = 0; #ifndef OPENSSL_NO_COMP int j; SSL_COMP *comp; #endif buf = (unsigned char *)s->init_buf->data; if (s->state == SSL3_ST_CW_CLNT_HELLO_A) { SSL_SESSION *sess = s->session; /* Work out what SSL/TLS/DTLS version to use */ if (ssl_set_version(s) == 0) goto err; if ((sess == NULL) || (sess->ssl_version != s->version) || /* * In the case of EAP-FAST, we can have a pre-shared * "ticket" without a session ID. */ (!sess->session_id_length && !sess->tlsext_tick) || (sess->not_resumable)) { if (!ssl_get_new_session(s, 0)) goto err; } /* else use the pre-loaded session */ p = s->s3->client_random; /* * for DTLS if client_random is initialized, reuse it, we are * required to use same upon reply to HelloVerify */ if (SSL_IS_DTLS(s)) { size_t idx; i = 1; for (idx = 0; idx < sizeof(s->s3->client_random); idx++) { if (p[idx]) { i = 0; break; } } } else i = 1; if (i && ssl_fill_hello_random(s, 0, p, sizeof(s->s3->client_random)) <= 0) goto err; /* Do the message type and length last */ d = p = ssl_handshake_start(s); /*- * version indicates the negotiated version: for example from * an SSLv2/v3 compatible client hello). The client_version * field is the maximum version we permit and it is also * used in RSA encrypted premaster secrets. Some servers can * choke if we initially report a higher version then * renegotiate to a lower one in the premaster secret. This * didn't happen with TLS 1.0 as most servers supported it * but it can with TLS 1.1 or later if the server only supports * 1.0. * * Possible scenario with previous logic: * 1. Client hello indicates TLS 1.2 * 2. Server hello says TLS 1.0 * 3. RSA encrypted premaster secret uses 1.2. * 4. Handhaked proceeds using TLS 1.0. * 5. Server sends hello request to renegotiate. * 6. Client hello indicates TLS v1.0 as we now * know that is maximum server supports. * 7. Server chokes on RSA encrypted premaster secret * containing version 1.0. * * For interoperability it should be OK to always use the * maximum version we support in client hello and then rely * on the checking of version to ensure the servers isn't * being inconsistent: for example initially negotiating with * TLS 1.0 and renegotiating with TLS 1.2. We do this by using * client_version in client hello and not resetting it to * the negotiated version. 
*/ *(p++) = s->client_version >> 8; *(p++) = s->client_version & 0xff; /* Random stuff */ memcpy(p, s->s3->client_random, SSL3_RANDOM_SIZE); p += SSL3_RANDOM_SIZE; /* Session ID */ if (s->new_session) i = 0; else i = s->session->session_id_length; *(p++) = i; if (i != 0) { if (i > (int)sizeof(s->session->session_id)) { SSLerr(SSL_F_SSL3_CLIENT_HELLO, ERR_R_INTERNAL_ERROR); goto err; } memcpy(p, s->session->session_id, i); p += i; } /* cookie stuff for DTLS */ if (SSL_IS_DTLS(s)) { if (s->d1->cookie_len > sizeof(s->d1->cookie)) { SSLerr(SSL_F_SSL3_CLIENT_HELLO, ERR_R_INTERNAL_ERROR); goto err; } *(p++) = s->d1->cookie_len; memcpy(p, s->d1->cookie, s->d1->cookie_len); p += s->d1->cookie_len; } /* Ciphers supported */ i = ssl_cipher_list_to_bytes(s, SSL_get_ciphers(s), &(p[2]), 0); if (i == 0) { SSLerr(SSL_F_SSL3_CLIENT_HELLO, SSL_R_NO_CIPHERS_AVAILABLE); goto err; } #ifdef OPENSSL_MAX_TLS1_2_CIPHER_LENGTH /* * Some servers hang if client hello > 256 bytes as hack workaround * chop number of supported ciphers to keep it well below this if we * use TLS v1.2 */ if (TLS1_get_version(s) >= TLS1_2_VERSION && i > OPENSSL_MAX_TLS1_2_CIPHER_LENGTH) i = OPENSSL_MAX_TLS1_2_CIPHER_LENGTH & ~1; #endif s2n(i, p); p += i; /* COMPRESSION */ #ifdef OPENSSL_NO_COMP *(p++) = 1; #else if (!ssl_allow_compression(s) || !s->ctx->comp_methods) j = 0; else j = sk_SSL_COMP_num(s->ctx->comp_methods); *(p++) = 1 + j; for (i = 0; i < j; i++) { comp = sk_SSL_COMP_value(s->ctx->comp_methods, i); *(p++) = comp->id; } #endif *(p++) = 0; /* Add the NULL method */ /* TLS extensions */ if (ssl_prepare_clienthello_tlsext(s) <= 0) { SSLerr(SSL_F_SSL3_CLIENT_HELLO, SSL_R_CLIENTHELLO_TLSEXT); goto err; } if ((p = ssl_add_clienthello_tlsext(s, p, buf + SSL3_RT_MAX_PLAIN_LENGTH, &al)) == NULL) { ssl3_send_alert(s, SSL3_AL_FATAL, al); SSLerr(SSL_F_SSL3_CLIENT_HELLO, ERR_R_INTERNAL_ERROR); goto err; } l = p - d; if (!ssl_set_handshake_header(s, SSL3_MT_CLIENT_HELLO, l)) { ssl3_send_alert(s, SSL3_AL_FATAL, SSL_AD_HANDSHAKE_FAILURE); SSLerr(SSL_F_SSL3_CLIENT_HELLO, ERR_R_INTERNAL_ERROR); goto err; } s->state = SSL3_ST_CW_CLNT_HELLO_B; } /* SSL3_ST_CW_CLNT_HELLO_B */ return ssl_do_write(s); err: s->state = SSL_ST_ERR; return (-1); } int ssl3_get_server_hello(SSL *s) { STACK_OF(SSL_CIPHER) *sk; const SSL_CIPHER *c; unsigned char *p, *d; int i, al = SSL_AD_INTERNAL_ERROR, ok; unsigned int j; long n; #ifndef OPENSSL_NO_COMP SSL_COMP *comp; #endif /* * Hello verify request and/or server hello version may not match so set * first packet if we're negotiating version. 
*/ s->first_packet = 1; n = s->method->ssl_get_message(s, SSL3_ST_CR_SRVR_HELLO_A, SSL3_ST_CR_SRVR_HELLO_B, -1, 20000, &ok); if (!ok) return ((int)n); s->first_packet = 0; if (SSL_IS_DTLS(s)) { if (s->s3->tmp.message_type == DTLS1_MT_HELLO_VERIFY_REQUEST) { if (s->d1->send_cookie == 0) { s->s3->tmp.reuse_message = 1; return 1; } else { /* already sent a cookie */ al = SSL_AD_UNEXPECTED_MESSAGE; SSLerr(SSL_F_SSL3_GET_SERVER_HELLO, SSL_R_BAD_MESSAGE_TYPE); goto f_err; } } } if (s->s3->tmp.message_type != SSL3_MT_SERVER_HELLO) { al = SSL_AD_UNEXPECTED_MESSAGE; SSLerr(SSL_F_SSL3_GET_SERVER_HELLO, SSL_R_BAD_MESSAGE_TYPE); goto f_err; } d = p = (unsigned char *)s->init_msg; if (s->method->version == TLS_ANY_VERSION) { int sversion = (p[0] << 8) | p[1]; #if TLS_MAX_VERSION != TLS1_2_VERSION #error Code needs updating for new TLS version #endif #ifndef OPENSSL_NO_SSL3 if ((sversion == SSL3_VERSION) && !(s->options & SSL_OP_NO_SSLv3)) { if (FIPS_mode()) { SSLerr(SSL_F_SSL3_GET_SERVER_HELLO, SSL_R_ONLY_TLS_ALLOWED_IN_FIPS_MODE); al = SSL_AD_PROTOCOL_VERSION; goto f_err; } s->method = SSLv3_client_method(); } else #endif if ((sversion == TLS1_VERSION) && !(s->options & SSL_OP_NO_TLSv1)) { s->method = TLSv1_client_method(); } else if ((sversion == TLS1_1_VERSION) && !(s->options & SSL_OP_NO_TLSv1_1)) { s->method = TLSv1_1_client_method(); } else if ((sversion == TLS1_2_VERSION) && !(s->options & SSL_OP_NO_TLSv1_2)) { s->method = TLSv1_2_client_method(); } else { SSLerr(SSL_F_SSL3_GET_SERVER_HELLO, SSL_R_UNSUPPORTED_PROTOCOL); al = SSL_AD_PROTOCOL_VERSION; goto f_err; } s->session->ssl_version = s->version = s->method->version; if (!ssl_security(s, SSL_SECOP_VERSION, 0, s->version, NULL)) { SSLerr(SSL_F_SSL3_GET_SERVER_HELLO, SSL_R_VERSION_TOO_LOW); al = SSL_AD_PROTOCOL_VERSION; goto f_err; } } else if (s->method->version == DTLS_ANY_VERSION) { /* Work out correct protocol version to use */ int hversion = (p[0] << 8) | p[1]; int options = s->options; if (hversion == DTLS1_2_VERSION && !(options & SSL_OP_NO_DTLSv1_2)) s->method = DTLSv1_2_client_method(); else if (tls1_suiteb(s)) { SSLerr(SSL_F_SSL3_GET_SERVER_HELLO, SSL_R_ONLY_DTLS_1_2_ALLOWED_IN_SUITEB_MODE); s->version = hversion; al = SSL_AD_PROTOCOL_VERSION; goto f_err; } else if (hversion == DTLS1_VERSION && !(options & SSL_OP_NO_DTLSv1)) s->method = DTLSv1_client_method(); else { SSLerr(SSL_F_SSL3_GET_SERVER_HELLO, SSL_R_WRONG_SSL_VERSION); s->version = hversion; al = SSL_AD_PROTOCOL_VERSION; goto f_err; } s->version = s->method->version; } else if ((p[0] != (s->version >> 8)) || (p[1] != (s->version & 0xff))) { SSLerr(SSL_F_SSL3_GET_SERVER_HELLO, SSL_R_WRONG_SSL_VERSION); s->version = (s->version & 0xff00) | p[1]; al = SSL_AD_PROTOCOL_VERSION; goto f_err; } p += 2; /* load the server hello data */ /* load the server random */ memcpy(s->s3->server_random, p, SSL3_RANDOM_SIZE); p += SSL3_RANDOM_SIZE; s->hit = 0; /* get the session-id */ j = *(p++); if ((j > sizeof s->session->session_id) || (j > SSL3_SESSION_ID_SIZE)) { al = SSL_AD_ILLEGAL_PARAMETER; SSLerr(SSL_F_SSL3_GET_SERVER_HELLO, SSL_R_SSL3_SESSION_ID_TOO_LONG); goto f_err; } /* * Check if we can resume the session based on external pre-shared secret. * EAP-FAST (RFC 4851) supports two types of session resumption. * Resumption based on server-side state works with session IDs. * Resumption based on pre-shared Protected Access Credentials (PACs) * works by overriding the SessionTicket extension at the application * layer, and does not send a session ID. 
(We do not know whether EAP-FAST * servers would honour the session ID.) Therefore, the session ID alone * is not a reliable indicator of session resumption, so we first check if * we can resume, and later peek at the next handshake message to see if the * server wants to resume. */ if (s->version >= TLS1_VERSION && s->tls_session_secret_cb && s->session->tlsext_tick) { SSL_CIPHER *pref_cipher = NULL; s->session->master_key_length = sizeof(s->session->master_key); if (s->tls_session_secret_cb(s, s->session->master_key, &s->session->master_key_length, NULL, &pref_cipher, s->tls_session_secret_cb_arg)) { s->session->cipher = pref_cipher ? pref_cipher : ssl_get_cipher_by_char(s, p + j); } else { SSLerr(SSL_F_SSL3_GET_SERVER_HELLO, ERR_R_INTERNAL_ERROR); al = SSL_AD_INTERNAL_ERROR; goto f_err; } } if (j != 0 && j == s->session->session_id_length && memcmp(p, s->session->session_id, j) == 0) { if (s->sid_ctx_length != s->session->sid_ctx_length || memcmp(s->session->sid_ctx, s->sid_ctx, s->sid_ctx_length)) { /* actually a client application bug */ al = SSL_AD_ILLEGAL_PARAMETER; SSLerr(SSL_F_SSL3_GET_SERVER_HELLO, SSL_R_ATTEMPT_TO_REUSE_SESSION_IN_DIFFERENT_CONTEXT); goto f_err; } s->hit = 1; } else { /* * If we were trying for session-id reuse but the server * didn't echo the ID, make a new SSL_SESSION. * In the case of EAP-FAST and PAC, we do not send a session ID, * so the PAC-based session secret is always preserved. It'll be * overwritten if the server refuses resumption. */ if (s->session->session_id_length > 0) { if (!ssl_get_new_session(s, 0)) { goto f_err; } } s->session->session_id_length = j; memcpy(s->session->session_id, p, j); /* j could be 0 */ } p += j; c = ssl_get_cipher_by_char(s, p); if (c == NULL) { /* unknown cipher */ al = SSL_AD_ILLEGAL_PARAMETER; SSLerr(SSL_F_SSL3_GET_SERVER_HELLO, SSL_R_UNKNOWN_CIPHER_RETURNED); goto f_err; } /* Set version disabled mask now we know version */ if (!SSL_USE_TLS1_2_CIPHERS(s)) s->s3->tmp.mask_ssl = SSL_TLSV1_2; else s->s3->tmp.mask_ssl = 0; /* * If it is a disabled cipher we didn't send it in client hello, so * return an error. */ if (ssl_cipher_disabled(s, c, SSL_SECOP_CIPHER_CHECK)) { al = SSL_AD_ILLEGAL_PARAMETER; SSLerr(SSL_F_SSL3_GET_SERVER_HELLO, SSL_R_WRONG_CIPHER_RETURNED); goto f_err; } p += ssl_put_cipher_by_char(s, NULL, NULL); sk = ssl_get_ciphers_by_id(s); i = sk_SSL_CIPHER_find(sk, c); if (i < 0) { /* we did not say we would use this cipher */ al = SSL_AD_ILLEGAL_PARAMETER; SSLerr(SSL_F_SSL3_GET_SERVER_HELLO, SSL_R_WRONG_CIPHER_RETURNED); goto f_err; } /* * Depending on the session caching (internal/external), the cipher * and/or cipher_id values may not be set. Make sure that cipher_id is * set and use it for comparison. */ if (s->session->cipher) s->session->cipher_id = s->session->cipher->id; if (s->hit && (s->session->cipher_id != c->id)) { al = SSL_AD_ILLEGAL_PARAMETER; SSLerr(SSL_F_SSL3_GET_SERVER_HELLO, SSL_R_OLD_SESSION_CIPHER_NOT_RETURNED); goto f_err; } s->s3->tmp.new_cipher = c; /* * Don't digest cached records if no sigalgs: we may need them for client * authentication. */ if (!SSL_USE_SIGALGS(s) && !ssl3_digest_cached_records(s)) goto f_err; /* lets get the compression algorithm */ /* COMPRESSION */ #ifdef OPENSSL_NO_COMP if (*(p++) != 0) { al = SSL_AD_ILLEGAL_PARAMETER; SSLerr(SSL_F_SSL3_GET_SERVER_HELLO, SSL_R_UNSUPPORTED_COMPRESSION_ALGORITHM); goto f_err; } /* * If compression is disabled we'd better not try to resume a session * using compression. 
*/ if (s->session->compress_meth != 0) { SSLerr(SSL_F_SSL3_GET_SERVER_HELLO, SSL_R_INCONSISTENT_COMPRESSION); goto f_err; } #else j = *(p++); if (s->hit && j != s->session->compress_meth) { al = SSL_AD_ILLEGAL_PARAMETER; SSLerr(SSL_F_SSL3_GET_SERVER_HELLO, SSL_R_OLD_SESSION_COMPRESSION_ALGORITHM_NOT_RETURNED); goto f_err; } if (j == 0) comp = NULL; else if (!ssl_allow_compression(s)) { al = SSL_AD_ILLEGAL_PARAMETER; SSLerr(SSL_F_SSL3_GET_SERVER_HELLO, SSL_R_COMPRESSION_DISABLED); goto f_err; } else comp = ssl3_comp_find(s->ctx->comp_methods, j); if ((j != 0) && (comp == NULL)) { al = SSL_AD_ILLEGAL_PARAMETER; SSLerr(SSL_F_SSL3_GET_SERVER_HELLO, SSL_R_UNSUPPORTED_COMPRESSION_ALGORITHM); goto f_err; } else { s->s3->tmp.new_compression = comp; } #endif /* TLS extensions */ if (!ssl_parse_serverhello_tlsext(s, &p, d, n)) { SSLerr(SSL_F_SSL3_GET_SERVER_HELLO, SSL_R_PARSE_TLSEXT); goto err; } if (p != (d + n)) { /* wrong packet length */ al = SSL_AD_DECODE_ERROR; SSLerr(SSL_F_SSL3_GET_SERVER_HELLO, SSL_R_BAD_PACKET_LENGTH); goto f_err; } return (1); f_err: ssl3_send_alert(s, SSL3_AL_FATAL, al); err: s->state = SSL_ST_ERR; return (-1); } int ssl3_get_server_certificate(SSL *s) { int al, i, ok, ret = -1, exp_idx; unsigned long n, nc, llen, l; X509 *x = NULL; const unsigned char *q, *p; unsigned char *d; STACK_OF(X509) *sk = NULL; SESS_CERT *sc; EVP_PKEY *pkey = NULL; n = s->method->ssl_get_message(s, SSL3_ST_CR_CERT_A, SSL3_ST_CR_CERT_B, -1, s->max_cert_list, &ok); if (!ok) return ((int)n); if (s->s3->tmp.message_type == SSL3_MT_SERVER_KEY_EXCHANGE) { s->s3->tmp.reuse_message = 1; return (1); } if (s->s3->tmp.message_type != SSL3_MT_CERTIFICATE) { al = SSL_AD_UNEXPECTED_MESSAGE; SSLerr(SSL_F_SSL3_GET_SERVER_CERTIFICATE, SSL_R_BAD_MESSAGE_TYPE); goto f_err; } p = d = (unsigned char *)s->init_msg; if ((sk = sk_X509_new_null()) == NULL) { SSLerr(SSL_F_SSL3_GET_SERVER_CERTIFICATE, ERR_R_MALLOC_FAILURE); goto err; } n2l3(p, llen); if (llen + 3 != n) { al = SSL_AD_DECODE_ERROR; SSLerr(SSL_F_SSL3_GET_SERVER_CERTIFICATE, SSL_R_LENGTH_MISMATCH); goto f_err; } for (nc = 0; nc < llen;) { n2l3(p, l); if ((l + nc + 3) > llen) { al = SSL_AD_DECODE_ERROR; SSLerr(SSL_F_SSL3_GET_SERVER_CERTIFICATE, SSL_R_CERT_LENGTH_MISMATCH); goto f_err; } q = p; x = d2i_X509(NULL, &q, l); if (x == NULL) { al = SSL_AD_BAD_CERTIFICATE; SSLerr(SSL_F_SSL3_GET_SERVER_CERTIFICATE, ERR_R_ASN1_LIB); goto f_err; } if (q != (p + l)) { al = SSL_AD_DECODE_ERROR; SSLerr(SSL_F_SSL3_GET_SERVER_CERTIFICATE, SSL_R_CERT_LENGTH_MISMATCH); goto f_err; } if (!sk_X509_push(sk, x)) { SSLerr(SSL_F_SSL3_GET_SERVER_CERTIFICATE, ERR_R_MALLOC_FAILURE); goto err; } x = NULL; nc += l + 3; p = q; } i = ssl_verify_cert_chain(s, sk); if (s->verify_mode != SSL_VERIFY_NONE && i <= 0) { al = ssl_verify_alarm_type(s->verify_result); SSLerr(SSL_F_SSL3_GET_SERVER_CERTIFICATE, SSL_R_CERTIFICATE_VERIFY_FAILED); goto f_err; } ERR_clear_error(); /* but we keep s->verify_result */ if (i > 1) { SSLerr(SSL_F_SSL3_GET_SERVER_CERTIFICATE, i); al = SSL_AD_HANDSHAKE_FAILURE; goto f_err; } sc = ssl_sess_cert_new(); if (sc == NULL) goto err; ssl_sess_cert_free(s->session->sess_cert); s->session->sess_cert = sc; sc->cert_chain = sk; /* * Inconsistency alert: cert_chain does include the peer's certificate, * which we don't include in s3_srvr.c */ x = sk_X509_value(sk, 0); sk = NULL; /* * VRS 19990621: possible memory leak; sk=null ==> !sk_pop_free() @end */ pkey = X509_get_pubkey(x); if (pkey == NULL || EVP_PKEY_missing_parameters(pkey)) { x = NULL; al = SSL3_AL_FATAL; 
SSLerr(SSL_F_SSL3_GET_SERVER_CERTIFICATE, SSL_R_UNABLE_TO_FIND_PUBLIC_KEY_PARAMETERS); goto f_err; } i = ssl_cert_type(x, pkey); if (i < 0) { x = NULL; al = SSL3_AL_FATAL; SSLerr(SSL_F_SSL3_GET_SERVER_CERTIFICATE, SSL_R_UNKNOWN_CERTIFICATE_TYPE); goto f_err; } exp_idx = ssl_cipher_get_cert_index(s->s3->tmp.new_cipher); if (exp_idx >= 0 && i != exp_idx) { x = NULL; al = SSL_AD_ILLEGAL_PARAMETER; SSLerr(SSL_F_SSL3_GET_SERVER_CERTIFICATE, SSL_R_WRONG_CERTIFICATE_TYPE); goto f_err; } sc->peer_cert_type = i; CRYPTO_add(&x->references, 1, CRYPTO_LOCK_X509); /* * Why would the following ever happen? We just created sc a couple * of lines ago. */ X509_free(sc->peer_pkeys[i].x509); sc->peer_pkeys[i].x509 = x; sc->peer_key = &(sc->peer_pkeys[i]); X509_free(s->session->peer); CRYPTO_add(&x->references, 1, CRYPTO_LOCK_X509); s->session->peer = x; s->session->verify_result = s->verify_result; x = NULL; ret = 1; goto done; f_err: ssl3_send_alert(s, SSL3_AL_FATAL, al); err: s->state = SSL_ST_ERR; done: EVP_PKEY_free(pkey); X509_free(x); sk_X509_pop_free(sk, X509_free); return (ret); } int ssl3_get_key_exchange(SSL *s) { #ifndef OPENSSL_NO_RSA unsigned char *q, md_buf[EVP_MAX_MD_SIZE * 2]; #endif EVP_MD_CTX md_ctx; unsigned char *param, *p; int al, j, ok; long i, param_len, n, alg_k, alg_a; EVP_PKEY *pkey = NULL; const EVP_MD *md = NULL; #ifndef OPENSSL_NO_RSA RSA *rsa = NULL; #endif #ifndef OPENSSL_NO_DH DH *dh = NULL; #endif #ifndef OPENSSL_NO_EC EC_KEY *ecdh = NULL; BN_CTX *bn_ctx = NULL; EC_POINT *srvr_ecpoint = NULL; int curve_nid = 0; int encoded_pt_len = 0; #endif EVP_MD_CTX_init(&md_ctx); /* * use same message size as in ssl3_get_certificate_request() as * ServerKeyExchange message may be skipped */ n = s->method->ssl_get_message(s, SSL3_ST_CR_KEY_EXCH_A, SSL3_ST_CR_KEY_EXCH_B, -1, s->max_cert_list, &ok); if (!ok) return ((int)n); alg_k = s->s3->tmp.new_cipher->algorithm_mkey; if (s->s3->tmp.message_type != SSL3_MT_SERVER_KEY_EXCHANGE) { /* * Can't skip server key exchange if this is an ephemeral * ciphersuite. */ if (alg_k & (SSL_kDHE | SSL_kECDHE)) { SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE, SSL_R_UNEXPECTED_MESSAGE); al = SSL_AD_UNEXPECTED_MESSAGE; goto f_err; } #ifndef OPENSSL_NO_PSK /* * In plain PSK ciphersuite, ServerKeyExchange can be omitted if no * identity hint is sent. Set session->sess_cert anyway to avoid * problems later. */ if (alg_k & SSL_kPSK) { s->session->sess_cert = ssl_sess_cert_new(); OPENSSL_free(s->ctx->psk_identity_hint); s->ctx->psk_identity_hint = NULL; } #endif s->s3->tmp.reuse_message = 1; return (1); } param = p = (unsigned char *)s->init_msg; if (s->session->sess_cert != NULL) { #ifndef OPENSSL_NO_RSA RSA_free(s->session->sess_cert->peer_rsa_tmp); s->session->sess_cert->peer_rsa_tmp = NULL; #endif #ifndef OPENSSL_NO_DH DH_free(s->session->sess_cert->peer_dh_tmp); s->session->sess_cert->peer_dh_tmp = NULL; #endif #ifndef OPENSSL_NO_EC EC_KEY_free(s->session->sess_cert->peer_ecdh_tmp); s->session->sess_cert->peer_ecdh_tmp = NULL; #endif } else { s->session->sess_cert = ssl_sess_cert_new(); } /* Total length of the parameters including the length prefix */ param_len = 0; alg_a = s->s3->tmp.new_cipher->algorithm_auth; al = SSL_AD_DECODE_ERROR; #ifndef OPENSSL_NO_PSK if (alg_k & SSL_kPSK) { char tmp_id_hint[PSK_MAX_IDENTITY_LEN + 1]; param_len = 2; if (param_len > n) { SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE, SSL_R_LENGTH_TOO_SHORT); goto f_err; } n2s(p, i); /* * Store PSK identity hint for later use, hint is used in * ssl3_send_client_key_exchange. 
Assume that the maximum length of * a PSK identity hint can be as long as the maximum length of a PSK * identity. */ if (i > PSK_MAX_IDENTITY_LEN) { al = SSL_AD_HANDSHAKE_FAILURE; SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE, SSL_R_DATA_LENGTH_TOO_LONG); goto f_err; } if (i > n - param_len) { SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE, SSL_R_BAD_PSK_IDENTITY_HINT_LENGTH); goto f_err; } param_len += i; /* * If received PSK identity hint contains NULL characters, the hint * is truncated from the first NULL. p may not be ending with NULL, * so create a NULL-terminated string. */ memcpy(tmp_id_hint, p, i); memset(tmp_id_hint + i, 0, PSK_MAX_IDENTITY_LEN + 1 - i); OPENSSL_free(s->ctx->psk_identity_hint); s->ctx->psk_identity_hint = BUF_strdup(tmp_id_hint); if (s->ctx->psk_identity_hint == NULL) { al = SSL_AD_HANDSHAKE_FAILURE; SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE, ERR_R_MALLOC_FAILURE); goto f_err; } p += i; n -= param_len; } else #endif /* !OPENSSL_NO_PSK */ #ifndef OPENSSL_NO_SRP if (alg_k & SSL_kSRP) { param_len = 2; if (param_len > n) { SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE, SSL_R_LENGTH_TOO_SHORT); goto f_err; } n2s(p, i); if (i > n - param_len) { SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE, SSL_R_BAD_SRP_N_LENGTH); goto f_err; } param_len += i; if ((s->srp_ctx.N = BN_bin2bn(p, i, NULL)) == NULL) { SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE, ERR_R_BN_LIB); goto err; } p += i; if (2 > n - param_len) { SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE, SSL_R_LENGTH_TOO_SHORT); goto f_err; } param_len += 2; n2s(p, i); if (i > n - param_len) { SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE, SSL_R_BAD_SRP_G_LENGTH); goto f_err; } param_len += i; if ((s->srp_ctx.g = BN_bin2bn(p, i, NULL)) == NULL) { SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE, ERR_R_BN_LIB); goto err; } p += i; if (1 > n - param_len) { SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE, SSL_R_LENGTH_TOO_SHORT); goto f_err; } param_len += 1; i = (unsigned int)(p[0]); p++; if (i > n - param_len) { SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE, SSL_R_BAD_SRP_S_LENGTH); goto f_err; } param_len += i; if ((s->srp_ctx.s = BN_bin2bn(p, i, NULL)) == NULL) { SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE, ERR_R_BN_LIB); goto err; } p += i; if (2 > n - param_len) { SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE, SSL_R_LENGTH_TOO_SHORT); goto f_err; } param_len += 2; n2s(p, i); if (i > n - param_len) { SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE, SSL_R_BAD_SRP_B_LENGTH); goto f_err; } param_len += i; if ((s->srp_ctx.B = BN_bin2bn(p, i, NULL)) == NULL) { SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE, ERR_R_BN_LIB); goto err; } p += i; n -= param_len; if (!srp_verify_server_param(s, &al)) { SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE, SSL_R_BAD_SRP_PARAMETERS); goto f_err; } /* We must check if there is a certificate */ # ifndef OPENSSL_NO_RSA if (alg_a & SSL_aRSA) pkey = X509_get_pubkey(s->session-> sess_cert->peer_pkeys[SSL_PKEY_RSA_ENC].x509); # else if (0) ; # endif # ifndef OPENSSL_NO_DSA else if (alg_a & SSL_aDSS) pkey = X509_get_pubkey(s->session-> sess_cert->peer_pkeys[SSL_PKEY_DSA_SIGN]. 
x509); # endif } else #endif /* !OPENSSL_NO_SRP */ #ifndef OPENSSL_NO_RSA if (alg_k & SSL_kRSA) { /* Temporary RSA keys only allowed in export ciphersuites */ if (!SSL_C_IS_EXPORT(s->s3->tmp.new_cipher)) { al = SSL_AD_UNEXPECTED_MESSAGE; SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE, SSL_R_UNEXPECTED_MESSAGE); goto f_err; } if ((rsa = RSA_new()) == NULL) { SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE, ERR_R_MALLOC_FAILURE); goto err; } param_len = 2; if (param_len > n) { SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE, SSL_R_LENGTH_TOO_SHORT); goto f_err; } n2s(p, i); if (i > n - param_len) { SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE, SSL_R_BAD_RSA_MODULUS_LENGTH); goto f_err; } param_len += i; if ((rsa->n = BN_bin2bn(p, i, rsa->n)) == NULL) { SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE, ERR_R_BN_LIB); goto err; } p += i; if (2 > n - param_len) { SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE, SSL_R_LENGTH_TOO_SHORT); goto f_err; } param_len += 2; n2s(p, i); if (i > n - param_len) { SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE, SSL_R_BAD_RSA_E_LENGTH); goto f_err; } param_len += i; if ((rsa->e = BN_bin2bn(p, i, rsa->e)) == NULL) { SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE, ERR_R_BN_LIB); goto err; } p += i; n -= param_len; /* this should be because we are using an export cipher */ if (alg_a & SSL_aRSA) pkey = X509_get_pubkey(s->session-> sess_cert->peer_pkeys[SSL_PKEY_RSA_ENC].x509); else { SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE, ERR_R_INTERNAL_ERROR); goto err; } s->session->sess_cert->peer_rsa_tmp = rsa; rsa = NULL; } #else /* OPENSSL_NO_RSA */ if (0) ; #endif #ifndef OPENSSL_NO_DH else if (alg_k & SSL_kDHE) { if ((dh = DH_new()) == NULL) { SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE, ERR_R_DH_LIB); goto err; } param_len = 2; if (param_len > n) { SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE, SSL_R_LENGTH_TOO_SHORT); goto f_err; } n2s(p, i); if (i > n - param_len) { SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE, SSL_R_BAD_DH_P_LENGTH); goto f_err; } param_len += i; if ((dh->p = BN_bin2bn(p, i, NULL)) == NULL) { SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE, ERR_R_BN_LIB); goto err; } p += i; if (2 > n - param_len) { SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE, SSL_R_LENGTH_TOO_SHORT); goto f_err; } param_len += 2; n2s(p, i); if (i > n - param_len) { SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE, SSL_R_BAD_DH_G_LENGTH); goto f_err; } param_len += i; if ((dh->g = BN_bin2bn(p, i, NULL)) == NULL) { SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE, ERR_R_BN_LIB); goto err; } p += i; if (2 > n - param_len) { SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE, SSL_R_LENGTH_TOO_SHORT); goto f_err; } param_len += 2; n2s(p, i); if (i > n - param_len) { SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE, SSL_R_BAD_DH_PUB_KEY_LENGTH); goto f_err; } param_len += i; if ((dh->pub_key = BN_bin2bn(p, i, NULL)) == NULL) { SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE, ERR_R_BN_LIB); goto err; } p += i; n -= param_len; if (!ssl_security(s, SSL_SECOP_TMP_DH, DH_security_bits(dh), 0, dh)) { al = SSL_AD_HANDSHAKE_FAILURE; SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE, SSL_R_DH_KEY_TOO_SMALL); goto f_err; } # ifndef OPENSSL_NO_RSA if (alg_a & SSL_aRSA) pkey = X509_get_pubkey(s->session-> sess_cert->peer_pkeys[SSL_PKEY_RSA_ENC].x509); # else if (0) ; # endif # ifndef OPENSSL_NO_DSA else if (alg_a & SSL_aDSS) pkey = X509_get_pubkey(s->session-> sess_cert->peer_pkeys[SSL_PKEY_DSA_SIGN]. x509); # endif /* else anonymous DH, so no certificate or pkey. 
*/ s->session->sess_cert->peer_dh_tmp = dh; dh = NULL; } #endif /* !OPENSSL_NO_DH */ #ifndef OPENSSL_NO_EC else if (alg_k & SSL_kECDHE) { EC_GROUP *ngroup; const EC_GROUP *group; if ((ecdh = EC_KEY_new()) == NULL) { SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE, ERR_R_MALLOC_FAILURE); goto err; } /* * Extract elliptic curve parameters and the server's ephemeral ECDH * public key. Keep accumulating lengths of various components in * param_len and make sure it never exceeds n. */ /* * XXX: For now we only support named (not generic) curves and the * ECParameters in this case is just three bytes. We also need one * byte for the length of the encoded point */ param_len = 4; if (param_len > n) { SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE, SSL_R_LENGTH_TOO_SHORT); goto f_err; } /* * Check curve is one of our preferences, if not server has sent an * invalid curve. ECParameters is 3 bytes. */ if (!tls1_check_curve(s, p, 3)) { SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE, SSL_R_WRONG_CURVE); goto f_err; } if ((curve_nid = tls1_ec_curve_id2nid(*(p + 2))) == 0) { al = SSL_AD_INTERNAL_ERROR; SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE, SSL_R_UNABLE_TO_FIND_ECDH_PARAMETERS); goto f_err; } ngroup = EC_GROUP_new_by_curve_name(curve_nid); if (ngroup == NULL) { SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE, ERR_R_EC_LIB); goto err; } if (EC_KEY_set_group(ecdh, ngroup) == 0) { SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE, ERR_R_EC_LIB); goto err; } EC_GROUP_free(ngroup); group = EC_KEY_get0_group(ecdh); if (SSL_C_IS_EXPORT(s->s3->tmp.new_cipher) && (EC_GROUP_get_degree(group) > 163)) { al = SSL_AD_EXPORT_RESTRICTION; SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE, SSL_R_ECGROUP_TOO_LARGE_FOR_CIPHER); goto f_err; } p += 3; /* Next, get the encoded ECPoint */ if (((srvr_ecpoint = EC_POINT_new(group)) == NULL) || ((bn_ctx = BN_CTX_new()) == NULL)) { SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE, ERR_R_MALLOC_FAILURE); goto err; } encoded_pt_len = *p; /* length of encoded point */ p += 1; if ((encoded_pt_len > n - param_len) || (EC_POINT_oct2point(group, srvr_ecpoint, p, encoded_pt_len, bn_ctx) == 0)) { SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE, SSL_R_BAD_ECPOINT); goto f_err; } param_len += encoded_pt_len; n -= param_len; p += encoded_pt_len; /* * The ECC/TLS specification does not mention the use of DSA to sign * ECParameters in the server key exchange message. We do support RSA * and ECDSA. */ if (0) ; # ifndef OPENSSL_NO_RSA else if (alg_a & SSL_aRSA) pkey = X509_get_pubkey(s->session-> sess_cert->peer_pkeys[SSL_PKEY_RSA_ENC].x509); # endif # ifndef OPENSSL_NO_EC else if (alg_a & SSL_aECDSA) pkey = X509_get_pubkey(s->session-> sess_cert->peer_pkeys[SSL_PKEY_ECC].x509); # endif /* else anonymous ECDH, so no certificate or pkey. 
*/ EC_KEY_set_public_key(ecdh, srvr_ecpoint); s->session->sess_cert->peer_ecdh_tmp = ecdh; ecdh = NULL; BN_CTX_free(bn_ctx); bn_ctx = NULL; EC_POINT_free(srvr_ecpoint); srvr_ecpoint = NULL; } else if (alg_k) { al = SSL_AD_UNEXPECTED_MESSAGE; SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE, SSL_R_UNEXPECTED_MESSAGE); goto f_err; } #endif /* !OPENSSL_NO_EC */ /* p points to the next byte, there are 'n' bytes left */ /* if it was signed, check the signature */ if (pkey != NULL) { if (SSL_USE_SIGALGS(s)) { int rv; if (2 > n) { SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE, SSL_R_LENGTH_TOO_SHORT); goto f_err; } rv = tls12_check_peer_sigalg(&md, s, p, pkey); if (rv == -1) goto err; else if (rv == 0) { goto f_err; } #ifdef SSL_DEBUG fprintf(stderr, "USING TLSv1.2 HASH %s\n", EVP_MD_name(md)); #endif p += 2; n -= 2; } else md = EVP_sha1(); if (2 > n) { SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE, SSL_R_LENGTH_TOO_SHORT); goto f_err; } n2s(p, i); n -= 2; j = EVP_PKEY_size(pkey); /* * Check signature length. If n is 0 then signature is empty */ if ((i != n) || (n > j) || (n <= 0)) { /* wrong packet length */ SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE, SSL_R_WRONG_SIGNATURE_LENGTH); goto f_err; } #ifndef OPENSSL_NO_RSA if (pkey->type == EVP_PKEY_RSA && !SSL_USE_SIGALGS(s)) { int num; unsigned int size; j = 0; q = md_buf; for (num = 2; num > 0; num--) { EVP_MD_CTX_set_flags(&md_ctx, EVP_MD_CTX_FLAG_NON_FIPS_ALLOW); EVP_DigestInit_ex(&md_ctx, (num == 2) ? s->ctx->md5 : s->ctx->sha1, NULL); EVP_DigestUpdate(&md_ctx, &(s->s3->client_random[0]), SSL3_RANDOM_SIZE); EVP_DigestUpdate(&md_ctx, &(s->s3->server_random[0]), SSL3_RANDOM_SIZE); EVP_DigestUpdate(&md_ctx, param, param_len); EVP_DigestFinal_ex(&md_ctx, q, &size); q += size; j += size; } i = RSA_verify(NID_md5_sha1, md_buf, j, p, n, pkey->pkey.rsa); if (i < 0) { al = SSL_AD_DECRYPT_ERROR; SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE, SSL_R_BAD_RSA_DECRYPT); goto f_err; } if (i == 0) { /* bad signature */ al = SSL_AD_DECRYPT_ERROR; SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE, SSL_R_BAD_SIGNATURE); goto f_err; } } else #endif { EVP_VerifyInit_ex(&md_ctx, md, NULL); EVP_VerifyUpdate(&md_ctx, &(s->s3->client_random[0]), SSL3_RANDOM_SIZE); EVP_VerifyUpdate(&md_ctx, &(s->s3->server_random[0]), SSL3_RANDOM_SIZE); EVP_VerifyUpdate(&md_ctx, param, param_len); if (EVP_VerifyFinal(&md_ctx, p, (int)n, pkey) <= 0) { /* bad signature */ al = SSL_AD_DECRYPT_ERROR; SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE, SSL_R_BAD_SIGNATURE); goto f_err; } } } else { /* aNULL, aSRP or kPSK do not need public keys */ if (!(alg_a & (SSL_aNULL | SSL_aSRP)) && !(alg_k & SSL_kPSK)) { /* Might be wrong key type, check it */ if (ssl3_check_cert_and_algorithm(s)) /* Otherwise this shouldn't happen */ SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE, ERR_R_INTERNAL_ERROR); goto err; } /* still data left over */ if (n != 0) { SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE, SSL_R_EXTRA_DATA_IN_MESSAGE); goto f_err; } } EVP_PKEY_free(pkey); EVP_MD_CTX_cleanup(&md_ctx); return (1); f_err: ssl3_send_alert(s, SSL3_AL_FATAL, al); err: EVP_PKEY_free(pkey); #ifndef OPENSSL_NO_RSA RSA_free(rsa); #endif #ifndef OPENSSL_NO_DH DH_free(dh); #endif #ifndef OPENSSL_NO_EC BN_CTX_free(bn_ctx); EC_POINT_free(srvr_ecpoint); EC_KEY_free(ecdh); #endif EVP_MD_CTX_cleanup(&md_ctx); s->state = SSL_ST_ERR; return (-1); } int ssl3_get_certificate_request(SSL *s) { int ok, ret = 0; unsigned long n, nc, l; unsigned int llen, ctype_num, i; X509_NAME *xn = NULL; const unsigned char *p, *q; unsigned char *d; STACK_OF(X509_NAME) *ca_sk = NULL; n = s->method->ssl_get_message(s, SSL3_ST_CR_CERT_REQ_A, 
SSL3_ST_CR_CERT_REQ_B, -1, s->max_cert_list, &ok); if (!ok) return ((int)n); s->s3->tmp.cert_req = 0; if (s->s3->tmp.message_type == SSL3_MT_SERVER_DONE) { s->s3->tmp.reuse_message = 1; /* * If we get here we don't need any cached handshake records as we * wont be doing client auth. */ if (s->s3->handshake_buffer) { if (!ssl3_digest_cached_records(s)) goto err; } return (1); } if (s->s3->tmp.message_type != SSL3_MT_CERTIFICATE_REQUEST) { ssl3_send_alert(s, SSL3_AL_FATAL, SSL_AD_UNEXPECTED_MESSAGE); SSLerr(SSL_F_SSL3_GET_CERTIFICATE_REQUEST, SSL_R_WRONG_MESSAGE_TYPE); goto err; } /* TLS does not like anon-DH with client cert */ if (s->version > SSL3_VERSION) { if (s->s3->tmp.new_cipher->algorithm_auth & SSL_aNULL) { ssl3_send_alert(s, SSL3_AL_FATAL, SSL_AD_UNEXPECTED_MESSAGE); SSLerr(SSL_F_SSL3_GET_CERTIFICATE_REQUEST, SSL_R_TLS_CLIENT_CERT_REQ_WITH_ANON_CIPHER); goto err; } } p = d = (unsigned char *)s->init_msg; if ((ca_sk = sk_X509_NAME_new(ca_dn_cmp)) == NULL) { SSLerr(SSL_F_SSL3_GET_CERTIFICATE_REQUEST, ERR_R_MALLOC_FAILURE); goto err; } /* get the certificate types */ ctype_num = *(p++); OPENSSL_free(s->cert->ctypes); s->cert->ctypes = NULL; if (ctype_num > SSL3_CT_NUMBER) { /* If we exceed static buffer copy all to cert structure */ s->cert->ctypes = OPENSSL_malloc(ctype_num); if (s->cert->ctypes == NULL) { SSLerr(SSL_F_SSL3_GET_CERTIFICATE_REQUEST, ERR_R_MALLOC_FAILURE); goto err; } memcpy(s->cert->ctypes, p, ctype_num); s->cert->ctype_num = (size_t)ctype_num; ctype_num = SSL3_CT_NUMBER; } for (i = 0; i < ctype_num; i++) s->s3->tmp.ctype[i] = p[i]; p += p[-1]; if (SSL_USE_SIGALGS(s)) { n2s(p, llen); /* * Check we have enough room for signature algorithms and following * length value. */ if ((unsigned long)(p - d + llen + 2) > n) { ssl3_send_alert(s, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); SSLerr(SSL_F_SSL3_GET_CERTIFICATE_REQUEST, SSL_R_DATA_LENGTH_TOO_LONG); goto err; } /* Clear certificate digests and validity flags */ for (i = 0; i < SSL_PKEY_NUM; i++) { s->s3->tmp.md[i] = NULL; s->s3->tmp.valid_flags[i] = 0; } if ((llen & 1) || !tls1_save_sigalgs(s, p, llen)) { ssl3_send_alert(s, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); SSLerr(SSL_F_SSL3_GET_CERTIFICATE_REQUEST, SSL_R_SIGNATURE_ALGORITHMS_ERROR); goto err; } if (!tls1_process_sigalgs(s)) { ssl3_send_alert(s, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR); SSLerr(SSL_F_SSL3_GET_CERTIFICATE_REQUEST, ERR_R_MALLOC_FAILURE); goto err; } p += llen; } /* get the CA RDNs */ n2s(p, llen); if ((unsigned long)(p - d + llen) != n) { ssl3_send_alert(s, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); SSLerr(SSL_F_SSL3_GET_CERTIFICATE_REQUEST, SSL_R_LENGTH_MISMATCH); goto err; } for (nc = 0; nc < llen;) { n2s(p, l); if ((l + nc + 2) > llen) { ssl3_send_alert(s, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); SSLerr(SSL_F_SSL3_GET_CERTIFICATE_REQUEST, SSL_R_CA_DN_TOO_LONG); goto err; } q = p; if ((xn = d2i_X509_NAME(NULL, &q, l)) == NULL) { ssl3_send_alert(s, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); SSLerr(SSL_F_SSL3_GET_CERTIFICATE_REQUEST, ERR_R_ASN1_LIB); goto err; } if (q != (p + l)) { ssl3_send_alert(s, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); SSLerr(SSL_F_SSL3_GET_CERTIFICATE_REQUEST, SSL_R_CA_DN_LENGTH_MISMATCH); goto err; } if (!sk_X509_NAME_push(ca_sk, xn)) { SSLerr(SSL_F_SSL3_GET_CERTIFICATE_REQUEST, ERR_R_MALLOC_FAILURE); goto err; } p += l; nc += l + 2; } /* we should setup a certificate to return.... 
*/ s->s3->tmp.cert_req = 1; s->s3->tmp.ctype_num = ctype_num; sk_X509_NAME_pop_free(s->s3->tmp.ca_names, X509_NAME_free); s->s3->tmp.ca_names = ca_sk; ca_sk = NULL; ret = 1; goto done; err: s->state = SSL_ST_ERR; done: sk_X509_NAME_pop_free(ca_sk, X509_NAME_free); return (ret); } static int ca_dn_cmp(const X509_NAME *const *a, const X509_NAME *const *b) { return (X509_NAME_cmp(*a, *b)); } int ssl3_get_new_session_ticket(SSL *s) { int ok, al, ret = 0, ticklen; long n; const unsigned char *p; unsigned char *d; n = s->method->ssl_get_message(s, SSL3_ST_CR_SESSION_TICKET_A, SSL3_ST_CR_SESSION_TICKET_B, SSL3_MT_NEWSESSION_TICKET, 16384, &ok); if (!ok) return ((int)n); if (n < 6) { /* need at least ticket_lifetime_hint + ticket length */ al = SSL_AD_DECODE_ERROR; SSLerr(SSL_F_SSL3_GET_NEW_SESSION_TICKET, SSL_R_LENGTH_MISMATCH); goto f_err; } p = d = (unsigned char *)s->init_msg; if (s->session->session_id_length > 0) { int i = s->session_ctx->session_cache_mode; SSL_SESSION *new_sess; /* * We reused an existing session, so we need to replace it with a new * one */ if (i & SSL_SESS_CACHE_CLIENT) { /* * Remove the old session from the cache */ if (i & SSL_SESS_CACHE_NO_INTERNAL_STORE) { if (s->session_ctx->remove_session_cb != NULL) s->session_ctx->remove_session_cb(s->session_ctx, s->session); } else { /* We carry on if this fails */ SSL_CTX_remove_session(s->session_ctx, s->session); } } if ((new_sess = ssl_session_dup(s->session, 0)) == 0) { al = SSL_AD_INTERNAL_ERROR; SSLerr(SSL_F_SSL3_GET_NEW_SESSION_TICKET, ERR_R_MALLOC_FAILURE); goto f_err; } SSL_SESSION_free(s->session); s->session = new_sess; } n2l(p, s->session->tlsext_tick_lifetime_hint); n2s(p, ticklen); /* ticket_lifetime_hint + ticket_length + ticket */ if (ticklen + 6 != n) { al = SSL_AD_DECODE_ERROR; SSLerr(SSL_F_SSL3_GET_NEW_SESSION_TICKET, SSL_R_LENGTH_MISMATCH); goto f_err; } OPENSSL_free(s->session->tlsext_tick); s->session->tlsext_ticklen = 0; s->session->tlsext_tick = OPENSSL_malloc(ticklen); if (!s->session->tlsext_tick) { SSLerr(SSL_F_SSL3_GET_NEW_SESSION_TICKET, ERR_R_MALLOC_FAILURE); goto err; } memcpy(s->session->tlsext_tick, p, ticklen); s->session->tlsext_ticklen = ticklen; /* * There are two ways to detect a resumed ticket session. One is to set * an appropriate session ID and then the server must return a match in * ServerHello. This allows the normal client session ID matching to work * and we know much earlier that the ticket has been accepted. The * other way is to set zero length session ID when the ticket is * presented and rely on the handshake to determine session resumption. * We choose the former approach because this fits in with assumptions * elsewhere in OpenSSL. The session ID is set to the SHA256 (or SHA1 is * SHA256 is disabled) hash of the ticket. 
*/ EVP_Digest(p, ticklen, s->session->session_id, &s->session->session_id_length, EVP_sha256(), NULL); ret = 1; return (ret); f_err: ssl3_send_alert(s, SSL3_AL_FATAL, al); err: s->state = SSL_ST_ERR; return (-1); } int ssl3_get_cert_status(SSL *s) { int ok, al; unsigned long resplen, n; const unsigned char *p; n = s->method->ssl_get_message(s, SSL3_ST_CR_CERT_STATUS_A, SSL3_ST_CR_CERT_STATUS_B, SSL3_MT_CERTIFICATE_STATUS, 16384, &ok); if (!ok) return ((int)n); if (n < 4) { /* need at least status type + length */ al = SSL_AD_DECODE_ERROR; SSLerr(SSL_F_SSL3_GET_CERT_STATUS, SSL_R_LENGTH_MISMATCH); goto f_err; } p = (unsigned char *)s->init_msg; if (*p++ != TLSEXT_STATUSTYPE_ocsp) { al = SSL_AD_DECODE_ERROR; SSLerr(SSL_F_SSL3_GET_CERT_STATUS, SSL_R_UNSUPPORTED_STATUS_TYPE); goto f_err; } n2l3(p, resplen); if (resplen + 4 != n) { al = SSL_AD_DECODE_ERROR; SSLerr(SSL_F_SSL3_GET_CERT_STATUS, SSL_R_LENGTH_MISMATCH); goto f_err; } OPENSSL_free(s->tlsext_ocsp_resp); s->tlsext_ocsp_resp = BUF_memdup(p, resplen); if (!s->tlsext_ocsp_resp) { al = SSL_AD_INTERNAL_ERROR; SSLerr(SSL_F_SSL3_GET_CERT_STATUS, ERR_R_MALLOC_FAILURE); goto f_err; } s->tlsext_ocsp_resplen = resplen; if (s->ctx->tlsext_status_cb) { int ret; ret = s->ctx->tlsext_status_cb(s, s->ctx->tlsext_status_arg); if (ret == 0) { al = SSL_AD_BAD_CERTIFICATE_STATUS_RESPONSE; SSLerr(SSL_F_SSL3_GET_CERT_STATUS, SSL_R_INVALID_STATUS_RESPONSE); goto f_err; } if (ret < 0) { al = SSL_AD_INTERNAL_ERROR; SSLerr(SSL_F_SSL3_GET_CERT_STATUS, ERR_R_MALLOC_FAILURE); goto f_err; } } return 1; f_err: ssl3_send_alert(s, SSL3_AL_FATAL, al); s->state = SSL_ST_ERR; return (-1); } int ssl3_get_server_done(SSL *s) { int ok, ret = 0; long n; /* Second to last param should be very small, like 0 :-) */ n = s->method->ssl_get_message(s, SSL3_ST_CR_SRVR_DONE_A, SSL3_ST_CR_SRVR_DONE_B, SSL3_MT_SERVER_DONE, 30, &ok); if (!ok) return ((int)n); if (n > 0) { /* should contain no data */ ssl3_send_alert(s, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); SSLerr(SSL_F_SSL3_GET_SERVER_DONE, SSL_R_LENGTH_MISMATCH); s->state = SSL_ST_ERR; return -1; } ret = 1; return (ret); } int ssl3_send_client_key_exchange(SSL *s) { unsigned char *p; int n; unsigned long alg_k; #ifndef OPENSSL_NO_RSA unsigned char *q; EVP_PKEY *pkey = NULL; #endif #ifndef OPENSSL_NO_EC EC_KEY *clnt_ecdh = NULL; const EC_POINT *srvr_ecpoint = NULL; EVP_PKEY *srvr_pub_pkey = NULL; unsigned char *encodedPoint = NULL; int encoded_pt_len = 0; BN_CTX *bn_ctx = NULL; #endif unsigned char *pms = NULL; size_t pmslen = 0; if (s->state == SSL3_ST_CW_KEY_EXCH_A) { p = ssl_handshake_start(s); alg_k = s->s3->tmp.new_cipher->algorithm_mkey; /* Fool emacs indentation */ if (0) { } #ifndef OPENSSL_NO_RSA else if (alg_k & SSL_kRSA) { RSA *rsa; pmslen = SSL_MAX_MASTER_KEY_LENGTH; pms = OPENSSL_malloc(pmslen); if (!pms) goto memerr; if (s->session->sess_cert == NULL) { /* * We should always have a server certificate with SSL_kRSA. */ SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE, ERR_R_INTERNAL_ERROR); goto err; } if (s->session->sess_cert->peer_rsa_tmp != NULL) rsa = s->session->sess_cert->peer_rsa_tmp; else { pkey = X509_get_pubkey(s->session-> sess_cert->peer_pkeys[SSL_PKEY_RSA_ENC]. 
x509); if ((pkey == NULL) || (pkey->type != EVP_PKEY_RSA) || (pkey->pkey.rsa == NULL)) { SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE, ERR_R_INTERNAL_ERROR); goto err; } rsa = pkey->pkey.rsa; EVP_PKEY_free(pkey); } pms[0] = s->client_version >> 8; pms[1] = s->client_version & 0xff; if (RAND_bytes(pms + 2, pmslen - 2) <= 0) goto err; q = p; /* Fix buf for TLS and beyond */ if (s->version > SSL3_VERSION) p += 2; n = RSA_public_encrypt(pmslen, pms, p, rsa, RSA_PKCS1_PADDING); # ifdef PKCS1_CHECK if (s->options & SSL_OP_PKCS1_CHECK_1) p[1]++; if (s->options & SSL_OP_PKCS1_CHECK_2) tmp_buf[0] = 0x70; # endif if (n <= 0) { SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE, SSL_R_BAD_RSA_ENCRYPT); goto err; } /* Fix buf for TLS and beyond */ if (s->version > SSL3_VERSION) { s2n(n, q); n += 2; } } #endif #ifndef OPENSSL_NO_DH else if (alg_k & (SSL_kDHE | SSL_kDHr | SSL_kDHd)) { DH *dh_srvr, *dh_clnt; SESS_CERT *scert = s->session->sess_cert; if (scert == NULL) { ssl3_send_alert(s, SSL3_AL_FATAL, SSL_AD_UNEXPECTED_MESSAGE); SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE, SSL_R_UNEXPECTED_MESSAGE); goto err; } if (scert->peer_dh_tmp != NULL) dh_srvr = scert->peer_dh_tmp; else { /* we get them from the cert */ int idx = scert->peer_cert_type; EVP_PKEY *spkey = NULL; dh_srvr = NULL; if (idx >= 0) spkey = X509_get_pubkey(scert->peer_pkeys[idx].x509); if (spkey) { dh_srvr = EVP_PKEY_get1_DH(spkey); EVP_PKEY_free(spkey); } if (dh_srvr == NULL) { SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE, ERR_R_INTERNAL_ERROR); goto err; } } if (s->s3->flags & TLS1_FLAGS_SKIP_CERT_VERIFY) { /* Use client certificate key */ EVP_PKEY *clkey = s->cert->key->privatekey; dh_clnt = NULL; if (clkey) dh_clnt = EVP_PKEY_get1_DH(clkey); if (dh_clnt == NULL) { SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE, ERR_R_INTERNAL_ERROR); goto err; } } else { /* generate a new random key */ if ((dh_clnt = DHparams_dup(dh_srvr)) == NULL) { SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE, ERR_R_DH_LIB); goto err; } if (!DH_generate_key(dh_clnt)) { SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE, ERR_R_DH_LIB); DH_free(dh_clnt); goto err; } } pmslen = DH_size(dh_clnt); pms = OPENSSL_malloc(pmslen); if (!pms) goto memerr; /* * use the 'p' output buffer for the DH key, but make sure to * clear it out afterwards */ n = DH_compute_key(pms, dh_srvr->pub_key, dh_clnt); if (scert->peer_dh_tmp == NULL) DH_free(dh_srvr); if (n <= 0) { SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE, ERR_R_DH_LIB); DH_free(dh_clnt); goto err; } pmslen = n; if (s->s3->flags & TLS1_FLAGS_SKIP_CERT_VERIFY) n = 0; else { /* send off the data */ n = BN_num_bytes(dh_clnt->pub_key); s2n(n, p); BN_bn2bin(dh_clnt->pub_key, p); n += 2; } DH_free(dh_clnt); /* perhaps clean things up a bit EAY EAY EAY EAY */ } #endif #ifndef OPENSSL_NO_EC else if (alg_k & (SSL_kECDHE | SSL_kECDHr | SSL_kECDHe)) { const EC_GROUP *srvr_group = NULL; EC_KEY *tkey; int ecdh_clnt_cert = 0; int field_size = 0; if (s->session->sess_cert == NULL) { ssl3_send_alert(s, SSL3_AL_FATAL, SSL_AD_UNEXPECTED_MESSAGE); SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE, SSL_R_UNEXPECTED_MESSAGE); goto err; } /* * Did we send out the client's ECDH share for use in premaster * computation as part of client certificate? If so, set * ecdh_clnt_cert to 1. */ if ((alg_k & (SSL_kECDHr | SSL_kECDHe)) && (s->cert != NULL)) { /*- * XXX: For now, we do not support client * authentication using ECDH certificates. * To add such support, one needs to add * code that checks for appropriate * conditions and sets ecdh_clnt_cert to 1. 
* For example, the cert have an ECC * key on the same curve as the server's * and the key should be authorized for * key agreement. * * One also needs to add code in ssl3_connect * to skip sending the certificate verify * message. * * if ((s->cert->key->privatekey != NULL) && * (s->cert->key->privatekey->type == * EVP_PKEY_EC) && ...) * ecdh_clnt_cert = 1; */ } if (s->session->sess_cert->peer_ecdh_tmp != NULL) { tkey = s->session->sess_cert->peer_ecdh_tmp; } else { /* Get the Server Public Key from Cert */ srvr_pub_pkey = X509_get_pubkey(s->session-> sess_cert->peer_pkeys[SSL_PKEY_ECC].x509); if ((srvr_pub_pkey == NULL) || (srvr_pub_pkey->type != EVP_PKEY_EC) || (srvr_pub_pkey->pkey.ec == NULL)) { SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE, ERR_R_INTERNAL_ERROR); goto err; } tkey = srvr_pub_pkey->pkey.ec; } srvr_group = EC_KEY_get0_group(tkey); srvr_ecpoint = EC_KEY_get0_public_key(tkey); if ((srvr_group == NULL) || (srvr_ecpoint == NULL)) { SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE, ERR_R_INTERNAL_ERROR); goto err; } if ((clnt_ecdh = EC_KEY_new()) == NULL) { SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE, ERR_R_MALLOC_FAILURE); goto err; } if (!EC_KEY_set_group(clnt_ecdh, srvr_group)) { SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE, ERR_R_EC_LIB); goto err; } if (ecdh_clnt_cert) { /* * Reuse key info from our certificate We only need our * private key to perform the ECDH computation. */ const BIGNUM *priv_key; tkey = s->cert->key->privatekey->pkey.ec; priv_key = EC_KEY_get0_private_key(tkey); if (priv_key == NULL) { SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE, ERR_R_MALLOC_FAILURE); goto err; } if (!EC_KEY_set_private_key(clnt_ecdh, priv_key)) { SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE, ERR_R_EC_LIB); goto err; } } else { /* Generate a new ECDH key pair */ if (!(EC_KEY_generate_key(clnt_ecdh))) { SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE, ERR_R_ECDH_LIB); goto err; } } /* * use the 'p' output buffer for the ECDH key, but make sure to * clear it out afterwards */ field_size = EC_GROUP_get_degree(srvr_group); if (field_size <= 0) { SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE, ERR_R_ECDH_LIB); goto err; } pmslen = (field_size + 7) / 8; pms = OPENSSL_malloc(pmslen); if (!pms) goto memerr; n = ECDH_compute_key(pms, pmslen, srvr_ecpoint, clnt_ecdh, NULL); if (n <= 0 || pmslen != (size_t)n) { SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE, ERR_R_ECDH_LIB); goto err; } if (ecdh_clnt_cert) { /* Send empty client key exch message */ n = 0; } else { /* * First check the size of encoding and allocate memory * accordingly. 
*/ encoded_pt_len = EC_POINT_point2oct(srvr_group, EC_KEY_get0_public_key(clnt_ecdh), POINT_CONVERSION_UNCOMPRESSED, NULL, 0, NULL); encodedPoint = (unsigned char *) OPENSSL_malloc(encoded_pt_len * sizeof(unsigned char)); bn_ctx = BN_CTX_new(); if ((encodedPoint == NULL) || (bn_ctx == NULL)) { SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE, ERR_R_MALLOC_FAILURE); goto err; } /* Encode the public key */ n = EC_POINT_point2oct(srvr_group, EC_KEY_get0_public_key(clnt_ecdh), POINT_CONVERSION_UNCOMPRESSED, encodedPoint, encoded_pt_len, bn_ctx); *p = n; /* length of encoded point */ /* Encoded point will be copied here */ p += 1; /* copy the point */ memcpy(p, encodedPoint, n); /* increment n to account for length field */ n += 1; } /* Free allocated memory */ BN_CTX_free(bn_ctx); OPENSSL_free(encodedPoint); EC_KEY_free(clnt_ecdh); EVP_PKEY_free(srvr_pub_pkey); } #endif /* !OPENSSL_NO_EC */ else if (alg_k & SSL_kGOST) { /* GOST key exchange message creation */ EVP_PKEY_CTX *pkey_ctx; X509 *peer_cert; size_t msglen; unsigned int md_len; int keytype; unsigned char shared_ukm[32], tmp[256]; EVP_MD_CTX *ukm_hash; EVP_PKEY *pub_key; pmslen = 32; pms = OPENSSL_malloc(pmslen); if (!pms) goto memerr; /* * Get server sertificate PKEY and create ctx from it */ peer_cert = s->session-> sess_cert->peer_pkeys[(keytype = SSL_PKEY_GOST01)].x509; if (!peer_cert) peer_cert = s->session-> sess_cert->peer_pkeys[(keytype = SSL_PKEY_GOST94)].x509; if (!peer_cert) { SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE, SSL_R_NO_GOST_CERTIFICATE_SENT_BY_PEER); goto err; } pkey_ctx = EVP_PKEY_CTX_new(pub_key = X509_get_pubkey(peer_cert), NULL); /* * If we have send a certificate, and certificate key * * * parameters match those of server certificate, use * certificate key for key exchange */ /* Otherwise, generate ephemeral key pair */ EVP_PKEY_encrypt_init(pkey_ctx); /* Generate session key */ if (RAND_bytes(pms, pmslen) <= 0) { EVP_PKEY_CTX_free(pkey_ctx); SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE, ERR_R_INTERNAL_ERROR); goto err; }; /* * If we have client certificate, use its secret as peer key */ if (s->s3->tmp.cert_req && s->cert->key->privatekey) { if (EVP_PKEY_derive_set_peer (pkey_ctx, s->cert->key->privatekey) <= 0) { /* * If there was an error - just ignore it. 
Ephemeral key * * would be used */ ERR_clear_error(); } } /* * Compute shared IV and store it in algorithm-specific context * data */ ukm_hash = EVP_MD_CTX_create(); EVP_DigestInit(ukm_hash, EVP_get_digestbynid(NID_id_GostR3411_94)); EVP_DigestUpdate(ukm_hash, s->s3->client_random, SSL3_RANDOM_SIZE); EVP_DigestUpdate(ukm_hash, s->s3->server_random, SSL3_RANDOM_SIZE); EVP_DigestFinal_ex(ukm_hash, shared_ukm, &md_len); EVP_MD_CTX_destroy(ukm_hash); if (EVP_PKEY_CTX_ctrl (pkey_ctx, -1, EVP_PKEY_OP_ENCRYPT, EVP_PKEY_CTRL_SET_IV, 8, shared_ukm) < 0) { SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE, SSL_R_LIBRARY_BUG); goto err; } /* Make GOST keytransport blob message */ /* * Encapsulate it into sequence */ *(p++) = V_ASN1_SEQUENCE | V_ASN1_CONSTRUCTED; msglen = 255; if (EVP_PKEY_encrypt(pkey_ctx, tmp, &msglen, pms, pmslen) < 0) { SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE, SSL_R_LIBRARY_BUG); goto err; } if (msglen >= 0x80) { *(p++) = 0x81; *(p++) = msglen & 0xff; n = msglen + 3; } else { *(p++) = msglen & 0xff; n = msglen + 2; } memcpy(p, tmp, msglen); /* Check if pubkey from client certificate was used */ if (EVP_PKEY_CTX_ctrl (pkey_ctx, -1, -1, EVP_PKEY_CTRL_PEER_KEY, 2, NULL) > 0) { /* Set flag "skip certificate verify" */ s->s3->flags |= TLS1_FLAGS_SKIP_CERT_VERIFY; } EVP_PKEY_CTX_free(pkey_ctx); EVP_PKEY_free(pub_key); } #ifndef OPENSSL_NO_SRP else if (alg_k & SSL_kSRP) { if (s->srp_ctx.A != NULL) { /* send off the data */ n = BN_num_bytes(s->srp_ctx.A); s2n(n, p); BN_bn2bin(s->srp_ctx.A, p); n += 2; } else { SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE, ERR_R_INTERNAL_ERROR); goto err; } OPENSSL_free(s->session->srp_username); s->session->srp_username = BUF_strdup(s->srp_ctx.login); if (s->session->srp_username == NULL) { SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE, ERR_R_MALLOC_FAILURE); goto err; } } #endif #ifndef OPENSSL_NO_PSK else if (alg_k & SSL_kPSK) { /* * The callback needs PSK_MAX_IDENTITY_LEN + 1 bytes to return a * \0-terminated identity. The last byte is for us for simulating * strnlen. 
*/ char identity[PSK_MAX_IDENTITY_LEN + 2]; size_t identity_len; unsigned char *t = NULL; unsigned int psk_len = 0; int psk_err = 1; n = 0; if (s->psk_client_callback == NULL) { SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE, SSL_R_PSK_NO_CLIENT_CB); goto err; } memset(identity, 0, sizeof(identity)); /* Allocate maximum size buffer */ pmslen = PSK_MAX_PSK_LEN * 2 + 4; pms = OPENSSL_malloc(pmslen); if (!pms) goto memerr; psk_len = s->psk_client_callback(s, s->ctx->psk_identity_hint, identity, sizeof(identity) - 1, pms, pmslen); if (psk_len > PSK_MAX_PSK_LEN) { SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE, ERR_R_INTERNAL_ERROR); goto psk_err; } else if (psk_len == 0) { SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE, SSL_R_PSK_IDENTITY_NOT_FOUND); goto psk_err; } /* Change pmslen to real length */ pmslen = 2 + psk_len + 2 + psk_len; identity[PSK_MAX_IDENTITY_LEN + 1] = '\0'; identity_len = strlen(identity); if (identity_len > PSK_MAX_IDENTITY_LEN) { SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE, ERR_R_INTERNAL_ERROR); goto psk_err; } /* create PSK pre_master_secret */ t = pms; memmove(pms + psk_len + 4, pms, psk_len); s2n(psk_len, t); memset(t, 0, psk_len); t += psk_len; s2n(psk_len, t); OPENSSL_free(s->session->psk_identity_hint); s->session->psk_identity_hint = BUF_strdup(s->ctx->psk_identity_hint); if (s->ctx->psk_identity_hint != NULL && s->session->psk_identity_hint == NULL) { SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE, ERR_R_MALLOC_FAILURE); goto psk_err; } OPENSSL_free(s->session->psk_identity); s->session->psk_identity = BUF_strdup(identity); if (s->session->psk_identity == NULL) { SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE, ERR_R_MALLOC_FAILURE); goto psk_err; } s2n(identity_len, p); memcpy(p, identity, identity_len); n = 2 + identity_len; psk_err = 0; psk_err: OPENSSL_cleanse(identity, sizeof(identity)); if (psk_err != 0) { ssl3_send_alert(s, SSL3_AL_FATAL, SSL_AD_HANDSHAKE_FAILURE); goto err; } } #endif else { ssl3_send_alert(s, SSL3_AL_FATAL, SSL_AD_HANDSHAKE_FAILURE); SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE, ERR_R_INTERNAL_ERROR); goto err; } if (!ssl_set_handshake_header(s, SSL3_MT_CLIENT_KEY_EXCHANGE, n)) { ssl3_send_alert(s, SSL3_AL_FATAL, SSL_AD_HANDSHAKE_FAILURE); SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE, ERR_R_INTERNAL_ERROR); goto err; } s->state = SSL3_ST_CW_KEY_EXCH_B; } /* SSL3_ST_CW_KEY_EXCH_B */ n = ssl_do_write(s); #ifndef OPENSSL_NO_SRP /* Check for SRP */ if (s->s3->tmp.new_cipher->algorithm_mkey & SSL_kSRP) { /* * If everything written generate master key: no need to save PMS as * SRP_generate_client_master_secret generates it internally. 
*/ if (n > 0) { if ((s->session->master_key_length = SRP_generate_client_master_secret(s, s->session->master_key)) < 0) { SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE, ERR_R_INTERNAL_ERROR); goto err; } } } else #endif /* If we haven't written everything save PMS */ if (n <= 0) { s->s3->tmp.pms = pms; s->s3->tmp.pmslen = pmslen; } else { /* If we don't have a PMS restore */ if (pms == NULL) { pms = s->s3->tmp.pms; pmslen = s->s3->tmp.pmslen; } if (pms == NULL) { ssl3_send_alert(s, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR); SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE, ERR_R_MALLOC_FAILURE); goto err; } s->session->master_key_length = s->method->ssl3_enc->generate_master_secret(s, s-> session->master_key, pms, pmslen); OPENSSL_clear_free(pms, pmslen); s->s3->tmp.pms = NULL; if (s->session->master_key_length < 0) { ssl3_send_alert(s, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR); SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE, ERR_R_INTERNAL_ERROR); goto err; } } return n; memerr: ssl3_send_alert(s, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR); SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE, ERR_R_MALLOC_FAILURE); err: OPENSSL_clear_free(pms, pmslen); s->s3->tmp.pms = NULL; #ifndef OPENSSL_NO_EC BN_CTX_free(bn_ctx); OPENSSL_free(encodedPoint); EC_KEY_free(clnt_ecdh); EVP_PKEY_free(srvr_pub_pkey); #endif s->state = SSL_ST_ERR; return (-1); } int ssl3_send_client_verify(SSL *s) { unsigned char *p; unsigned char data[MD5_DIGEST_LENGTH + SHA_DIGEST_LENGTH]; EVP_PKEY *pkey; EVP_PKEY_CTX *pctx = NULL; EVP_MD_CTX mctx; unsigned u = 0; unsigned long n; int j; EVP_MD_CTX_init(&mctx); if (s->state == SSL3_ST_CW_CERT_VRFY_A) { p = ssl_handshake_start(s); pkey = s->cert->key->privatekey; /* Create context from key and test if sha1 is allowed as digest */ pctx = EVP_PKEY_CTX_new(pkey, NULL); EVP_PKEY_sign_init(pctx); if (EVP_PKEY_CTX_set_signature_md(pctx, EVP_sha1()) > 0) { if (!SSL_USE_SIGALGS(s)) s->method->ssl3_enc->cert_verify_mac(s, NID_sha1, &(data [MD5_DIGEST_LENGTH])); } else { ERR_clear_error(); } /* * For TLS v1.2 send signature algorithm and signature using agreed * digest and cached handshake records. */ if (SSL_USE_SIGALGS(s)) { long hdatalen = 0; void *hdata; const EVP_MD *md = s->s3->tmp.md[s->cert->key - s->cert->pkeys]; hdatalen = BIO_get_mem_data(s->s3->handshake_buffer, &hdata); if (hdatalen <= 0 || !tls12_get_sigandhash(p, pkey, md)) { SSLerr(SSL_F_SSL3_SEND_CLIENT_VERIFY, ERR_R_INTERNAL_ERROR); goto err; } p += 2; #ifdef SSL_DEBUG fprintf(stderr, "Using TLS 1.2 with client alg %s\n", EVP_MD_name(md)); #endif if (!EVP_SignInit_ex(&mctx, md, NULL) || !EVP_SignUpdate(&mctx, hdata, hdatalen) || !EVP_SignFinal(&mctx, p + 2, &u, pkey)) { SSLerr(SSL_F_SSL3_SEND_CLIENT_VERIFY, ERR_R_EVP_LIB); goto err; } s2n(u, p); n = u + 4; /* * For extended master secret we've already digested cached * records. 
*/ if (s->session->flags & SSL_SESS_FLAG_EXTMS) { BIO_free(s->s3->handshake_buffer); s->s3->handshake_buffer = NULL; s->s3->flags &= ~TLS1_FLAGS_KEEP_HANDSHAKE; } else if (!ssl3_digest_cached_records(s)) goto err; } else #ifndef OPENSSL_NO_RSA if (pkey->type == EVP_PKEY_RSA) { s->method->ssl3_enc->cert_verify_mac(s, NID_md5, &(data[0])); if (RSA_sign(NID_md5_sha1, data, MD5_DIGEST_LENGTH + SHA_DIGEST_LENGTH, &(p[2]), &u, pkey->pkey.rsa) <= 0) { SSLerr(SSL_F_SSL3_SEND_CLIENT_VERIFY, ERR_R_RSA_LIB); goto err; } s2n(u, p); n = u + 2; } else #endif #ifndef OPENSSL_NO_DSA if (pkey->type == EVP_PKEY_DSA) { if (!DSA_sign(pkey->save_type, &(data[MD5_DIGEST_LENGTH]), SHA_DIGEST_LENGTH, &(p[2]), (unsigned int *)&j, pkey->pkey.dsa)) { SSLerr(SSL_F_SSL3_SEND_CLIENT_VERIFY, ERR_R_DSA_LIB); goto err; } s2n(j, p); n = j + 2; } else #endif #ifndef OPENSSL_NO_EC if (pkey->type == EVP_PKEY_EC) { if (!ECDSA_sign(pkey->save_type, &(data[MD5_DIGEST_LENGTH]), SHA_DIGEST_LENGTH, &(p[2]), (unsigned int *)&j, pkey->pkey.ec)) { SSLerr(SSL_F_SSL3_SEND_CLIENT_VERIFY, ERR_R_ECDSA_LIB); goto err; } s2n(j, p); n = j + 2; } else #endif if (pkey->type == NID_id_GostR3410_94 || pkey->type == NID_id_GostR3410_2001) { unsigned char signbuf[64]; int i; size_t sigsize = 64; s->method->ssl3_enc->cert_verify_mac(s, NID_id_GostR3411_94, data); if (EVP_PKEY_sign(pctx, signbuf, &sigsize, data, 32) <= 0) { SSLerr(SSL_F_SSL3_SEND_CLIENT_VERIFY, ERR_R_INTERNAL_ERROR); goto err; } for (i = 63, j = 0; i >= 0; j++, i--) { p[2 + j] = signbuf[i]; } s2n(j, p); n = j + 2; } else { SSLerr(SSL_F_SSL3_SEND_CLIENT_VERIFY, ERR_R_INTERNAL_ERROR); goto err; } if (!ssl_set_handshake_header(s, SSL3_MT_CERTIFICATE_VERIFY, n)) { SSLerr(SSL_F_SSL3_SEND_CLIENT_VERIFY, ERR_R_INTERNAL_ERROR); goto err; } s->state = SSL3_ST_CW_CERT_VRFY_B; } EVP_MD_CTX_cleanup(&mctx); EVP_PKEY_CTX_free(pctx); return ssl_do_write(s); err: EVP_MD_CTX_cleanup(&mctx); EVP_PKEY_CTX_free(pctx); s->state = SSL_ST_ERR; return (-1); } /* * Check a certificate can be used for client authentication. Currently check * cert exists, if we have a suitable digest for TLS 1.2 if static DH client * certificates can be used and optionally checks suitability for Suite B. */ static int ssl3_check_client_certificate(SSL *s) { unsigned long alg_k; if (!s->cert || !s->cert->key->x509 || !s->cert->key->privatekey) return 0; /* If no suitable signature algorithm can't use certificate */ if (SSL_USE_SIGALGS(s) && !s->s3->tmp.md[s->cert->key - s->cert->pkeys]) return 0; /* * If strict mode check suitability of chain before using it. This also * adjusts suite B digest if necessary. 
*/ if (s->cert->cert_flags & SSL_CERT_FLAGS_CHECK_TLS_STRICT && !tls1_check_chain(s, NULL, NULL, NULL, -2)) return 0; alg_k = s->s3->tmp.new_cipher->algorithm_mkey; /* See if we can use client certificate for fixed DH */ if (alg_k & (SSL_kDHr | SSL_kDHd)) { SESS_CERT *scert = s->session->sess_cert; int i = scert->peer_cert_type; EVP_PKEY *clkey = NULL, *spkey = NULL; clkey = s->cert->key->privatekey; /* If client key not DH assume it can be used */ if (EVP_PKEY_id(clkey) != EVP_PKEY_DH) return 1; if (i >= 0) spkey = X509_get_pubkey(scert->peer_pkeys[i].x509); if (spkey) { /* Compare server and client parameters */ i = EVP_PKEY_cmp_parameters(clkey, spkey); EVP_PKEY_free(spkey); if (i != 1) return 0; } s->s3->flags |= TLS1_FLAGS_SKIP_CERT_VERIFY; } return 1; } int ssl3_send_client_certificate(SSL *s) { X509 *x509 = NULL; EVP_PKEY *pkey = NULL; int i; if (s->state == SSL3_ST_CW_CERT_A) { /* Let cert callback update client certificates if required */ if (s->cert->cert_cb) { i = s->cert->cert_cb(s, s->cert->cert_cb_arg); if (i < 0) { s->rwstate = SSL_X509_LOOKUP; return -1; } if (i == 0) { ssl3_send_alert(s, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR); s->state = SSL_ST_ERR; return 0; } s->rwstate = SSL_NOTHING; } if (ssl3_check_client_certificate(s)) s->state = SSL3_ST_CW_CERT_C; else s->state = SSL3_ST_CW_CERT_B; } /* We need to get a client cert */ if (s->state == SSL3_ST_CW_CERT_B) { /* * If we get an error, we need to ssl->rwstate=SSL_X509_LOOKUP; * return(-1); We then get retied later */ i = 0; i = ssl_do_client_cert_cb(s, &x509, &pkey); if (i < 0) { s->rwstate = SSL_X509_LOOKUP; return (-1); } s->rwstate = SSL_NOTHING; if ((i == 1) && (pkey != NULL) && (x509 != NULL)) { s->state = SSL3_ST_CW_CERT_B; if (!SSL_use_certificate(s, x509) || !SSL_use_PrivateKey(s, pkey)) i = 0; } else if (i == 1) { i = 0; SSLerr(SSL_F_SSL3_SEND_CLIENT_CERTIFICATE, SSL_R_BAD_DATA_RETURNED_BY_CALLBACK); } X509_free(x509); EVP_PKEY_free(pkey); if (i && !ssl3_check_client_certificate(s)) i = 0; if (i == 0) { if (s->version == SSL3_VERSION) { s->s3->tmp.cert_req = 0; ssl3_send_alert(s, SSL3_AL_WARNING, SSL_AD_NO_CERTIFICATE); return (1); } else { s->s3->tmp.cert_req = 2; if (s->s3->handshake_buffer && !ssl3_digest_cached_records(s)) { ssl3_send_alert(s, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR); s->state = SSL_ST_ERR; return 0; } } } /* Ok, we have a cert */ s->state = SSL3_ST_CW_CERT_C; } if (s->state == SSL3_ST_CW_CERT_C) { s->state = SSL3_ST_CW_CERT_D; if (!ssl3_output_cert_chain(s, (s->s3->tmp.cert_req == 2) ? 
NULL : s->cert->key)) { SSLerr(SSL_F_SSL3_SEND_CLIENT_CERTIFICATE, ERR_R_INTERNAL_ERROR); ssl3_send_alert(s, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR); s->state = SSL_ST_ERR; return 0; } } /* SSL3_ST_CW_CERT_D */ return ssl_do_write(s); } #define has_bits(i,m) (((i)&(m)) == (m)) int ssl3_check_cert_and_algorithm(SSL *s) { int i, idx; long alg_k, alg_a; EVP_PKEY *pkey = NULL; int pkey_bits; SESS_CERT *sc; #ifndef OPENSSL_NO_RSA RSA *rsa; #endif #ifndef OPENSSL_NO_DH DH *dh; #endif alg_k = s->s3->tmp.new_cipher->algorithm_mkey; alg_a = s->s3->tmp.new_cipher->algorithm_auth; /* we don't have a certificate */ if ((alg_a & SSL_aNULL) || (alg_k & SSL_kPSK)) return (1); sc = s->session->sess_cert; if (sc == NULL) { SSLerr(SSL_F_SSL3_CHECK_CERT_AND_ALGORITHM, ERR_R_INTERNAL_ERROR); goto err; } #ifndef OPENSSL_NO_RSA rsa = s->session->sess_cert->peer_rsa_tmp; #endif #ifndef OPENSSL_NO_DH dh = s->session->sess_cert->peer_dh_tmp; #endif /* This is the passed certificate */ idx = sc->peer_cert_type; #ifndef OPENSSL_NO_EC if (idx == SSL_PKEY_ECC) { if (ssl_check_srvr_ecc_cert_and_alg(sc->peer_pkeys[idx].x509, s) == 0) { /* check failed */ SSLerr(SSL_F_SSL3_CHECK_CERT_AND_ALGORITHM, SSL_R_BAD_ECC_CERT); goto f_err; } else { return 1; } } else if (alg_a & SSL_aECDSA) { SSLerr(SSL_F_SSL3_CHECK_CERT_AND_ALGORITHM, SSL_R_MISSING_ECDSA_SIGNING_CERT); goto f_err; } else if (alg_k & (SSL_kECDHr | SSL_kECDHe)) { SSLerr(SSL_F_SSL3_CHECK_CERT_AND_ALGORITHM, SSL_R_MISSING_ECDH_CERT); goto f_err; } #endif pkey = X509_get_pubkey(sc->peer_pkeys[idx].x509); pkey_bits = EVP_PKEY_bits(pkey); i = X509_certificate_type(sc->peer_pkeys[idx].x509, pkey); EVP_PKEY_free(pkey); /* Check that we have a certificate if we require one */ if ((alg_a & SSL_aRSA) && !has_bits(i, EVP_PK_RSA | EVP_PKT_SIGN)) { SSLerr(SSL_F_SSL3_CHECK_CERT_AND_ALGORITHM, SSL_R_MISSING_RSA_SIGNING_CERT); goto f_err; } #ifndef OPENSSL_NO_DSA else if ((alg_a & SSL_aDSS) && !has_bits(i, EVP_PK_DSA | EVP_PKT_SIGN)) { SSLerr(SSL_F_SSL3_CHECK_CERT_AND_ALGORITHM, SSL_R_MISSING_DSA_SIGNING_CERT); goto f_err; } #endif #ifndef OPENSSL_NO_RSA if ((alg_k & SSL_kRSA) && !(has_bits(i, EVP_PK_RSA | EVP_PKT_ENC) || (rsa != NULL))) { SSLerr(SSL_F_SSL3_CHECK_CERT_AND_ALGORITHM, SSL_R_MISSING_RSA_ENCRYPTING_CERT); goto f_err; } #endif #ifndef OPENSSL_NO_DH if ((alg_k & SSL_kDHE) && !(has_bits(i, EVP_PK_DH | EVP_PKT_EXCH) || (dh != NULL))) { SSLerr(SSL_F_SSL3_CHECK_CERT_AND_ALGORITHM, SSL_R_MISSING_DH_KEY); goto f_err; } else if ((alg_k & SSL_kDHr) && !SSL_USE_SIGALGS(s) && !has_bits(i, EVP_PK_DH | EVP_PKS_RSA)) { SSLerr(SSL_F_SSL3_CHECK_CERT_AND_ALGORITHM, SSL_R_MISSING_DH_RSA_CERT); goto f_err; } # ifndef OPENSSL_NO_DSA else if ((alg_k & SSL_kDHd) && !SSL_USE_SIGALGS(s) && !has_bits(i, EVP_PK_DH | EVP_PKS_DSA)) { SSLerr(SSL_F_SSL3_CHECK_CERT_AND_ALGORITHM, SSL_R_MISSING_DH_DSA_CERT); goto f_err; } # endif #endif if (SSL_C_IS_EXPORT(s->s3->tmp.new_cipher) && pkey_bits > SSL_C_EXPORT_PKEYLENGTH(s->s3->tmp.new_cipher)) { #ifndef OPENSSL_NO_RSA if (alg_k & SSL_kRSA) { if (rsa == NULL || RSA_size(rsa) * 8 > SSL_C_EXPORT_PKEYLENGTH(s->s3->tmp.new_cipher)) { SSLerr(SSL_F_SSL3_CHECK_CERT_AND_ALGORITHM, SSL_R_MISSING_EXPORT_TMP_RSA_KEY); goto f_err; } } else #endif #ifndef OPENSSL_NO_DH if (alg_k & (SSL_kDHE | SSL_kDHr | SSL_kDHd)) { if (dh == NULL || DH_size(dh) * 8 > SSL_C_EXPORT_PKEYLENGTH(s->s3->tmp.new_cipher)) { SSLerr(SSL_F_SSL3_CHECK_CERT_AND_ALGORITHM, SSL_R_MISSING_EXPORT_TMP_DH_KEY); goto f_err; } } else #endif { SSLerr(SSL_F_SSL3_CHECK_CERT_AND_ALGORITHM, 
SSL_R_UNKNOWN_KEY_EXCHANGE_TYPE); goto f_err; } } return (1); f_err: ssl3_send_alert(s, SSL3_AL_FATAL, SSL_AD_HANDSHAKE_FAILURE); err: return (0); } /* * Normally, we can tell if the server is resuming the session from * the session ID. EAP-FAST (RFC 4851), however, relies on the next server * message after the ServerHello to determine if the server is resuming. * Therefore, we allow EAP-FAST to peek ahead. * ssl3_check_finished returns 1 if we are resuming from an external * pre-shared secret, we have a "ticket" and the next server handshake message * is Finished; and 0 otherwise. It returns -1 upon an error. */ static int ssl3_check_finished(SSL *s) { int ok = 0; if (s->version < TLS1_VERSION || !s->tls_session_secret_cb || !s->session->tlsext_tick) return 0; /* Need to permit this temporarily, in case the next message is Finished. */ s->s3->flags |= SSL3_FLAGS_CCS_OK; /* * This function is called when we might get a Certificate message instead, * so permit appropriate message length. * We ignore the return value as we're only interested in the message type * and not its length. */ s->method->ssl_get_message(s, SSL3_ST_CR_CERT_A, SSL3_ST_CR_CERT_B, -1, s->max_cert_list, &ok); s->s3->flags &= ~SSL3_FLAGS_CCS_OK; if (!ok) return -1; s->s3->tmp.reuse_message = 1; if (s->s3->tmp.message_type == SSL3_MT_FINISHED) return 1; /* If we're not done, then the CCS arrived early and we should bail. */ if (s->s3->change_cipher_spec) { SSLerr(SSL_F_SSL3_CHECK_FINISHED, SSL_R_CCS_RECEIVED_EARLY); ssl3_send_alert(s, SSL3_AL_FATAL, SSL_AD_UNEXPECTED_MESSAGE); return -1; } return 0; } #ifndef OPENSSL_NO_NEXTPROTONEG int ssl3_send_next_proto(SSL *s) { unsigned int len, padding_len; unsigned char *d; if (s->state == SSL3_ST_CW_NEXT_PROTO_A) { len = s->next_proto_negotiated_len; padding_len = 32 - ((len + 2) % 32); d = (unsigned char *)s->init_buf->data; d[4] = len; memcpy(d + 5, s->next_proto_negotiated, len); d[5 + len] = padding_len; memset(d + 6 + len, 0, padding_len); *(d++) = SSL3_MT_NEXT_PROTO; l2n3(2 + len + padding_len, d); s->state = SSL3_ST_CW_NEXT_PROTO_B; s->init_num = 4 + 2 + len + padding_len; s->init_off = 0; } return ssl3_do_write(s, SSL3_RT_HANDSHAKE); } #endif int ssl_do_client_cert_cb(SSL *s, X509 **px509, EVP_PKEY **ppkey) { int i = 0; #ifndef OPENSSL_NO_ENGINE if (s->ctx->client_cert_engine) { i = ENGINE_load_ssl_client_cert(s->ctx->client_cert_engine, s, SSL_get_client_CA_list(s), px509, ppkey, NULL, NULL, NULL); if (i != 0) return i; } #endif if (s->ctx->client_cert_cb) i = s->ctx->client_cert_cb(s, px509, ppkey); return i; } int ssl_cipher_list_to_bytes(SSL *s, STACK_OF(SSL_CIPHER) *sk, unsigned char *p, int (*put_cb) (const SSL_CIPHER *, unsigned char *)) { int i, j = 0; SSL_CIPHER *c; unsigned char *q; int empty_reneg_info_scsv = !s->renegotiate; /* Set disabled masks for this session */ ssl_set_client_disabled(s); if (sk == NULL) return (0); q = p; if (put_cb == NULL) put_cb = s->method->put_cipher_by_char; for (i = 0; i < sk_SSL_CIPHER_num(sk); i++) { c = sk_SSL_CIPHER_value(sk, i); /* Skip disabled ciphers */ if (ssl_cipher_disabled(s, c, SSL_SECOP_CIPHER_SUPPORTED)) continue; #ifdef OPENSSL_SSL_DEBUG_BROKEN_PROTOCOL if (c->id == SSL3_CK_SCSV) { if (!empty_reneg_info_scsv) continue; else empty_reneg_info_scsv = 0; } #endif j = put_cb(c, p); p += j; } /* * If p == q, no ciphers; caller indicates an error. Otherwise, add * applicable SCSVs. 
*/ if (p != q) { if (empty_reneg_info_scsv) { static SSL_CIPHER scsv = { 0, NULL, SSL3_CK_SCSV, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; j = put_cb(&scsv, p); p += j; #ifdef OPENSSL_RI_DEBUG fprintf(stderr, "TLS_EMPTY_RENEGOTIATION_INFO_SCSV sent by client\n"); #endif } if (s->mode & SSL_MODE_SEND_FALLBACK_SCSV) { static SSL_CIPHER scsv = { 0, NULL, SSL3_CK_FALLBACK_SCSV, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; j = put_cb(&scsv, p); p += j; } } return (p - q); }
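/*
 * Illustrative sketch added by the editor, not part of the original
 * OpenSSL source: ssl3_get_key_exchange() above repeatedly reads a
 * two-byte length `i` with n2s(), rejects it with `i > n - param_len`
 * *before* advancing, and only then does `param_len += i`.  Comparing
 * against the bytes that remain (rather than computing param_len + i)
 * sidesteps integer overflow on attacker-controlled lengths.  The helper
 * below is a hypothetical, self-contained model of that pattern; the name
 * read_len16_field() does not exist in OpenSSL.
 */
#include <stddef.h>

/* Read one 16-bit-length-prefixed field from buf[0..n), starting at *off
 * (caller guarantees *off <= n).  Returns 0 and sets *out/*out_len on
 * success, -1 on malformed input. */
static int read_len16_field(const unsigned char *buf, size_t n, size_t *off,
                            const unsigned char **out, size_t *out_len)
{
    size_t len;

    if (n - *off < 2)            /* need room for the length prefix itself */
        return -1;
    len = ((size_t)buf[*off] << 8) | buf[*off + 1];
    *off += 2;
    if (len > n - *off)          /* check against the *remaining* bytes    */
        return -1;               /* (never compute *off + len first)       */
    *out = buf + *off;
    *out_len = len;
    *off += len;
    return 0;
}
/* Usage mirrors the SRP/DH/RSA branches above: call once per N, g, s, B
 * style field and fail the handshake as soon as any call returns -1. */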
./CrossVul/dataset_final_sorted/CWE-362/c/good_1496_1
crossvul-cpp_data_bad_1625_0
/* * Ldisc rw semaphore * * The ldisc semaphore is semantically a rw_semaphore but which enforces * an alternate policy, namely: * 1) Supports lock wait timeouts * 2) Write waiter has priority * 3) Downgrading is not supported * * Implementation notes: * 1) Upper half of semaphore count is a wait count (differs from rwsem * in that rwsem normalizes the upper half to the wait bias) * 2) Lacks overflow checking * * The generic counting was copied and modified from include/asm-generic/rwsem.h * by Paul Mackerras <paulus@samba.org>. * * The scheduling policy was copied and modified from lib/rwsem.c * Written by David Howells (dhowells@redhat.com). * * This implementation incorporates the write lock stealing work of * Michel Lespinasse <walken@google.com>. * * Copyright (C) 2013 Peter Hurley <peter@hurleysoftware.com> * * This file may be redistributed under the terms of the GNU General Public * License v2. */ #include <linux/list.h> #include <linux/spinlock.h> #include <linux/atomic.h> #include <linux/tty.h> #include <linux/sched.h> #ifdef CONFIG_DEBUG_LOCK_ALLOC # define __acq(l, s, t, r, c, n, i) \ lock_acquire(&(l)->dep_map, s, t, r, c, n, i) # define __rel(l, n, i) \ lock_release(&(l)->dep_map, n, i) # ifdef CONFIG_PROVE_LOCKING # define lockdep_acquire(l, s, t, i) __acq(l, s, t, 0, 2, NULL, i) # define lockdep_acquire_nest(l, s, t, n, i) __acq(l, s, t, 0, 2, n, i) # define lockdep_acquire_read(l, s, t, i) __acq(l, s, t, 1, 2, NULL, i) # define lockdep_release(l, n, i) __rel(l, n, i) # else # define lockdep_acquire(l, s, t, i) __acq(l, s, t, 0, 1, NULL, i) # define lockdep_acquire_nest(l, s, t, n, i) __acq(l, s, t, 0, 1, n, i) # define lockdep_acquire_read(l, s, t, i) __acq(l, s, t, 1, 1, NULL, i) # define lockdep_release(l, n, i) __rel(l, n, i) # endif #else # define lockdep_acquire(l, s, t, i) do { } while (0) # define lockdep_acquire_nest(l, s, t, n, i) do { } while (0) # define lockdep_acquire_read(l, s, t, i) do { } while (0) # define lockdep_release(l, n, i) do { } while (0) #endif #ifdef CONFIG_LOCK_STAT # define lock_stat(_lock, stat) lock_##stat(&(_lock)->dep_map, _RET_IP_) #else # define lock_stat(_lock, stat) do { } while (0) #endif #if BITS_PER_LONG == 64 # define LDSEM_ACTIVE_MASK 0xffffffffL #else # define LDSEM_ACTIVE_MASK 0x0000ffffL #endif #define LDSEM_UNLOCKED 0L #define LDSEM_ACTIVE_BIAS 1L #define LDSEM_WAIT_BIAS (-LDSEM_ACTIVE_MASK-1) #define LDSEM_READ_BIAS LDSEM_ACTIVE_BIAS #define LDSEM_WRITE_BIAS (LDSEM_WAIT_BIAS + LDSEM_ACTIVE_BIAS) struct ldsem_waiter { struct list_head list; struct task_struct *task; }; static inline long ldsem_atomic_update(long delta, struct ld_semaphore *sem) { return atomic_long_add_return(delta, (atomic_long_t *)&sem->count); } static inline int ldsem_cmpxchg(long *old, long new, struct ld_semaphore *sem) { long tmp = *old; *old = atomic_long_cmpxchg(&sem->count, *old, new); return *old == tmp; } /* * Initialize an ldsem: */ void __init_ldsem(struct ld_semaphore *sem, const char *name, struct lock_class_key *key) { #ifdef CONFIG_DEBUG_LOCK_ALLOC /* * Make sure we are not reinitializing a held semaphore: */ debug_check_no_locks_freed((void *)sem, sizeof(*sem)); lockdep_init_map(&sem->dep_map, name, key, 0); #endif sem->count = LDSEM_UNLOCKED; sem->wait_readers = 0; raw_spin_lock_init(&sem->wait_lock); INIT_LIST_HEAD(&sem->read_wait); INIT_LIST_HEAD(&sem->write_wait); } static void __ldsem_wake_readers(struct ld_semaphore *sem) { struct ldsem_waiter *waiter, *next; struct task_struct *tsk; long adjust, count; /* Try to grant read locks to 
all readers on the read wait list. * Note the 'active part' of the count is incremented by * the number of readers before waking any processes up. */ adjust = sem->wait_readers * (LDSEM_ACTIVE_BIAS - LDSEM_WAIT_BIAS); count = ldsem_atomic_update(adjust, sem); do { if (count > 0) break; if (ldsem_cmpxchg(&count, count - adjust, sem)) return; } while (1); list_for_each_entry_safe(waiter, next, &sem->read_wait, list) { tsk = waiter->task; smp_mb(); waiter->task = NULL; wake_up_process(tsk); put_task_struct(tsk); } INIT_LIST_HEAD(&sem->read_wait); sem->wait_readers = 0; } static inline int writer_trylock(struct ld_semaphore *sem) { /* only wake this writer if the active part of the count can be * transitioned from 0 -> 1 */ long count = ldsem_atomic_update(LDSEM_ACTIVE_BIAS, sem); do { if ((count & LDSEM_ACTIVE_MASK) == LDSEM_ACTIVE_BIAS) return 1; if (ldsem_cmpxchg(&count, count - LDSEM_ACTIVE_BIAS, sem)) return 0; } while (1); } static void __ldsem_wake_writer(struct ld_semaphore *sem) { struct ldsem_waiter *waiter; waiter = list_entry(sem->write_wait.next, struct ldsem_waiter, list); wake_up_process(waiter->task); } /* * handle the lock release when processes blocked on it that can now run * - if we come here from up_xxxx(), then: * - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed) * - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so) * - the spinlock must be held by the caller * - woken process blocks are discarded from the list after having task zeroed */ static void __ldsem_wake(struct ld_semaphore *sem) { if (!list_empty(&sem->write_wait)) __ldsem_wake_writer(sem); else if (!list_empty(&sem->read_wait)) __ldsem_wake_readers(sem); } static void ldsem_wake(struct ld_semaphore *sem) { unsigned long flags; raw_spin_lock_irqsave(&sem->wait_lock, flags); __ldsem_wake(sem); raw_spin_unlock_irqrestore(&sem->wait_lock, flags); } /* * wait for the read lock to be granted */ static struct ld_semaphore __sched * down_read_failed(struct ld_semaphore *sem, long count, long timeout) { struct ldsem_waiter waiter; struct task_struct *tsk = current; long adjust = -LDSEM_ACTIVE_BIAS + LDSEM_WAIT_BIAS; /* set up my own style of waitqueue */ raw_spin_lock_irq(&sem->wait_lock); /* Try to reverse the lock attempt but if the count has changed * so that reversing fails, check if there are are no waiters, * and early-out if not */ do { if (ldsem_cmpxchg(&count, count + adjust, sem)) break; if (count > 0) { raw_spin_unlock_irq(&sem->wait_lock); return sem; } } while (1); list_add_tail(&waiter.list, &sem->read_wait); sem->wait_readers++; waiter.task = tsk; get_task_struct(tsk); /* if there are no active locks, wake the new lock owner(s) */ if ((count & LDSEM_ACTIVE_MASK) == 0) __ldsem_wake(sem); raw_spin_unlock_irq(&sem->wait_lock); /* wait to be given the lock */ for (;;) { set_task_state(tsk, TASK_UNINTERRUPTIBLE); if (!waiter.task) break; if (!timeout) break; timeout = schedule_timeout(timeout); } __set_task_state(tsk, TASK_RUNNING); if (!timeout) { /* lock timed out but check if this task was just * granted lock ownership - if so, pretend there * was no timeout; otherwise, cleanup lock wait */ raw_spin_lock_irq(&sem->wait_lock); if (waiter.task) { ldsem_atomic_update(-LDSEM_WAIT_BIAS, sem); list_del(&waiter.list); raw_spin_unlock_irq(&sem->wait_lock); put_task_struct(waiter.task); return NULL; } raw_spin_unlock_irq(&sem->wait_lock); } return sem; } /* * wait for the write lock to be granted */ static struct ld_semaphore __sched * down_write_failed(struct 
ld_semaphore *sem, long count, long timeout) { struct ldsem_waiter waiter; struct task_struct *tsk = current; long adjust = -LDSEM_ACTIVE_BIAS; int locked = 0; /* set up my own style of waitqueue */ raw_spin_lock_irq(&sem->wait_lock); /* Try to reverse the lock attempt but if the count has changed * so that reversing fails, check if the lock is now owned, * and early-out if so */ do { if (ldsem_cmpxchg(&count, count + adjust, sem)) break; if ((count & LDSEM_ACTIVE_MASK) == LDSEM_ACTIVE_BIAS) { raw_spin_unlock_irq(&sem->wait_lock); return sem; } } while (1); list_add_tail(&waiter.list, &sem->write_wait); waiter.task = tsk; set_task_state(tsk, TASK_UNINTERRUPTIBLE); for (;;) { if (!timeout) break; raw_spin_unlock_irq(&sem->wait_lock); timeout = schedule_timeout(timeout); raw_spin_lock_irq(&sem->wait_lock); set_task_state(tsk, TASK_UNINTERRUPTIBLE); if ((locked = writer_trylock(sem))) break; } if (!locked) ldsem_atomic_update(-LDSEM_WAIT_BIAS, sem); list_del(&waiter.list); raw_spin_unlock_irq(&sem->wait_lock); __set_task_state(tsk, TASK_RUNNING); /* lock wait may have timed out */ if (!locked) return NULL; return sem; } static inline int __ldsem_down_read_nested(struct ld_semaphore *sem, int subclass, long timeout) { long count; lockdep_acquire_read(sem, subclass, 0, _RET_IP_); count = ldsem_atomic_update(LDSEM_READ_BIAS, sem); if (count <= 0) { lock_stat(sem, contended); if (!down_read_failed(sem, count, timeout)) { lockdep_release(sem, 1, _RET_IP_); return 0; } } lock_stat(sem, acquired); return 1; } static inline int __ldsem_down_write_nested(struct ld_semaphore *sem, int subclass, long timeout) { long count; lockdep_acquire(sem, subclass, 0, _RET_IP_); count = ldsem_atomic_update(LDSEM_WRITE_BIAS, sem); if ((count & LDSEM_ACTIVE_MASK) != LDSEM_ACTIVE_BIAS) { lock_stat(sem, contended); if (!down_write_failed(sem, count, timeout)) { lockdep_release(sem, 1, _RET_IP_); return 0; } } lock_stat(sem, acquired); return 1; } /* * lock for reading -- returns 1 if successful, 0 if timed out */ int __sched ldsem_down_read(struct ld_semaphore *sem, long timeout) { might_sleep(); return __ldsem_down_read_nested(sem, 0, timeout); } /* * trylock for reading -- returns 1 if successful, 0 if contention */ int ldsem_down_read_trylock(struct ld_semaphore *sem) { long count = sem->count; while (count >= 0) { if (ldsem_cmpxchg(&count, count + LDSEM_READ_BIAS, sem)) { lockdep_acquire_read(sem, 0, 1, _RET_IP_); lock_stat(sem, acquired); return 1; } } return 0; } /* * lock for writing -- returns 1 if successful, 0 if timed out */ int __sched ldsem_down_write(struct ld_semaphore *sem, long timeout) { might_sleep(); return __ldsem_down_write_nested(sem, 0, timeout); } /* * trylock for writing -- returns 1 if successful, 0 if contention */ int ldsem_down_write_trylock(struct ld_semaphore *sem) { long count = sem->count; while ((count & LDSEM_ACTIVE_MASK) == 0) { if (ldsem_cmpxchg(&count, count + LDSEM_WRITE_BIAS, sem)) { lockdep_acquire(sem, 0, 1, _RET_IP_); lock_stat(sem, acquired); return 1; } } return 0; } /* * release a read lock */ void ldsem_up_read(struct ld_semaphore *sem) { long count; lockdep_release(sem, 1, _RET_IP_); count = ldsem_atomic_update(-LDSEM_READ_BIAS, sem); if (count < 0 && (count & LDSEM_ACTIVE_MASK) == 0) ldsem_wake(sem); } /* * release a write lock */ void ldsem_up_write(struct ld_semaphore *sem) { long count; lockdep_release(sem, 1, _RET_IP_); count = ldsem_atomic_update(-LDSEM_WRITE_BIAS, sem); if (count < 0) ldsem_wake(sem); } #ifdef CONFIG_DEBUG_LOCK_ALLOC int 
ldsem_down_read_nested(struct ld_semaphore *sem, int subclass, long timeout)
{
	might_sleep();
	return __ldsem_down_read_nested(sem, subclass, timeout);
}

int ldsem_down_write_nested(struct ld_semaphore *sem, int subclass, long timeout)
{
	might_sleep();
	return __ldsem_down_write_nested(sem, subclass, timeout);
}
#endif
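/*
 * Illustrative sketch added by the editor, not part of the original
 * kernel source: the ldsem count above packs two fields into one long -
 * the low LDSEM_ACTIVE_MASK bits count active holders and the (negative)
 * upper half counts waiters - which is why a reader adds LDSEM_READ_BIAS
 * (+1) and a writer adds LDSEM_WRITE_BIAS (LDSEM_WAIT_BIAS + 1).  The
 * stand-alone decoder below models that encoding in user space; it assumes
 * a 64-bit long and arithmetic right shift, and none of these helpers
 * exist in the kernel.
 */
#include <stdio.h>

#define ACTIVE_MASK 0xffffffffL
#define ACTIVE_BIAS 1L
#define WAIT_BIAS   (-ACTIVE_MASK - 1)   /* one waiter unit */

static void decode(long count)
{
    long active  = count & ACTIVE_MASK;  /* current lock holders        */
    long waiters = -(count >> 32);       /* upper half, sign-extended   */
    printf("count=%ld active=%ld waiters=%ld\n", count, active, waiters);
}

int main(void)
{
    decode(0L);                          /* LDSEM_UNLOCKED              */
    decode(2 * ACTIVE_BIAS);             /* two readers hold the lock   */
    decode(WAIT_BIAS + ACTIVE_BIAS);     /* one writer holds the lock   */
    decode(WAIT_BIAS);                   /* one task waiting, lock free */
    return 0;
}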
./CrossVul/dataset_final_sorted/CWE-362/c/bad_1625_0
crossvul-cpp_data_bad_5222_5
/* Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ /* Describe, check and repair of MyISAM tables */ #include "fulltext.h" #include <m_ctype.h> #include <stdarg.h> #include <my_getopt.h> #include <my_bit.h> #ifdef HAVE_SYS_VADVICE_H #include <sys/vadvise.h> #endif #ifdef HAVE_SYS_MMAN_H #include <sys/mman.h> #endif static uint decode_bits; static char **default_argv; static const char *load_default_groups[]= { "myisamchk", 0 }; static const char *set_collation_name, *opt_tmpdir; static CHARSET_INFO *set_collation; static long opt_myisam_block_size; static long opt_key_cache_block_size; static const char *my_progname_short; static int stopwords_inited= 0; static MY_TMPDIR myisamchk_tmpdir; static const char *type_names[]= { "impossible","char","binary", "short", "long", "float", "double","number","unsigned short", "unsigned long","longlong","ulonglong","int24", "uint24","int8","varchar", "varbin","?", "?"}; static const char *prefix_packed_txt="packed ", *bin_packed_txt="prefix ", *diff_txt="stripped ", *null_txt="NULL", *blob_txt="BLOB "; static const char *field_pack[]= {"","no endspace", "no prespace", "no zeros", "blob", "constant", "table-lockup", "always zero","varchar","unique-hash","?","?"}; static const char *myisam_stats_method_str="nulls_unequal"; static void get_options(int *argc,char * * *argv); static void print_version(void); static void usage(void); static int myisamchk(MI_CHECK *param, char *filename); static void descript(MI_CHECK *param, register MI_INFO *info, char * name); static int mi_sort_records(MI_CHECK *param, register MI_INFO *info, char * name, uint sort_key, my_bool write_info, my_bool update_index); static int sort_record_index(MI_SORT_PARAM *sort_param, MI_INFO *info, MI_KEYDEF *keyinfo, my_off_t page,uchar *buff,uint sortkey, File new_file, my_bool update_index); MI_CHECK check_param; /* Main program */ int main(int argc, char **argv) { int error; MY_INIT(argv[0]); my_progname_short= my_progname+dirname_length(my_progname); myisamchk_init(&check_param); check_param.opt_lock_memory=1; /* Lock memory if possible */ check_param.using_global_keycache = 0; get_options(&argc,(char***) &argv); myisam_quick_table_bits=decode_bits; error=0; while (--argc >= 0) { int new_error=myisamchk(&check_param, *(argv++)); if ((check_param.testflag & T_REP_ANY) != T_REP) check_param.testflag&= ~T_REP; (void) fflush(stdout); (void) fflush(stderr); if ((check_param.error_printed | check_param.warning_printed) && (check_param.testflag & T_FORCE_CREATE) && (!(check_param.testflag & (T_REP | T_REP_BY_SORT | T_SORT_RECORDS | T_SORT_INDEX)))) { uint old_testflag=check_param.testflag; if (!(check_param.testflag & T_REP)) check_param.testflag|= T_REP_BY_SORT; check_param.testflag&= ~T_EXTEND; /* Don't needed */ error|=myisamchk(&check_param, argv[-1]); check_param.testflag= old_testflag; (void) fflush(stdout); (void) fflush(stderr); } 
else error|=new_error; if (argc && (!(check_param.testflag & T_SILENT) || check_param.testflag & T_INFO)) { puts("\n---------\n"); (void) fflush(stdout); } } if (check_param.total_files > 1) { /* Only if descript */ char buff[22],buff2[22]; if (!(check_param.testflag & T_SILENT) || check_param.testflag & T_INFO) puts("\n---------\n"); printf("\nTotal of all %d MyISAM-files:\nData records: %9s Deleted blocks: %9s\n",check_param.total_files,llstr(check_param.total_records,buff), llstr(check_param.total_deleted,buff2)); } free_defaults(default_argv); free_tmpdir(&myisamchk_tmpdir); ft_free_stopwords(); my_end(check_param.testflag & T_INFO ? MY_CHECK_ERROR | MY_GIVE_INFO : MY_CHECK_ERROR); exit(error); #ifndef _lint return 0; /* No compiler warning */ #endif } /* main */ enum options_mc { OPT_CHARSETS_DIR=256, OPT_SET_COLLATION,OPT_START_CHECK_POS, OPT_CORRECT_CHECKSUM, OPT_KEY_BUFFER_SIZE, OPT_KEY_CACHE_BLOCK_SIZE, OPT_MYISAM_BLOCK_SIZE, OPT_READ_BUFFER_SIZE, OPT_WRITE_BUFFER_SIZE, OPT_SORT_BUFFER_SIZE, OPT_SORT_KEY_BLOCKS, OPT_DECODE_BITS, OPT_FT_MIN_WORD_LEN, OPT_FT_MAX_WORD_LEN, OPT_FT_STOPWORD_FILE, OPT_MAX_RECORD_LENGTH, OPT_STATS_METHOD }; static struct my_option my_long_options[] = { {"analyze", 'a', "Analyze distribution of keys. Will make some joins in MySQL faster. You can check the calculated distribution.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"block-search", 'b', "No help available.", 0, 0, 0, GET_ULONG, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"backup", 'B', "Make a backup of the .MYD file as 'filename-time.BAK'.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"character-sets-dir", OPT_CHARSETS_DIR, "Directory where character sets are.", &charsets_dir, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"check", 'c', "Check table for errors.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"check-only-changed", 'C', "Check only tables that have changed since last check. It also applies to other requested actions (e.g. --analyze will be ignored if the table is already analyzed).", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"correct-checksum", OPT_CORRECT_CHECKSUM, "Correct checksum information for table.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, #ifndef DBUG_OFF {"debug", '#', "Output debug log. Often this is 'd:t:o,filename'.", 0, 0, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0}, #endif {"description", 'd', "Prints some information about table.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"data-file-length", 'D', "Max length of data file (when recreating data-file when it's full).", &check_param.max_data_file_length, &check_param.max_data_file_length, 0, GET_LL, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"extend-check", 'e', "If used when checking a table, ensure that the table is 100 percent consistent, which will take a long time. If used when repairing a table, try to recover every possible row from the data file. Normally this will also find a lot of garbage rows; Don't use this option with repair if you are not totally desperate.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"fast", 'F', "Check only tables that haven't been closed properly. It also applies to other requested actions (e.g. --analyze will be ignored if the table is already analyzed).", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"force", 'f', "Restart with -r if there are any errors in the table. 
States will be updated as with --update-state.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"HELP", 'H', "Display this help and exit.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"help", '?', "Display this help and exit.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"information", 'i', "Print statistics information about table that is checked.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"keys-used", 'k', "Tell MyISAM to update only some specific keys. # is a bit mask of which keys to use. This can be used to get faster inserts.", &check_param.keys_in_use, &check_param.keys_in_use, 0, GET_ULL, REQUIRED_ARG, -1, 0, 0, 0, 0, 0}, {"max-record-length", OPT_MAX_RECORD_LENGTH, "Skip rows bigger than this if myisamchk can't allocate memory to hold it", &check_param.max_record_length, &check_param.max_record_length, 0, GET_ULL, REQUIRED_ARG, LONGLONG_MAX, 0, LONGLONG_MAX, 0, 0, 0}, {"medium-check", 'm', "Faster than extend-check, but only finds 99.99% of all errors. Should be good enough for most cases.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"quick", 'q', "Faster repair by not modifying the data file.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"read-only", 'T', "Don't mark table as checked.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"recover", 'r', "Can fix almost anything except unique keys that aren't unique.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"parallel-recover", 'p', "Same as '-r' but creates all the keys in parallel.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"safe-recover", 'o', "Uses old recovery method; Slower than '-r' but can handle a couple of cases where '-r' reports that it can't fix the data file.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"sort-recover", 'n', "Force recovering with sorting even if the temporary file was very big.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, #ifdef DEBUG {"start-check-pos", OPT_START_CHECK_POS, "No help available.", 0, 0, 0, GET_ULL, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, #endif {"set-auto-increment", 'A', "Force auto_increment to start at this or higher value. If no value is given, then sets the next auto_increment value to the highest used value for the auto key + 1.", &check_param.auto_increment_value, &check_param.auto_increment_value, 0, GET_ULL, OPT_ARG, 0, 0, 0, 0, 0, 0}, {"set-collation", OPT_SET_COLLATION, "Change the collation used by the index", &set_collation_name, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"silent", 's', "Only print errors. One can use two -s to make myisamchk very silent.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"sort-index", 'S', "Sort index blocks. This speeds up 'read-next' in applications.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"sort-records", 'R', "Sort records according to an index. This makes your data much more localized and may speed up things. (It may be VERY slow to do a sort the first time!)", &check_param.opt_sort_key, &check_param.opt_sort_key, 0, GET_UINT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"tmpdir", 't', "Path for temporary files.", &opt_tmpdir, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"update-state", 'U', "Mark tables as crashed if any errors were found.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"unpack", 'u', "Unpack file packed with myisampack.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"verbose", 'v', "Print more information. This can be used with --description and --check. 
Use many -v for more verbosity!", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"version", 'V', "Print version and exit.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"wait", 'w', "Wait if table is locked.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, { "key_buffer_size", OPT_KEY_BUFFER_SIZE, "", &check_param.use_buffers, &check_param.use_buffers, 0, GET_ULL, REQUIRED_ARG, USE_BUFFER_INIT, MALLOC_OVERHEAD, SIZE_T_MAX, MALLOC_OVERHEAD, IO_SIZE, 0}, { "key_cache_block_size", OPT_KEY_CACHE_BLOCK_SIZE, "", &opt_key_cache_block_size, &opt_key_cache_block_size, 0, GET_LONG, REQUIRED_ARG, MI_KEY_BLOCK_LENGTH, MI_MIN_KEY_BLOCK_LENGTH, MI_MAX_KEY_BLOCK_LENGTH, 0, MI_MIN_KEY_BLOCK_LENGTH, 0}, { "myisam_block_size", OPT_MYISAM_BLOCK_SIZE, "", &opt_myisam_block_size, &opt_myisam_block_size, 0, GET_LONG, REQUIRED_ARG, MI_KEY_BLOCK_LENGTH, MI_MIN_KEY_BLOCK_LENGTH, MI_MAX_KEY_BLOCK_LENGTH, 0, MI_MIN_KEY_BLOCK_LENGTH, 0}, { "read_buffer_size", OPT_READ_BUFFER_SIZE, "", &check_param.read_buffer_length, &check_param.read_buffer_length, 0, GET_ULONG, REQUIRED_ARG, (long) READ_BUFFER_INIT, (long) MALLOC_OVERHEAD, INT_MAX32, (long) MALLOC_OVERHEAD, (long) 1L, 0}, { "write_buffer_size", OPT_WRITE_BUFFER_SIZE, "", &check_param.write_buffer_length, &check_param.write_buffer_length, 0, GET_ULONG, REQUIRED_ARG, (long) READ_BUFFER_INIT, (long) MALLOC_OVERHEAD, INT_MAX32, (long) MALLOC_OVERHEAD, (long) 1L, 0}, { "sort_buffer_size", OPT_SORT_BUFFER_SIZE, "Deprecated. myisam_sort_buffer_size alias is being used", &check_param.sort_buffer_length, &check_param.sort_buffer_length, 0, GET_ULL, REQUIRED_ARG, (long) SORT_BUFFER_INIT, (long) (MIN_SORT_BUFFER + MALLOC_OVERHEAD), SIZE_T_MAX, (long) MALLOC_OVERHEAD, (long) 1L, 0}, { "myisam_sort_buffer_size", OPT_SORT_BUFFER_SIZE, "Alias of sort_buffer_size parameter", &check_param.sort_buffer_length, &check_param.sort_buffer_length, 0, GET_ULL, REQUIRED_ARG, (long) SORT_BUFFER_INIT, (long) (MIN_SORT_BUFFER + MALLOC_OVERHEAD), SIZE_T_MAX, (long) MALLOC_OVERHEAD, (long) 1L, 0}, { "sort_key_blocks", OPT_SORT_KEY_BLOCKS, "", &check_param.sort_key_blocks, &check_param.sort_key_blocks, 0, GET_ULONG, REQUIRED_ARG, BUFFERS_WHEN_SORTING, 4L, 100L, 0L, 1L, 0}, { "decode_bits", OPT_DECODE_BITS, "", &decode_bits, &decode_bits, 0, GET_UINT, REQUIRED_ARG, 9L, 4L, 17L, 0L, 1L, 0}, { "ft_min_word_len", OPT_FT_MIN_WORD_LEN, "", &ft_min_word_len, &ft_min_word_len, 0, GET_ULONG, REQUIRED_ARG, 4, 1, HA_FT_MAXCHARLEN, 0, 1, 0}, { "ft_max_word_len", OPT_FT_MAX_WORD_LEN, "", &ft_max_word_len, &ft_max_word_len, 0, GET_ULONG, REQUIRED_ARG, HA_FT_MAXCHARLEN, 10, HA_FT_MAXCHARLEN, 0, 1, 0}, { "ft_stopword_file", OPT_FT_STOPWORD_FILE, "Use stopwords from this file instead of built-in list.", &ft_stopword_file, &ft_stopword_file, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"stats_method", OPT_STATS_METHOD, "Specifies how index statistics collection code should treat NULLs. 
" "Possible values of name are \"nulls_unequal\" (default behavior for 4.1/5.0), " "\"nulls_equal\" (emulate 4.0 behavior), and \"nulls_ignored\".", &myisam_stats_method_str, &myisam_stats_method_str, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, { 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0} }; static void print_version(void) { printf("%s Ver 2.7 for %s at %s\n", my_progname, SYSTEM_TYPE, MACHINE_TYPE); } static void usage(void) { print_version(); puts("By Monty, for your professional use"); puts("This software comes with NO WARRANTY: see the PUBLIC for details.\n"); puts("Description, check and repair of MyISAM tables."); puts("Used without options all tables on the command will be checked for errors"); printf("Usage: %s [OPTIONS] tables[.MYI]\n", my_progname_short); printf("\nGlobal options:\n"); #ifndef DBUG_OFF printf("\ -#, --debug=... Output debug log. Often this is 'd:t:o,filename'.\n"); #endif printf("\ -H, --HELP Display this help and exit.\n\ -?, --help Display this help and exit.\n\ -t, --tmpdir=path Path for temporary files. Multiple paths can be\n\ specified, separated by "); #if defined( __WIN__) printf("semicolon (;)"); #else printf("colon (:)"); #endif printf(", they will be used\n\ in a round-robin fashion.\n\ -s, --silent Only print errors. One can use two -s to make\n\ myisamchk very silent.\n\ -v, --verbose Print more information. This can be used with\n\ --description and --check. Use many -v for more verbosity.\n\ -V, --version Print version and exit.\n\ -w, --wait Wait if table is locked.\n\n"); #ifdef DEBUG puts(" --start-check-pos=# Start reading file at given offset.\n"); #endif puts("Check options (check is the default action for myisamchk):\n\ -c, --check Check table for errors.\n\ -e, --extend-check Check the table VERY throughly. Only use this in\n\ extreme cases as myisamchk should normally be able to\n\ find out if the table is ok even without this switch.\n\ -F, --fast Check only tables that haven't been closed properly.\n\ -C, --check-only-changed\n\ Check only tables that have changed since last check.\n\ -f, --force Restart with '-r' if there are any errors in the table.\n\ States will be updated as with '--update-state'.\n\ -i, --information Print statistics information about table that is checked.\n\ -m, --medium-check Faster than extend-check, but only finds 99.99% of\n\ all errors. Should be good enough for most cases.\n\ -U --update-state Mark tables as crashed if you find any errors.\n\ -T, --read-only Don't mark table as checked.\n"); puts("Repair options (When using '-r' or '-o'):\n\ -B, --backup Make a backup of the .MYD file as 'filename-time.BAK'.\n\ --correct-checksum Correct checksum information for table.\n\ -D, --data-file-length=# Max length of data file (when recreating data\n\ file when it's full).\n\ -e, --extend-check Try to recover every possible row from the data file\n\ Normally this will also find a lot of garbage rows;\n\ Don't use this option if you are not totally desperate.\n\ -f, --force Overwrite old temporary files.\n\ -k, --keys-used=# Tell MyISAM to update only some specific keys. # is a\n\ bit mask of which keys to use. 
This can be used to\n\ get faster inserts.\n\ --max-record-length=#\n\ Skip rows bigger than this if myisamchk can't allocate\n\ memory to hold it.\n\ -r, --recover Can fix almost anything except unique keys that aren't\n\ unique.\n\ -n, --sort-recover Forces recovering with sorting even if the temporary\n\ file would be very big.\n\ -p, --parallel-recover\n\ Uses the same technique as '-r' and '-n', but creates\n\ all the keys in parallel, in different threads.\n\ -o, --safe-recover Uses old recovery method; Slower than '-r' but can\n\ handle a couple of cases where '-r' reports that it\n\ can't fix the data file.\n\ --character-sets-dir=...\n\ Directory where character sets are.\n\ --set-collation=name\n\ Change the collation used by the index.\n\ -q, --quick Faster repair by not modifying the data file.\n\ One can give a second '-q' to force myisamchk to\n\ modify the original datafile in case of duplicate keys.\n\ NOTE: Tables where the data file is currupted can't be\n\ fixed with this option.\n\ -u, --unpack Unpack file packed with myisampack.\n\ "); puts("Other actions:\n\ -a, --analyze Analyze distribution of keys. Will make some joins in\n\ MySQL faster. You can check the calculated distribution\n\ by using '--description --verbose table_name'.\n\ --stats_method=name Specifies how index statistics collection code should\n\ treat NULLs. Possible values of name are \"nulls_unequal\"\n\ (default for 4.1/5.0), \"nulls_equal\" (emulate 4.0), and \n\ \"nulls_ignored\".\n\ -d, --description Prints some information about table.\n\ -A, --set-auto-increment[=value]\n\ Force auto_increment to start at this or higher value\n\ If no value is given, then sets the next auto_increment\n\ value to the highest used value for the auto key + 1.\n\ -S, --sort-index Sort index blocks. This speeds up 'read-next' in\n\ applications.\n\ -R, --sort-records=#\n\ Sort records according to an index. 
This makes your\n\ data much more localized and may speed up things\n\ (It may be VERY slow to do a sort the first time!).\n\ -b, --block-search=#\n\ Find a record, a block at given offset belongs to."); print_defaults("my", load_default_groups); my_print_variables(my_long_options); } const char *myisam_stats_method_names[] = {"nulls_unequal", "nulls_equal", "nulls_ignored", NullS}; TYPELIB myisam_stats_method_typelib= { array_elements(myisam_stats_method_names) - 1, "", myisam_stats_method_names, NULL}; /* Read options */ static my_bool get_one_option(int optid, const struct my_option *opt __attribute__((unused)), char *argument) { switch (optid) { case 'a': if (argument == disabled_my_option) check_param.testflag&= ~T_STATISTICS; else check_param.testflag|= T_STATISTICS; break; case 'A': if (argument) check_param.auto_increment_value= strtoull(argument, NULL, 0); else check_param.auto_increment_value= 0; /* Set to max used value */ check_param.testflag|= T_AUTO_INC; break; case 'b': check_param.search_after_block= strtoul(argument, NULL, 10); break; case 'B': if (argument == disabled_my_option) check_param.testflag&= ~T_BACKUP_DATA; else check_param.testflag|= T_BACKUP_DATA; break; case 'c': if (argument == disabled_my_option) check_param.testflag&= ~T_CHECK; else check_param.testflag|= T_CHECK; break; case 'C': if (argument == disabled_my_option) check_param.testflag&= ~(T_CHECK | T_CHECK_ONLY_CHANGED); else check_param.testflag|= T_CHECK | T_CHECK_ONLY_CHANGED; break; case 'D': check_param.max_data_file_length=strtoll(argument, NULL, 10); break; case 's': /* silent */ if (argument == disabled_my_option) check_param.testflag&= ~(T_SILENT | T_VERY_SILENT); else { if (check_param.testflag & T_SILENT) check_param.testflag|= T_VERY_SILENT; check_param.testflag|= T_SILENT; check_param.testflag&= ~T_WRITE_LOOP; } break; case 'w': if (argument == disabled_my_option) check_param.testflag&= ~T_WAIT_FOREVER; else check_param.testflag|= T_WAIT_FOREVER; break; case 'd': /* description if isam-file */ if (argument == disabled_my_option) check_param.testflag&= ~T_DESCRIPT; else check_param.testflag|= T_DESCRIPT; break; case 'e': /* extend check */ if (argument == disabled_my_option) check_param.testflag&= ~T_EXTEND; else check_param.testflag|= T_EXTEND; break; case 'i': if (argument == disabled_my_option) check_param.testflag&= ~T_INFO; else check_param.testflag|= T_INFO; break; case 'f': if (argument == disabled_my_option) { check_param.tmpfile_createflag= O_RDWR | O_TRUNC | O_EXCL; check_param.testflag&= ~(T_FORCE_CREATE | T_UPDATE_STATE); } else { check_param.tmpfile_createflag= O_RDWR | O_TRUNC; check_param.testflag|= T_FORCE_CREATE | T_UPDATE_STATE; } break; case 'F': if (argument == disabled_my_option) check_param.testflag&= ~T_FAST; else check_param.testflag|= T_FAST; break; case 'k': check_param.keys_in_use= (ulonglong) strtoll(argument, NULL, 10); break; case 'm': if (argument == disabled_my_option) check_param.testflag&= ~T_MEDIUM; else check_param.testflag|= T_MEDIUM; /* Medium check */ break; case 'r': /* Repair table */ check_param.testflag&= ~T_REP_ANY; if (argument != disabled_my_option) check_param.testflag|= T_REP_BY_SORT; break; case 'p': check_param.testflag&= ~T_REP_ANY; if (argument != disabled_my_option) check_param.testflag|= T_REP_PARALLEL; break; case 'o': check_param.testflag&= ~T_REP_ANY; check_param.force_sort= 0; if (argument != disabled_my_option) { check_param.testflag|= T_REP; my_disable_async_io= 1; /* More safety */ } break; case 'n': check_param.testflag&= 
~T_REP_ANY; if (argument == disabled_my_option) check_param.force_sort= 0; else { check_param.testflag|= T_REP_BY_SORT; check_param.force_sort= 1; } break; case 'q': if (argument == disabled_my_option) check_param.testflag&= ~(T_QUICK | T_FORCE_UNIQUENESS); else check_param.testflag|= (check_param.testflag & T_QUICK) ? T_FORCE_UNIQUENESS : T_QUICK; break; case 'u': if (argument == disabled_my_option) check_param.testflag&= ~(T_UNPACK | T_REP_BY_SORT); else check_param.testflag|= T_UNPACK | T_REP_BY_SORT; break; case 'v': /* Verbose */ if (argument == disabled_my_option) { check_param.testflag&= ~T_VERBOSE; check_param.verbose=0; } else { check_param.testflag|= T_VERBOSE; check_param.verbose++; } break; case 'R': /* Sort records */ if (argument == disabled_my_option) check_param.testflag&= ~T_SORT_RECORDS; else { check_param.testflag|= T_SORT_RECORDS; check_param.opt_sort_key= (uint) atoi(argument) - 1; if (check_param.opt_sort_key >= MI_MAX_KEY) { fprintf(stderr, "The value of the sort key is bigger than max key: %d.\n", MI_MAX_KEY); exit(1); } } break; case 'S': /* Sort index */ if (argument == disabled_my_option) check_param.testflag&= ~T_SORT_INDEX; else check_param.testflag|= T_SORT_INDEX; break; case 'T': if (argument == disabled_my_option) check_param.testflag&= ~T_READONLY; else check_param.testflag|= T_READONLY; break; case 'U': if (argument == disabled_my_option) check_param.testflag&= ~T_UPDATE_STATE; else check_param.testflag|= T_UPDATE_STATE; break; case '#': if (argument == disabled_my_option) { DBUG_POP(); } else { DBUG_PUSH(argument ? argument : "d:t:o,/tmp/myisamchk.trace"); } break; case 'V': print_version(); exit(0); case OPT_CORRECT_CHECKSUM: if (argument == disabled_my_option) check_param.testflag&= ~T_CALC_CHECKSUM; else check_param.testflag|= T_CALC_CHECKSUM; break; case OPT_STATS_METHOD: { int method; enum_mi_stats_method UNINIT_VAR(method_conv); myisam_stats_method_str= argument; if ((method= find_type(argument, &myisam_stats_method_typelib, FIND_TYPE_BASIC)) <= 0) { fprintf(stderr, "Invalid value of stats_method: %s.\n", argument); exit(1); } switch (method-1) { case 0: method_conv= MI_STATS_METHOD_NULLS_EQUAL; break; case 1: method_conv= MI_STATS_METHOD_NULLS_NOT_EQUAL; break; case 2: method_conv= MI_STATS_METHOD_IGNORE_NULLS; break; default: assert(0); /* Impossible */ } check_param.stats_method= method_conv; break; } #ifdef DEBUG /* Only useful if debugging */ case OPT_START_CHECK_POS: check_param.start_check_pos= strtoull(argument, NULL, 0); break; #endif case 'H': my_print_help(my_long_options); exit(0); case '?': usage(); exit(0); } return 0; } static void get_options(register int *argc,register char ***argv) { int ho_error; if (load_defaults("my", load_default_groups, argc, argv)) exit(1); default_argv= *argv; if (isatty(fileno(stdout))) check_param.testflag|=T_WRITE_LOOP; if ((ho_error=handle_options(argc, argv, my_long_options, get_one_option))) exit(ho_error); /* If using repair, then update checksum if one uses --update-state */ if ((check_param.testflag & T_UPDATE_STATE) && (check_param.testflag & T_REP_ANY)) check_param.testflag|= T_CALC_CHECKSUM; if (*argc == 0) { usage(); exit(-1); } if ((check_param.testflag & T_UNPACK) && (check_param.testflag & (T_QUICK | T_SORT_RECORDS))) { (void) fprintf(stderr, "%s: --unpack can't be used with --quick or --sort-records\n", my_progname_short); exit(1); } if ((check_param.testflag & T_READONLY) && (check_param.testflag & (T_REP_ANY | T_STATISTICS | T_AUTO_INC | T_SORT_RECORDS | T_SORT_INDEX | T_FORCE_CREATE))) { 
(void) fprintf(stderr, "%s: Can't use --readonly when repairing or sorting\n", my_progname_short); exit(1); } if (init_tmpdir(&myisamchk_tmpdir, opt_tmpdir)) exit(1); check_param.tmpdir=&myisamchk_tmpdir; check_param.key_cache_block_size= opt_key_cache_block_size; if (set_collation_name) if (!(set_collation= get_charset_by_name(set_collation_name, MYF(MY_WME)))) exit(1); myisam_block_size=(uint) 1 << my_bit_log2(opt_myisam_block_size); return; } /* get options */ /* Check table */ static int myisamchk(MI_CHECK *param, char * filename) { int error,lock_type,recreate; int rep_quick= param->testflag & (T_QUICK | T_FORCE_UNIQUENESS); MI_INFO *info; File datafile; char llbuff[22],llbuff2[22]; my_bool state_updated=0; MYISAM_SHARE *share; DBUG_ENTER("myisamchk"); param->out_flag=error=param->warning_printed=param->error_printed= recreate=0; datafile=0; param->isam_file_name=filename; /* For error messages */ if (!(info=mi_open(filename, (param->testflag & (T_DESCRIPT | T_READONLY)) ? O_RDONLY : O_RDWR, HA_OPEN_FOR_REPAIR | ((param->testflag & T_WAIT_FOREVER) ? HA_OPEN_WAIT_IF_LOCKED : (param->testflag & T_DESCRIPT) ? HA_OPEN_IGNORE_IF_LOCKED : HA_OPEN_ABORT_IF_LOCKED)))) { /* Avoid twice printing of isam file name */ param->error_printed=1; switch (my_errno) { case HA_ERR_CRASHED: mi_check_print_error(param,"'%s' doesn't have a correct index definition. You need to recreate it before you can do a repair",filename); break; case HA_ERR_NOT_A_TABLE: mi_check_print_error(param,"'%s' is not a MyISAM-table",filename); break; case HA_ERR_CRASHED_ON_USAGE: mi_check_print_error(param,"'%s' is marked as crashed",filename); break; case HA_ERR_CRASHED_ON_REPAIR: mi_check_print_error(param,"'%s' is marked as crashed after last repair",filename); break; case HA_ERR_OLD_FILE: mi_check_print_error(param,"'%s' is an old type of MyISAM-table", filename); break; case HA_ERR_END_OF_FILE: mi_check_print_error(param,"Couldn't read complete header from '%s'", filename); break; case EAGAIN: mi_check_print_error(param,"'%s' is locked. 
Use -w to wait until unlocked",filename); break; case ENOENT: mi_check_print_error(param,"File '%s' doesn't exist",filename); break; case EACCES: mi_check_print_error(param,"You don't have permission to use '%s'",filename); break; default: mi_check_print_error(param,"%d when opening MyISAM-table '%s'", my_errno,filename); break; } DBUG_RETURN(1); } share=info->s; share->options&= ~HA_OPTION_READ_ONLY_DATA; /* We are modifing it */ share->tot_locks-= share->r_locks; share->r_locks=0; /* Skip the checking of the file if: We are using --fast and the table is closed properly We are using --check-only-changed-tables and the table hasn't changed */ if (param->testflag & (T_FAST | T_CHECK_ONLY_CHANGED)) { my_bool need_to_check= mi_is_crashed(info) || share->state.open_count != 0; if ((param->testflag & (T_REP_ANY | T_SORT_RECORDS)) && ((share->state.changed & (STATE_CHANGED | STATE_CRASHED | STATE_CRASHED_ON_REPAIR) || !(param->testflag & T_CHECK_ONLY_CHANGED)))) need_to_check=1; if (info->s->base.keys && info->state->records) { if ((param->testflag & T_STATISTICS) && (share->state.changed & STATE_NOT_ANALYZED)) need_to_check=1; if ((param->testflag & T_SORT_INDEX) && (share->state.changed & STATE_NOT_SORTED_PAGES)) need_to_check=1; if ((param->testflag & T_REP_BY_SORT) && (share->state.changed & STATE_NOT_OPTIMIZED_KEYS)) need_to_check=1; } if ((param->testflag & T_CHECK_ONLY_CHANGED) && (share->state.changed & (STATE_CHANGED | STATE_CRASHED | STATE_CRASHED_ON_REPAIR))) need_to_check=1; if (!need_to_check) { if (!(param->testflag & T_SILENT) || param->testflag & T_INFO) printf("MyISAM file: %s is already checked\n",filename); if (mi_close(info)) { mi_check_print_error(param,"%d when closing MyISAM-table '%s'", my_errno,filename); DBUG_RETURN(1); } DBUG_RETURN(0); } } if ((param->testflag & (T_REP_ANY | T_STATISTICS | T_SORT_RECORDS | T_SORT_INDEX)) && (((param->testflag & T_UNPACK) && share->data_file_type == COMPRESSED_RECORD) || mi_uint2korr(share->state.header.state_info_length) != MI_STATE_INFO_SIZE || mi_uint2korr(share->state.header.base_info_length) != MI_BASE_INFO_SIZE || mi_is_any_intersect_keys_active(param->keys_in_use, share->base.keys, ~share->state.key_map) || test_if_almost_full(info) || info->s->state.header.file_version[3] != myisam_file_magic[3] || (set_collation && set_collation->number != share->state.header.language) || myisam_block_size != MI_KEY_BLOCK_LENGTH)) { if (set_collation) param->language= set_collation->number; if (recreate_table(param, &info,filename)) { (void) fprintf(stderr, "MyISAM-table '%s' is not fixed because of errors\n", filename); return(-1); } recreate=1; if (!(param->testflag & T_REP_ANY)) { param->testflag|=T_REP_BY_SORT; /* if only STATISTICS */ if (!(param->testflag & T_SILENT)) printf("- '%s' has old table-format. Recreating index\n",filename); rep_quick|=T_QUICK; } share=info->s; share->tot_locks-= share->r_locks; share->r_locks=0; } if (param->testflag & T_DESCRIPT) { param->total_files++; param->total_records+=info->state->records; param->total_deleted+=info->state->del; descript(param, info, filename); } else { if (!stopwords_inited++) ft_init_stopwords(); if (!(param->testflag & T_READONLY)) lock_type = F_WRLCK; /* table is changed */ else lock_type= F_RDLCK; if (info->lock_type == F_RDLCK) info->lock_type=F_UNLCK; /* Read only table */ if (_mi_readinfo(info,lock_type,0)) { mi_check_print_error(param,"Can't lock indexfile of '%s', error: %d", filename,my_errno); param->error_printed=0; goto end2; } /* _mi_readinfo() has locked the table. 
We mark the table as locked (without doing file locks) to be able to use functions that only works on locked tables (like row caching). */ mi_lock_database(info, F_EXTRA_LCK); datafile=info->dfile; if (param->testflag & (T_REP_ANY | T_SORT_RECORDS | T_SORT_INDEX)) { if (param->testflag & T_REP_ANY) { ulonglong tmp=share->state.key_map; mi_copy_keys_active(share->state.key_map, share->base.keys, param->keys_in_use); if (tmp != share->state.key_map) info->update|=HA_STATE_CHANGED; } if (rep_quick && chk_del(param, info, param->testflag & ~T_VERBOSE)) { if (param->testflag & T_FORCE_CREATE) { rep_quick=0; mi_check_print_info(param,"Creating new data file\n"); } else { error=1; mi_check_print_error(param, "Quick-recover aborted; Run recovery without switch 'q'"); } } if (!error) { if ((param->testflag & (T_REP_BY_SORT | T_REP_PARALLEL)) && (mi_is_any_key_active(share->state.key_map) || (rep_quick && !param->keys_in_use && !recreate)) && mi_test_if_sort_rep(info, info->state->records, info->s->state.key_map, param->force_sort)) { if (param->testflag & T_REP_BY_SORT) error=mi_repair_by_sort(param,info,filename,rep_quick); else error=mi_repair_parallel(param,info,filename,rep_quick); state_updated=1; } else if (param->testflag & T_REP_ANY) error=mi_repair(param, info,filename,rep_quick); } if (!error && param->testflag & T_SORT_RECORDS) { /* The data file is nowadays reopened in the repair code so we should soon remove the following reopen-code */ #ifndef TO_BE_REMOVED if (param->out_flag & O_NEW_DATA) { /* Change temp file to org file */ (void) my_close(info->dfile,MYF(MY_WME)); /* Close new file */ error|=change_to_newfile(filename, MI_NAME_DEXT, DATA_TMP_EXT, MYF(0)); if (mi_open_datafile(info,info->s, NULL, -1)) error=1; param->out_flag&= ~O_NEW_DATA; /* We are using new datafile */ param->read_cache.file=info->dfile; } #endif if (! error) { uint key; /* We can't update the index in mi_sort_records if we have a prefix compressed or fulltext index */ my_bool update_index=1; for (key=0 ; key < share->base.keys; key++) if (share->keyinfo[key].flag & (HA_BINARY_PACK_KEY|HA_FULLTEXT)) update_index=0; error=mi_sort_records(param,info,filename,param->opt_sort_key, /* what is the following parameter for ? 
*/ (my_bool) !(param->testflag & T_REP), update_index); datafile=info->dfile; /* This is now locked */ if (!error && !update_index) { if (param->verbose) puts("Table had a compressed index; We must now recreate the index"); error=mi_repair_by_sort(param,info,filename,1); } } } if (!error && param->testflag & T_SORT_INDEX) error=mi_sort_index(param,info,filename); if (!error) share->state.changed&= ~(STATE_CHANGED | STATE_CRASHED | STATE_CRASHED_ON_REPAIR); else mi_mark_crashed(info); } else if ((param->testflag & T_CHECK) || !(param->testflag & T_AUTO_INC)) { if (!(param->testflag & T_SILENT) || param->testflag & T_INFO) printf("Checking MyISAM file: %s\n",filename); if (!(param->testflag & T_SILENT)) printf("Data records: %7s Deleted blocks: %7s\n", llstr(info->state->records,llbuff), llstr(info->state->del,llbuff2)); error =chk_status(param,info); mi_intersect_keys_active(share->state.key_map, param->keys_in_use); error =chk_size(param,info); if (!error || !(param->testflag & (T_FAST | T_FORCE_CREATE))) error|=chk_del(param, info,param->testflag); if ((!error || (!(param->testflag & (T_FAST | T_FORCE_CREATE)) && !param->start_check_pos))) { error|=chk_key(param, info); if (!error && (param->testflag & (T_STATISTICS | T_AUTO_INC))) error=update_state_info(param, info, ((param->testflag & T_STATISTICS) ? UPDATE_STAT : 0) | ((param->testflag & T_AUTO_INC) ? UPDATE_AUTO_INC : 0)); } if ((!rep_quick && !error) || !(param->testflag & (T_FAST | T_FORCE_CREATE))) { if (param->testflag & (T_EXTEND | T_MEDIUM)) (void) init_key_cache(dflt_key_cache,opt_key_cache_block_size, param->use_buffers, 0, 0); (void) init_io_cache(&param->read_cache,datafile, (uint) param->read_buffer_length, READ_CACHE, (param->start_check_pos ? param->start_check_pos : share->pack.header_length), 1, MYF(MY_WME)); lock_memory(param); if ((info->s->options & (HA_OPTION_PACK_RECORD | HA_OPTION_COMPRESS_RECORD)) || (param->testflag & (T_EXTEND | T_MEDIUM))) error|=chk_data_link(param, info, param->testflag & T_EXTEND); error|=flush_blocks(param, share->key_cache, share->kfile); (void) end_io_cache(&param->read_cache); } if (!error) { if ((share->state.changed & STATE_CHANGED) && (param->testflag & T_UPDATE_STATE)) info->update|=HA_STATE_CHANGED | HA_STATE_ROW_CHANGED; share->state.changed&= ~(STATE_CHANGED | STATE_CRASHED | STATE_CRASHED_ON_REPAIR); } else if (!mi_is_crashed(info) && (param->testflag & T_UPDATE_STATE)) { /* Mark crashed */ mi_mark_crashed(info); info->update|=HA_STATE_CHANGED | HA_STATE_ROW_CHANGED; } } } if ((param->testflag & T_AUTO_INC) || ((param->testflag & T_REP_ANY) && info->s->base.auto_key)) update_auto_increment_key(param, info, (my_bool) !test(param->testflag & T_AUTO_INC)); if (!(param->testflag & T_DESCRIPT)) { if (info->update & HA_STATE_CHANGED && ! (param->testflag & T_READONLY)) error|=update_state_info(param, info, UPDATE_OPEN_COUNT | (((param->testflag & T_REP_ANY) ? UPDATE_TIME : 0) | (state_updated ? UPDATE_STAT : 0) | ((param->testflag & T_SORT_RECORDS) ? UPDATE_SORT : 0))); (void) lock_file(param, share->kfile,0L,F_UNLCK,"indexfile",filename); info->update&= ~HA_STATE_CHANGED; } mi_lock_database(info, F_UNLCK); end2: if (mi_close(info)) { mi_check_print_error(param,"%d when closing MyISAM-table '%s'",my_errno,filename); DBUG_RETURN(1); } if (error == 0) { if (param->out_flag & O_NEW_DATA) error|=change_to_newfile(filename,MI_NAME_DEXT,DATA_TMP_EXT, ((param->testflag & T_BACKUP_DATA) ? 
MYF(MY_REDEL_MAKE_BACKUP) : MYF(0))); if (param->out_flag & O_NEW_INDEX) error|=change_to_newfile(filename, MI_NAME_IEXT, INDEX_TMP_EXT, MYF(0)); } (void) fflush(stdout); (void) fflush(stderr); if (param->error_printed) { if (param->testflag & (T_REP_ANY | T_SORT_RECORDS | T_SORT_INDEX)) { (void) fprintf(stderr, "MyISAM-table '%s' is not fixed because of errors\n", filename); if (param->testflag & T_REP_ANY) (void) fprintf(stderr, "Try fixing it by using the --safe-recover (-o), the --force (-f) option or by not using the --quick (-q) flag\n"); } else if (!(param->error_printed & 2) && !(param->testflag & T_FORCE_CREATE)) (void) fprintf(stderr, "MyISAM-table '%s' is corrupted\nFix it using switch \"-r\" or \"-o\"\n", filename); } else if (param->warning_printed && ! (param->testflag & (T_REP_ANY | T_SORT_RECORDS | T_SORT_INDEX | T_FORCE_CREATE))) (void) fprintf(stderr, "MyISAM-table '%s' is usable but should be fixed\n", filename); (void) fflush(stderr); DBUG_RETURN(error); } /* myisamchk */ /* Write info about table */ static void descript(MI_CHECK *param, register MI_INFO *info, char * name) { uint key,keyseg_nr,field,start; reg3 MI_KEYDEF *keyinfo; reg2 HA_KEYSEG *keyseg; reg4 const char *text; char buff[160],length[10],*pos,*end; enum en_fieldtype type; MYISAM_SHARE *share=info->s; char llbuff[22],llbuff2[22]; DBUG_ENTER("describe"); printf("\nMyISAM file: %s\n",name); fputs("Record format: ",stdout); if (share->options & HA_OPTION_COMPRESS_RECORD) puts("Compressed"); else if (share->options & HA_OPTION_PACK_RECORD) puts("Packed"); else puts("Fixed length"); printf("Character set: %s (%d)\n", get_charset_name(share->state.header.language), share->state.header.language); if (param->testflag & T_VERBOSE) { printf("File-version: %d\n", (int) share->state.header.file_version[3]); if (share->state.create_time) { get_date(buff,1,share->state.create_time); printf("Creation time: %s\n",buff); } if (share->state.check_time) { get_date(buff,1,share->state.check_time); printf("Recover time: %s\n",buff); } pos=buff; if (share->state.changed & STATE_CRASHED) strmov(buff,"crashed"); else { if (share->state.open_count) pos=strmov(pos,"open,"); if (share->state.changed & STATE_CHANGED) pos=strmov(pos,"changed,"); else pos=strmov(pos,"checked,"); if (!(share->state.changed & STATE_NOT_ANALYZED)) pos=strmov(pos,"analyzed,"); if (!(share->state.changed & STATE_NOT_OPTIMIZED_KEYS)) pos=strmov(pos,"optimized keys,"); if (!(share->state.changed & STATE_NOT_SORTED_PAGES)) pos=strmov(pos,"sorted index pages,"); pos[-1]=0; /* Remove extra ',' */ } printf("Status: %s\n",buff); if (share->base.auto_key) { printf("Auto increment key: %13d Last value: %13s\n", share->base.auto_key, llstr(share->state.auto_increment,llbuff)); } if (share->options & (HA_OPTION_CHECKSUM | HA_OPTION_COMPRESS_RECORD)) printf("Checksum: %23s\n",llstr(info->state->checksum,llbuff)); if (share->options & HA_OPTION_DELAY_KEY_WRITE) printf("Keys are only flushed at close\n"); } printf("Data records: %13s Deleted blocks: %13s\n", llstr(info->state->records,llbuff),llstr(info->state->del,llbuff2)); if (param->testflag & T_SILENT) DBUG_VOID_RETURN; /* This is enough */ if (param->testflag & T_VERBOSE) { #ifdef USE_RELOC printf("Init-relocation: %13s\n",llstr(share->base.reloc,llbuff)); #endif printf("Datafile parts: %13s Deleted data: %13s\n", llstr(share->state.split,llbuff), llstr(info->state->empty,llbuff2)); printf("Datafile pointer (bytes):%9d Keyfile pointer (bytes):%9d\n", share->rec_reflength,share->base.key_reflength); 
printf("Datafile length: %13s Keyfile length: %13s\n", llstr(info->state->data_file_length,llbuff), llstr(info->state->key_file_length,llbuff2)); if (info->s->base.reloc == 1L && info->s->base.records == 1L) puts("This is a one-record table"); else { if (share->base.max_data_file_length != HA_OFFSET_ERROR || share->base.max_key_file_length != HA_OFFSET_ERROR) printf("Max datafile length: %13s Max keyfile length: %13s\n", llstr(share->base.max_data_file_length-1,llbuff), ullstr(share->base.max_key_file_length - 1, llbuff2)); } } printf("Recordlength: %13d\n",(int) share->base.pack_reclength); if (! mi_is_all_keys_active(share->state.key_map, share->base.keys)) { longlong2str(share->state.key_map,buff,2); printf("Using only keys '%s' of %d possibly keys\n", buff, share->base.keys); } puts("\ntable description:"); printf("Key Start Len Index Type"); if (param->testflag & T_VERBOSE) printf(" Rec/key Root Blocksize"); (void) putchar('\n'); for (key=keyseg_nr=0, keyinfo= &share->keyinfo[0] ; key < share->base.keys; key++,keyinfo++) { keyseg=keyinfo->seg; if (keyinfo->flag & HA_NOSAME) text="unique "; else if (keyinfo->flag & HA_FULLTEXT) text="fulltext "; else text="multip."; pos=buff; if (keyseg->flag & HA_REVERSE_SORT) *pos++ = '-'; pos=strmov(pos,type_names[keyseg->type]); *pos++ = ' '; *pos=0; if (keyinfo->flag & HA_PACK_KEY) pos=strmov(pos,prefix_packed_txt); if (keyinfo->flag & HA_BINARY_PACK_KEY) pos=strmov(pos,bin_packed_txt); if (keyseg->flag & HA_SPACE_PACK) pos=strmov(pos,diff_txt); if (keyseg->flag & HA_BLOB_PART) pos=strmov(pos,blob_txt); if (keyseg->flag & HA_NULL_PART) pos=strmov(pos,null_txt); *pos=0; printf("%-4d%-6ld%-3d %-8s%-21s", key+1,(long) keyseg->start+1,keyseg->length,text,buff); if (share->state.key_root[key] != HA_OFFSET_ERROR) llstr(share->state.key_root[key],buff); else buff[0]=0; if (param->testflag & T_VERBOSE) printf("%11lu %12s %10d", share->state.rec_per_key_part[keyseg_nr++], buff,keyinfo->block_length); (void) putchar('\n'); while ((++keyseg)->type != HA_KEYTYPE_END) { pos=buff; if (keyseg->flag & HA_REVERSE_SORT) *pos++ = '-'; pos=strmov(pos,type_names[keyseg->type]); *pos++= ' '; if (keyseg->flag & HA_SPACE_PACK) pos=strmov(pos,diff_txt); if (keyseg->flag & HA_BLOB_PART) pos=strmov(pos,blob_txt); if (keyseg->flag & HA_NULL_PART) pos=strmov(pos,null_txt); *pos=0; printf(" %-6ld%-3d %-21s", (long) keyseg->start+1,keyseg->length,buff); if (param->testflag & T_VERBOSE) printf("%11lu", share->state.rec_per_key_part[keyseg_nr++]); (void) putchar('\n'); } keyseg++; } if (share->state.header.uniques) { MI_UNIQUEDEF *uniqueinfo; puts("\nUnique Key Start Len Nullpos Nullbit Type"); for (key=0,uniqueinfo= &share->uniqueinfo[0] ; key < share->state.header.uniques; key++, uniqueinfo++) { my_bool new_row=0; char null_bit[8],null_pos[8]; printf("%-8d%-5d",key+1,uniqueinfo->key+1); for (keyseg=uniqueinfo->seg ; keyseg->type != HA_KEYTYPE_END ; keyseg++) { if (new_row) fputs(" ",stdout); null_bit[0]=null_pos[0]=0; if (keyseg->null_bit) { sprintf(null_bit,"%d",keyseg->null_bit); sprintf(null_pos,"%ld",(long) keyseg->null_pos+1); } printf("%-7ld%-5d%-9s%-10s%-30s\n", (long) keyseg->start+1,keyseg->length, null_pos,null_bit, type_names[keyseg->type]); new_row=1; } } } if (param->verbose > 1) { char null_bit[8],null_pos[8]; printf("\nField Start Length Nullpos Nullbit Type"); if (share->options & HA_OPTION_COMPRESS_RECORD) printf(" Huff tree Bits"); (void) putchar('\n'); start=1; for (field=0 ; field < share->base.fields ; field++) { if (share->options & 
HA_OPTION_COMPRESS_RECORD) type=share->rec[field].base_type; else type=(enum en_fieldtype) share->rec[field].type; end=strmov(buff,field_pack[type]); if (share->options & HA_OPTION_COMPRESS_RECORD) { if (share->rec[field].pack_type & PACK_TYPE_SELECTED) end=strmov(end,", not_always"); if (share->rec[field].pack_type & PACK_TYPE_SPACE_FIELDS) end=strmov(end,", no empty"); if (share->rec[field].pack_type & PACK_TYPE_ZERO_FILL) { sprintf(end,", zerofill(%d)",share->rec[field].space_length_bits); end=strend(end); } } if (buff[0] == ',') strmov(buff,buff+2); int10_to_str((long) share->rec[field].length,length,10); null_bit[0]=null_pos[0]=0; if (share->rec[field].null_bit) { sprintf(null_bit,"%d",share->rec[field].null_bit); sprintf(null_pos,"%d",share->rec[field].null_pos+1); } printf("%-6d%-6d%-7s%-8s%-8s%-35s",field+1,start,length, null_pos, null_bit, buff); if (share->options & HA_OPTION_COMPRESS_RECORD) { if (share->rec[field].huff_tree) printf("%3d %2d", (uint) (share->rec[field].huff_tree-share->decode_trees)+1, share->rec[field].huff_tree->quick_table_bits); } (void) putchar('\n'); start+=share->rec[field].length; } } DBUG_VOID_RETURN; } /* describe */ /* Sort records according to one key */ static int mi_sort_records(MI_CHECK *param, register MI_INFO *info, char * name, uint sort_key, my_bool write_info, my_bool update_index) { int got_error; uint key; MI_KEYDEF *keyinfo; File new_file; uchar *temp_buff; ha_rows old_record_count; MYISAM_SHARE *share=info->s; char llbuff[22],llbuff2[22]; SORT_INFO sort_info; MI_SORT_PARAM sort_param; DBUG_ENTER("sort_records"); bzero((char*)&sort_info,sizeof(sort_info)); bzero((char*)&sort_param,sizeof(sort_param)); sort_param.sort_info=&sort_info; sort_info.param=param; keyinfo= &share->keyinfo[sort_key]; got_error=1; temp_buff=0; new_file= -1; if (! 
mi_is_key_active(share->state.key_map, sort_key)) { mi_check_print_warning(param, "Can't sort table '%s' on key %d; No such key", name,sort_key+1); param->error_printed=0; DBUG_RETURN(0); /* Nothing to do */ } if (keyinfo->flag & HA_FULLTEXT) { mi_check_print_warning(param,"Can't sort table '%s' on FULLTEXT key %d", name,sort_key+1); param->error_printed=0; DBUG_RETURN(0); /* Nothing to do */ } if (share->data_file_type == COMPRESSED_RECORD) { mi_check_print_warning(param,"Can't sort read-only table '%s'", name); param->error_printed=0; DBUG_RETURN(0); /* Nothing to do */ } if (!(param->testflag & T_SILENT)) { printf("- Sorting records for MyISAM-table '%s'\n",name); if (write_info) printf("Data records: %9s Deleted: %9s\n", llstr(info->state->records,llbuff), llstr(info->state->del,llbuff2)); } if (share->state.key_root[sort_key] == HA_OFFSET_ERROR) DBUG_RETURN(0); /* Nothing to do */ init_key_cache(dflt_key_cache, opt_key_cache_block_size, (size_t) param->use_buffers, 0, 0); if (init_io_cache(&info->rec_cache,-1,(uint) param->write_buffer_length, WRITE_CACHE,share->pack.header_length,1, MYF(MY_WME | MY_WAIT_IF_FULL))) goto err; info->opt_flag|=WRITE_CACHE_USED; if (!(temp_buff=(uchar*) my_alloca((uint) keyinfo->block_length))) { mi_check_print_error(param,"Not enough memory for key block"); goto err; } if (!mi_alloc_rec_buff(info, -1, &sort_param.record)) { mi_check_print_error(param,"Not enough memory for record"); goto err; } fn_format(param->temp_filename,name,"", MI_NAME_DEXT,2+4+32); new_file= my_create(fn_format(param->temp_filename, param->temp_filename, "", DATA_TMP_EXT, 2+4), 0, param->tmpfile_createflag, MYF(0)); if (new_file < 0) { mi_check_print_error(param,"Can't create new tempfile: '%s'", param->temp_filename); goto err; } if (share->pack.header_length) if (filecopy(param,new_file,info->dfile,0L,share->pack.header_length, "datafile-header")) goto err; info->rec_cache.file=new_file; /* Use this file for cacheing*/ lock_memory(param); for (key=0 ; key < share->base.keys ; key++) share->keyinfo[key].flag|= HA_SORT_ALLOWS_SAME; if (my_pread(share->kfile,(uchar*) temp_buff, (uint) keyinfo->block_length, share->state.key_root[sort_key], MYF(MY_NABP+MY_WME))) { mi_check_print_error(param,"Can't read indexpage from filepos: %s", (ulong) share->state.key_root[sort_key]); goto err; } /* Setup param for sort_write_record */ sort_info.info=info; sort_info.new_data_file_type=share->data_file_type; sort_param.fix_datafile=1; sort_param.master=1; sort_param.filepos=share->pack.header_length; old_record_count=info->state->records; info->state->records=0; if (sort_info.new_data_file_type != COMPRESSED_RECORD) info->state->checksum=0; if (sort_record_index(&sort_param,info,keyinfo,share->state.key_root[sort_key], temp_buff, sort_key,new_file,update_index) || write_data_suffix(&sort_info,1) || flush_io_cache(&info->rec_cache)) goto err; if (info->state->records != old_record_count) { mi_check_print_error(param,"found %s of %s records", llstr(info->state->records,llbuff), llstr(old_record_count,llbuff2)); goto err; } (void) my_close(info->dfile,MYF(MY_WME)); param->out_flag|=O_NEW_DATA; /* Data in new file */ info->dfile=new_file; /* Use new datafile */ info->state->del=0; info->state->empty=0; share->state.dellink= HA_OFFSET_ERROR; info->state->data_file_length=sort_param.filepos; share->state.split=info->state->records; /* Only hole records */ share->state.version=(ulong) time((time_t*) 0); info->update= (short) (HA_STATE_CHANGED | HA_STATE_ROW_CHANGED); if (param->testflag & T_WRITE_LOOP) 
{ (void) fputs(" \r",stdout); (void) fflush(stdout); } got_error=0; err: if (got_error && new_file >= 0) { (void) end_io_cache(&info->rec_cache); (void) my_close(new_file,MYF(MY_WME)); (void) my_delete(param->temp_filename, MYF(MY_WME)); } if (temp_buff) { my_afree((uchar*) temp_buff); } my_free(mi_get_rec_buff_ptr(info, sort_param.record)); info->opt_flag&= ~(READ_CACHE_USED | WRITE_CACHE_USED); (void) end_io_cache(&info->rec_cache); my_free(sort_info.buff); sort_info.buff=0; share->state.sortkey=sort_key; DBUG_RETURN(flush_blocks(param, share->key_cache, share->kfile) | got_error); } /* sort_records */ /* Sort records recursive using one index */ static int sort_record_index(MI_SORT_PARAM *sort_param,MI_INFO *info, MI_KEYDEF *keyinfo, my_off_t page, uchar *buff, uint sort_key, File new_file,my_bool update_index) { uint nod_flag,used_length,key_length; uchar *temp_buff,*keypos,*endpos; my_off_t next_page,rec_pos; uchar lastkey[MI_MAX_KEY_BUFF]; char llbuff[22]; SORT_INFO *sort_info= sort_param->sort_info; MI_CHECK *param=sort_info->param; DBUG_ENTER("sort_record_index"); nod_flag=mi_test_if_nod(buff); temp_buff=0; if (nod_flag) { if (!(temp_buff=(uchar*) my_alloca((uint) keyinfo->block_length))) { mi_check_print_error(param,"Not Enough memory"); DBUG_RETURN(-1); } } used_length=mi_getint(buff); keypos=buff+2+nod_flag; endpos=buff+used_length; for ( ;; ) { if (nod_flag) { next_page=_mi_kpos(nod_flag,keypos); if (my_pread(info->s->kfile,(uchar*) temp_buff, (uint) keyinfo->block_length, next_page, MYF(MY_NABP+MY_WME))) { mi_check_print_error(param,"Can't read keys from filepos: %s", llstr(next_page,llbuff)); goto err; } if (sort_record_index(sort_param, info,keyinfo,next_page,temp_buff,sort_key, new_file, update_index)) goto err; } if (keypos >= endpos || (key_length=(*keyinfo->get_key)(keyinfo,nod_flag,&keypos,lastkey)) == 0) break; rec_pos= _mi_dpos(info,0,lastkey+key_length); if ((*info->s->read_rnd)(info,sort_param->record,rec_pos,0)) { mi_check_print_error(param,"%d when reading datafile",my_errno); goto err; } if (rec_pos != sort_param->filepos && update_index) { _mi_dpointer(info,keypos-nod_flag-info->s->rec_reflength, sort_param->filepos); if (movepoint(info,sort_param->record,rec_pos,sort_param->filepos, sort_key)) { mi_check_print_error(param,"%d when updating key-pointers",my_errno); goto err; } } if (sort_write_record(sort_param)) goto err; } /* Clear end of block to get better compression if the table is backuped */ bzero((uchar*) buff+used_length,keyinfo->block_length-used_length); if (my_pwrite(info->s->kfile,(uchar*) buff,(uint) keyinfo->block_length, page,param->myf_rw)) { mi_check_print_error(param,"%d when updating keyblock",my_errno); goto err; } if (temp_buff) my_afree((uchar*) temp_buff); DBUG_RETURN(0); err: if (temp_buff) my_afree((uchar*) temp_buff); DBUG_RETURN(1); } /* sort_record_index */ /* Check if myisamchk was killed by a signal This is overloaded by other programs that want to be able to abort sorting */ static int not_killed= 0; volatile int *killed_ptr(MI_CHECK *param __attribute__((unused))) { return &not_killed; /* always NULL */ } /* print warnings and errors */ /* VARARGS */ void mi_check_print_info(MI_CHECK *param __attribute__((unused)), const char *fmt,...) { va_list args; va_start(args,fmt); (void) vfprintf(stdout, fmt, args); (void) fputc('\n',stdout); va_end(args); } /* VARARGS */ void mi_check_print_warning(MI_CHECK *param, const char *fmt,...) 
{ va_list args; DBUG_ENTER("mi_check_print_warning"); fflush(stdout); if (!param->warning_printed && !param->error_printed) { if (param->testflag & T_SILENT) fprintf(stderr,"%s: MyISAM file %s\n",my_progname_short, param->isam_file_name); param->out_flag|= O_DATA_LOST; } param->warning_printed=1; va_start(args,fmt); fprintf(stderr,"%s: warning: ",my_progname_short); (void) vfprintf(stderr, fmt, args); (void) fputc('\n',stderr); fflush(stderr); va_end(args); DBUG_VOID_RETURN; } /* VARARGS */ void mi_check_print_error(MI_CHECK *param, const char *fmt,...) { va_list args; DBUG_ENTER("mi_check_print_error"); DBUG_PRINT("enter",("format: %s",fmt)); fflush(stdout); if (!param->warning_printed && !param->error_printed) { if (param->testflag & T_SILENT) fprintf(stderr,"%s: MyISAM file %s\n",my_progname_short,param->isam_file_name); param->out_flag|= O_DATA_LOST; } param->error_printed|=1; va_start(args,fmt); fprintf(stderr,"%s: error: ",my_progname_short); (void) vfprintf(stderr, fmt, args); (void) fputc('\n',stderr); fflush(stderr); va_end(args); DBUG_VOID_RETURN; } #include "mi_extrafunc.h"
./CrossVul/dataset_final_sorted/CWE-362/c/bad_5222_5
crossvul-cpp_data_good_3014_0
/* * Copyright (C) 2001, 2002 Sistina Software (UK) Limited. * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved. * * This file is released under the GPL. */ #include "dm-core.h" #include "dm-rq.h" #include "dm-uevent.h" #include <linux/init.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/sched/signal.h> #include <linux/blkpg.h> #include <linux/bio.h> #include <linux/mempool.h> #include <linux/dax.h> #include <linux/slab.h> #include <linux/idr.h> #include <linux/uio.h> #include <linux/hdreg.h> #include <linux/delay.h> #include <linux/wait.h> #include <linux/pr.h> #include <linux/refcount.h> #define DM_MSG_PREFIX "core" /* * Cookies are numeric values sent with CHANGE and REMOVE * uevents while resuming, removing or renaming the device. */ #define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE" #define DM_COOKIE_LENGTH 24 static const char *_name = DM_NAME; static unsigned int major = 0; static unsigned int _major = 0; static DEFINE_IDR(_minor_idr); static DEFINE_SPINLOCK(_minor_lock); static void do_deferred_remove(struct work_struct *w); static DECLARE_WORK(deferred_remove_work, do_deferred_remove); static struct workqueue_struct *deferred_remove_workqueue; atomic_t dm_global_event_nr = ATOMIC_INIT(0); DECLARE_WAIT_QUEUE_HEAD(dm_global_eventq); void dm_issue_global_event(void) { atomic_inc(&dm_global_event_nr); wake_up(&dm_global_eventq); } /* * One of these is allocated per bio. */ struct dm_io { struct mapped_device *md; blk_status_t status; atomic_t io_count; struct bio *bio; unsigned long start_time; spinlock_t endio_lock; struct dm_stats_aux stats_aux; }; #define MINOR_ALLOCED ((void *)-1) /* * Bits for the md->flags field. */ #define DMF_BLOCK_IO_FOR_SUSPEND 0 #define DMF_SUSPENDED 1 #define DMF_FROZEN 2 #define DMF_FREEING 3 #define DMF_DELETING 4 #define DMF_NOFLUSH_SUSPENDING 5 #define DMF_DEFERRED_REMOVE 6 #define DMF_SUSPENDED_INTERNALLY 7 #define DM_NUMA_NODE NUMA_NO_NODE static int dm_numa_node = DM_NUMA_NODE; /* * For mempools pre-allocation at the table loading time. */ struct dm_md_mempools { mempool_t *io_pool; struct bio_set *bs; }; struct table_device { struct list_head list; refcount_t count; struct dm_dev dm_dev; }; static struct kmem_cache *_io_cache; static struct kmem_cache *_rq_tio_cache; static struct kmem_cache *_rq_cache; /* * Bio-based DM's mempools' reserved IOs set by the user. 
*/ #define RESERVED_BIO_BASED_IOS 16 static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS; static int __dm_get_module_param_int(int *module_param, int min, int max) { int param = ACCESS_ONCE(*module_param); int modified_param = 0; bool modified = true; if (param < min) modified_param = min; else if (param > max) modified_param = max; else modified = false; if (modified) { (void)cmpxchg(module_param, param, modified_param); param = modified_param; } return param; } unsigned __dm_get_module_param(unsigned *module_param, unsigned def, unsigned max) { unsigned param = ACCESS_ONCE(*module_param); unsigned modified_param = 0; if (!param) modified_param = def; else if (param > max) modified_param = max; if (modified_param) { (void)cmpxchg(module_param, param, modified_param); param = modified_param; } return param; } unsigned dm_get_reserved_bio_based_ios(void) { return __dm_get_module_param(&reserved_bio_based_ios, RESERVED_BIO_BASED_IOS, DM_RESERVED_MAX_IOS); } EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios); static unsigned dm_get_numa_node(void) { return __dm_get_module_param_int(&dm_numa_node, DM_NUMA_NODE, num_online_nodes() - 1); } static int __init local_init(void) { int r = -ENOMEM; /* allocate a slab for the dm_ios */ _io_cache = KMEM_CACHE(dm_io, 0); if (!_io_cache) return r; _rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0); if (!_rq_tio_cache) goto out_free_io_cache; _rq_cache = kmem_cache_create("dm_old_clone_request", sizeof(struct request), __alignof__(struct request), 0, NULL); if (!_rq_cache) goto out_free_rq_tio_cache; r = dm_uevent_init(); if (r) goto out_free_rq_cache; deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1); if (!deferred_remove_workqueue) { r = -ENOMEM; goto out_uevent_exit; } _major = major; r = register_blkdev(_major, _name); if (r < 0) goto out_free_workqueue; if (!_major) _major = r; return 0; out_free_workqueue: destroy_workqueue(deferred_remove_workqueue); out_uevent_exit: dm_uevent_exit(); out_free_rq_cache: kmem_cache_destroy(_rq_cache); out_free_rq_tio_cache: kmem_cache_destroy(_rq_tio_cache); out_free_io_cache: kmem_cache_destroy(_io_cache); return r; } static void local_exit(void) { flush_scheduled_work(); destroy_workqueue(deferred_remove_workqueue); kmem_cache_destroy(_rq_cache); kmem_cache_destroy(_rq_tio_cache); kmem_cache_destroy(_io_cache); unregister_blkdev(_major, _name); dm_uevent_exit(); _major = 0; DMINFO("cleaned up"); } static int (*_inits[])(void) __initdata = { local_init, dm_target_init, dm_linear_init, dm_stripe_init, dm_io_init, dm_kcopyd_init, dm_interface_init, dm_statistics_init, }; static void (*_exits[])(void) = { local_exit, dm_target_exit, dm_linear_exit, dm_stripe_exit, dm_io_exit, dm_kcopyd_exit, dm_interface_exit, dm_statistics_exit, }; static int __init dm_init(void) { const int count = ARRAY_SIZE(_inits); int r, i; for (i = 0; i < count; i++) { r = _inits[i](); if (r) goto bad; } return 0; bad: while (i--) _exits[i](); return r; } static void __exit dm_exit(void) { int i = ARRAY_SIZE(_exits); while (i--) _exits[i](); /* * Should be empty by this point. 
*/ idr_destroy(&_minor_idr); } /* * Block device functions */ int dm_deleting_md(struct mapped_device *md) { return test_bit(DMF_DELETING, &md->flags); } static int dm_blk_open(struct block_device *bdev, fmode_t mode) { struct mapped_device *md; spin_lock(&_minor_lock); md = bdev->bd_disk->private_data; if (!md) goto out; if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) { md = NULL; goto out; } dm_get(md); atomic_inc(&md->open_count); out: spin_unlock(&_minor_lock); return md ? 0 : -ENXIO; } static void dm_blk_close(struct gendisk *disk, fmode_t mode) { struct mapped_device *md; spin_lock(&_minor_lock); md = disk->private_data; if (WARN_ON(!md)) goto out; if (atomic_dec_and_test(&md->open_count) && (test_bit(DMF_DEFERRED_REMOVE, &md->flags))) queue_work(deferred_remove_workqueue, &deferred_remove_work); dm_put(md); out: spin_unlock(&_minor_lock); } int dm_open_count(struct mapped_device *md) { return atomic_read(&md->open_count); } /* * Guarantees nothing is using the device before it's deleted. */ int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred) { int r = 0; spin_lock(&_minor_lock); if (dm_open_count(md)) { r = -EBUSY; if (mark_deferred) set_bit(DMF_DEFERRED_REMOVE, &md->flags); } else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags)) r = -EEXIST; else set_bit(DMF_DELETING, &md->flags); spin_unlock(&_minor_lock); return r; } int dm_cancel_deferred_remove(struct mapped_device *md) { int r = 0; spin_lock(&_minor_lock); if (test_bit(DMF_DELETING, &md->flags)) r = -EBUSY; else clear_bit(DMF_DEFERRED_REMOVE, &md->flags); spin_unlock(&_minor_lock); return r; } static void do_deferred_remove(struct work_struct *w) { dm_deferred_remove(); } sector_t dm_get_size(struct mapped_device *md) { return get_capacity(md->disk); } struct request_queue *dm_get_md_queue(struct mapped_device *md) { return md->queue; } struct dm_stats *dm_get_stats(struct mapped_device *md) { return &md->stats; } static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo) { struct mapped_device *md = bdev->bd_disk->private_data; return dm_get_geometry(md, geo); } static int dm_grab_bdev_for_ioctl(struct mapped_device *md, struct block_device **bdev, fmode_t *mode) { struct dm_target *tgt; struct dm_table *map; int srcu_idx, r; retry: r = -ENOTTY; map = dm_get_live_table(md, &srcu_idx); if (!map || !dm_table_get_size(map)) goto out; /* We only support devices that have a single target */ if (dm_table_get_num_targets(map) != 1) goto out; tgt = dm_table_get_target(map, 0); if (!tgt->type->prepare_ioctl) goto out; if (dm_suspended_md(md)) { r = -EAGAIN; goto out; } r = tgt->type->prepare_ioctl(tgt, bdev, mode); if (r < 0) goto out; bdgrab(*bdev); dm_put_live_table(md, srcu_idx); return r; out: dm_put_live_table(md, srcu_idx); if (r == -ENOTCONN && !fatal_signal_pending(current)) { msleep(10); goto retry; } return r; } static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) { struct mapped_device *md = bdev->bd_disk->private_data; int r; r = dm_grab_bdev_for_ioctl(md, &bdev, &mode); if (r < 0) return r; if (r > 0) { /* * Target determined this ioctl is being issued against a * subset of the parent bdev; require extra privileges. 
*/ if (!capable(CAP_SYS_RAWIO)) { DMWARN_LIMIT( "%s: sending ioctl %x to DM device without required privilege.", current->comm, cmd); r = -ENOIOCTLCMD; goto out; } } r = __blkdev_driver_ioctl(bdev, mode, cmd, arg); out: bdput(bdev); return r; } static struct dm_io *alloc_io(struct mapped_device *md) { return mempool_alloc(md->io_pool, GFP_NOIO); } static void free_io(struct mapped_device *md, struct dm_io *io) { mempool_free(io, md->io_pool); } static void free_tio(struct dm_target_io *tio) { bio_put(&tio->clone); } int md_in_flight(struct mapped_device *md) { return atomic_read(&md->pending[READ]) + atomic_read(&md->pending[WRITE]); } static void start_io_acct(struct dm_io *io) { struct mapped_device *md = io->md; struct bio *bio = io->bio; int cpu; int rw = bio_data_dir(bio); io->start_time = jiffies; cpu = part_stat_lock(); part_round_stats(md->queue, cpu, &dm_disk(md)->part0); part_stat_unlock(); atomic_set(&dm_disk(md)->part0.in_flight[rw], atomic_inc_return(&md->pending[rw])); if (unlikely(dm_stats_used(&md->stats))) dm_stats_account_io(&md->stats, bio_data_dir(bio), bio->bi_iter.bi_sector, bio_sectors(bio), false, 0, &io->stats_aux); } static void end_io_acct(struct dm_io *io) { struct mapped_device *md = io->md; struct bio *bio = io->bio; unsigned long duration = jiffies - io->start_time; int pending; int rw = bio_data_dir(bio); generic_end_io_acct(md->queue, rw, &dm_disk(md)->part0, io->start_time); if (unlikely(dm_stats_used(&md->stats))) dm_stats_account_io(&md->stats, bio_data_dir(bio), bio->bi_iter.bi_sector, bio_sectors(bio), true, duration, &io->stats_aux); /* * After this is decremented the bio must not be touched if it is * a flush. */ pending = atomic_dec_return(&md->pending[rw]); atomic_set(&dm_disk(md)->part0.in_flight[rw], pending); pending += atomic_read(&md->pending[rw^0x1]); /* nudge anyone waiting on suspend queue */ if (!pending) wake_up(&md->wait); } /* * Add the bio to the list of deferred io. */ static void queue_io(struct mapped_device *md, struct bio *bio) { unsigned long flags; spin_lock_irqsave(&md->deferred_lock, flags); bio_list_add(&md->deferred, bio); spin_unlock_irqrestore(&md->deferred_lock, flags); queue_work(md->wq, &md->work); } /* * Everyone (including functions in this file), should use this * function to access the md->map field, and make sure they call * dm_put_live_table() when finished. */ struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier) { *srcu_idx = srcu_read_lock(&md->io_barrier); return srcu_dereference(md->map, &md->io_barrier); } void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier) { srcu_read_unlock(&md->io_barrier, srcu_idx); } void dm_sync_table(struct mapped_device *md) { synchronize_srcu(&md->io_barrier); synchronize_rcu_expedited(); } /* * A fast alternative to dm_get_live_table/dm_put_live_table. * The caller must not block between these two functions. */ static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU) { rcu_read_lock(); return rcu_dereference(md->map); } static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU) { rcu_read_unlock(); } /* * Open a table device so we can use it as a map destination. 
*/ static int open_table_device(struct table_device *td, dev_t dev, struct mapped_device *md) { static char *_claim_ptr = "I belong to device-mapper"; struct block_device *bdev; int r; BUG_ON(td->dm_dev.bdev); bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _claim_ptr); if (IS_ERR(bdev)) return PTR_ERR(bdev); r = bd_link_disk_holder(bdev, dm_disk(md)); if (r) { blkdev_put(bdev, td->dm_dev.mode | FMODE_EXCL); return r; } td->dm_dev.bdev = bdev; td->dm_dev.dax_dev = dax_get_by_host(bdev->bd_disk->disk_name); return 0; } /* * Close a table device that we've been using. */ static void close_table_device(struct table_device *td, struct mapped_device *md) { if (!td->dm_dev.bdev) return; bd_unlink_disk_holder(td->dm_dev.bdev, dm_disk(md)); blkdev_put(td->dm_dev.bdev, td->dm_dev.mode | FMODE_EXCL); put_dax(td->dm_dev.dax_dev); td->dm_dev.bdev = NULL; td->dm_dev.dax_dev = NULL; } static struct table_device *find_table_device(struct list_head *l, dev_t dev, fmode_t mode) { struct table_device *td; list_for_each_entry(td, l, list) if (td->dm_dev.bdev->bd_dev == dev && td->dm_dev.mode == mode) return td; return NULL; } int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode, struct dm_dev **result) { int r; struct table_device *td; mutex_lock(&md->table_devices_lock); td = find_table_device(&md->table_devices, dev, mode); if (!td) { td = kmalloc_node(sizeof(*td), GFP_KERNEL, md->numa_node_id); if (!td) { mutex_unlock(&md->table_devices_lock); return -ENOMEM; } td->dm_dev.mode = mode; td->dm_dev.bdev = NULL; if ((r = open_table_device(td, dev, md))) { mutex_unlock(&md->table_devices_lock); kfree(td); return r; } format_dev_t(td->dm_dev.name, dev); refcount_set(&td->count, 1); list_add(&td->list, &md->table_devices); } else { refcount_inc(&td->count); } mutex_unlock(&md->table_devices_lock); *result = &td->dm_dev; return 0; } EXPORT_SYMBOL_GPL(dm_get_table_device); void dm_put_table_device(struct mapped_device *md, struct dm_dev *d) { struct table_device *td = container_of(d, struct table_device, dm_dev); mutex_lock(&md->table_devices_lock); if (refcount_dec_and_test(&td->count)) { close_table_device(td, md); list_del(&td->list); kfree(td); } mutex_unlock(&md->table_devices_lock); } EXPORT_SYMBOL(dm_put_table_device); static void free_table_devices(struct list_head *devices) { struct list_head *tmp, *next; list_for_each_safe(tmp, next, devices) { struct table_device *td = list_entry(tmp, struct table_device, list); DMWARN("dm_destroy: %s still exists with %d references", td->dm_dev.name, refcount_read(&td->count)); kfree(td); } } /* * Get the geometry associated with a dm device */ int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo) { *geo = md->geometry; return 0; } /* * Set the geometry of a device. */ int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo) { sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors; if (geo->start > sz) { DMWARN("Start sector is beyond the geometry limits."); return -EINVAL; } md->geometry = *geo; return 0; } /*----------------------------------------------------------------- * CRUD START: * A more elegant soln is in the works that uses the queue * merge fn, unfortunately there are a couple of changes to * the block layer that I want to make for this. So in the * interests of getting something for people to use I give * you this clearly demarcated crap. 
*---------------------------------------------------------------*/ static int __noflush_suspending(struct mapped_device *md) { return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); } /* * Decrements the number of outstanding ios that a bio has been * cloned into, completing the original io if necc. */ static void dec_pending(struct dm_io *io, blk_status_t error) { unsigned long flags; blk_status_t io_error; struct bio *bio; struct mapped_device *md = io->md; /* Push-back supersedes any I/O errors */ if (unlikely(error)) { spin_lock_irqsave(&io->endio_lock, flags); if (!(io->status == BLK_STS_DM_REQUEUE && __noflush_suspending(md))) io->status = error; spin_unlock_irqrestore(&io->endio_lock, flags); } if (atomic_dec_and_test(&io->io_count)) { if (io->status == BLK_STS_DM_REQUEUE) { /* * Target requested pushing back the I/O. */ spin_lock_irqsave(&md->deferred_lock, flags); if (__noflush_suspending(md)) bio_list_add_head(&md->deferred, io->bio); else /* noflush suspend was interrupted. */ io->status = BLK_STS_IOERR; spin_unlock_irqrestore(&md->deferred_lock, flags); } io_error = io->status; bio = io->bio; end_io_acct(io); free_io(md, io); if (io_error == BLK_STS_DM_REQUEUE) return; if ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size) { /* * Preflush done for flush with data, reissue * without REQ_PREFLUSH. */ bio->bi_opf &= ~REQ_PREFLUSH; queue_io(md, bio); } else { /* done with normal IO or empty flush */ bio->bi_status = io_error; bio_endio(bio); } } } void disable_write_same(struct mapped_device *md) { struct queue_limits *limits = dm_get_queue_limits(md); /* device doesn't really support WRITE SAME, disable it */ limits->max_write_same_sectors = 0; } void disable_write_zeroes(struct mapped_device *md) { struct queue_limits *limits = dm_get_queue_limits(md); /* device doesn't really support WRITE ZEROES, disable it */ limits->max_write_zeroes_sectors = 0; } static void clone_endio(struct bio *bio) { blk_status_t error = bio->bi_status; struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); struct dm_io *io = tio->io; struct mapped_device *md = tio->io->md; dm_endio_fn endio = tio->ti->type->end_io; if (unlikely(error == BLK_STS_TARGET)) { if (bio_op(bio) == REQ_OP_WRITE_SAME && !bio->bi_disk->queue->limits.max_write_same_sectors) disable_write_same(md); if (bio_op(bio) == REQ_OP_WRITE_ZEROES && !bio->bi_disk->queue->limits.max_write_zeroes_sectors) disable_write_zeroes(md); } if (endio) { int r = endio(tio->ti, bio, &error); switch (r) { case DM_ENDIO_REQUEUE: error = BLK_STS_DM_REQUEUE; /*FALLTHRU*/ case DM_ENDIO_DONE: break; case DM_ENDIO_INCOMPLETE: /* The target will handle the io */ return; default: DMWARN("unimplemented target endio return value: %d", r); BUG(); } } free_tio(tio); dec_pending(io, error); } /* * Return maximum size of I/O possible at the supplied sector up to the current * target boundary. */ static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti) { sector_t target_offset = dm_target_offset(ti, sector); return ti->len - target_offset; } static sector_t max_io_len(sector_t sector, struct dm_target *ti) { sector_t len = max_io_len_target_boundary(sector, ti); sector_t offset, max_len; /* * Does the target need to split even further? 
*/ if (ti->max_io_len) { offset = dm_target_offset(ti, sector); if (unlikely(ti->max_io_len & (ti->max_io_len - 1))) max_len = sector_div(offset, ti->max_io_len); else max_len = offset & (ti->max_io_len - 1); max_len = ti->max_io_len - max_len; if (len > max_len) len = max_len; } return len; } int dm_set_target_max_io_len(struct dm_target *ti, sector_t len) { if (len > UINT_MAX) { DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)", (unsigned long long)len, UINT_MAX); ti->error = "Maximum size of target IO is too large"; return -EINVAL; } ti->max_io_len = (uint32_t) len; return 0; } EXPORT_SYMBOL_GPL(dm_set_target_max_io_len); static struct dm_target *dm_dax_get_live_target(struct mapped_device *md, sector_t sector, int *srcu_idx) { struct dm_table *map; struct dm_target *ti; map = dm_get_live_table(md, srcu_idx); if (!map) return NULL; ti = dm_table_find_target(map, sector); if (!dm_target_is_valid(ti)) return NULL; return ti; } static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages, void **kaddr, pfn_t *pfn) { struct mapped_device *md = dax_get_private(dax_dev); sector_t sector = pgoff * PAGE_SECTORS; struct dm_target *ti; long len, ret = -EIO; int srcu_idx; ti = dm_dax_get_live_target(md, sector, &srcu_idx); if (!ti) goto out; if (!ti->type->direct_access) goto out; len = max_io_len(sector, ti) / PAGE_SECTORS; if (len < 1) goto out; nr_pages = min(len, nr_pages); if (ti->type->direct_access) ret = ti->type->direct_access(ti, pgoff, nr_pages, kaddr, pfn); out: dm_put_live_table(md, srcu_idx); return ret; } static size_t dm_dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i) { struct mapped_device *md = dax_get_private(dax_dev); sector_t sector = pgoff * PAGE_SECTORS; struct dm_target *ti; long ret = 0; int srcu_idx; ti = dm_dax_get_live_target(md, sector, &srcu_idx); if (!ti) goto out; if (!ti->type->dax_copy_from_iter) { ret = copy_from_iter(addr, bytes, i); goto out; } ret = ti->type->dax_copy_from_iter(ti, pgoff, addr, bytes, i); out: dm_put_live_table(md, srcu_idx); return ret; } /* * A target may call dm_accept_partial_bio only from the map routine. It is * allowed for all bio types except REQ_PREFLUSH. * * dm_accept_partial_bio informs the dm that the target only wants to process * additional n_sectors sectors of the bio and the rest of the data should be * sent in a next bio. * * A diagram that explains the arithmetics: * +--------------------+---------------+-------+ * | 1 | 2 | 3 | * +--------------------+---------------+-------+ * * <-------------- *tio->len_ptr ---------------> * <------- bi_size -------> * <-- n_sectors --> * * Region 1 was already iterated over with bio_advance or similar function. * (it may be empty if the target doesn't use bio_advance) * Region 2 is the remaining bio size that the target wants to process. * (it may be empty if region 1 is non-empty, although there is no reason * to make it empty) * The target requires that region 3 is to be sent in the next bio. * * If the target wants to receive multiple copies of the bio (via num_*bios, etc), * the partially processed part (the sum of regions 1+2) must be the same for all * copies of the bio. 
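 *
 * A minimal illustrative sketch (not part of this file; example_map and
 * example_dev are made-up names): a target map method that only wants to
 * handle the first 8 sectors of each bio it receives might do
 *
 *	static int example_map(struct dm_target *ti, struct bio *bio)
 *	{
 *		if (bio_sectors(bio) > 8)
 *			dm_accept_partial_bio(bio, 8);
 *		bio_set_dev(bio, example_dev->bdev);
 *		return DM_MAPIO_REMAPPED;
 *	}
 *
 * and the core will resubmit the remaining sectors to the target in a
 * subsequent bio.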
*/ void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors) { struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT; BUG_ON(bio->bi_opf & REQ_PREFLUSH); BUG_ON(bi_size > *tio->len_ptr); BUG_ON(n_sectors > bi_size); *tio->len_ptr -= bi_size - n_sectors; bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT; } EXPORT_SYMBOL_GPL(dm_accept_partial_bio); /* * The zone descriptors obtained with a zone report indicate * zone positions within the target device. The zone descriptors * must be remapped to match their position within the dm device. * A target may call dm_remap_zone_report after completion of a * REQ_OP_ZONE_REPORT bio to remap the zone descriptors obtained * from the target device mapping to the dm device. */ void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start) { #ifdef CONFIG_BLK_DEV_ZONED struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); struct bio *report_bio = tio->io->bio; struct blk_zone_report_hdr *hdr = NULL; struct blk_zone *zone; unsigned int nr_rep = 0; unsigned int ofst; struct bio_vec bvec; struct bvec_iter iter; void *addr; if (bio->bi_status) return; /* * Remap the start sector of the reported zones. For sequential zones, * also remap the write pointer position. */ bio_for_each_segment(bvec, report_bio, iter) { addr = kmap_atomic(bvec.bv_page); /* Remember the report header in the first page */ if (!hdr) { hdr = addr; ofst = sizeof(struct blk_zone_report_hdr); } else ofst = 0; /* Set zones start sector */ while (hdr->nr_zones && ofst < bvec.bv_len) { zone = addr + ofst; if (zone->start >= start + ti->len) { hdr->nr_zones = 0; break; } zone->start = zone->start + ti->begin - start; if (zone->type != BLK_ZONE_TYPE_CONVENTIONAL) { if (zone->cond == BLK_ZONE_COND_FULL) zone->wp = zone->start + zone->len; else if (zone->cond == BLK_ZONE_COND_EMPTY) zone->wp = zone->start; else zone->wp = zone->wp + ti->begin - start; } ofst += sizeof(struct blk_zone); hdr->nr_zones--; nr_rep++; } if (addr != hdr) kunmap_atomic(addr); if (!hdr->nr_zones) break; } if (hdr) { hdr->nr_zones = nr_rep; kunmap_atomic(hdr); } bio_advance(report_bio, report_bio->bi_iter.bi_size); #else /* !CONFIG_BLK_DEV_ZONED */ bio->bi_status = BLK_STS_NOTSUPP; #endif } EXPORT_SYMBOL_GPL(dm_remap_zone_report); /* * Flush current->bio_list when the target map method blocks. * This fixes deadlocks in snapshot and possibly in other targets. 
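 *
 * dm_offload_start() installs flush_current_bio_list() as a plug callback
 * on the current task, so if the map method sleeps while bios are parked
 * on current->bio_list, the callback moves the bios that belong to a
 * bio_set with a rescue workqueue onto that set's rescue_list and kicks
 * the rescuer, instead of leaving them stuck behind the sleeping task.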
*/ struct dm_offload { struct blk_plug plug; struct blk_plug_cb cb; }; static void flush_current_bio_list(struct blk_plug_cb *cb, bool from_schedule) { struct dm_offload *o = container_of(cb, struct dm_offload, cb); struct bio_list list; struct bio *bio; int i; INIT_LIST_HEAD(&o->cb.list); if (unlikely(!current->bio_list)) return; for (i = 0; i < 2; i++) { list = current->bio_list[i]; bio_list_init(&current->bio_list[i]); while ((bio = bio_list_pop(&list))) { struct bio_set *bs = bio->bi_pool; if (unlikely(!bs) || bs == fs_bio_set || !bs->rescue_workqueue) { bio_list_add(&current->bio_list[i], bio); continue; } spin_lock(&bs->rescue_lock); bio_list_add(&bs->rescue_list, bio); queue_work(bs->rescue_workqueue, &bs->rescue_work); spin_unlock(&bs->rescue_lock); } } } static void dm_offload_start(struct dm_offload *o) { blk_start_plug(&o->plug); o->cb.callback = flush_current_bio_list; list_add(&o->cb.list, &current->plug->cb_list); } static void dm_offload_end(struct dm_offload *o) { list_del(&o->cb.list); blk_finish_plug(&o->plug); } static void __map_bio(struct dm_target_io *tio) { int r; sector_t sector; struct dm_offload o; struct bio *clone = &tio->clone; struct dm_target *ti = tio->ti; clone->bi_end_io = clone_endio; /* * Map the clone. If r == 0 we don't need to do * anything, the target has assumed ownership of * this io. */ atomic_inc(&tio->io->io_count); sector = clone->bi_iter.bi_sector; dm_offload_start(&o); r = ti->type->map(ti, clone); dm_offload_end(&o); switch (r) { case DM_MAPIO_SUBMITTED: break; case DM_MAPIO_REMAPPED: /* the bio has been remapped so dispatch it */ trace_block_bio_remap(clone->bi_disk->queue, clone, bio_dev(tio->io->bio), sector); generic_make_request(clone); break; case DM_MAPIO_KILL: dec_pending(tio->io, BLK_STS_IOERR); free_tio(tio); break; case DM_MAPIO_REQUEUE: dec_pending(tio->io, BLK_STS_DM_REQUEUE); free_tio(tio); break; default: DMWARN("unimplemented target map return value: %d", r); BUG(); } } struct clone_info { struct mapped_device *md; struct dm_table *map; struct bio *bio; struct dm_io *io; sector_t sector; unsigned sector_count; }; static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len) { bio->bi_iter.bi_sector = sector; bio->bi_iter.bi_size = to_bytes(len); } /* * Creates a bio that consists of range of complete bvecs. 
*/ static int clone_bio(struct dm_target_io *tio, struct bio *bio, sector_t sector, unsigned len) { struct bio *clone = &tio->clone; __bio_clone_fast(clone, bio); if (unlikely(bio_integrity(bio) != NULL)) { int r; if (unlikely(!dm_target_has_integrity(tio->ti->type) && !dm_target_passes_integrity(tio->ti->type))) { DMWARN("%s: the target %s doesn't support integrity data.", dm_device_name(tio->io->md), tio->ti->type->name); return -EIO; } r = bio_integrity_clone(clone, bio, GFP_NOIO); if (r < 0) return r; } if (bio_op(bio) != REQ_OP_ZONE_REPORT) bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector)); clone->bi_iter.bi_size = to_bytes(len); if (unlikely(bio_integrity(bio) != NULL)) bio_integrity_trim(clone); return 0; } static struct dm_target_io *alloc_tio(struct clone_info *ci, struct dm_target *ti, unsigned target_bio_nr) { struct dm_target_io *tio; struct bio *clone; clone = bio_alloc_bioset(GFP_NOIO, 0, ci->md->bs); tio = container_of(clone, struct dm_target_io, clone); tio->io = ci->io; tio->ti = ti; tio->target_bio_nr = target_bio_nr; return tio; } static void __clone_and_map_simple_bio(struct clone_info *ci, struct dm_target *ti, unsigned target_bio_nr, unsigned *len) { struct dm_target_io *tio = alloc_tio(ci, ti, target_bio_nr); struct bio *clone = &tio->clone; tio->len_ptr = len; __bio_clone_fast(clone, ci->bio); if (len) bio_setup_sector(clone, ci->sector, *len); __map_bio(tio); } static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti, unsigned num_bios, unsigned *len) { unsigned target_bio_nr; for (target_bio_nr = 0; target_bio_nr < num_bios; target_bio_nr++) __clone_and_map_simple_bio(ci, ti, target_bio_nr, len); } static int __send_empty_flush(struct clone_info *ci) { unsigned target_nr = 0; struct dm_target *ti; BUG_ON(bio_has_data(ci->bio)); while ((ti = dm_table_get_target(ci->map, target_nr++))) __send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL); return 0; } static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti, sector_t sector, unsigned *len) { struct bio *bio = ci->bio; struct dm_target_io *tio; unsigned target_bio_nr; unsigned num_target_bios = 1; int r = 0; /* * Does the target want to receive duplicate copies of the bio? 
*/ if (bio_data_dir(bio) == WRITE && ti->num_write_bios) num_target_bios = ti->num_write_bios(ti, bio); for (target_bio_nr = 0; target_bio_nr < num_target_bios; target_bio_nr++) { tio = alloc_tio(ci, ti, target_bio_nr); tio->len_ptr = len; r = clone_bio(tio, bio, sector, *len); if (r < 0) { free_tio(tio); break; } __map_bio(tio); } return r; } typedef unsigned (*get_num_bios_fn)(struct dm_target *ti); static unsigned get_num_discard_bios(struct dm_target *ti) { return ti->num_discard_bios; } static unsigned get_num_write_same_bios(struct dm_target *ti) { return ti->num_write_same_bios; } static unsigned get_num_write_zeroes_bios(struct dm_target *ti) { return ti->num_write_zeroes_bios; } typedef bool (*is_split_required_fn)(struct dm_target *ti); static bool is_split_required_for_discard(struct dm_target *ti) { return ti->split_discard_bios; } static int __send_changing_extent_only(struct clone_info *ci, get_num_bios_fn get_num_bios, is_split_required_fn is_split_required) { struct dm_target *ti; unsigned len; unsigned num_bios; do { ti = dm_table_find_target(ci->map, ci->sector); if (!dm_target_is_valid(ti)) return -EIO; /* * Even though the device advertised support for this type of * request, that does not mean every target supports it, and * reconfiguration might also have changed that since the * check was performed. */ num_bios = get_num_bios ? get_num_bios(ti) : 0; if (!num_bios) return -EOPNOTSUPP; if (is_split_required && !is_split_required(ti)) len = min((sector_t)ci->sector_count, max_io_len_target_boundary(ci->sector, ti)); else len = min((sector_t)ci->sector_count, max_io_len(ci->sector, ti)); __send_duplicate_bios(ci, ti, num_bios, &len); ci->sector += len; } while (ci->sector_count -= len); return 0; } static int __send_discard(struct clone_info *ci) { return __send_changing_extent_only(ci, get_num_discard_bios, is_split_required_for_discard); } static int __send_write_same(struct clone_info *ci) { return __send_changing_extent_only(ci, get_num_write_same_bios, NULL); } static int __send_write_zeroes(struct clone_info *ci) { return __send_changing_extent_only(ci, get_num_write_zeroes_bios, NULL); } /* * Select the correct strategy for processing a non-flush bio. */ static int __split_and_process_non_flush(struct clone_info *ci) { struct bio *bio = ci->bio; struct dm_target *ti; unsigned len; int r; if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) return __send_discard(ci); else if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME)) return __send_write_same(ci); else if (unlikely(bio_op(bio) == REQ_OP_WRITE_ZEROES)) return __send_write_zeroes(ci); ti = dm_table_find_target(ci->map, ci->sector); if (!dm_target_is_valid(ti)) return -EIO; if (bio_op(bio) == REQ_OP_ZONE_REPORT) len = ci->sector_count; else len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count); r = __clone_and_map_data_bio(ci, ti, ci->sector, &len); if (r < 0) return r; ci->sector += len; ci->sector_count -= len; return 0; } /* * Entry point to split a bio into clones and submit them to the targets. 
*/ static void __split_and_process_bio(struct mapped_device *md, struct dm_table *map, struct bio *bio) { struct clone_info ci; int error = 0; if (unlikely(!map)) { bio_io_error(bio); return; } ci.map = map; ci.md = md; ci.io = alloc_io(md); ci.io->status = 0; atomic_set(&ci.io->io_count, 1); ci.io->bio = bio; ci.io->md = md; spin_lock_init(&ci.io->endio_lock); ci.sector = bio->bi_iter.bi_sector; start_io_acct(ci.io); if (bio->bi_opf & REQ_PREFLUSH) { ci.bio = &ci.md->flush_bio; ci.sector_count = 0; error = __send_empty_flush(&ci); /* dec_pending submits any data associated with flush */ } else if (bio_op(bio) == REQ_OP_ZONE_RESET) { ci.bio = bio; ci.sector_count = 0; error = __split_and_process_non_flush(&ci); } else { ci.bio = bio; ci.sector_count = bio_sectors(bio); while (ci.sector_count && !error) error = __split_and_process_non_flush(&ci); } /* drop the extra reference count */ dec_pending(ci.io, errno_to_blk_status(error)); } /*----------------------------------------------------------------- * CRUD END *---------------------------------------------------------------*/ /* * The request function that just remaps the bio built up by * dm_merge_bvec. */ static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio) { int rw = bio_data_dir(bio); struct mapped_device *md = q->queuedata; int srcu_idx; struct dm_table *map; map = dm_get_live_table(md, &srcu_idx); generic_start_io_acct(q, rw, bio_sectors(bio), &dm_disk(md)->part0); /* if we're suspended, we have to queue this io for later */ if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) { dm_put_live_table(md, srcu_idx); if (!(bio->bi_opf & REQ_RAHEAD)) queue_io(md, bio); else bio_io_error(bio); return BLK_QC_T_NONE; } __split_and_process_bio(md, map, bio); dm_put_live_table(md, srcu_idx); return BLK_QC_T_NONE; } static int dm_any_congested(void *congested_data, int bdi_bits) { int r = bdi_bits; struct mapped_device *md = congested_data; struct dm_table *map; if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { if (dm_request_based(md)) { /* * With request-based DM we only need to check the * top-level queue for congestion. */ r = md->queue->backing_dev_info->wb.state & bdi_bits; } else { map = dm_get_live_table_fast(md); if (map) r = dm_table_any_congested(map, bdi_bits); dm_put_live_table_fast(md); } } return r; } /*----------------------------------------------------------------- * An IDR is used to keep track of allocated minor numbers. *---------------------------------------------------------------*/ static void free_minor(int minor) { spin_lock(&_minor_lock); idr_remove(&_minor_idr, minor); spin_unlock(&_minor_lock); } /* * See if the device with a specific minor # is free. */ static int specific_minor(int minor) { int r; if (minor >= (1 << MINORBITS)) return -EINVAL; idr_preload(GFP_KERNEL); spin_lock(&_minor_lock); r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT); spin_unlock(&_minor_lock); idr_preload_end(); if (r < 0) return r == -ENOSPC ? 
-EBUSY : r; return 0; } static int next_free_minor(int *minor) { int r; idr_preload(GFP_KERNEL); spin_lock(&_minor_lock); r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT); spin_unlock(&_minor_lock); idr_preload_end(); if (r < 0) return r; *minor = r; return 0; } static const struct block_device_operations dm_blk_dops; static const struct dax_operations dm_dax_ops; static void dm_wq_work(struct work_struct *work); void dm_init_md_queue(struct mapped_device *md) { /* * Request-based dm devices cannot be stacked on top of bio-based dm * devices. The type of this dm device may not have been decided yet. * The type is decided at the first table loading time. * To prevent problematic device stacking, clear the queue flag * for request stacking support until then. * * This queue is new, so no concurrency on the queue_flags. */ queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue); /* * Initialize data that will only be used by a non-blk-mq DM queue * - must do so here (in alloc_dev callchain) before queue is used */ md->queue->queuedata = md; md->queue->backing_dev_info->congested_data = md; } void dm_init_normal_md_queue(struct mapped_device *md) { md->use_blk_mq = false; dm_init_md_queue(md); /* * Initialize aspects of queue that aren't relevant for blk-mq */ md->queue->backing_dev_info->congested_fn = dm_any_congested; } static void cleanup_mapped_device(struct mapped_device *md) { if (md->wq) destroy_workqueue(md->wq); if (md->kworker_task) kthread_stop(md->kworker_task); mempool_destroy(md->io_pool); if (md->bs) bioset_free(md->bs); if (md->dax_dev) { kill_dax(md->dax_dev); put_dax(md->dax_dev); md->dax_dev = NULL; } if (md->disk) { spin_lock(&_minor_lock); md->disk->private_data = NULL; spin_unlock(&_minor_lock); del_gendisk(md->disk); put_disk(md->disk); } if (md->queue) blk_cleanup_queue(md->queue); cleanup_srcu_struct(&md->io_barrier); if (md->bdev) { bdput(md->bdev); md->bdev = NULL; } dm_mq_cleanup_mapped_device(md); } /* * Allocate and initialise a blank device with a given minor. 
*/ static struct mapped_device *alloc_dev(int minor) { int r, numa_node_id = dm_get_numa_node(); struct dax_device *dax_dev; struct mapped_device *md; void *old_md; md = kvzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id); if (!md) { DMWARN("unable to allocate device, out of memory."); return NULL; } if (!try_module_get(THIS_MODULE)) goto bad_module_get; /* get a minor number for the dev */ if (minor == DM_ANY_MINOR) r = next_free_minor(&minor); else r = specific_minor(minor); if (r < 0) goto bad_minor; r = init_srcu_struct(&md->io_barrier); if (r < 0) goto bad_io_barrier; md->numa_node_id = numa_node_id; md->use_blk_mq = dm_use_blk_mq_default(); md->init_tio_pdu = false; md->type = DM_TYPE_NONE; mutex_init(&md->suspend_lock); mutex_init(&md->type_lock); mutex_init(&md->table_devices_lock); spin_lock_init(&md->deferred_lock); atomic_set(&md->holders, 1); atomic_set(&md->open_count, 0); atomic_set(&md->event_nr, 0); atomic_set(&md->uevent_seq, 0); INIT_LIST_HEAD(&md->uevent_list); INIT_LIST_HEAD(&md->table_devices); spin_lock_init(&md->uevent_lock); md->queue = blk_alloc_queue_node(GFP_KERNEL, numa_node_id); if (!md->queue) goto bad; dm_init_md_queue(md); md->disk = alloc_disk_node(1, numa_node_id); if (!md->disk) goto bad; atomic_set(&md->pending[0], 0); atomic_set(&md->pending[1], 0); init_waitqueue_head(&md->wait); INIT_WORK(&md->work, dm_wq_work); init_waitqueue_head(&md->eventq); init_completion(&md->kobj_holder.completion); md->kworker_task = NULL; md->disk->major = _major; md->disk->first_minor = minor; md->disk->fops = &dm_blk_dops; md->disk->queue = md->queue; md->disk->private_data = md; sprintf(md->disk->disk_name, "dm-%d", minor); dax_dev = alloc_dax(md, md->disk->disk_name, &dm_dax_ops); if (!dax_dev) goto bad; md->dax_dev = dax_dev; add_disk(md->disk); format_dev_t(md->name, MKDEV(_major, minor)); md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0); if (!md->wq) goto bad; md->bdev = bdget_disk(md->disk, 0); if (!md->bdev) goto bad; bio_init(&md->flush_bio, NULL, 0); bio_set_dev(&md->flush_bio, md->bdev); md->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC; dm_stats_init(&md->stats); /* Populate the mapping, nobody knows we exist yet */ spin_lock(&_minor_lock); old_md = idr_replace(&_minor_idr, md, minor); spin_unlock(&_minor_lock); BUG_ON(old_md != MINOR_ALLOCED); return md; bad: cleanup_mapped_device(md); bad_io_barrier: free_minor(minor); bad_minor: module_put(THIS_MODULE); bad_module_get: kvfree(md); return NULL; } static void unlock_fs(struct mapped_device *md); static void free_dev(struct mapped_device *md) { int minor = MINOR(disk_devt(md->disk)); unlock_fs(md); cleanup_mapped_device(md); free_table_devices(&md->table_devices); dm_stats_cleanup(&md->stats); free_minor(minor); module_put(THIS_MODULE); kvfree(md); } static void __bind_mempools(struct mapped_device *md, struct dm_table *t) { struct dm_md_mempools *p = dm_table_get_md_mempools(t); if (md->bs) { /* The md already has necessary mempools. */ if (dm_table_bio_based(t)) { /* * Reload bioset because front_pad may have changed * because a different table was loaded. */ bioset_free(md->bs); md->bs = p->bs; p->bs = NULL; } /* * There's no need to reload with request-based dm * because the size of front_pad doesn't change. * Note for future: If you are to reload bioset, * prep-ed requests in the queue may refer * to bio from the old bioset, so you must walk * through the queue to unprep. 
*/ goto out; } BUG_ON(!p || md->io_pool || md->bs); md->io_pool = p->io_pool; p->io_pool = NULL; md->bs = p->bs; p->bs = NULL; out: /* mempool bind completed, no longer need any mempools in the table */ dm_table_free_md_mempools(t); } /* * Bind a table to the device. */ static void event_callback(void *context) { unsigned long flags; LIST_HEAD(uevents); struct mapped_device *md = (struct mapped_device *) context; spin_lock_irqsave(&md->uevent_lock, flags); list_splice_init(&md->uevent_list, &uevents); spin_unlock_irqrestore(&md->uevent_lock, flags); dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj); atomic_inc(&md->event_nr); wake_up(&md->eventq); dm_issue_global_event(); } /* * Protected by md->suspend_lock obtained by dm_swap_table(). */ static void __set_size(struct mapped_device *md, sector_t size) { lockdep_assert_held(&md->suspend_lock); set_capacity(md->disk, size); i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT); } /* * Returns old map, which caller must destroy. */ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t, struct queue_limits *limits) { struct dm_table *old_map; struct request_queue *q = md->queue; sector_t size; lockdep_assert_held(&md->suspend_lock); size = dm_table_get_size(t); /* * Wipe any geometry if the size of the table changed. */ if (size != dm_get_size(md)) memset(&md->geometry, 0, sizeof(md->geometry)); __set_size(md, size); dm_table_event_callback(t, event_callback, md); /* * The queue hasn't been stopped yet, if the old table type wasn't * for request-based during suspension. So stop it to prevent * I/O mapping before resume. * This must be done before setting the queue restrictions, * because request-based dm may be run just after the setting. */ if (dm_table_request_based(t)) { dm_stop_queue(q); /* * Leverage the fact that request-based DM targets are * immutable singletons and establish md->immutable_target * - used to optimize both dm_request_fn and dm_mq_queue_rq */ md->immutable_target = dm_table_get_immutable_target(t); } __bind_mempools(md, t); old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); rcu_assign_pointer(md->map, (void *)t); md->immutable_target_type = dm_table_get_immutable_target_type(t); dm_table_set_restrictions(t, q, limits); if (old_map) dm_sync_table(md); return old_map; } /* * Returns unbound table for the caller to free. */ static struct dm_table *__unbind(struct mapped_device *md) { struct dm_table *map = rcu_dereference_protected(md->map, 1); if (!map) return NULL; dm_table_event_callback(map, NULL, NULL); RCU_INIT_POINTER(md->map, NULL); dm_sync_table(md); return map; } /* * Constructor for a new device. */ int dm_create(int minor, struct mapped_device **result) { struct mapped_device *md; md = alloc_dev(minor); if (!md) return -ENXIO; dm_sysfs_init(md); *result = md; return 0; } /* * Functions to manage md->type. * All are required to hold md->type_lock. */ void dm_lock_md_type(struct mapped_device *md) { mutex_lock(&md->type_lock); } void dm_unlock_md_type(struct mapped_device *md) { mutex_unlock(&md->type_lock); } void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type) { BUG_ON(!mutex_is_locked(&md->type_lock)); md->type = type; } enum dm_queue_mode dm_get_md_type(struct mapped_device *md) { return md->type; } struct target_type *dm_get_immutable_target_type(struct mapped_device *md) { return md->immutable_target_type; } /* * The queue_limits are only valid as long as you have a reference * count on 'md'. 
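 *
 * disable_write_same() and disable_write_zeroes() above are the typical
 * callers: they fetch these limits and zero max_write_same_sectors /
 * max_write_zeroes_sectors once an underlying device reports that it
 * does not actually support the operation.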
*/ struct queue_limits *dm_get_queue_limits(struct mapped_device *md) { BUG_ON(!atomic_read(&md->holders)); return &md->queue->limits; } EXPORT_SYMBOL_GPL(dm_get_queue_limits); /* * Setup the DM device's queue based on md's type */ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t) { int r; enum dm_queue_mode type = dm_get_md_type(md); switch (type) { case DM_TYPE_REQUEST_BASED: r = dm_old_init_request_queue(md, t); if (r) { DMERR("Cannot initialize queue for request-based mapped device"); return r; } break; case DM_TYPE_MQ_REQUEST_BASED: r = dm_mq_init_request_queue(md, t); if (r) { DMERR("Cannot initialize queue for request-based dm-mq mapped device"); return r; } break; case DM_TYPE_BIO_BASED: case DM_TYPE_DAX_BIO_BASED: dm_init_normal_md_queue(md); blk_queue_make_request(md->queue, dm_make_request); /* * DM handles splitting bios as needed. Free the bio_split bioset * since it won't be used (saves 1 process per bio-based DM device). */ bioset_free(md->queue->bio_split); md->queue->bio_split = NULL; if (type == DM_TYPE_DAX_BIO_BASED) queue_flag_set_unlocked(QUEUE_FLAG_DAX, md->queue); break; case DM_TYPE_NONE: WARN_ON_ONCE(true); break; } return 0; } struct mapped_device *dm_get_md(dev_t dev) { struct mapped_device *md; unsigned minor = MINOR(dev); if (MAJOR(dev) != _major || minor >= (1 << MINORBITS)) return NULL; spin_lock(&_minor_lock); md = idr_find(&_minor_idr, minor); if (md) { if ((md == MINOR_ALLOCED || (MINOR(disk_devt(dm_disk(md))) != minor) || dm_deleting_md(md) || test_bit(DMF_FREEING, &md->flags))) { md = NULL; goto out; } dm_get(md); } out: spin_unlock(&_minor_lock); return md; } EXPORT_SYMBOL_GPL(dm_get_md); void *dm_get_mdptr(struct mapped_device *md) { return md->interface_ptr; } void dm_set_mdptr(struct mapped_device *md, void *ptr) { md->interface_ptr = ptr; } void dm_get(struct mapped_device *md) { atomic_inc(&md->holders); BUG_ON(test_bit(DMF_FREEING, &md->flags)); } int dm_hold(struct mapped_device *md) { spin_lock(&_minor_lock); if (test_bit(DMF_FREEING, &md->flags)) { spin_unlock(&_minor_lock); return -EBUSY; } dm_get(md); spin_unlock(&_minor_lock); return 0; } EXPORT_SYMBOL_GPL(dm_hold); const char *dm_device_name(struct mapped_device *md) { return md->name; } EXPORT_SYMBOL_GPL(dm_device_name); static void __dm_destroy(struct mapped_device *md, bool wait) { struct request_queue *q = dm_get_md_queue(md); struct dm_table *map; int srcu_idx; might_sleep(); spin_lock(&_minor_lock); idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md)))); set_bit(DMF_FREEING, &md->flags); spin_unlock(&_minor_lock); blk_set_queue_dying(q); if (dm_request_based(md) && md->kworker_task) kthread_flush_worker(&md->kworker); /* * Take suspend_lock so that presuspend and postsuspend methods * do not race with internal suspend. */ mutex_lock(&md->suspend_lock); map = dm_get_live_table(md, &srcu_idx); if (!dm_suspended_md(md)) { dm_table_presuspend_targets(map); dm_table_postsuspend_targets(map); } /* dm_put_live_table must be before msleep, otherwise deadlock is possible */ dm_put_live_table(md, srcu_idx); mutex_unlock(&md->suspend_lock); /* * Rare, but there may be I/O requests still going to complete, * for example. Wait for all references to disappear. * No one should increment the reference count of the mapped_device, * after the mapped_device state becomes DMF_FREEING. */ if (wait) while (atomic_read(&md->holders)) msleep(1); else if (atomic_read(&md->holders)) DMWARN("%s: Forcibly removing mapped_device still in use! 
(%d users)", dm_device_name(md), atomic_read(&md->holders)); dm_sysfs_exit(md); dm_table_destroy(__unbind(md)); free_dev(md); } void dm_destroy(struct mapped_device *md) { __dm_destroy(md, true); } void dm_destroy_immediate(struct mapped_device *md) { __dm_destroy(md, false); } void dm_put(struct mapped_device *md) { atomic_dec(&md->holders); } EXPORT_SYMBOL_GPL(dm_put); static int dm_wait_for_completion(struct mapped_device *md, long task_state) { int r = 0; DEFINE_WAIT(wait); while (1) { prepare_to_wait(&md->wait, &wait, task_state); if (!md_in_flight(md)) break; if (signal_pending_state(task_state, current)) { r = -EINTR; break; } io_schedule(); } finish_wait(&md->wait, &wait); return r; } /* * Process the deferred bios */ static void dm_wq_work(struct work_struct *work) { struct mapped_device *md = container_of(work, struct mapped_device, work); struct bio *c; int srcu_idx; struct dm_table *map; map = dm_get_live_table(md, &srcu_idx); while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { spin_lock_irq(&md->deferred_lock); c = bio_list_pop(&md->deferred); spin_unlock_irq(&md->deferred_lock); if (!c) break; if (dm_request_based(md)) generic_make_request(c); else __split_and_process_bio(md, map, c); } dm_put_live_table(md, srcu_idx); } static void dm_queue_flush(struct mapped_device *md) { clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); smp_mb__after_atomic(); queue_work(md->wq, &md->work); } /* * Swap in a new table, returning the old one for the caller to destroy. */ struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table) { struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL); struct queue_limits limits; int r; mutex_lock(&md->suspend_lock); /* device must be suspended */ if (!dm_suspended_md(md)) goto out; /* * If the new table has no data devices, retain the existing limits. * This helps multipath with queue_if_no_path if all paths disappear, * then new I/O is queued based on these limits, and then some paths * reappear. */ if (dm_table_has_no_data_devices(table)) { live_map = dm_get_live_table_fast(md); if (live_map) limits = md->queue->limits; dm_put_live_table_fast(md); } if (!live_map) { r = dm_calculate_queue_limits(table, &limits); if (r) { map = ERR_PTR(r); goto out; } } map = __bind(md, table, &limits); dm_issue_global_event(); out: mutex_unlock(&md->suspend_lock); return map; } /* * Functions to lock and unlock any filesystem running on the * device. */ static int lock_fs(struct mapped_device *md) { int r; WARN_ON(md->frozen_sb); md->frozen_sb = freeze_bdev(md->bdev); if (IS_ERR(md->frozen_sb)) { r = PTR_ERR(md->frozen_sb); md->frozen_sb = NULL; return r; } set_bit(DMF_FROZEN, &md->flags); return 0; } static void unlock_fs(struct mapped_device *md) { if (!test_bit(DMF_FROZEN, &md->flags)) return; thaw_bdev(md->bdev, md->frozen_sb); md->frozen_sb = NULL; clear_bit(DMF_FROZEN, &md->flags); } /* * @suspend_flags: DM_SUSPEND_LOCKFS_FLAG and/or DM_SUSPEND_NOFLUSH_FLAG * @task_state: e.g. TASK_INTERRUPTIBLE or TASK_UNINTERRUPTIBLE * @dmf_suspended_flag: DMF_SUSPENDED or DMF_SUSPENDED_INTERNALLY * * If __dm_suspend returns 0, the device is completely quiescent * now. There is no request-processing activity. All new requests * are being added to md->deferred list. 
*/ static int __dm_suspend(struct mapped_device *md, struct dm_table *map, unsigned suspend_flags, long task_state, int dmf_suspended_flag) { bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG; bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG; int r; lockdep_assert_held(&md->suspend_lock); /* * DMF_NOFLUSH_SUSPENDING must be set before presuspend. * This flag is cleared before dm_suspend returns. */ if (noflush) set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); else pr_debug("%s: suspending with flush\n", dm_device_name(md)); /* * This gets reverted if there's an error later and the targets * provide the .presuspend_undo hook. */ dm_table_presuspend_targets(map); /* * Flush I/O to the device. * Any I/O submitted after lock_fs() may not be flushed. * noflush takes precedence over do_lockfs. * (lock_fs() flushes I/Os and waits for them to complete.) */ if (!noflush && do_lockfs) { r = lock_fs(md); if (r) { dm_table_presuspend_undo_targets(map); return r; } } /* * Here we must make sure that no processes are submitting requests * to target drivers i.e. no one may be executing * __split_and_process_bio. This is called from dm_request and * dm_wq_work. * * To get all processes out of __split_and_process_bio in dm_request, * we take the write lock. To prevent any process from reentering * __split_and_process_bio from dm_request and quiesce the thread * (dm_wq_work), we set BMF_BLOCK_IO_FOR_SUSPEND and call * flush_workqueue(md->wq). */ set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); if (map) synchronize_srcu(&md->io_barrier); /* * Stop md->queue before flushing md->wq in case request-based * dm defers requests to md->wq from md->queue. */ if (dm_request_based(md)) { dm_stop_queue(md->queue); if (md->kworker_task) kthread_flush_worker(&md->kworker); } flush_workqueue(md->wq); /* * At this point no more requests are entering target request routines. * We call dm_wait_for_completion to wait for all existing requests * to finish. */ r = dm_wait_for_completion(md, task_state); if (!r) set_bit(dmf_suspended_flag, &md->flags); if (noflush) clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); if (map) synchronize_srcu(&md->io_barrier); /* were we interrupted ? */ if (r < 0) { dm_queue_flush(md); if (dm_request_based(md)) dm_start_queue(md->queue); unlock_fs(md); dm_table_presuspend_undo_targets(map); /* pushback list is already flushed, so skip flush */ } return r; } /* * We need to be able to change a mapping table under a mounted * filesystem. For example we might want to move some data in * the background. Before the table can be swapped with * dm_bind_table, dm_suspend must be called to flush any in * flight bios and ensure that any further io gets deferred. */ /* * Suspend mechanism in request-based dm. * * 1. Flush all I/Os by lock_fs() if needed. * 2. Stop dispatching any I/O by stopping the request_queue. * 3. Wait for all in-flight I/Os to be completed or requeued. * * To abort suspend, start the request_queue. 
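 *
 * In __dm_suspend() these steps correspond to lock_fs() (only with
 * DM_SUSPEND_LOCKFS_FLAG and without noflush), dm_stop_queue() and
 * dm_wait_for_completion(); the abort path restarts the queue with
 * dm_start_queue().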
*/ int dm_suspend(struct mapped_device *md, unsigned suspend_flags) { struct dm_table *map = NULL; int r = 0; retry: mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING); if (dm_suspended_md(md)) { r = -EINVAL; goto out_unlock; } if (dm_suspended_internally_md(md)) { /* already internally suspended, wait for internal resume */ mutex_unlock(&md->suspend_lock); r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE); if (r) return r; goto retry; } map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED); if (r) goto out_unlock; dm_table_postsuspend_targets(map); out_unlock: mutex_unlock(&md->suspend_lock); return r; } static int __dm_resume(struct mapped_device *md, struct dm_table *map) { if (map) { int r = dm_table_resume_targets(map); if (r) return r; } dm_queue_flush(md); /* * Flushing deferred I/Os must be done after targets are resumed * so that mapping of targets can work correctly. * Request-based dm is queueing the deferred I/Os in its request_queue. */ if (dm_request_based(md)) dm_start_queue(md->queue); unlock_fs(md); return 0; } int dm_resume(struct mapped_device *md) { int r; struct dm_table *map = NULL; retry: r = -EINVAL; mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING); if (!dm_suspended_md(md)) goto out; if (dm_suspended_internally_md(md)) { /* already internally suspended, wait for internal resume */ mutex_unlock(&md->suspend_lock); r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE); if (r) return r; goto retry; } map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); if (!map || !dm_table_get_size(map)) goto out; r = __dm_resume(md, map); if (r) goto out; clear_bit(DMF_SUSPENDED, &md->flags); out: mutex_unlock(&md->suspend_lock); return r; } /* * Internal suspend/resume works like userspace-driven suspend. It waits * until all bios finish and prevents issuing new bios to the target drivers. * It may be used only from the kernel. */ static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags) { struct dm_table *map = NULL; lockdep_assert_held(&md->suspend_lock); if (md->internal_suspend_count++) return; /* nested internal suspend */ if (dm_suspended_md(md)) { set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); return; /* nest suspend */ } map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); /* * Using TASK_UNINTERRUPTIBLE because only NOFLUSH internal suspend is * supported. Properly supporting a TASK_INTERRUPTIBLE internal suspend * would require changing .presuspend to return an error -- avoid this * until there is a need for more elaborate variants of internal suspend. 
*/ (void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE, DMF_SUSPENDED_INTERNALLY); dm_table_postsuspend_targets(map); } static void __dm_internal_resume(struct mapped_device *md) { BUG_ON(!md->internal_suspend_count); if (--md->internal_suspend_count) return; /* resume from nested internal suspend */ if (dm_suspended_md(md)) goto done; /* resume from nested suspend */ /* * NOTE: existing callers don't need to call dm_table_resume_targets * (which may fail -- so best to avoid it for now by passing NULL map) */ (void) __dm_resume(md, NULL); done: clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); smp_mb__after_atomic(); wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY); } void dm_internal_suspend_noflush(struct mapped_device *md) { mutex_lock(&md->suspend_lock); __dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG); mutex_unlock(&md->suspend_lock); } EXPORT_SYMBOL_GPL(dm_internal_suspend_noflush); void dm_internal_resume(struct mapped_device *md) { mutex_lock(&md->suspend_lock); __dm_internal_resume(md); mutex_unlock(&md->suspend_lock); } EXPORT_SYMBOL_GPL(dm_internal_resume); /* * Fast variants of internal suspend/resume hold md->suspend_lock, * which prevents interaction with userspace-driven suspend. */ void dm_internal_suspend_fast(struct mapped_device *md) { mutex_lock(&md->suspend_lock); if (dm_suspended_md(md) || dm_suspended_internally_md(md)) return; set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); synchronize_srcu(&md->io_barrier); flush_workqueue(md->wq); dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE); } EXPORT_SYMBOL_GPL(dm_internal_suspend_fast); void dm_internal_resume_fast(struct mapped_device *md) { if (dm_suspended_md(md) || dm_suspended_internally_md(md)) goto done; dm_queue_flush(md); done: mutex_unlock(&md->suspend_lock); } EXPORT_SYMBOL_GPL(dm_internal_resume_fast); /*----------------------------------------------------------------- * Event notification. *---------------------------------------------------------------*/ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action, unsigned cookie) { char udev_cookie[DM_COOKIE_LENGTH]; char *envp[] = { udev_cookie, NULL }; if (!cookie) return kobject_uevent(&disk_to_dev(md->disk)->kobj, action); else { snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u", DM_COOKIE_ENV_VAR_NAME, cookie); return kobject_uevent_env(&disk_to_dev(md->disk)->kobj, action, envp); } } uint32_t dm_next_uevent_seq(struct mapped_device *md) { return atomic_add_return(1, &md->uevent_seq); } uint32_t dm_get_event_nr(struct mapped_device *md) { return atomic_read(&md->event_nr); } int dm_wait_event(struct mapped_device *md, int event_nr) { return wait_event_interruptible(md->eventq, (event_nr != atomic_read(&md->event_nr))); } void dm_uevent_add(struct mapped_device *md, struct list_head *elist) { unsigned long flags; spin_lock_irqsave(&md->uevent_lock, flags); list_add(elist, &md->uevent_list); spin_unlock_irqrestore(&md->uevent_lock, flags); } /* * The gendisk is only valid as long as you have a reference * count on 'md'. 
*/ struct gendisk *dm_disk(struct mapped_device *md) { return md->disk; } EXPORT_SYMBOL_GPL(dm_disk); struct kobject *dm_kobject(struct mapped_device *md) { return &md->kobj_holder.kobj; } struct mapped_device *dm_get_from_kobject(struct kobject *kobj) { struct mapped_device *md; md = container_of(kobj, struct mapped_device, kobj_holder.kobj); spin_lock(&_minor_lock); if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) { md = NULL; goto out; } dm_get(md); out: spin_unlock(&_minor_lock); return md; } int dm_suspended_md(struct mapped_device *md) { return test_bit(DMF_SUSPENDED, &md->flags); } int dm_suspended_internally_md(struct mapped_device *md) { return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); } int dm_test_deferred_remove_flag(struct mapped_device *md) { return test_bit(DMF_DEFERRED_REMOVE, &md->flags); } int dm_suspended(struct dm_target *ti) { return dm_suspended_md(dm_table_get_md(ti->table)); } EXPORT_SYMBOL_GPL(dm_suspended); int dm_noflush_suspending(struct dm_target *ti) { return __noflush_suspending(dm_table_get_md(ti->table)); } EXPORT_SYMBOL_GPL(dm_noflush_suspending); struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type, unsigned integrity, unsigned per_io_data_size) { struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id); unsigned int pool_size = 0; unsigned int front_pad; if (!pools) return NULL; switch (type) { case DM_TYPE_BIO_BASED: case DM_TYPE_DAX_BIO_BASED: pool_size = dm_get_reserved_bio_based_ios(); front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone); pools->io_pool = mempool_create_slab_pool(pool_size, _io_cache); if (!pools->io_pool) goto out; break; case DM_TYPE_REQUEST_BASED: case DM_TYPE_MQ_REQUEST_BASED: pool_size = dm_get_reserved_rq_based_ios(); front_pad = offsetof(struct dm_rq_clone_bio_info, clone); /* per_io_data_size is used for blk-mq pdu at queue allocation */ break; default: BUG(); } pools->bs = bioset_create(pool_size, front_pad, BIOSET_NEED_RESCUER); if (!pools->bs) goto out; if (integrity && bioset_integrity_create(pools->bs, pool_size)) goto out; return pools; out: dm_free_md_mempools(pools); return NULL; } void dm_free_md_mempools(struct dm_md_mempools *pools) { if (!pools) return; mempool_destroy(pools->io_pool); if (pools->bs) bioset_free(pools->bs); kfree(pools); } struct dm_pr { u64 old_key; u64 new_key; u32 flags; bool fail_early; }; static int dm_call_pr(struct block_device *bdev, iterate_devices_callout_fn fn, void *data) { struct mapped_device *md = bdev->bd_disk->private_data; struct dm_table *table; struct dm_target *ti; int ret = -ENOTTY, srcu_idx; table = dm_get_live_table(md, &srcu_idx); if (!table || !dm_table_get_size(table)) goto out; /* We only support devices that have a single target */ if (dm_table_get_num_targets(table) != 1) goto out; ti = dm_table_get_target(table, 0); ret = -EINVAL; if (!ti->type->iterate_devices) goto out; ret = ti->type->iterate_devices(ti, fn, data); out: dm_put_live_table(md, srcu_idx); return ret; } /* * For register / unregister we need to manually call out to every path. 
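 *
 * dm_call_pr() below resolves the (single-target) live table and invokes
 * the given callout on each underlying device via ->iterate_devices;
 * dm_pr_register() uses this to register every path and, if any path
 * fails, rolls the others back by re-registering with the new key as the
 * old key and a zero new key.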
*/ static int __dm_pr_register(struct dm_target *ti, struct dm_dev *dev, sector_t start, sector_t len, void *data) { struct dm_pr *pr = data; const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops; if (!ops || !ops->pr_register) return -EOPNOTSUPP; return ops->pr_register(dev->bdev, pr->old_key, pr->new_key, pr->flags); } static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key, u32 flags) { struct dm_pr pr = { .old_key = old_key, .new_key = new_key, .flags = flags, .fail_early = true, }; int ret; ret = dm_call_pr(bdev, __dm_pr_register, &pr); if (ret && new_key) { /* unregister all paths if we failed to register any path */ pr.old_key = new_key; pr.new_key = 0; pr.flags = 0; pr.fail_early = false; dm_call_pr(bdev, __dm_pr_register, &pr); } return ret; } static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type, u32 flags) { struct mapped_device *md = bdev->bd_disk->private_data; const struct pr_ops *ops; fmode_t mode; int r; r = dm_grab_bdev_for_ioctl(md, &bdev, &mode); if (r < 0) return r; ops = bdev->bd_disk->fops->pr_ops; if (ops && ops->pr_reserve) r = ops->pr_reserve(bdev, key, type, flags); else r = -EOPNOTSUPP; bdput(bdev); return r; } static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type) { struct mapped_device *md = bdev->bd_disk->private_data; const struct pr_ops *ops; fmode_t mode; int r; r = dm_grab_bdev_for_ioctl(md, &bdev, &mode); if (r < 0) return r; ops = bdev->bd_disk->fops->pr_ops; if (ops && ops->pr_release) r = ops->pr_release(bdev, key, type); else r = -EOPNOTSUPP; bdput(bdev); return r; } static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key, enum pr_type type, bool abort) { struct mapped_device *md = bdev->bd_disk->private_data; const struct pr_ops *ops; fmode_t mode; int r; r = dm_grab_bdev_for_ioctl(md, &bdev, &mode); if (r < 0) return r; ops = bdev->bd_disk->fops->pr_ops; if (ops && ops->pr_preempt) r = ops->pr_preempt(bdev, old_key, new_key, type, abort); else r = -EOPNOTSUPP; bdput(bdev); return r; } static int dm_pr_clear(struct block_device *bdev, u64 key) { struct mapped_device *md = bdev->bd_disk->private_data; const struct pr_ops *ops; fmode_t mode; int r; r = dm_grab_bdev_for_ioctl(md, &bdev, &mode); if (r < 0) return r; ops = bdev->bd_disk->fops->pr_ops; if (ops && ops->pr_clear) r = ops->pr_clear(bdev, key); else r = -EOPNOTSUPP; bdput(bdev); return r; } static const struct pr_ops dm_pr_ops = { .pr_register = dm_pr_register, .pr_reserve = dm_pr_reserve, .pr_release = dm_pr_release, .pr_preempt = dm_pr_preempt, .pr_clear = dm_pr_clear, }; static const struct block_device_operations dm_blk_dops = { .open = dm_blk_open, .release = dm_blk_close, .ioctl = dm_blk_ioctl, .getgeo = dm_blk_getgeo, .pr_ops = &dm_pr_ops, .owner = THIS_MODULE }; static const struct dax_operations dm_dax_ops = { .direct_access = dm_dax_direct_access, .copy_from_iter = dm_dax_copy_from_iter, }; /* * module hooks */ module_init(dm_init); module_exit(dm_exit); module_param(major, uint, 0); MODULE_PARM_DESC(major, "The major number of the device mapper"); module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools"); module_param(dm_numa_node, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations"); MODULE_DESCRIPTION(DM_NAME " driver"); MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>"); MODULE_LICENSE("GPL");
./CrossVul/dataset_final_sorted/CWE-362/c/good_3014_0
crossvul-cpp_data_bad_3021_0
/* -*- mode: c; c-basic-offset: 8; -*- * vim: noexpandtab sw=8 ts=8 sts=0: * * Copyright (C) 2002, 2004 Oracle. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 021110-1307, USA. */ #include <linux/fs.h> #include <linux/slab.h> #include <linux/highmem.h> #include <linux/pagemap.h> #include <asm/byteorder.h> #include <linux/swap.h> #include <linux/pipe_fs_i.h> #include <linux/mpage.h> #include <linux/quotaops.h> #include <linux/blkdev.h> #include <linux/uio.h> #include <cluster/masklog.h> #include "ocfs2.h" #include "alloc.h" #include "aops.h" #include "dlmglue.h" #include "extent_map.h" #include "file.h" #include "inode.h" #include "journal.h" #include "suballoc.h" #include "super.h" #include "symlink.h" #include "refcounttree.h" #include "ocfs2_trace.h" #include "buffer_head_io.h" #include "dir.h" #include "namei.h" #include "sysfile.h" static int ocfs2_symlink_get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create) { int err = -EIO; int status; struct ocfs2_dinode *fe = NULL; struct buffer_head *bh = NULL; struct buffer_head *buffer_cache_bh = NULL; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); void *kaddr; trace_ocfs2_symlink_get_block( (unsigned long long)OCFS2_I(inode)->ip_blkno, (unsigned long long)iblock, bh_result, create); BUG_ON(ocfs2_inode_is_fast_symlink(inode)); if ((iblock << inode->i_sb->s_blocksize_bits) > PATH_MAX + 1) { mlog(ML_ERROR, "block offset > PATH_MAX: %llu", (unsigned long long)iblock); goto bail; } status = ocfs2_read_inode_block(inode, &bh); if (status < 0) { mlog_errno(status); goto bail; } fe = (struct ocfs2_dinode *) bh->b_data; if ((u64)iblock >= ocfs2_clusters_to_blocks(inode->i_sb, le32_to_cpu(fe->i_clusters))) { err = -ENOMEM; mlog(ML_ERROR, "block offset is outside the allocated size: " "%llu\n", (unsigned long long)iblock); goto bail; } /* We don't use the page cache to create symlink data, so if * need be, copy it over from the buffer cache. */ if (!buffer_uptodate(bh_result) && ocfs2_inode_is_new(inode)) { u64 blkno = le64_to_cpu(fe->id2.i_list.l_recs[0].e_blkno) + iblock; buffer_cache_bh = sb_getblk(osb->sb, blkno); if (!buffer_cache_bh) { err = -ENOMEM; mlog(ML_ERROR, "couldn't getblock for symlink!\n"); goto bail; } /* we haven't locked out transactions, so a commit * could've happened. Since we've got a reference on * the bh, even if it commits while we're doing the * copy, the data is still good. 
*/ if (buffer_jbd(buffer_cache_bh) && ocfs2_inode_is_new(inode)) { kaddr = kmap_atomic(bh_result->b_page); if (!kaddr) { mlog(ML_ERROR, "couldn't kmap!\n"); goto bail; } memcpy(kaddr + (bh_result->b_size * iblock), buffer_cache_bh->b_data, bh_result->b_size); kunmap_atomic(kaddr); set_buffer_uptodate(bh_result); } brelse(buffer_cache_bh); } map_bh(bh_result, inode->i_sb, le64_to_cpu(fe->id2.i_list.l_recs[0].e_blkno) + iblock); err = 0; bail: brelse(bh); return err; } int ocfs2_get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create) { int err = 0; unsigned int ext_flags; u64 max_blocks = bh_result->b_size >> inode->i_blkbits; u64 p_blkno, count, past_eof; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); trace_ocfs2_get_block((unsigned long long)OCFS2_I(inode)->ip_blkno, (unsigned long long)iblock, bh_result, create); if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_SYSTEM_FILE) mlog(ML_NOTICE, "get_block on system inode 0x%p (%lu)\n", inode, inode->i_ino); if (S_ISLNK(inode->i_mode)) { /* this always does I/O for some reason. */ err = ocfs2_symlink_get_block(inode, iblock, bh_result, create); goto bail; } err = ocfs2_extent_map_get_blocks(inode, iblock, &p_blkno, &count, &ext_flags); if (err) { mlog(ML_ERROR, "Error %d from get_blocks(0x%p, %llu, 1, " "%llu, NULL)\n", err, inode, (unsigned long long)iblock, (unsigned long long)p_blkno); goto bail; } if (max_blocks < count) count = max_blocks; /* * ocfs2 never allocates in this function - the only time we * need to use BH_New is when we're extending i_size on a file * system which doesn't support holes, in which case BH_New * allows __block_write_begin() to zero. * * If we see this on a sparse file system, then a truncate has * raced us and removed the cluster. In this case, we clear * the buffers dirty and uptodate bits and let the buffer code * ignore it as a hole. */ if (create && p_blkno == 0 && ocfs2_sparse_alloc(osb)) { clear_buffer_dirty(bh_result); clear_buffer_uptodate(bh_result); goto bail; } /* Treat the unwritten extent as a hole for zeroing purposes. 
*/ if (p_blkno && !(ext_flags & OCFS2_EXT_UNWRITTEN)) map_bh(bh_result, inode->i_sb, p_blkno); bh_result->b_size = count << inode->i_blkbits; if (!ocfs2_sparse_alloc(osb)) { if (p_blkno == 0) { err = -EIO; mlog(ML_ERROR, "iblock = %llu p_blkno = %llu blkno=(%llu)\n", (unsigned long long)iblock, (unsigned long long)p_blkno, (unsigned long long)OCFS2_I(inode)->ip_blkno); mlog(ML_ERROR, "Size %llu, clusters %u\n", (unsigned long long)i_size_read(inode), OCFS2_I(inode)->ip_clusters); dump_stack(); goto bail; } } past_eof = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode)); trace_ocfs2_get_block_end((unsigned long long)OCFS2_I(inode)->ip_blkno, (unsigned long long)past_eof); if (create && (iblock >= past_eof)) set_buffer_new(bh_result); bail: if (err < 0) err = -EIO; return err; } int ocfs2_read_inline_data(struct inode *inode, struct page *page, struct buffer_head *di_bh) { void *kaddr; loff_t size; struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; if (!(le16_to_cpu(di->i_dyn_features) & OCFS2_INLINE_DATA_FL)) { ocfs2_error(inode->i_sb, "Inode %llu lost inline data flag\n", (unsigned long long)OCFS2_I(inode)->ip_blkno); return -EROFS; } size = i_size_read(inode); if (size > PAGE_SIZE || size > ocfs2_max_inline_data_with_xattr(inode->i_sb, di)) { ocfs2_error(inode->i_sb, "Inode %llu has with inline data has bad size: %Lu\n", (unsigned long long)OCFS2_I(inode)->ip_blkno, (unsigned long long)size); return -EROFS; } kaddr = kmap_atomic(page); if (size) memcpy(kaddr, di->id2.i_data.id_data, size); /* Clear the remaining part of the page */ memset(kaddr + size, 0, PAGE_SIZE - size); flush_dcache_page(page); kunmap_atomic(kaddr); SetPageUptodate(page); return 0; } static int ocfs2_readpage_inline(struct inode *inode, struct page *page) { int ret; struct buffer_head *di_bh = NULL; BUG_ON(!PageLocked(page)); BUG_ON(!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)); ret = ocfs2_read_inode_block(inode, &di_bh); if (ret) { mlog_errno(ret); goto out; } ret = ocfs2_read_inline_data(inode, page, di_bh); out: unlock_page(page); brelse(di_bh); return ret; } static int ocfs2_readpage(struct file *file, struct page *page) { struct inode *inode = page->mapping->host; struct ocfs2_inode_info *oi = OCFS2_I(inode); loff_t start = (loff_t)page->index << PAGE_SHIFT; int ret, unlock = 1; trace_ocfs2_readpage((unsigned long long)oi->ip_blkno, (page ? page->index : 0)); ret = ocfs2_inode_lock_with_page(inode, NULL, 0, page); if (ret != 0) { if (ret == AOP_TRUNCATED_PAGE) unlock = 0; mlog_errno(ret); goto out; } if (down_read_trylock(&oi->ip_alloc_sem) == 0) { /* * Unlock the page and cycle ip_alloc_sem so that we don't * busyloop waiting for ip_alloc_sem to unlock */ ret = AOP_TRUNCATED_PAGE; unlock_page(page); unlock = 0; down_read(&oi->ip_alloc_sem); up_read(&oi->ip_alloc_sem); goto out_inode_unlock; } /* * i_size might have just been updated as we grabed the meta lock. We * might now be discovering a truncate that hit on another node. * block_read_full_page->get_block freaks out if it is asked to read * beyond the end of a file, so we check here. Callers * (generic_file_read, vm_ops->fault) are clever enough to check i_size * and notice that the page they just read isn't needed. * * XXX sys_readahead() seems to get that wrong? 
*/ if (start >= i_size_read(inode)) { zero_user(page, 0, PAGE_SIZE); SetPageUptodate(page); ret = 0; goto out_alloc; } if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) ret = ocfs2_readpage_inline(inode, page); else ret = block_read_full_page(page, ocfs2_get_block); unlock = 0; out_alloc: up_read(&OCFS2_I(inode)->ip_alloc_sem); out_inode_unlock: ocfs2_inode_unlock(inode, 0); out: if (unlock) unlock_page(page); return ret; } /* * This is used only for read-ahead. Failures or difficult to handle * situations are safe to ignore. * * Right now, we don't bother with BH_Boundary - in-inode extent lists * are quite large (243 extents on 4k blocks), so most inodes don't * grow out to a tree. If need be, detecting boundary extents could * trivially be added in a future version of ocfs2_get_block(). */ static int ocfs2_readpages(struct file *filp, struct address_space *mapping, struct list_head *pages, unsigned nr_pages) { int ret, err = -EIO; struct inode *inode = mapping->host; struct ocfs2_inode_info *oi = OCFS2_I(inode); loff_t start; struct page *last; /* * Use the nonblocking flag for the dlm code to avoid page * lock inversion, but don't bother with retrying. */ ret = ocfs2_inode_lock_full(inode, NULL, 0, OCFS2_LOCK_NONBLOCK); if (ret) return err; if (down_read_trylock(&oi->ip_alloc_sem) == 0) { ocfs2_inode_unlock(inode, 0); return err; } /* * Don't bother with inline-data. There isn't anything * to read-ahead in that case anyway... */ if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) goto out_unlock; /* * Check whether a remote node truncated this file - we just * drop out in that case as it's not worth handling here. */ last = list_entry(pages->prev, struct page, lru); start = (loff_t)last->index << PAGE_SHIFT; if (start >= i_size_read(inode)) goto out_unlock; err = mpage_readpages(mapping, pages, nr_pages, ocfs2_get_block); out_unlock: up_read(&oi->ip_alloc_sem); ocfs2_inode_unlock(inode, 0); return err; } /* Note: Because we don't support holes, our allocation has * already happened (allocation writes zeros to the file data) * so we don't have to worry about ordered writes in * ocfs2_writepage. * * ->writepage is called during the process of invalidating the page cache * during blocked lock processing. It can't block on any cluster locks * to during block mapping. It's relying on the fact that the block * mapping can't have disappeared under the dirty pages that it is * being asked to write back. */ static int ocfs2_writepage(struct page *page, struct writeback_control *wbc) { trace_ocfs2_writepage( (unsigned long long)OCFS2_I(page->mapping->host)->ip_blkno, page->index); return block_write_full_page(page, ocfs2_get_block, wbc); } /* Taken from ext3. 
We don't necessarily need the full blown * functionality yet, but IMHO it's better to cut and paste the whole * thing so we can avoid introducing our own bugs (and easily pick up * their fixes when they happen) --Mark */ int walk_page_buffers( handle_t *handle, struct buffer_head *head, unsigned from, unsigned to, int *partial, int (*fn)( handle_t *handle, struct buffer_head *bh)) { struct buffer_head *bh; unsigned block_start, block_end; unsigned blocksize = head->b_size; int err, ret = 0; struct buffer_head *next; for ( bh = head, block_start = 0; ret == 0 && (bh != head || !block_start); block_start = block_end, bh = next) { next = bh->b_this_page; block_end = block_start + blocksize; if (block_end <= from || block_start >= to) { if (partial && !buffer_uptodate(bh)) *partial = 1; continue; } err = (*fn)(handle, bh); if (!ret) ret = err; } return ret; } static sector_t ocfs2_bmap(struct address_space *mapping, sector_t block) { sector_t status; u64 p_blkno = 0; int err = 0; struct inode *inode = mapping->host; trace_ocfs2_bmap((unsigned long long)OCFS2_I(inode)->ip_blkno, (unsigned long long)block); /* * The swap code (ab-)uses ->bmap to get a block mapping and then * bypasseѕ the file system for actual I/O. We really can't allow * that on refcounted inodes, so we have to skip out here. And yes, * 0 is the magic code for a bmap error.. */ if (ocfs2_is_refcount_inode(inode)) return 0; /* We don't need to lock journal system files, since they aren't * accessed concurrently from multiple nodes. */ if (!INODE_JOURNAL(inode)) { err = ocfs2_inode_lock(inode, NULL, 0); if (err) { if (err != -ENOENT) mlog_errno(err); goto bail; } down_read(&OCFS2_I(inode)->ip_alloc_sem); } if (!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)) err = ocfs2_extent_map_get_blocks(inode, block, &p_blkno, NULL, NULL); if (!INODE_JOURNAL(inode)) { up_read(&OCFS2_I(inode)->ip_alloc_sem); ocfs2_inode_unlock(inode, 0); } if (err) { mlog(ML_ERROR, "get_blocks() failed, block = %llu\n", (unsigned long long)block); mlog_errno(err); goto bail; } bail: status = err ? 0 : p_blkno; return status; } static int ocfs2_releasepage(struct page *page, gfp_t wait) { if (!page_has_buffers(page)) return 0; return try_to_free_buffers(page); } static void ocfs2_figure_cluster_boundaries(struct ocfs2_super *osb, u32 cpos, unsigned int *start, unsigned int *end) { unsigned int cluster_start = 0, cluster_end = PAGE_SIZE; if (unlikely(PAGE_SHIFT > osb->s_clustersize_bits)) { unsigned int cpp; cpp = 1 << (PAGE_SHIFT - osb->s_clustersize_bits); cluster_start = cpos % cpp; cluster_start = cluster_start << osb->s_clustersize_bits; cluster_end = cluster_start + osb->s_clustersize; } BUG_ON(cluster_start > PAGE_SIZE); BUG_ON(cluster_end > PAGE_SIZE); if (start) *start = cluster_start; if (end) *end = cluster_end; } /* * 'from' and 'to' are the region in the page to avoid zeroing. * * If pagesize > clustersize, this function will avoid zeroing outside * of the cluster boundary. 
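 *
 * For example, with 64K pages and a 4K cluster size (so cpp == 16 in
 * ocfs2_figure_cluster_boundaries() above), cpos 5 maps to the byte
 * range [5 * 4096, 6 * 4096) == [20480, 24576) within the page, and
 * only the parts of that range outside 'from'..'to' are zeroed.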
* * from == to == 0 is code for "zero the entire cluster region" */ static void ocfs2_clear_page_regions(struct page *page, struct ocfs2_super *osb, u32 cpos, unsigned from, unsigned to) { void *kaddr; unsigned int cluster_start, cluster_end; ocfs2_figure_cluster_boundaries(osb, cpos, &cluster_start, &cluster_end); kaddr = kmap_atomic(page); if (from || to) { if (from > cluster_start) memset(kaddr + cluster_start, 0, from - cluster_start); if (to < cluster_end) memset(kaddr + to, 0, cluster_end - to); } else { memset(kaddr + cluster_start, 0, cluster_end - cluster_start); } kunmap_atomic(kaddr); } /* * Nonsparse file systems fully allocate before we get to the write * code. This prevents ocfs2_write() from tagging the write as an * allocating one, which means ocfs2_map_page_blocks() might try to * read-in the blocks at the tail of our file. Avoid reading them by * testing i_size against each block offset. */ static int ocfs2_should_read_blk(struct inode *inode, struct page *page, unsigned int block_start) { u64 offset = page_offset(page) + block_start; if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb))) return 1; if (i_size_read(inode) > offset) return 1; return 0; } /* * Some of this taken from __block_write_begin(). We already have our * mapping by now though, and the entire write will be allocating or * it won't, so not much need to use BH_New. * * This will also skip zeroing, which is handled externally. */ int ocfs2_map_page_blocks(struct page *page, u64 *p_blkno, struct inode *inode, unsigned int from, unsigned int to, int new) { int ret = 0; struct buffer_head *head, *bh, *wait[2], **wait_bh = wait; unsigned int block_end, block_start; unsigned int bsize = i_blocksize(inode); if (!page_has_buffers(page)) create_empty_buffers(page, bsize, 0); head = page_buffers(page); for (bh = head, block_start = 0; bh != head || !block_start; bh = bh->b_this_page, block_start += bsize) { block_end = block_start + bsize; clear_buffer_new(bh); /* * Ignore blocks outside of our i/o range - * they may belong to unallocated clusters. */ if (block_start >= to || block_end <= from) { if (PageUptodate(page)) set_buffer_uptodate(bh); continue; } /* * For an allocating write with cluster size >= page * size, we always write the entire page. */ if (new) set_buffer_new(bh); if (!buffer_mapped(bh)) { map_bh(bh, inode->i_sb, *p_blkno); clean_bdev_bh_alias(bh); } if (PageUptodate(page)) { if (!buffer_uptodate(bh)) set_buffer_uptodate(bh); } else if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_new(bh) && ocfs2_should_read_blk(inode, page, block_start) && (block_start < from || block_end > to)) { ll_rw_block(REQ_OP_READ, 0, 1, &bh); *wait_bh++=bh; } *p_blkno = *p_blkno + 1; } /* * If we issued read requests - let them complete. */ while(wait_bh > wait) { wait_on_buffer(*--wait_bh); if (!buffer_uptodate(*wait_bh)) ret = -EIO; } if (ret == 0 || !new) return ret; /* * If we get -EIO above, zero out any newly allocated blocks * to avoid exposing stale data. 
*/ bh = head; block_start = 0; do { block_end = block_start + bsize; if (block_end <= from) goto next_bh; if (block_start >= to) break; zero_user(page, block_start, bh->b_size); set_buffer_uptodate(bh); mark_buffer_dirty(bh); next_bh: block_start = block_end; bh = bh->b_this_page; } while (bh != head); return ret; } #if (PAGE_SIZE >= OCFS2_MAX_CLUSTERSIZE) #define OCFS2_MAX_CTXT_PAGES 1 #else #define OCFS2_MAX_CTXT_PAGES (OCFS2_MAX_CLUSTERSIZE / PAGE_SIZE) #endif #define OCFS2_MAX_CLUSTERS_PER_PAGE (PAGE_SIZE / OCFS2_MIN_CLUSTERSIZE) struct ocfs2_unwritten_extent { struct list_head ue_node; struct list_head ue_ip_node; u32 ue_cpos; u32 ue_phys; }; /* * Describe the state of a single cluster to be written to. */ struct ocfs2_write_cluster_desc { u32 c_cpos; u32 c_phys; /* * Give this a unique field because c_phys eventually gets * filled. */ unsigned c_new; unsigned c_clear_unwritten; unsigned c_needs_zero; }; struct ocfs2_write_ctxt { /* Logical cluster position / len of write */ u32 w_cpos; u32 w_clen; /* First cluster allocated in a nonsparse extend */ u32 w_first_new_cpos; /* Type of caller. Must be one of buffer, mmap, direct. */ ocfs2_write_type_t w_type; struct ocfs2_write_cluster_desc w_desc[OCFS2_MAX_CLUSTERS_PER_PAGE]; /* * This is true if page_size > cluster_size. * * It triggers a set of special cases during write which might * have to deal with allocating writes to partial pages. */ unsigned int w_large_pages; /* * Pages involved in this write. * * w_target_page is the page being written to by the user. * * w_pages is an array of pages which always contains * w_target_page, and in the case of an allocating write with * page_size < cluster size, it will contain zero'd and mapped * pages adjacent to w_target_page which need to be written * out in so that future reads from that region will get * zero's. */ unsigned int w_num_pages; struct page *w_pages[OCFS2_MAX_CTXT_PAGES]; struct page *w_target_page; /* * w_target_locked is used for page_mkwrite path indicating no unlocking * against w_target_page in ocfs2_write_end_nolock. */ unsigned int w_target_locked:1; /* * ocfs2_write_end() uses this to know what the real range to * write in the target should be. */ unsigned int w_target_from; unsigned int w_target_to; /* * We could use journal_current_handle() but this is cleaner, * IMHO -Mark */ handle_t *w_handle; struct buffer_head *w_di_bh; struct ocfs2_cached_dealloc_ctxt w_dealloc; struct list_head w_unwritten_list; }; void ocfs2_unlock_and_free_pages(struct page **pages, int num_pages) { int i; for(i = 0; i < num_pages; i++) { if (pages[i]) { unlock_page(pages[i]); mark_page_accessed(pages[i]); put_page(pages[i]); } } } static void ocfs2_unlock_pages(struct ocfs2_write_ctxt *wc) { int i; /* * w_target_locked is only set to true in the page_mkwrite() case. * The intent is to allow us to lock the target page from write_begin() * to write_end(). The caller must hold a ref on w_target_page. 
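 *
 * Concretely: ocfs2_grab_pages_for_write() locks mmap_page and takes an
 * extra reference on it.  This helper drops only that extra reference,
 * pulling the page out of w_pages[] first so that
 * ocfs2_unlock_and_free_pages() below does not unlock it - the page is
 * left locked for the page_mkwrite() caller.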
*/ if (wc->w_target_locked) { BUG_ON(!wc->w_target_page); for (i = 0; i < wc->w_num_pages; i++) { if (wc->w_target_page == wc->w_pages[i]) { wc->w_pages[i] = NULL; break; } } mark_page_accessed(wc->w_target_page); put_page(wc->w_target_page); } ocfs2_unlock_and_free_pages(wc->w_pages, wc->w_num_pages); } static void ocfs2_free_unwritten_list(struct inode *inode, struct list_head *head) { struct ocfs2_inode_info *oi = OCFS2_I(inode); struct ocfs2_unwritten_extent *ue = NULL, *tmp = NULL; list_for_each_entry_safe(ue, tmp, head, ue_node) { list_del(&ue->ue_node); spin_lock(&oi->ip_lock); list_del(&ue->ue_ip_node); spin_unlock(&oi->ip_lock); kfree(ue); } } static void ocfs2_free_write_ctxt(struct inode *inode, struct ocfs2_write_ctxt *wc) { ocfs2_free_unwritten_list(inode, &wc->w_unwritten_list); ocfs2_unlock_pages(wc); brelse(wc->w_di_bh); kfree(wc); } static int ocfs2_alloc_write_ctxt(struct ocfs2_write_ctxt **wcp, struct ocfs2_super *osb, loff_t pos, unsigned len, ocfs2_write_type_t type, struct buffer_head *di_bh) { u32 cend; struct ocfs2_write_ctxt *wc; wc = kzalloc(sizeof(struct ocfs2_write_ctxt), GFP_NOFS); if (!wc) return -ENOMEM; wc->w_cpos = pos >> osb->s_clustersize_bits; wc->w_first_new_cpos = UINT_MAX; cend = (pos + len - 1) >> osb->s_clustersize_bits; wc->w_clen = cend - wc->w_cpos + 1; get_bh(di_bh); wc->w_di_bh = di_bh; wc->w_type = type; if (unlikely(PAGE_SHIFT > osb->s_clustersize_bits)) wc->w_large_pages = 1; else wc->w_large_pages = 0; ocfs2_init_dealloc_ctxt(&wc->w_dealloc); INIT_LIST_HEAD(&wc->w_unwritten_list); *wcp = wc; return 0; } /* * If a page has any new buffers, zero them out here, and mark them uptodate * and dirty so they'll be written out (in order to prevent uninitialised * block data from leaking). And clear the new bit. */ static void ocfs2_zero_new_buffers(struct page *page, unsigned from, unsigned to) { unsigned int block_start, block_end; struct buffer_head *head, *bh; BUG_ON(!PageLocked(page)); if (!page_has_buffers(page)) return; bh = head = page_buffers(page); block_start = 0; do { block_end = block_start + bh->b_size; if (buffer_new(bh)) { if (block_end > from && block_start < to) { if (!PageUptodate(page)) { unsigned start, end; start = max(from, block_start); end = min(to, block_end); zero_user_segment(page, start, end); set_buffer_uptodate(bh); } clear_buffer_new(bh); mark_buffer_dirty(bh); } } block_start = block_end; bh = bh->b_this_page; } while (bh != head); } /* * Only called when we have a failure during allocating write to write * zero's to the newly allocated region. 
*/ static void ocfs2_write_failure(struct inode *inode, struct ocfs2_write_ctxt *wc, loff_t user_pos, unsigned user_len) { int i; unsigned from = user_pos & (PAGE_SIZE - 1), to = user_pos + user_len; struct page *tmppage; if (wc->w_target_page) ocfs2_zero_new_buffers(wc->w_target_page, from, to); for(i = 0; i < wc->w_num_pages; i++) { tmppage = wc->w_pages[i]; if (tmppage && page_has_buffers(tmppage)) { if (ocfs2_should_order_data(inode)) ocfs2_jbd2_file_inode(wc->w_handle, inode); block_commit_write(tmppage, from, to); } } } static int ocfs2_prepare_page_for_write(struct inode *inode, u64 *p_blkno, struct ocfs2_write_ctxt *wc, struct page *page, u32 cpos, loff_t user_pos, unsigned user_len, int new) { int ret; unsigned int map_from = 0, map_to = 0; unsigned int cluster_start, cluster_end; unsigned int user_data_from = 0, user_data_to = 0; ocfs2_figure_cluster_boundaries(OCFS2_SB(inode->i_sb), cpos, &cluster_start, &cluster_end); /* treat the write as new if the a hole/lseek spanned across * the page boundary. */ new = new | ((i_size_read(inode) <= page_offset(page)) && (page_offset(page) <= user_pos)); if (page == wc->w_target_page) { map_from = user_pos & (PAGE_SIZE - 1); map_to = map_from + user_len; if (new) ret = ocfs2_map_page_blocks(page, p_blkno, inode, cluster_start, cluster_end, new); else ret = ocfs2_map_page_blocks(page, p_blkno, inode, map_from, map_to, new); if (ret) { mlog_errno(ret); goto out; } user_data_from = map_from; user_data_to = map_to; if (new) { map_from = cluster_start; map_to = cluster_end; } } else { /* * If we haven't allocated the new page yet, we * shouldn't be writing it out without copying user * data. This is likely a math error from the caller. */ BUG_ON(!new); map_from = cluster_start; map_to = cluster_end; ret = ocfs2_map_page_blocks(page, p_blkno, inode, cluster_start, cluster_end, new); if (ret) { mlog_errno(ret); goto out; } } /* * Parts of newly allocated pages need to be zero'd. * * Above, we have also rewritten 'to' and 'from' - as far as * the rest of the function is concerned, the entire cluster * range inside of a page needs to be written. * * We can skip this if the page is up to date - it's already * been zero'd from being read in as a hole. */ if (new && !PageUptodate(page)) ocfs2_clear_page_regions(page, OCFS2_SB(inode->i_sb), cpos, user_data_from, user_data_to); flush_dcache_page(page); out: return ret; } /* * This function will only grab one clusters worth of pages. */ static int ocfs2_grab_pages_for_write(struct address_space *mapping, struct ocfs2_write_ctxt *wc, u32 cpos, loff_t user_pos, unsigned user_len, int new, struct page *mmap_page) { int ret = 0, i; unsigned long start, target_index, end_index, index; struct inode *inode = mapping->host; loff_t last_byte; target_index = user_pos >> PAGE_SHIFT; /* * Figure out how many pages we'll be manipulating here. For * non allocating write, we just change the one * page. Otherwise, we'll need a whole clusters worth. If we're * writing past i_size, we only need enough pages to cover the * last page of the write. */ if (new) { wc->w_num_pages = ocfs2_pages_per_cluster(inode->i_sb); start = ocfs2_align_clusters_to_page_index(inode->i_sb, cpos); /* * We need the index *past* the last page we could possibly * touch. This is the page past the end of the write or * i_size, whichever is greater. 
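 *
 * For example, with 4K pages, a 100 byte write at pos 10000 into a file
 * whose i_size is 50000 gives last_byte == 50000 and end_index ==
 * ((50000 - 1) >> 12) + 1 == 13, so pages are grabbed only up to
 * index 12.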
*/ last_byte = max(user_pos + user_len, i_size_read(inode)); BUG_ON(last_byte < 1); end_index = ((last_byte - 1) >> PAGE_SHIFT) + 1; if ((start + wc->w_num_pages) > end_index) wc->w_num_pages = end_index - start; } else { wc->w_num_pages = 1; start = target_index; } end_index = (user_pos + user_len - 1) >> PAGE_SHIFT; for(i = 0; i < wc->w_num_pages; i++) { index = start + i; if (index >= target_index && index <= end_index && wc->w_type == OCFS2_WRITE_MMAP) { /* * ocfs2_pagemkwrite() is a little different * and wants us to directly use the page * passed in. */ lock_page(mmap_page); /* Exit and let the caller retry */ if (mmap_page->mapping != mapping) { WARN_ON(mmap_page->mapping); unlock_page(mmap_page); ret = -EAGAIN; goto out; } get_page(mmap_page); wc->w_pages[i] = mmap_page; wc->w_target_locked = true; } else if (index >= target_index && index <= end_index && wc->w_type == OCFS2_WRITE_DIRECT) { /* Direct write has no mapping page. */ wc->w_pages[i] = NULL; continue; } else { wc->w_pages[i] = find_or_create_page(mapping, index, GFP_NOFS); if (!wc->w_pages[i]) { ret = -ENOMEM; mlog_errno(ret); goto out; } } wait_for_stable_page(wc->w_pages[i]); if (index == target_index) wc->w_target_page = wc->w_pages[i]; } out: if (ret) wc->w_target_locked = false; return ret; } /* * Prepare a single cluster for write one cluster into the file. */ static int ocfs2_write_cluster(struct address_space *mapping, u32 *phys, unsigned int new, unsigned int clear_unwritten, unsigned int should_zero, struct ocfs2_alloc_context *data_ac, struct ocfs2_alloc_context *meta_ac, struct ocfs2_write_ctxt *wc, u32 cpos, loff_t user_pos, unsigned user_len) { int ret, i; u64 p_blkno; struct inode *inode = mapping->host; struct ocfs2_extent_tree et; int bpc = ocfs2_clusters_to_blocks(inode->i_sb, 1); if (new) { u32 tmp_pos; /* * This is safe to call with the page locks - it won't take * any additional semaphores or cluster locks. */ tmp_pos = cpos; ret = ocfs2_add_inode_data(OCFS2_SB(inode->i_sb), inode, &tmp_pos, 1, !clear_unwritten, wc->w_di_bh, wc->w_handle, data_ac, meta_ac, NULL); /* * This shouldn't happen because we must have already * calculated the correct meta data allocation required. The * internal tree allocation code should know how to increase * transaction credits itself. * * If need be, we could handle -EAGAIN for a * RESTART_TRANS here. */ mlog_bug_on_msg(ret == -EAGAIN, "Inode %llu: EAGAIN return during allocation.\n", (unsigned long long)OCFS2_I(inode)->ip_blkno); if (ret < 0) { mlog_errno(ret); goto out; } } else if (clear_unwritten) { ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), wc->w_di_bh); ret = ocfs2_mark_extent_written(inode, &et, wc->w_handle, cpos, 1, *phys, meta_ac, &wc->w_dealloc); if (ret < 0) { mlog_errno(ret); goto out; } } /* * The only reason this should fail is due to an inability to * find the extent added. */ ret = ocfs2_get_clusters(inode, cpos, phys, NULL, NULL); if (ret < 0) { mlog(ML_ERROR, "Get physical blkno failed for inode %llu, " "at logical cluster %u", (unsigned long long)OCFS2_I(inode)->ip_blkno, cpos); goto out; } BUG_ON(*phys == 0); p_blkno = ocfs2_clusters_to_blocks(inode->i_sb, *phys); if (!should_zero) p_blkno += (user_pos >> inode->i_sb->s_blocksize_bits) & (u64)(bpc - 1); for(i = 0; i < wc->w_num_pages; i++) { int tmpret; /* This is the direct io target page. 
*/ if (wc->w_pages[i] == NULL) { p_blkno++; continue; } tmpret = ocfs2_prepare_page_for_write(inode, &p_blkno, wc, wc->w_pages[i], cpos, user_pos, user_len, should_zero); if (tmpret) { mlog_errno(tmpret); if (ret == 0) ret = tmpret; } } /* * We only have cleanup to do in case of allocating write. */ if (ret && new) ocfs2_write_failure(inode, wc, user_pos, user_len); out: return ret; } static int ocfs2_write_cluster_by_desc(struct address_space *mapping, struct ocfs2_alloc_context *data_ac, struct ocfs2_alloc_context *meta_ac, struct ocfs2_write_ctxt *wc, loff_t pos, unsigned len) { int ret, i; loff_t cluster_off; unsigned int local_len = len; struct ocfs2_write_cluster_desc *desc; struct ocfs2_super *osb = OCFS2_SB(mapping->host->i_sb); for (i = 0; i < wc->w_clen; i++) { desc = &wc->w_desc[i]; /* * We have to make sure that the total write passed in * doesn't extend past a single cluster. */ local_len = len; cluster_off = pos & (osb->s_clustersize - 1); if ((cluster_off + local_len) > osb->s_clustersize) local_len = osb->s_clustersize - cluster_off; ret = ocfs2_write_cluster(mapping, &desc->c_phys, desc->c_new, desc->c_clear_unwritten, desc->c_needs_zero, data_ac, meta_ac, wc, desc->c_cpos, pos, local_len); if (ret) { mlog_errno(ret); goto out; } len -= local_len; pos += local_len; } ret = 0; out: return ret; } /* * ocfs2_write_end() wants to know which parts of the target page it * should complete the write on. It's easiest to compute them ahead of * time when a more complete view of the write is available. */ static void ocfs2_set_target_boundaries(struct ocfs2_super *osb, struct ocfs2_write_ctxt *wc, loff_t pos, unsigned len, int alloc) { struct ocfs2_write_cluster_desc *desc; wc->w_target_from = pos & (PAGE_SIZE - 1); wc->w_target_to = wc->w_target_from + len; if (alloc == 0) return; /* * Allocating write - we may have different boundaries based * on page size and cluster size. * * NOTE: We can no longer compute one value from the other as * the actual write length and user provided length may be * different. */ if (wc->w_large_pages) { /* * We only care about the 1st and last cluster within * our range and whether they should be zero'd or not. Either * value may be extended out to the start/end of a * newly allocated cluster. */ desc = &wc->w_desc[0]; if (desc->c_needs_zero) ocfs2_figure_cluster_boundaries(osb, desc->c_cpos, &wc->w_target_from, NULL); desc = &wc->w_desc[wc->w_clen - 1]; if (desc->c_needs_zero) ocfs2_figure_cluster_boundaries(osb, desc->c_cpos, NULL, &wc->w_target_to); } else { wc->w_target_from = 0; wc->w_target_to = PAGE_SIZE; } } /* * Check if this extent is marked UNWRITTEN by direct io. If so, we need not to * do the zero work. And should not to clear UNWRITTEN since it will be cleared * by the direct io procedure. * If this is a new extent that allocated by direct io, we should mark it in * the ip_unwritten_list. */ static int ocfs2_unwritten_check(struct inode *inode, struct ocfs2_write_ctxt *wc, struct ocfs2_write_cluster_desc *desc) { struct ocfs2_inode_info *oi = OCFS2_I(inode); struct ocfs2_unwritten_extent *ue = NULL, *new = NULL; int ret = 0; if (!desc->c_needs_zero) return 0; retry: spin_lock(&oi->ip_lock); /* Needs not to zero no metter buffer or direct. The one who is zero * the cluster is doing zero. And he will clear unwritten after all * cluster io finished. 
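 *
 * In other words: if the cluster already has an entry on
 * ip_unwritten_list, some other writer owns zeroing it and will clear
 * the UNWRITTEN bit once all of its cluster I/O has completed, so this
 * descriptor can skip both steps.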
*/ list_for_each_entry(ue, &oi->ip_unwritten_list, ue_ip_node) { if (desc->c_cpos == ue->ue_cpos) { BUG_ON(desc->c_new); desc->c_needs_zero = 0; desc->c_clear_unwritten = 0; goto unlock; } } if (wc->w_type != OCFS2_WRITE_DIRECT) goto unlock; if (new == NULL) { spin_unlock(&oi->ip_lock); new = kmalloc(sizeof(struct ocfs2_unwritten_extent), GFP_NOFS); if (new == NULL) { ret = -ENOMEM; goto out; } goto retry; } /* This direct write will doing zero. */ new->ue_cpos = desc->c_cpos; new->ue_phys = desc->c_phys; desc->c_clear_unwritten = 0; list_add_tail(&new->ue_ip_node, &oi->ip_unwritten_list); list_add_tail(&new->ue_node, &wc->w_unwritten_list); new = NULL; unlock: spin_unlock(&oi->ip_lock); out: if (new) kfree(new); return ret; } /* * Populate each single-cluster write descriptor in the write context * with information about the i/o to be done. * * Returns the number of clusters that will have to be allocated, as * well as a worst case estimate of the number of extent records that * would have to be created during a write to an unwritten region. */ static int ocfs2_populate_write_desc(struct inode *inode, struct ocfs2_write_ctxt *wc, unsigned int *clusters_to_alloc, unsigned int *extents_to_split) { int ret; struct ocfs2_write_cluster_desc *desc; unsigned int num_clusters = 0; unsigned int ext_flags = 0; u32 phys = 0; int i; *clusters_to_alloc = 0; *extents_to_split = 0; for (i = 0; i < wc->w_clen; i++) { desc = &wc->w_desc[i]; desc->c_cpos = wc->w_cpos + i; if (num_clusters == 0) { /* * Need to look up the next extent record. */ ret = ocfs2_get_clusters(inode, desc->c_cpos, &phys, &num_clusters, &ext_flags); if (ret) { mlog_errno(ret); goto out; } /* We should already CoW the refcountd extent. */ BUG_ON(ext_flags & OCFS2_EXT_REFCOUNTED); /* * Assume worst case - that we're writing in * the middle of the extent. * * We can assume that the write proceeds from * left to right, in which case the extent * insert code is smart enough to coalesce the * next splits into the previous records created. */ if (ext_flags & OCFS2_EXT_UNWRITTEN) *extents_to_split = *extents_to_split + 2; } else if (phys) { /* * Only increment phys if it doesn't describe * a hole. */ phys++; } /* * If w_first_new_cpos is < UINT_MAX, we have a non-sparse * file that got extended. w_first_new_cpos tells us * where the newly allocated clusters are so we can * zero them. */ if (desc->c_cpos >= wc->w_first_new_cpos) { BUG_ON(phys == 0); desc->c_needs_zero = 1; } desc->c_phys = phys; if (phys == 0) { desc->c_new = 1; desc->c_needs_zero = 1; desc->c_clear_unwritten = 1; *clusters_to_alloc = *clusters_to_alloc + 1; } if (ext_flags & OCFS2_EXT_UNWRITTEN) { desc->c_clear_unwritten = 1; desc->c_needs_zero = 1; } ret = ocfs2_unwritten_check(inode, wc, desc); if (ret) { mlog_errno(ret); goto out; } num_clusters--; } ret = 0; out: return ret; } static int ocfs2_write_begin_inline(struct address_space *mapping, struct inode *inode, struct ocfs2_write_ctxt *wc) { int ret; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct page *page; handle_t *handle; struct ocfs2_dinode *di = (struct ocfs2_dinode *)wc->w_di_bh->b_data; handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS); if (IS_ERR(handle)) { ret = PTR_ERR(handle); mlog_errno(ret); goto out; } page = find_or_create_page(mapping, 0, GFP_NOFS); if (!page) { ocfs2_commit_trans(osb, handle); ret = -ENOMEM; mlog_errno(ret); goto out; } /* * If we don't set w_num_pages then this page won't get unlocked * and freed on cleanup of the write context. 
*/ wc->w_pages[0] = wc->w_target_page = page; wc->w_num_pages = 1; ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), wc->w_di_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { ocfs2_commit_trans(osb, handle); mlog_errno(ret); goto out; } if (!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)) ocfs2_set_inode_data_inline(inode, di); if (!PageUptodate(page)) { ret = ocfs2_read_inline_data(inode, page, wc->w_di_bh); if (ret) { ocfs2_commit_trans(osb, handle); goto out; } } wc->w_handle = handle; out: return ret; } int ocfs2_size_fits_inline_data(struct buffer_head *di_bh, u64 new_size) { struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; if (new_size <= le16_to_cpu(di->id2.i_data.id_count)) return 1; return 0; } static int ocfs2_try_to_write_inline_data(struct address_space *mapping, struct inode *inode, loff_t pos, unsigned len, struct page *mmap_page, struct ocfs2_write_ctxt *wc) { int ret, written = 0; loff_t end = pos + len; struct ocfs2_inode_info *oi = OCFS2_I(inode); struct ocfs2_dinode *di = NULL; trace_ocfs2_try_to_write_inline_data((unsigned long long)oi->ip_blkno, len, (unsigned long long)pos, oi->ip_dyn_features); /* * Handle inodes which already have inline data 1st. */ if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) { if (mmap_page == NULL && ocfs2_size_fits_inline_data(wc->w_di_bh, end)) goto do_inline_write; /* * The write won't fit - we have to give this inode an * inline extent list now. */ ret = ocfs2_convert_inline_data_to_extents(inode, wc->w_di_bh); if (ret) mlog_errno(ret); goto out; } /* * Check whether the inode can accept inline data. */ if (oi->ip_clusters != 0 || i_size_read(inode) != 0) return 0; /* * Check whether the write can fit. */ di = (struct ocfs2_dinode *)wc->w_di_bh->b_data; if (mmap_page || end > ocfs2_max_inline_data_with_xattr(inode->i_sb, di)) return 0; do_inline_write: ret = ocfs2_write_begin_inline(mapping, inode, wc); if (ret) { mlog_errno(ret); goto out; } /* * This signals to the caller that the data can be written * inline. */ written = 1; out: return written ? written : ret; } /* * This function only does anything for file systems which can't * handle sparse files. * * What we want to do here is fill in any hole between the current end * of allocation and the end of our write. That way the rest of the * write path can treat it as an non-allocating write, which has no * special case code for sparse/nonsparse files. */ static int ocfs2_expand_nonsparse_inode(struct inode *inode, struct buffer_head *di_bh, loff_t pos, unsigned len, struct ocfs2_write_ctxt *wc) { int ret; loff_t newsize = pos + len; BUG_ON(ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb))); if (newsize <= i_size_read(inode)) return 0; ret = ocfs2_extend_no_holes(inode, di_bh, newsize, pos); if (ret) mlog_errno(ret); /* There is no wc if this is call from direct. 
*/ if (wc) wc->w_first_new_cpos = ocfs2_clusters_for_bytes(inode->i_sb, i_size_read(inode)); return ret; } static int ocfs2_zero_tail(struct inode *inode, struct buffer_head *di_bh, loff_t pos) { int ret = 0; BUG_ON(!ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb))); if (pos > i_size_read(inode)) ret = ocfs2_zero_extend(inode, di_bh, pos); return ret; } int ocfs2_write_begin_nolock(struct address_space *mapping, loff_t pos, unsigned len, ocfs2_write_type_t type, struct page **pagep, void **fsdata, struct buffer_head *di_bh, struct page *mmap_page) { int ret, cluster_of_pages, credits = OCFS2_INODE_UPDATE_CREDITS; unsigned int clusters_to_alloc, extents_to_split, clusters_need = 0; struct ocfs2_write_ctxt *wc; struct inode *inode = mapping->host; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct ocfs2_dinode *di; struct ocfs2_alloc_context *data_ac = NULL; struct ocfs2_alloc_context *meta_ac = NULL; handle_t *handle; struct ocfs2_extent_tree et; int try_free = 1, ret1; try_again: ret = ocfs2_alloc_write_ctxt(&wc, osb, pos, len, type, di_bh); if (ret) { mlog_errno(ret); return ret; } if (ocfs2_supports_inline_data(osb)) { ret = ocfs2_try_to_write_inline_data(mapping, inode, pos, len, mmap_page, wc); if (ret == 1) { ret = 0; goto success; } if (ret < 0) { mlog_errno(ret); goto out; } } /* Direct io change i_size late, should not zero tail here. */ if (type != OCFS2_WRITE_DIRECT) { if (ocfs2_sparse_alloc(osb)) ret = ocfs2_zero_tail(inode, di_bh, pos); else ret = ocfs2_expand_nonsparse_inode(inode, di_bh, pos, len, wc); if (ret) { mlog_errno(ret); goto out; } } ret = ocfs2_check_range_for_refcount(inode, pos, len); if (ret < 0) { mlog_errno(ret); goto out; } else if (ret == 1) { clusters_need = wc->w_clen; ret = ocfs2_refcount_cow(inode, di_bh, wc->w_cpos, wc->w_clen, UINT_MAX); if (ret) { mlog_errno(ret); goto out; } } ret = ocfs2_populate_write_desc(inode, wc, &clusters_to_alloc, &extents_to_split); if (ret) { mlog_errno(ret); goto out; } clusters_need += clusters_to_alloc; di = (struct ocfs2_dinode *)wc->w_di_bh->b_data; trace_ocfs2_write_begin_nolock( (unsigned long long)OCFS2_I(inode)->ip_blkno, (long long)i_size_read(inode), le32_to_cpu(di->i_clusters), pos, len, type, mmap_page, clusters_to_alloc, extents_to_split); /* * We set w_target_from, w_target_to here so that * ocfs2_write_end() knows which range in the target page to * write out. An allocation requires that we write the entire * cluster range. */ if (clusters_to_alloc || extents_to_split) { /* * XXX: We are stretching the limits of * ocfs2_lock_allocators(). It greatly over-estimates * the work to be done. */ ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), wc->w_di_bh); ret = ocfs2_lock_allocators(inode, &et, clusters_to_alloc, extents_to_split, &data_ac, &meta_ac); if (ret) { mlog_errno(ret); goto out; } if (data_ac) data_ac->ac_resv = &OCFS2_I(inode)->ip_la_data_resv; credits = ocfs2_calc_extend_credits(inode->i_sb, &di->id2.i_list); } else if (type == OCFS2_WRITE_DIRECT) /* direct write needs not to start trans if no extents alloc. */ goto success; /* * We have to zero sparse allocated clusters, unwritten extent clusters, * and non-sparse clusters we just extended. For non-sparse writes, * we know zeros will only be needed in the first and/or last cluster. 
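 *
 * That is why the check below only looks at the first and last write
 * descriptors: if either needs zeroing, cluster_of_pages is set and a
 * full cluster's worth of pages is grabbed, so the adjacent pages can
 * be zeroed and written out along with the target page.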
*/ if (wc->w_clen && (wc->w_desc[0].c_needs_zero || wc->w_desc[wc->w_clen - 1].c_needs_zero)) cluster_of_pages = 1; else cluster_of_pages = 0; ocfs2_set_target_boundaries(osb, wc, pos, len, cluster_of_pages); handle = ocfs2_start_trans(osb, credits); if (IS_ERR(handle)) { ret = PTR_ERR(handle); mlog_errno(ret); goto out; } wc->w_handle = handle; if (clusters_to_alloc) { ret = dquot_alloc_space_nodirty(inode, ocfs2_clusters_to_bytes(osb->sb, clusters_to_alloc)); if (ret) goto out_commit; } ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), wc->w_di_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out_quota; } /* * Fill our page array first. That way we've grabbed enough so * that we can zero and flush if we error after adding the * extent. */ ret = ocfs2_grab_pages_for_write(mapping, wc, wc->w_cpos, pos, len, cluster_of_pages, mmap_page); if (ret && ret != -EAGAIN) { mlog_errno(ret); goto out_quota; } /* * ocfs2_grab_pages_for_write() returns -EAGAIN if it could not lock * the target page. In this case, we exit with no error and no target * page. This will trigger the caller, page_mkwrite(), to re-try * the operation. */ if (ret == -EAGAIN) { BUG_ON(wc->w_target_page); ret = 0; goto out_quota; } ret = ocfs2_write_cluster_by_desc(mapping, data_ac, meta_ac, wc, pos, len); if (ret) { mlog_errno(ret); goto out_quota; } if (data_ac) ocfs2_free_alloc_context(data_ac); if (meta_ac) ocfs2_free_alloc_context(meta_ac); success: if (pagep) *pagep = wc->w_target_page; *fsdata = wc; return 0; out_quota: if (clusters_to_alloc) dquot_free_space(inode, ocfs2_clusters_to_bytes(osb->sb, clusters_to_alloc)); out_commit: ocfs2_commit_trans(osb, handle); out: /* * The mmapped page won't be unlocked in ocfs2_free_write_ctxt(), * even in case of error here like ENOSPC and ENOMEM. So, we need * to unlock the target page manually to prevent deadlocks when * retrying again on ENOSPC, or when returning non-VM_FAULT_LOCKED * to VM code. */ if (wc->w_target_locked) unlock_page(mmap_page); ocfs2_free_write_ctxt(inode, wc); if (data_ac) { ocfs2_free_alloc_context(data_ac); data_ac = NULL; } if (meta_ac) { ocfs2_free_alloc_context(meta_ac); meta_ac = NULL; } if (ret == -ENOSPC && try_free) { /* * Try to free some truncate log so that we can have enough * clusters to allocate. */ try_free = 0; ret1 = ocfs2_try_to_free_truncate_log(osb, clusters_need); if (ret1 == 1) goto try_again; if (ret1 < 0) mlog_errno(ret1); } return ret; } static int ocfs2_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata) { int ret; struct buffer_head *di_bh = NULL; struct inode *inode = mapping->host; ret = ocfs2_inode_lock(inode, &di_bh, 1); if (ret) { mlog_errno(ret); return ret; } /* * Take alloc sem here to prevent concurrent lookups. That way * the mapping, zeroing and tree manipulation within * ocfs2_write() will be safe against ->readpage(). This * should also serve to lock out allocation from a shared * writeable region. 
*/ down_write(&OCFS2_I(inode)->ip_alloc_sem); ret = ocfs2_write_begin_nolock(mapping, pos, len, OCFS2_WRITE_BUFFER, pagep, fsdata, di_bh, NULL); if (ret) { mlog_errno(ret); goto out_fail; } brelse(di_bh); return 0; out_fail: up_write(&OCFS2_I(inode)->ip_alloc_sem); brelse(di_bh); ocfs2_inode_unlock(inode, 1); return ret; } static void ocfs2_write_end_inline(struct inode *inode, loff_t pos, unsigned len, unsigned *copied, struct ocfs2_dinode *di, struct ocfs2_write_ctxt *wc) { void *kaddr; if (unlikely(*copied < len)) { if (!PageUptodate(wc->w_target_page)) { *copied = 0; return; } } kaddr = kmap_atomic(wc->w_target_page); memcpy(di->id2.i_data.id_data + pos, kaddr + pos, *copied); kunmap_atomic(kaddr); trace_ocfs2_write_end_inline( (unsigned long long)OCFS2_I(inode)->ip_blkno, (unsigned long long)pos, *copied, le16_to_cpu(di->id2.i_data.id_count), le16_to_cpu(di->i_dyn_features)); } int ocfs2_write_end_nolock(struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, void *fsdata) { int i, ret; unsigned from, to, start = pos & (PAGE_SIZE - 1); struct inode *inode = mapping->host; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct ocfs2_write_ctxt *wc = fsdata; struct ocfs2_dinode *di = (struct ocfs2_dinode *)wc->w_di_bh->b_data; handle_t *handle = wc->w_handle; struct page *tmppage; BUG_ON(!list_empty(&wc->w_unwritten_list)); if (handle) { ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), wc->w_di_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { copied = ret; mlog_errno(ret); goto out; } } if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) { ocfs2_write_end_inline(inode, pos, len, &copied, di, wc); goto out_write_size; } if (unlikely(copied < len) && wc->w_target_page) { if (!PageUptodate(wc->w_target_page)) copied = 0; ocfs2_zero_new_buffers(wc->w_target_page, start+copied, start+len); } if (wc->w_target_page) flush_dcache_page(wc->w_target_page); for(i = 0; i < wc->w_num_pages; i++) { tmppage = wc->w_pages[i]; /* This is the direct io target page. */ if (tmppage == NULL) continue; if (tmppage == wc->w_target_page) { from = wc->w_target_from; to = wc->w_target_to; BUG_ON(from > PAGE_SIZE || to > PAGE_SIZE || to < from); } else { /* * Pages adjacent to the target (if any) imply * a hole-filling write in which case we want * to flush their entire range. */ from = 0; to = PAGE_SIZE; } if (page_has_buffers(tmppage)) { if (handle && ocfs2_should_order_data(inode)) ocfs2_jbd2_file_inode(handle, inode); block_commit_write(tmppage, from, to); } } out_write_size: /* Direct io do not update i_size here. */ if (wc->w_type != OCFS2_WRITE_DIRECT) { pos += copied; if (pos > i_size_read(inode)) { i_size_write(inode, pos); mark_inode_dirty(inode); } inode->i_blocks = ocfs2_inode_sector_count(inode); di->i_size = cpu_to_le64((u64)i_size_read(inode)); inode->i_mtime = inode->i_ctime = current_time(inode); di->i_mtime = di->i_ctime = cpu_to_le64(inode->i_mtime.tv_sec); di->i_mtime_nsec = di->i_ctime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec); ocfs2_update_inode_fsync_trans(handle, inode, 1); } if (handle) ocfs2_journal_dirty(handle, wc->w_di_bh); out: /* unlock pages before dealloc since it needs acquiring j_trans_barrier * lock, or it will cause a deadlock since journal commit threads holds * this lock and will ask for the page lock when flushing the data. * put it here to preserve the unlock order. 
*/ ocfs2_unlock_pages(wc); if (handle) ocfs2_commit_trans(osb, handle); ocfs2_run_deallocs(osb, &wc->w_dealloc); brelse(wc->w_di_bh); kfree(wc); return copied; } static int ocfs2_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) { int ret; struct inode *inode = mapping->host; ret = ocfs2_write_end_nolock(mapping, pos, len, copied, fsdata); up_write(&OCFS2_I(inode)->ip_alloc_sem); ocfs2_inode_unlock(inode, 1); return ret; } struct ocfs2_dio_write_ctxt { struct list_head dw_zero_list; unsigned dw_zero_count; int dw_orphaned; pid_t dw_writer_pid; }; static struct ocfs2_dio_write_ctxt * ocfs2_dio_alloc_write_ctx(struct buffer_head *bh, int *alloc) { struct ocfs2_dio_write_ctxt *dwc = NULL; if (bh->b_private) return bh->b_private; dwc = kmalloc(sizeof(struct ocfs2_dio_write_ctxt), GFP_NOFS); if (dwc == NULL) return NULL; INIT_LIST_HEAD(&dwc->dw_zero_list); dwc->dw_zero_count = 0; dwc->dw_orphaned = 0; dwc->dw_writer_pid = task_pid_nr(current); bh->b_private = dwc; *alloc = 1; return dwc; } static void ocfs2_dio_free_write_ctx(struct inode *inode, struct ocfs2_dio_write_ctxt *dwc) { ocfs2_free_unwritten_list(inode, &dwc->dw_zero_list); kfree(dwc); } /* * TODO: Make this into a generic get_blocks function. * * From do_direct_io in direct-io.c: * "So what we do is to permit the ->get_blocks function to populate * bh.b_size with the size of IO which is permitted at this offset and * this i_blkbits." * * This function is called directly from get_more_blocks in direct-io.c. * * called like this: dio->get_blocks(dio->inode, fs_startblk, * fs_count, map_bh, dio->rw == WRITE); */ static int ocfs2_dio_get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create) { struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct ocfs2_inode_info *oi = OCFS2_I(inode); struct ocfs2_write_ctxt *wc; struct ocfs2_write_cluster_desc *desc = NULL; struct ocfs2_dio_write_ctxt *dwc = NULL; struct buffer_head *di_bh = NULL; u64 p_blkno; loff_t pos = iblock << inode->i_sb->s_blocksize_bits; unsigned len, total_len = bh_result->b_size; int ret = 0, first_get_block = 0; len = osb->s_clustersize - (pos & (osb->s_clustersize - 1)); len = min(total_len, len); mlog(0, "get block of %lu at %llu:%u req %u\n", inode->i_ino, pos, len, total_len); /* * Because we need to change file size in ocfs2_dio_end_io_write(), or * we may need to add it to orphan dir. So can not fall to fast path * while file size will be changed. */ if (pos + total_len <= i_size_read(inode)) { down_read(&oi->ip_alloc_sem); /* This is the fast path for re-write. */ ret = ocfs2_get_block(inode, iblock, bh_result, create); up_read(&oi->ip_alloc_sem); if (buffer_mapped(bh_result) && !buffer_new(bh_result) && ret == 0) goto out; /* Clear state set by ocfs2_get_block. */ bh_result->b_state = 0; } dwc = ocfs2_dio_alloc_write_ctx(bh_result, &first_get_block); if (unlikely(dwc == NULL)) { ret = -ENOMEM; mlog_errno(ret); goto out; } if (ocfs2_clusters_for_bytes(inode->i_sb, pos + total_len) > ocfs2_clusters_for_bytes(inode->i_sb, i_size_read(inode)) && !dwc->dw_orphaned) { /* * when we are going to alloc extents beyond file size, add the * inode to orphan dir, so we can recall those spaces when * system crashed during write. 
*/ ret = ocfs2_add_inode_to_orphan(osb, inode); if (ret < 0) { mlog_errno(ret); goto out; } dwc->dw_orphaned = 1; } ret = ocfs2_inode_lock(inode, &di_bh, 1); if (ret) { mlog_errno(ret); goto out; } down_write(&oi->ip_alloc_sem); if (first_get_block) { if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb))) ret = ocfs2_zero_tail(inode, di_bh, pos); else ret = ocfs2_expand_nonsparse_inode(inode, di_bh, pos, total_len, NULL); if (ret < 0) { mlog_errno(ret); goto unlock; } } ret = ocfs2_write_begin_nolock(inode->i_mapping, pos, len, OCFS2_WRITE_DIRECT, NULL, (void **)&wc, di_bh, NULL); if (ret) { mlog_errno(ret); goto unlock; } desc = &wc->w_desc[0]; p_blkno = ocfs2_clusters_to_blocks(inode->i_sb, desc->c_phys); BUG_ON(p_blkno == 0); p_blkno += iblock & (u64)(ocfs2_clusters_to_blocks(inode->i_sb, 1) - 1); map_bh(bh_result, inode->i_sb, p_blkno); bh_result->b_size = len; if (desc->c_needs_zero) set_buffer_new(bh_result); /* May sleep in end_io. It should not happen in a irq context. So defer * it to dio work queue. */ set_buffer_defer_completion(bh_result); if (!list_empty(&wc->w_unwritten_list)) { struct ocfs2_unwritten_extent *ue = NULL; ue = list_first_entry(&wc->w_unwritten_list, struct ocfs2_unwritten_extent, ue_node); BUG_ON(ue->ue_cpos != desc->c_cpos); /* The physical address may be 0, fill it. */ ue->ue_phys = desc->c_phys; list_splice_tail_init(&wc->w_unwritten_list, &dwc->dw_zero_list); dwc->dw_zero_count++; } ret = ocfs2_write_end_nolock(inode->i_mapping, pos, len, len, wc); BUG_ON(ret != len); ret = 0; unlock: up_write(&oi->ip_alloc_sem); ocfs2_inode_unlock(inode, 1); brelse(di_bh); out: if (ret < 0) ret = -EIO; return ret; } static int ocfs2_dio_end_io_write(struct inode *inode, struct ocfs2_dio_write_ctxt *dwc, loff_t offset, ssize_t bytes) { struct ocfs2_cached_dealloc_ctxt dealloc; struct ocfs2_extent_tree et; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct ocfs2_inode_info *oi = OCFS2_I(inode); struct ocfs2_unwritten_extent *ue = NULL; struct buffer_head *di_bh = NULL; struct ocfs2_dinode *di; struct ocfs2_alloc_context *data_ac = NULL; struct ocfs2_alloc_context *meta_ac = NULL; handle_t *handle = NULL; loff_t end = offset + bytes; int ret = 0, credits = 0, locked = 0; ocfs2_init_dealloc_ctxt(&dealloc); /* We do clear unwritten, delete orphan, change i_size here. If neither * of these happen, we can skip all this. */ if (list_empty(&dwc->dw_zero_list) && end <= i_size_read(inode) && !dwc->dw_orphaned) goto out; /* ocfs2_file_write_iter will get i_mutex, so we need not lock if we * are in that context. */ if (dwc->dw_writer_pid != task_pid_nr(current)) { inode_lock(inode); locked = 1; } ret = ocfs2_inode_lock(inode, &di_bh, 1); if (ret < 0) { mlog_errno(ret); goto out; } down_write(&oi->ip_alloc_sem); /* Delete orphan before acquire i_mutex. */ if (dwc->dw_orphaned) { BUG_ON(dwc->dw_writer_pid != task_pid_nr(current)); end = end > i_size_read(inode) ? 
end : 0; ret = ocfs2_del_inode_from_orphan(osb, inode, di_bh, !!end, end); if (ret < 0) mlog_errno(ret); } di = (struct ocfs2_dinode *)di_bh->b_data; ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), di_bh); ret = ocfs2_lock_allocators(inode, &et, 0, dwc->dw_zero_count*2, &data_ac, &meta_ac); if (ret) { mlog_errno(ret); goto unlock; } credits = ocfs2_calc_extend_credits(inode->i_sb, &di->id2.i_list); handle = ocfs2_start_trans(osb, credits); if (IS_ERR(handle)) { ret = PTR_ERR(handle); mlog_errno(ret); goto unlock; } ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto commit; } list_for_each_entry(ue, &dwc->dw_zero_list, ue_node) { ret = ocfs2_mark_extent_written(inode, &et, handle, ue->ue_cpos, 1, ue->ue_phys, meta_ac, &dealloc); if (ret < 0) { mlog_errno(ret); break; } } if (end > i_size_read(inode)) { ret = ocfs2_set_inode_size(handle, inode, di_bh, end); if (ret < 0) mlog_errno(ret); } commit: ocfs2_commit_trans(osb, handle); unlock: up_write(&oi->ip_alloc_sem); ocfs2_inode_unlock(inode, 1); brelse(di_bh); out: if (data_ac) ocfs2_free_alloc_context(data_ac); if (meta_ac) ocfs2_free_alloc_context(meta_ac); ocfs2_run_deallocs(osb, &dealloc); if (locked) inode_unlock(inode); ocfs2_dio_free_write_ctx(inode, dwc); return ret; } /* * ocfs2_dio_end_io is called by the dio core when a dio is finished. We're * particularly interested in the aio/dio case. We use the rw_lock DLM lock * to protect io on one node from truncation on another. */ static int ocfs2_dio_end_io(struct kiocb *iocb, loff_t offset, ssize_t bytes, void *private) { struct inode *inode = file_inode(iocb->ki_filp); int level; int ret = 0; /* this io's submitter should not have unlocked this before we could */ BUG_ON(!ocfs2_iocb_is_rw_locked(iocb)); if (bytes > 0 && private) ret = ocfs2_dio_end_io_write(inode, private, offset, bytes); ocfs2_iocb_clear_rw_locked(iocb); level = ocfs2_iocb_rw_locked_level(iocb); ocfs2_rw_unlock(inode, level); return ret; } static ssize_t ocfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter) { struct file *file = iocb->ki_filp; struct inode *inode = file->f_mapping->host; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); get_block_t *get_block; /* * Fallback to buffered I/O if we see an inode without * extents. */ if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) return 0; /* Fallback to buffered I/O if we do not support append dio. */ if (iocb->ki_pos + iter->count > i_size_read(inode) && !ocfs2_supports_append_dio(osb)) return 0; if (iov_iter_rw(iter) == READ) get_block = ocfs2_get_block; else get_block = ocfs2_dio_get_block; return __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter, get_block, ocfs2_dio_end_io, NULL, 0); } const struct address_space_operations ocfs2_aops = { .readpage = ocfs2_readpage, .readpages = ocfs2_readpages, .writepage = ocfs2_writepage, .write_begin = ocfs2_write_begin, .write_end = ocfs2_write_end, .bmap = ocfs2_bmap, .direct_IO = ocfs2_direct_IO, .invalidatepage = block_invalidatepage, .releasepage = ocfs2_releasepage, .migratepage = buffer_migrate_page, .is_partially_uptodate = block_is_partially_uptodate, .error_remove_page = generic_error_remove_page, };
./CrossVul/dataset_final_sorted/CWE-362/c/bad_3021_0
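/*
 * A minimal, hedged user-space sketch of what drives the ocfs2 direct-I/O
 * path shown above (ocfs2_direct_IO / ocfs2_dio_get_block /
 * ocfs2_dio_end_io_write): an O_DIRECT write with a block-aligned buffer.
 * The file name "testfile" and the 4096-byte alignment are illustrative
 * assumptions, not values taken from the ocfs2 sources; on a real system
 * the file would live on an ocfs2 (or any disk-backed) mount.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const size_t len = 4096;        /* assumed logical block size */
	void *buf = NULL;
	int fd;
	ssize_t n;

	/* O_DIRECT requires the user buffer to be suitably aligned. */
	if (posix_memalign(&buf, 4096, len)) {
		perror("posix_memalign");
		return 1;
	}
	memset(buf, 'A', len);

	fd = open("testfile", O_WRONLY | O_CREAT | O_DIRECT, 0644);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* This write bypasses the page cache and reaches ->direct_IO. */
	n = pwrite(fd, buf, len, 0);
	if (n < 0)
		perror("pwrite");

	close(fd);
	free(buf);
	return n == (ssize_t)len ? 0 : 1;
}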
crossvul-cpp_data_good_3726_3
/* * DCCP over IPv6 * Linux INET6 implementation * * Based on net/dccp6/ipv6.c * * Arnaldo Carvalho de Melo <acme@ghostprotocols.net> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/module.h> #include <linux/random.h> #include <linux/slab.h> #include <linux/xfrm.h> #include <net/addrconf.h> #include <net/inet_common.h> #include <net/inet_hashtables.h> #include <net/inet_sock.h> #include <net/inet6_connection_sock.h> #include <net/inet6_hashtables.h> #include <net/ip6_route.h> #include <net/ipv6.h> #include <net/protocol.h> #include <net/transp_v6.h> #include <net/ip6_checksum.h> #include <net/xfrm.h> #include "dccp.h" #include "ipv6.h" #include "feat.h" /* The per-net dccp.v6_ctl_sk is used for sending RSTs and ACKs */ static const struct inet_connection_sock_af_ops dccp_ipv6_mapped; static const struct inet_connection_sock_af_ops dccp_ipv6_af_ops; static void dccp_v6_hash(struct sock *sk) { if (sk->sk_state != DCCP_CLOSED) { if (inet_csk(sk)->icsk_af_ops == &dccp_ipv6_mapped) { inet_hash(sk); return; } local_bh_disable(); __inet6_hash(sk, NULL); local_bh_enable(); } } /* add pseudo-header to DCCP checksum stored in skb->csum */ static inline __sum16 dccp_v6_csum_finish(struct sk_buff *skb, const struct in6_addr *saddr, const struct in6_addr *daddr) { return csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_DCCP, skb->csum); } static inline void dccp_v6_send_check(struct sock *sk, struct sk_buff *skb) { struct ipv6_pinfo *np = inet6_sk(sk); struct dccp_hdr *dh = dccp_hdr(skb); dccp_csum_outgoing(skb); dh->dccph_checksum = dccp_v6_csum_finish(skb, &np->saddr, &np->daddr); } static inline __u32 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr, __be16 sport, __be16 dport ) { return secure_tcpv6_sequence_number(saddr, daddr, sport, dport); } static inline __u32 dccp_v6_init_sequence(struct sk_buff *skb) { return secure_dccpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32, ipv6_hdr(skb)->saddr.s6_addr32, dccp_hdr(skb)->dccph_dport, dccp_hdr(skb)->dccph_sport ); } static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, u8 type, u8 code, int offset, __be32 info) { const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data; const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + offset); struct dccp_sock *dp; struct ipv6_pinfo *np; struct sock *sk; int err; __u64 seq; struct net *net = dev_net(skb->dev); if (skb->len < offset + sizeof(*dh) || skb->len < offset + __dccp_basic_hdr_len(dh)) { ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev), ICMP6_MIB_INERRORS); return; } sk = inet6_lookup(net, &dccp_hashinfo, &hdr->daddr, dh->dccph_dport, &hdr->saddr, dh->dccph_sport, inet6_iif(skb)); if (sk == NULL) { ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev), ICMP6_MIB_INERRORS); return; } if (sk->sk_state == DCCP_TIME_WAIT) { inet_twsk_put(inet_twsk(sk)); return; } bh_lock_sock(sk); if (sock_owned_by_user(sk)) NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS); if (sk->sk_state == DCCP_CLOSED) goto out; dp = dccp_sk(sk); seq = dccp_hdr_seq(dh); if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_LISTEN) && !between48(seq, dp->dccps_awl, dp->dccps_awh)) { NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS); goto out; } np = inet6_sk(sk); if (type == ICMPV6_PKT_TOOBIG) { struct dst_entry *dst = NULL; if (sock_owned_by_user(sk)) goto out; if ((1 << sk->sk_state) & 
(DCCPF_LISTEN | DCCPF_CLOSED)) goto out; /* icmp should have updated the destination cache entry */ dst = __sk_dst_check(sk, np->dst_cookie); if (dst == NULL) { struct inet_sock *inet = inet_sk(sk); struct flowi6 fl6; /* BUGGG_FUTURE: Again, it is not clear how to handle rthdr case. Ignore this complexity for now. */ memset(&fl6, 0, sizeof(fl6)); fl6.flowi6_proto = IPPROTO_DCCP; ipv6_addr_copy(&fl6.daddr, &np->daddr); ipv6_addr_copy(&fl6.saddr, &np->saddr); fl6.flowi6_oif = sk->sk_bound_dev_if; fl6.fl6_dport = inet->inet_dport; fl6.fl6_sport = inet->inet_sport; security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); dst = ip6_dst_lookup_flow(sk, &fl6, NULL, false); if (IS_ERR(dst)) { sk->sk_err_soft = -PTR_ERR(dst); goto out; } } else dst_hold(dst); if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) { dccp_sync_mss(sk, dst_mtu(dst)); } /* else let the usual retransmit timer handle it */ dst_release(dst); goto out; } icmpv6_err_convert(type, code, &err); /* Might be for an request_sock */ switch (sk->sk_state) { struct request_sock *req, **prev; case DCCP_LISTEN: if (sock_owned_by_user(sk)) goto out; req = inet6_csk_search_req(sk, &prev, dh->dccph_dport, &hdr->daddr, &hdr->saddr, inet6_iif(skb)); if (req == NULL) goto out; /* * ICMPs are not backlogged, hence we cannot get an established * socket here. */ WARN_ON(req->sk != NULL); if (seq != dccp_rsk(req)->dreq_iss) { NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS); goto out; } inet_csk_reqsk_queue_drop(sk, req, prev); goto out; case DCCP_REQUESTING: case DCCP_RESPOND: /* Cannot happen. It can, it SYNs are crossed. --ANK */ if (!sock_owned_by_user(sk)) { DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS); sk->sk_err = err; /* * Wake people up to see the error * (see connect in sock.c) */ sk->sk_error_report(sk); dccp_done(sk); } else sk->sk_err_soft = err; goto out; } if (!sock_owned_by_user(sk) && np->recverr) { sk->sk_err = err; sk->sk_error_report(sk); } else sk->sk_err_soft = err; out: bh_unlock_sock(sk); sock_put(sk); } static int dccp_v6_send_response(struct sock *sk, struct request_sock *req, struct request_values *rv_unused) { struct inet6_request_sock *ireq6 = inet6_rsk(req); struct ipv6_pinfo *np = inet6_sk(sk); struct sk_buff *skb; struct ipv6_txoptions *opt = NULL; struct in6_addr *final_p, final; struct flowi6 fl6; int err = -1; struct dst_entry *dst; memset(&fl6, 0, sizeof(fl6)); fl6.flowi6_proto = IPPROTO_DCCP; ipv6_addr_copy(&fl6.daddr, &ireq6->rmt_addr); ipv6_addr_copy(&fl6.saddr, &ireq6->loc_addr); fl6.flowlabel = 0; fl6.flowi6_oif = ireq6->iif; fl6.fl6_dport = inet_rsk(req)->rmt_port; fl6.fl6_sport = inet_rsk(req)->loc_port; security_req_classify_flow(req, flowi6_to_flowi(&fl6)); opt = np->opt; final_p = fl6_update_dst(&fl6, opt, &final); dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false); if (IS_ERR(dst)) { err = PTR_ERR(dst); dst = NULL; goto done; } skb = dccp_make_response(sk, dst, req); if (skb != NULL) { struct dccp_hdr *dh = dccp_hdr(skb); dh->dccph_checksum = dccp_v6_csum_finish(skb, &ireq6->loc_addr, &ireq6->rmt_addr); ipv6_addr_copy(&fl6.daddr, &ireq6->rmt_addr); err = ip6_xmit(sk, skb, &fl6, opt); err = net_xmit_eval(err); } done: if (opt != NULL && opt != np->opt) sock_kfree_s(sk, opt, opt->tot_len); dst_release(dst); return err; } static void dccp_v6_reqsk_destructor(struct request_sock *req) { dccp_feat_list_purge(&dccp_rsk(req)->dreq_featneg); if (inet6_rsk(req)->pktopts != NULL) kfree_skb(inet6_rsk(req)->pktopts); } static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb) { const struct ipv6hdr 
*rxip6h; struct sk_buff *skb; struct flowi6 fl6; struct net *net = dev_net(skb_dst(rxskb)->dev); struct sock *ctl_sk = net->dccp.v6_ctl_sk; struct dst_entry *dst; if (dccp_hdr(rxskb)->dccph_type == DCCP_PKT_RESET) return; if (!ipv6_unicast_destination(rxskb)) return; skb = dccp_ctl_make_reset(ctl_sk, rxskb); if (skb == NULL) return; rxip6h = ipv6_hdr(rxskb); dccp_hdr(skb)->dccph_checksum = dccp_v6_csum_finish(skb, &rxip6h->saddr, &rxip6h->daddr); memset(&fl6, 0, sizeof(fl6)); ipv6_addr_copy(&fl6.daddr, &rxip6h->saddr); ipv6_addr_copy(&fl6.saddr, &rxip6h->daddr); fl6.flowi6_proto = IPPROTO_DCCP; fl6.flowi6_oif = inet6_iif(rxskb); fl6.fl6_dport = dccp_hdr(skb)->dccph_dport; fl6.fl6_sport = dccp_hdr(skb)->dccph_sport; security_skb_classify_flow(rxskb, flowi6_to_flowi(&fl6)); /* sk = NULL, but it is safe for now. RST socket required. */ dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL, false); if (!IS_ERR(dst)) { skb_dst_set(skb, dst); ip6_xmit(ctl_sk, skb, &fl6, NULL); DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS); DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS); return; } kfree_skb(skb); } static struct request_sock_ops dccp6_request_sock_ops = { .family = AF_INET6, .obj_size = sizeof(struct dccp6_request_sock), .rtx_syn_ack = dccp_v6_send_response, .send_ack = dccp_reqsk_send_ack, .destructor = dccp_v6_reqsk_destructor, .send_reset = dccp_v6_ctl_send_reset, }; static struct sock *dccp_v6_hnd_req(struct sock *sk,struct sk_buff *skb) { const struct dccp_hdr *dh = dccp_hdr(skb); const struct ipv6hdr *iph = ipv6_hdr(skb); struct sock *nsk; struct request_sock **prev; /* Find possible connection requests. */ struct request_sock *req = inet6_csk_search_req(sk, &prev, dh->dccph_sport, &iph->saddr, &iph->daddr, inet6_iif(skb)); if (req != NULL) return dccp_check_req(sk, skb, req, prev); nsk = __inet6_lookup_established(sock_net(sk), &dccp_hashinfo, &iph->saddr, dh->dccph_sport, &iph->daddr, ntohs(dh->dccph_dport), inet6_iif(skb)); if (nsk != NULL) { if (nsk->sk_state != DCCP_TIME_WAIT) { bh_lock_sock(nsk); return nsk; } inet_twsk_put(inet_twsk(nsk)); return NULL; } return sk; } static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb) { struct request_sock *req; struct dccp_request_sock *dreq; struct inet6_request_sock *ireq6; struct ipv6_pinfo *np = inet6_sk(sk); const __be32 service = dccp_hdr_request(skb)->dccph_req_service; struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb); if (skb->protocol == htons(ETH_P_IP)) return dccp_v4_conn_request(sk, skb); if (!ipv6_unicast_destination(skb)) return 0; /* discard, don't send a reset here */ if (dccp_bad_service_code(sk, service)) { dcb->dccpd_reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE; goto drop; } /* * There are no SYN attacks on IPv6, yet... 
*/ dcb->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY; if (inet_csk_reqsk_queue_is_full(sk)) goto drop; if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) goto drop; req = inet6_reqsk_alloc(&dccp6_request_sock_ops); if (req == NULL) goto drop; if (dccp_reqsk_init(req, dccp_sk(sk), skb)) goto drop_and_free; dreq = dccp_rsk(req); if (dccp_parse_options(sk, dreq, skb)) goto drop_and_free; if (security_inet_conn_request(sk, skb, req)) goto drop_and_free; ireq6 = inet6_rsk(req); ipv6_addr_copy(&ireq6->rmt_addr, &ipv6_hdr(skb)->saddr); ipv6_addr_copy(&ireq6->loc_addr, &ipv6_hdr(skb)->daddr); if (ipv6_opt_accepted(sk, skb) || np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) { atomic_inc(&skb->users); ireq6->pktopts = skb; } ireq6->iif = sk->sk_bound_dev_if; /* So that link locals have meaning */ if (!sk->sk_bound_dev_if && ipv6_addr_type(&ireq6->rmt_addr) & IPV6_ADDR_LINKLOCAL) ireq6->iif = inet6_iif(skb); /* * Step 3: Process LISTEN state * * Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie * * In fact we defer setting S.GSR, S.SWL, S.SWH to * dccp_create_openreq_child. */ dreq->dreq_isr = dcb->dccpd_seq; dreq->dreq_iss = dccp_v6_init_sequence(skb); dreq->dreq_service = service; if (dccp_v6_send_response(sk, req, NULL)) goto drop_and_free; inet6_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT); return 0; drop_and_free: reqsk_free(req); drop: DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS); return -1; } static struct sock *dccp_v6_request_recv_sock(struct sock *sk, struct sk_buff *skb, struct request_sock *req, struct dst_entry *dst) { struct inet6_request_sock *ireq6 = inet6_rsk(req); struct ipv6_pinfo *newnp, *np = inet6_sk(sk); struct inet_sock *newinet; struct dccp6_sock *newdp6; struct sock *newsk; struct ipv6_txoptions *opt; if (skb->protocol == htons(ETH_P_IP)) { /* * v6 mapped */ newsk = dccp_v4_request_recv_sock(sk, skb, req, dst); if (newsk == NULL) return NULL; newdp6 = (struct dccp6_sock *)newsk; newinet = inet_sk(newsk); newinet->pinet6 = &newdp6->inet6; newnp = inet6_sk(newsk); memcpy(newnp, np, sizeof(struct ipv6_pinfo)); ipv6_addr_set_v4mapped(newinet->inet_daddr, &newnp->daddr); ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr); ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr); inet_csk(newsk)->icsk_af_ops = &dccp_ipv6_mapped; newsk->sk_backlog_rcv = dccp_v4_do_rcv; newnp->pktoptions = NULL; newnp->opt = NULL; newnp->mcast_oif = inet6_iif(skb); newnp->mcast_hops = ipv6_hdr(skb)->hop_limit; /* * No need to charge this sock to the relevant IPv6 refcnt debug socks count * here, dccp_create_openreq_child now does this for us, see the comment in * that function for the gory details. -acme */ /* It is tricky place. Until this moment IPv4 tcp worked with IPv6 icsk.icsk_af_ops. Sync it now. 
*/ dccp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie); return newsk; } opt = np->opt; if (sk_acceptq_is_full(sk)) goto out_overflow; if (dst == NULL) { struct in6_addr *final_p, final; struct flowi6 fl6; memset(&fl6, 0, sizeof(fl6)); fl6.flowi6_proto = IPPROTO_DCCP; ipv6_addr_copy(&fl6.daddr, &ireq6->rmt_addr); final_p = fl6_update_dst(&fl6, opt, &final); ipv6_addr_copy(&fl6.saddr, &ireq6->loc_addr); fl6.flowi6_oif = sk->sk_bound_dev_if; fl6.fl6_dport = inet_rsk(req)->rmt_port; fl6.fl6_sport = inet_rsk(req)->loc_port; security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false); if (IS_ERR(dst)) goto out; } newsk = dccp_create_openreq_child(sk, req, skb); if (newsk == NULL) goto out_nonewsk; /* * No need to charge this sock to the relevant IPv6 refcnt debug socks * count here, dccp_create_openreq_child now does this for us, see the * comment in that function for the gory details. -acme */ __ip6_dst_store(newsk, dst, NULL, NULL); newsk->sk_route_caps = dst->dev->features & ~(NETIF_F_IP_CSUM | NETIF_F_TSO); newdp6 = (struct dccp6_sock *)newsk; newinet = inet_sk(newsk); newinet->pinet6 = &newdp6->inet6; newnp = inet6_sk(newsk); memcpy(newnp, np, sizeof(struct ipv6_pinfo)); ipv6_addr_copy(&newnp->daddr, &ireq6->rmt_addr); ipv6_addr_copy(&newnp->saddr, &ireq6->loc_addr); ipv6_addr_copy(&newnp->rcv_saddr, &ireq6->loc_addr); newsk->sk_bound_dev_if = ireq6->iif; /* Now IPv6 options... First: no IPv4 options. */ newinet->inet_opt = NULL; /* Clone RX bits */ newnp->rxopt.all = np->rxopt.all; /* Clone pktoptions received with SYN */ newnp->pktoptions = NULL; if (ireq6->pktopts != NULL) { newnp->pktoptions = skb_clone(ireq6->pktopts, GFP_ATOMIC); kfree_skb(ireq6->pktopts); ireq6->pktopts = NULL; if (newnp->pktoptions) skb_set_owner_r(newnp->pktoptions, newsk); } newnp->opt = NULL; newnp->mcast_oif = inet6_iif(skb); newnp->mcast_hops = ipv6_hdr(skb)->hop_limit; /* * Clone native IPv6 options from listening socket (if any) * * Yes, keeping reference count would be much more clever, but we make * one more one thing there: reattach optmem to newsk. */ if (opt != NULL) { newnp->opt = ipv6_dup_options(newsk, opt); if (opt != np->opt) sock_kfree_s(sk, opt, opt->tot_len); } inet_csk(newsk)->icsk_ext_hdr_len = 0; if (newnp->opt != NULL) inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen + newnp->opt->opt_flen); dccp_sync_mss(newsk, dst_mtu(dst)); newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6; newinet->inet_rcv_saddr = LOOPBACK4_IPV6; if (__inet_inherit_port(sk, newsk) < 0) { sock_put(newsk); goto out; } __inet6_hash(newsk, NULL); return newsk; out_overflow: NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS); out_nonewsk: dst_release(dst); out: NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); if (opt != NULL && opt != np->opt) sock_kfree_s(sk, opt, opt->tot_len); return NULL; } /* The socket must have it's spinlock held when we get * here. * * We have a potential double-lock case here, so even when * doing backlog processing we use the BH locking scheme. * This is because we cannot sleep with the original spinlock * held. */ static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) { struct ipv6_pinfo *np = inet6_sk(sk); struct sk_buff *opt_skb = NULL; /* Imagine: socket is IPv6. IPv4 packet arrives, goes to IPv4 receive handler and backlogged. From backlog it always goes here. Kerboom... 
Fortunately, dccp_rcv_established and rcv_established handle them correctly, but it is not case with dccp_v6_hnd_req and dccp_v6_ctl_send_reset(). --ANK */ if (skb->protocol == htons(ETH_P_IP)) return dccp_v4_do_rcv(sk, skb); if (sk_filter(sk, skb)) goto discard; /* * socket locking is here for SMP purposes as backlog rcv is currently * called with bh processing disabled. */ /* Do Stevens' IPV6_PKTOPTIONS. Yes, guys, it is the only place in our code, where we may make it not affecting IPv4. The rest of code is protocol independent, and I do not like idea to uglify IPv4. Actually, all the idea behind IPV6_PKTOPTIONS looks not very well thought. For now we latch options, received in the last packet, enqueued by tcp. Feel free to propose better solution. --ANK (980728) */ if (np->rxopt.all) /* * FIXME: Add handling of IPV6_PKTOPTIONS skb. See the comments below * (wrt ipv6_pktopions) and net/ipv6/tcp_ipv6.c for an example. */ opt_skb = skb_clone(skb, GFP_ATOMIC); if (sk->sk_state == DCCP_OPEN) { /* Fast path */ if (dccp_rcv_established(sk, skb, dccp_hdr(skb), skb->len)) goto reset; if (opt_skb) { /* XXX This is where we would goto ipv6_pktoptions. */ __kfree_skb(opt_skb); } return 0; } /* * Step 3: Process LISTEN state * If S.state == LISTEN, * If P.type == Request or P contains a valid Init Cookie option, * (* Must scan the packet's options to check for Init * Cookies. Only Init Cookies are processed here, * however; other options are processed in Step 8. This * scan need only be performed if the endpoint uses Init * Cookies *) * (* Generate a new socket and switch to that socket *) * Set S := new socket for this port pair * S.state = RESPOND * Choose S.ISS (initial seqno) or set from Init Cookies * Initialize S.GAR := S.ISS * Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookies * Continue with S.state == RESPOND * (* A Response packet will be generated in Step 11 *) * Otherwise, * Generate Reset(No Connection) unless P.type == Reset * Drop packet and return * * NOTE: the check for the packet types is done in * dccp_rcv_state_process */ if (sk->sk_state == DCCP_LISTEN) { struct sock *nsk = dccp_v6_hnd_req(sk, skb); if (nsk == NULL) goto discard; /* * Queue it on the new socket if the new socket is active, * otherwise we just shortcircuit this and continue with * the new socket.. */ if (nsk != sk) { if (dccp_child_process(sk, nsk, skb)) goto reset; if (opt_skb != NULL) __kfree_skb(opt_skb); return 0; } } if (dccp_rcv_state_process(sk, skb, dccp_hdr(skb), skb->len)) goto reset; if (opt_skb) { /* XXX This is where we would goto ipv6_pktoptions. */ __kfree_skb(opt_skb); } return 0; reset: dccp_v6_ctl_send_reset(sk, skb); discard: if (opt_skb != NULL) __kfree_skb(opt_skb); kfree_skb(skb); return 0; } static int dccp_v6_rcv(struct sk_buff *skb) { const struct dccp_hdr *dh; struct sock *sk; int min_cov; /* Step 1: Check header basics */ if (dccp_invalid_packet(skb)) goto discard_it; /* Step 1: If header checksum is incorrect, drop packet and return. 
*/ if (dccp_v6_csum_finish(skb, &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr)) { DCCP_WARN("dropped packet with invalid checksum\n"); goto discard_it; } dh = dccp_hdr(skb); DCCP_SKB_CB(skb)->dccpd_seq = dccp_hdr_seq(dh); DCCP_SKB_CB(skb)->dccpd_type = dh->dccph_type; if (dccp_packet_without_ack(skb)) DCCP_SKB_CB(skb)->dccpd_ack_seq = DCCP_PKT_WITHOUT_ACK_SEQ; else DCCP_SKB_CB(skb)->dccpd_ack_seq = dccp_hdr_ack_seq(skb); /* Step 2: * Look up flow ID in table and get corresponding socket */ sk = __inet6_lookup_skb(&dccp_hashinfo, skb, dh->dccph_sport, dh->dccph_dport); /* * Step 2: * If no socket ... */ if (sk == NULL) { dccp_pr_debug("failed to look up flow ID in table and " "get corresponding socket\n"); goto no_dccp_socket; } /* * Step 2: * ... or S.state == TIMEWAIT, * Generate Reset(No Connection) unless P.type == Reset * Drop packet and return */ if (sk->sk_state == DCCP_TIME_WAIT) { dccp_pr_debug("sk->sk_state == DCCP_TIME_WAIT: do_time_wait\n"); inet_twsk_put(inet_twsk(sk)); goto no_dccp_socket; } /* * RFC 4340, sec. 9.2.1: Minimum Checksum Coverage * o if MinCsCov = 0, only packets with CsCov = 0 are accepted * o if MinCsCov > 0, also accept packets with CsCov >= MinCsCov */ min_cov = dccp_sk(sk)->dccps_pcrlen; if (dh->dccph_cscov && (min_cov == 0 || dh->dccph_cscov < min_cov)) { dccp_pr_debug("Packet CsCov %d does not satisfy MinCsCov %d\n", dh->dccph_cscov, min_cov); /* FIXME: send Data Dropped option (see also dccp_v4_rcv) */ goto discard_and_relse; } if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) goto discard_and_relse; return sk_receive_skb(sk, skb, 1) ? -1 : 0; no_dccp_socket: if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) goto discard_it; /* * Step 2: * If no socket ... * Generate Reset(No Connection) unless P.type == Reset * Drop packet and return */ if (dh->dccph_type != DCCP_PKT_RESET) { DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION; dccp_v6_ctl_send_reset(sk, skb); } discard_it: kfree_skb(skb); return 0; discard_and_relse: sock_put(sk); goto discard_it; } static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) { struct sockaddr_in6 *usin = (struct sockaddr_in6 *)uaddr; struct inet_connection_sock *icsk = inet_csk(sk); struct inet_sock *inet = inet_sk(sk); struct ipv6_pinfo *np = inet6_sk(sk); struct dccp_sock *dp = dccp_sk(sk); struct in6_addr *saddr = NULL, *final_p, final; struct flowi6 fl6; struct dst_entry *dst; int addr_type; int err; dp->dccps_role = DCCP_ROLE_CLIENT; if (addr_len < SIN6_LEN_RFC2133) return -EINVAL; if (usin->sin6_family != AF_INET6) return -EAFNOSUPPORT; memset(&fl6, 0, sizeof(fl6)); if (np->sndflow) { fl6.flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK; IP6_ECN_flow_init(fl6.flowlabel); if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) { struct ip6_flowlabel *flowlabel; flowlabel = fl6_sock_lookup(sk, fl6.flowlabel); if (flowlabel == NULL) return -EINVAL; ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst); fl6_sock_release(flowlabel); } } /* * connect() to INADDR_ANY means loopback (BSD'ism). */ if (ipv6_addr_any(&usin->sin6_addr)) usin->sin6_addr.s6_addr[15] = 1; addr_type = ipv6_addr_type(&usin->sin6_addr); if (addr_type & IPV6_ADDR_MULTICAST) return -ENETUNREACH; if (addr_type & IPV6_ADDR_LINKLOCAL) { if (addr_len >= sizeof(struct sockaddr_in6) && usin->sin6_scope_id) { /* If interface is set while binding, indices * must coincide. 
*/ if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != usin->sin6_scope_id) return -EINVAL; sk->sk_bound_dev_if = usin->sin6_scope_id; } /* Connect to link-local address requires an interface */ if (!sk->sk_bound_dev_if) return -EINVAL; } ipv6_addr_copy(&np->daddr, &usin->sin6_addr); np->flow_label = fl6.flowlabel; /* * DCCP over IPv4 */ if (addr_type == IPV6_ADDR_MAPPED) { u32 exthdrlen = icsk->icsk_ext_hdr_len; struct sockaddr_in sin; SOCK_DEBUG(sk, "connect: ipv4 mapped\n"); if (__ipv6_only_sock(sk)) return -ENETUNREACH; sin.sin_family = AF_INET; sin.sin_port = usin->sin6_port; sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3]; icsk->icsk_af_ops = &dccp_ipv6_mapped; sk->sk_backlog_rcv = dccp_v4_do_rcv; err = dccp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin)); if (err) { icsk->icsk_ext_hdr_len = exthdrlen; icsk->icsk_af_ops = &dccp_ipv6_af_ops; sk->sk_backlog_rcv = dccp_v6_do_rcv; goto failure; } ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr); ipv6_addr_set_v4mapped(inet->inet_rcv_saddr, &np->rcv_saddr); return err; } if (!ipv6_addr_any(&np->rcv_saddr)) saddr = &np->rcv_saddr; fl6.flowi6_proto = IPPROTO_DCCP; ipv6_addr_copy(&fl6.daddr, &np->daddr); ipv6_addr_copy(&fl6.saddr, saddr ? saddr : &np->saddr); fl6.flowi6_oif = sk->sk_bound_dev_if; fl6.fl6_dport = usin->sin6_port; fl6.fl6_sport = inet->inet_sport; security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); final_p = fl6_update_dst(&fl6, np->opt, &final); dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true); if (IS_ERR(dst)) { err = PTR_ERR(dst); goto failure; } if (saddr == NULL) { saddr = &fl6.saddr; ipv6_addr_copy(&np->rcv_saddr, saddr); } /* set the source address */ ipv6_addr_copy(&np->saddr, saddr); inet->inet_rcv_saddr = LOOPBACK4_IPV6; __ip6_dst_store(sk, dst, NULL, NULL); icsk->icsk_ext_hdr_len = 0; if (np->opt != NULL) icsk->icsk_ext_hdr_len = (np->opt->opt_flen + np->opt->opt_nflen); inet->inet_dport = usin->sin6_port; dccp_set_state(sk, DCCP_REQUESTING); err = inet6_hash_connect(&dccp_death_row, sk); if (err) goto late_failure; dp->dccps_iss = secure_dccpv6_sequence_number(np->saddr.s6_addr32, np->daddr.s6_addr32, inet->inet_sport, inet->inet_dport); err = dccp_connect(sk); if (err) goto late_failure; return 0; late_failure: dccp_set_state(sk, DCCP_CLOSED); __sk_dst_reset(sk); failure: inet->inet_dport = 0; sk->sk_route_caps = 0; return err; } static const struct inet_connection_sock_af_ops dccp_ipv6_af_ops = { .queue_xmit = inet6_csk_xmit, .send_check = dccp_v6_send_check, .rebuild_header = inet6_sk_rebuild_header, .conn_request = dccp_v6_conn_request, .syn_recv_sock = dccp_v6_request_recv_sock, .net_header_len = sizeof(struct ipv6hdr), .setsockopt = ipv6_setsockopt, .getsockopt = ipv6_getsockopt, .addr2sockaddr = inet6_csk_addr2sockaddr, .sockaddr_len = sizeof(struct sockaddr_in6), .bind_conflict = inet6_csk_bind_conflict, #ifdef CONFIG_COMPAT .compat_setsockopt = compat_ipv6_setsockopt, .compat_getsockopt = compat_ipv6_getsockopt, #endif }; /* * DCCP over IPv4 via INET6 API */ static const struct inet_connection_sock_af_ops dccp_ipv6_mapped = { .queue_xmit = ip_queue_xmit, .send_check = dccp_v4_send_check, .rebuild_header = inet_sk_rebuild_header, .conn_request = dccp_v6_conn_request, .syn_recv_sock = dccp_v6_request_recv_sock, .net_header_len = sizeof(struct iphdr), .setsockopt = ipv6_setsockopt, .getsockopt = ipv6_getsockopt, .addr2sockaddr = inet6_csk_addr2sockaddr, .sockaddr_len = sizeof(struct sockaddr_in6), #ifdef CONFIG_COMPAT .compat_setsockopt = compat_ipv6_setsockopt, .compat_getsockopt = 
compat_ipv6_getsockopt, #endif }; /* NOTE: A lot of things set to zero explicitly by call to * sk_alloc() so need not be done here. */ static int dccp_v6_init_sock(struct sock *sk) { static __u8 dccp_v6_ctl_sock_initialized; int err = dccp_init_sock(sk, dccp_v6_ctl_sock_initialized); if (err == 0) { if (unlikely(!dccp_v6_ctl_sock_initialized)) dccp_v6_ctl_sock_initialized = 1; inet_csk(sk)->icsk_af_ops = &dccp_ipv6_af_ops; } return err; } static void dccp_v6_destroy_sock(struct sock *sk) { dccp_destroy_sock(sk); inet6_destroy_sock(sk); } static struct timewait_sock_ops dccp6_timewait_sock_ops = { .twsk_obj_size = sizeof(struct dccp6_timewait_sock), }; static struct proto dccp_v6_prot = { .name = "DCCPv6", .owner = THIS_MODULE, .close = dccp_close, .connect = dccp_v6_connect, .disconnect = dccp_disconnect, .ioctl = dccp_ioctl, .init = dccp_v6_init_sock, .setsockopt = dccp_setsockopt, .getsockopt = dccp_getsockopt, .sendmsg = dccp_sendmsg, .recvmsg = dccp_recvmsg, .backlog_rcv = dccp_v6_do_rcv, .hash = dccp_v6_hash, .unhash = inet_unhash, .accept = inet_csk_accept, .get_port = inet_csk_get_port, .shutdown = dccp_shutdown, .destroy = dccp_v6_destroy_sock, .orphan_count = &dccp_orphan_count, .max_header = MAX_DCCP_HEADER, .obj_size = sizeof(struct dccp6_sock), .slab_flags = SLAB_DESTROY_BY_RCU, .rsk_prot = &dccp6_request_sock_ops, .twsk_prot = &dccp6_timewait_sock_ops, .h.hashinfo = &dccp_hashinfo, #ifdef CONFIG_COMPAT .compat_setsockopt = compat_dccp_setsockopt, .compat_getsockopt = compat_dccp_getsockopt, #endif }; static const struct inet6_protocol dccp_v6_protocol = { .handler = dccp_v6_rcv, .err_handler = dccp_v6_err, .flags = INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL, }; static const struct proto_ops inet6_dccp_ops = { .family = PF_INET6, .owner = THIS_MODULE, .release = inet6_release, .bind = inet6_bind, .connect = inet_stream_connect, .socketpair = sock_no_socketpair, .accept = inet_accept, .getname = inet6_getname, .poll = dccp_poll, .ioctl = inet6_ioctl, .listen = inet_dccp_listen, .shutdown = inet_shutdown, .setsockopt = sock_common_setsockopt, .getsockopt = sock_common_getsockopt, .sendmsg = inet_sendmsg, .recvmsg = sock_common_recvmsg, .mmap = sock_no_mmap, .sendpage = sock_no_sendpage, #ifdef CONFIG_COMPAT .compat_setsockopt = compat_sock_common_setsockopt, .compat_getsockopt = compat_sock_common_getsockopt, #endif }; static struct inet_protosw dccp_v6_protosw = { .type = SOCK_DCCP, .protocol = IPPROTO_DCCP, .prot = &dccp_v6_prot, .ops = &inet6_dccp_ops, .flags = INET_PROTOSW_ICSK, }; static int __net_init dccp_v6_init_net(struct net *net) { if (dccp_hashinfo.bhash == NULL) return -ESOCKTNOSUPPORT; return inet_ctl_sock_create(&net->dccp.v6_ctl_sk, PF_INET6, SOCK_DCCP, IPPROTO_DCCP, net); } static void __net_exit dccp_v6_exit_net(struct net *net) { inet_ctl_sock_destroy(net->dccp.v6_ctl_sk); } static struct pernet_operations dccp_v6_ops = { .init = dccp_v6_init_net, .exit = dccp_v6_exit_net, }; static int __init dccp_v6_init(void) { int err = proto_register(&dccp_v6_prot, 1); if (err != 0) goto out; err = inet6_add_protocol(&dccp_v6_protocol, IPPROTO_DCCP); if (err != 0) goto out_unregister_proto; inet6_register_protosw(&dccp_v6_protosw); err = register_pernet_subsys(&dccp_v6_ops); if (err != 0) goto out_destroy_ctl_sock; out: return err; out_destroy_ctl_sock: inet6_del_protocol(&dccp_v6_protocol, IPPROTO_DCCP); inet6_unregister_protosw(&dccp_v6_protosw); out_unregister_proto: proto_unregister(&dccp_v6_prot); goto out; } static void __exit dccp_v6_exit(void) { 
unregister_pernet_subsys(&dccp_v6_ops); inet6_del_protocol(&dccp_v6_protocol, IPPROTO_DCCP); inet6_unregister_protosw(&dccp_v6_protosw); proto_unregister(&dccp_v6_prot); } module_init(dccp_v6_init); module_exit(dccp_v6_exit); /* * __stringify doesn't like enums, so use SOCK_DCCP (6) and IPPROTO_DCCP (33) * values directly. Also cover the case where the protocol is not specified, * i.e. net-pf-PF_INET6-proto-0-type-SOCK_DCCP */ MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET6, 33, 6); MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET6, 0, 6); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@mandriva.com>"); MODULE_DESCRIPTION("DCCPv6 - Datagram Congestion Controlled Protocol");
./CrossVul/dataset_final_sorted/CWE-362/c/good_3726_3
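/*
 * A minimal, hedged user-space sketch of the client path served by
 * dccp_v6_connect() in the file above: creating a DCCP socket over IPv6
 * and connecting it.  SOCK_DCCP (6) and IPPROTO_DCCP (33) are the values
 * named in the MODULE_ALIAS comment; the loopback address and port below
 * are placeholders, and a real client would normally also configure a
 * DCCP service code, which is omitted here.
 */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_in6 sa;
	int fd;

	fd = socket(AF_INET6, SOCK_DCCP, IPPROTO_DCCP);
	if (fd < 0) {
		perror("socket(AF_INET6, SOCK_DCCP)");
		return 1;
	}

	memset(&sa, 0, sizeof(sa));
	sa.sin6_family = AF_INET6;
	sa.sin6_port = htons(5001);              /* placeholder port */
	inet_pton(AF_INET6, "::1", &sa.sin6_addr);

	/* In the kernel this lands in dccp_v6_connect(). */
	if (connect(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0)
		perror("connect");

	close(fd);
	return 0;
}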
crossvul-cpp_data_bad_3536_0
/* SCTP kernel implementation * Copyright (c) 1999-2000 Cisco, Inc. * Copyright (c) 1999-2001 Motorola, Inc. * Copyright (c) 2001-2003 International Business Machines, Corp. * Copyright (c) 2001 Intel Corp. * Copyright (c) 2001 Nokia, Inc. * Copyright (c) 2001 La Monte H.P. Yarroll * * This file is part of the SCTP kernel implementation * * These functions handle all input from the IP layer into SCTP. * * This SCTP implementation is free software; * you can redistribute it and/or modify it under the terms of * the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This SCTP implementation is distributed in the hope that it * will be useful, but WITHOUT ANY WARRANTY; without even the implied * ************************ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * See the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with GNU CC; see the file COPYING. If not, write to * the Free Software Foundation, 59 Temple Place - Suite 330, * Boston, MA 02111-1307, USA. * * Please send any bug reports or fixes you make to the * email address(es): * lksctp developers <lksctp-developers@lists.sourceforge.net> * * Or submit a bug report through the following website: * http://www.sf.net/projects/lksctp * * Written or modified by: * La Monte H.P. Yarroll <piggy@acm.org> * Karl Knutson <karl@athena.chicago.il.us> * Xingang Guo <xingang.guo@intel.com> * Jon Grimm <jgrimm@us.ibm.com> * Hui Huang <hui.huang@nokia.com> * Daisy Chang <daisyc@us.ibm.com> * Sridhar Samudrala <sri@us.ibm.com> * Ardelle Fan <ardelle.fan@intel.com> * * Any bugs reported given to us we will try to fix... any fixes shared will * be incorporated into the next SCTP release. */ #include <linux/types.h> #include <linux/list.h> /* For struct list_head */ #include <linux/socket.h> #include <linux/ip.h> #include <linux/time.h> /* For struct timeval */ #include <net/ip.h> #include <net/icmp.h> #include <net/snmp.h> #include <net/sock.h> #include <net/xfrm.h> #include <net/sctp/sctp.h> #include <net/sctp/sm.h> #include <net/sctp/checksum.h> #include <net/net_namespace.h> /* Forward declarations for internal helpers. */ static int sctp_rcv_ootb(struct sk_buff *); static struct sctp_association *__sctp_rcv_lookup(struct sk_buff *skb, const union sctp_addr *laddr, const union sctp_addr *paddr, struct sctp_transport **transportp); static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(const union sctp_addr *laddr); static struct sctp_association *__sctp_lookup_association( const union sctp_addr *local, const union sctp_addr *peer, struct sctp_transport **pt); static void sctp_add_backlog(struct sock *sk, struct sk_buff *skb); /* Calculate the SCTP checksum of an SCTP packet. */ static inline int sctp_rcv_checksum(struct sk_buff *skb) { struct sk_buff *list = skb_shinfo(skb)->frag_list; struct sctphdr *sh = sctp_hdr(skb); __be32 cmp = sh->checksum; __be32 val = sctp_start_cksum((__u8 *)sh, skb_headlen(skb)); for (; list; list = list->next) val = sctp_update_cksum((__u8 *)list->data, skb_headlen(list), val); val = sctp_end_cksum(val); if (val != cmp) { /* CRC failure, dump it. 
*/ SCTP_INC_STATS_BH(SCTP_MIB_CHECKSUMERRORS); return -1; } return 0; } struct sctp_input_cb { union { struct inet_skb_parm h4; #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) struct inet6_skb_parm h6; #endif } header; struct sctp_chunk *chunk; }; #define SCTP_INPUT_CB(__skb) ((struct sctp_input_cb *)&((__skb)->cb[0])) /* * This is the routine which IP calls when receiving an SCTP packet. */ int sctp_rcv(struct sk_buff *skb) { struct sock *sk; struct sctp_association *asoc; struct sctp_endpoint *ep = NULL; struct sctp_ep_common *rcvr; struct sctp_transport *transport = NULL; struct sctp_chunk *chunk; struct sctphdr *sh; union sctp_addr src; union sctp_addr dest; int family; struct sctp_af *af; if (skb->pkt_type!=PACKET_HOST) goto discard_it; SCTP_INC_STATS_BH(SCTP_MIB_INSCTPPACKS); if (skb_linearize(skb)) goto discard_it; sh = sctp_hdr(skb); /* Pull up the IP and SCTP headers. */ __skb_pull(skb, skb_transport_offset(skb)); if (skb->len < sizeof(struct sctphdr)) goto discard_it; if (!skb_csum_unnecessary(skb) && sctp_rcv_checksum(skb) < 0) goto discard_it; skb_pull(skb, sizeof(struct sctphdr)); /* Make sure we at least have chunk headers worth of data left. */ if (skb->len < sizeof(struct sctp_chunkhdr)) goto discard_it; family = ipver2af(ip_hdr(skb)->version); af = sctp_get_af_specific(family); if (unlikely(!af)) goto discard_it; /* Initialize local addresses for lookups. */ af->from_skb(&src, skb, 1); af->from_skb(&dest, skb, 0); /* If the packet is to or from a non-unicast address, * silently discard the packet. * * This is not clearly defined in the RFC except in section * 8.4 - OOTB handling. However, based on the book "Stream Control * Transmission Protocol" 2.1, "It is important to note that the * IP address of an SCTP transport address must be a routable * unicast address. In other words, IP multicast addresses and * IP broadcast addresses cannot be used in an SCTP transport * address." */ if (!af->addr_valid(&src, NULL, skb) || !af->addr_valid(&dest, NULL, skb)) goto discard_it; asoc = __sctp_rcv_lookup(skb, &src, &dest, &transport); if (!asoc) ep = __sctp_rcv_lookup_endpoint(&dest); /* Retrieve the common input handling substructure. */ rcvr = asoc ? &asoc->base : &ep->base; sk = rcvr->sk; /* * If a frame arrives on an interface and the receiving socket is * bound to another interface, via SO_BINDTODEVICE, treat it as OOTB */ if (sk->sk_bound_dev_if && (sk->sk_bound_dev_if != af->skb_iif(skb))) { if (asoc) { sctp_association_put(asoc); asoc = NULL; } else { sctp_endpoint_put(ep); ep = NULL; } sk = sctp_get_ctl_sock(); ep = sctp_sk(sk)->ep; sctp_endpoint_hold(ep); rcvr = &ep->base; } /* * RFC 2960, 8.4 - Handle "Out of the blue" Packets. * An SCTP packet is called an "out of the blue" (OOTB) * packet if it is correctly formed, i.e., passed the * receiver's checksum check, but the receiver is not * able to identify the association to which this * packet belongs. */ if (!asoc) { if (sctp_rcv_ootb(skb)) { SCTP_INC_STATS_BH(SCTP_MIB_OUTOFBLUES); goto discard_release; } } if (!xfrm_policy_check(sk, XFRM_POLICY_IN, skb, family)) goto discard_release; nf_reset(skb); if (sk_filter(sk, skb)) goto discard_release; /* Create an SCTP packet structure. */ chunk = sctp_chunkify(skb, asoc, sk); if (!chunk) goto discard_release; SCTP_INPUT_CB(skb)->chunk = chunk; /* Remember what endpoint is to handle this packet. */ chunk->rcvr = rcvr; /* Remember the SCTP header. */ chunk->sctp_hdr = sh; /* Set the source and destination addresses of the incoming chunk. 
*/ sctp_init_addrs(chunk, &src, &dest); /* Remember where we came from. */ chunk->transport = transport; /* Acquire access to the sock lock. Note: We are safe from other * bottom halves on this lock, but a user may be in the lock too, * so check if it is busy. */ sctp_bh_lock_sock(sk); if (sock_owned_by_user(sk)) { SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_BACKLOG); sctp_add_backlog(sk, skb); } else { SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_SOFTIRQ); sctp_inq_push(&chunk->rcvr->inqueue, chunk); } sctp_bh_unlock_sock(sk); /* Release the asoc/ep ref we took in the lookup calls. */ if (asoc) sctp_association_put(asoc); else sctp_endpoint_put(ep); return 0; discard_it: SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_DISCARDS); kfree_skb(skb); return 0; discard_release: /* Release the asoc/ep ref we took in the lookup calls. */ if (asoc) sctp_association_put(asoc); else sctp_endpoint_put(ep); goto discard_it; } /* Process the backlog queue of the socket. Every skb on * the backlog holds a ref on an association or endpoint. * We hold this ref throughout the state machine to make * sure that the structure we need is still around. */ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb) { struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk; struct sctp_inq *inqueue = &chunk->rcvr->inqueue; struct sctp_ep_common *rcvr = NULL; int backloged = 0; rcvr = chunk->rcvr; /* If the rcvr is dead then the association or endpoint * has been deleted and we can safely drop the chunk * and refs that we are holding. */ if (rcvr->dead) { sctp_chunk_free(chunk); goto done; } if (unlikely(rcvr->sk != sk)) { /* In this case, the association moved from one socket to * another. We are currently sitting on the backlog of the * old socket, so we need to move. * However, since we are here in the process context we * need to take make sure that the user doesn't own * the new socket when we process the packet. * If the new socket is user-owned, queue the chunk to the * backlog of the new socket without dropping any refs. * Otherwise, we can safely push the chunk on the inqueue. */ sk = rcvr->sk; sctp_bh_lock_sock(sk); if (sock_owned_by_user(sk)) { sk_add_backlog(sk, skb); backloged = 1; } else sctp_inq_push(inqueue, chunk); sctp_bh_unlock_sock(sk); /* If the chunk was backloged again, don't drop refs */ if (backloged) return 0; } else { sctp_inq_push(inqueue, chunk); } done: /* Release the refs we took in sctp_add_backlog */ if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type) sctp_association_put(sctp_assoc(rcvr)); else if (SCTP_EP_TYPE_SOCKET == rcvr->type) sctp_endpoint_put(sctp_ep(rcvr)); else BUG(); return 0; } static void sctp_add_backlog(struct sock *sk, struct sk_buff *skb) { struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk; struct sctp_ep_common *rcvr = chunk->rcvr; /* Hold the assoc/ep while hanging on the backlog queue. * This way, we know structures we need will not disappear from us */ if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type) sctp_association_hold(sctp_assoc(rcvr)); else if (SCTP_EP_TYPE_SOCKET == rcvr->type) sctp_endpoint_hold(sctp_ep(rcvr)); else BUG(); sk_add_backlog(sk, skb); } /* Handle icmp frag needed error. */ void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc, struct sctp_transport *t, __u32 pmtu) { if (!t || (t->pathmtu <= pmtu)) return; if (sock_owned_by_user(sk)) { asoc->pmtu_pending = 1; t->pmtu_pending = 1; return; } if (t->param_flags & SPP_PMTUD_ENABLE) { /* Update transports view of the MTU */ sctp_transport_update_pmtu(t, pmtu); /* Update association pmtu. 
*/ sctp_assoc_sync_pmtu(asoc); } /* Retransmit with the new pmtu setting. * Normally, if PMTU discovery is disabled, an ICMP Fragmentation * Needed will never be sent, but if a message was sent before * PMTU discovery was disabled that was larger than the PMTU, it * would not be fragmented, so it must be re-transmitted fragmented. */ sctp_retransmit(&asoc->outqueue, t, SCTP_RTXR_PMTUD); } /* * SCTP Implementer's Guide, 2.37 ICMP handling procedures * * ICMP8) If the ICMP code is a "Unrecognized next header type encountered" * or a "Protocol Unreachable" treat this message as an abort * with the T bit set. * * This function sends an event to the state machine, which will abort the * association. * */ void sctp_icmp_proto_unreachable(struct sock *sk, struct sctp_association *asoc, struct sctp_transport *t) { SCTP_DEBUG_PRINTK("%s\n", __func__); sctp_do_sm(SCTP_EVENT_T_OTHER, SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH), asoc->state, asoc->ep, asoc, t, GFP_ATOMIC); } /* Common lookup code for icmp/icmpv6 error handler. */ struct sock *sctp_err_lookup(int family, struct sk_buff *skb, struct sctphdr *sctphdr, struct sctp_association **app, struct sctp_transport **tpp) { union sctp_addr saddr; union sctp_addr daddr; struct sctp_af *af; struct sock *sk = NULL; struct sctp_association *asoc; struct sctp_transport *transport = NULL; struct sctp_init_chunk *chunkhdr; __u32 vtag = ntohl(sctphdr->vtag); int len = skb->len - ((void *)sctphdr - (void *)skb->data); *app = NULL; *tpp = NULL; af = sctp_get_af_specific(family); if (unlikely(!af)) { return NULL; } /* Initialize local addresses for lookups. */ af->from_skb(&saddr, skb, 1); af->from_skb(&daddr, skb, 0); /* Look for an association that matches the incoming ICMP error * packet. */ asoc = __sctp_lookup_association(&saddr, &daddr, &transport); if (!asoc) return NULL; sk = asoc->base.sk; /* RFC 4960, Appendix C. ICMP Handling * * ICMP6) An implementation MUST validate that the Verification Tag * contained in the ICMP message matches the Verification Tag of * the peer. If the Verification Tag is not 0 and does NOT * match, discard the ICMP message. If it is 0 and the ICMP * message contains enough bytes to verify that the chunk type is * an INIT chunk and that the Initiate Tag matches the tag of the * peer, continue with ICMP7. If the ICMP message is too short * or the chunk type or the Initiate Tag does not match, silently * discard the packet. */ if (vtag == 0) { chunkhdr = (struct sctp_init_chunk *)((void *)sctphdr + sizeof(struct sctphdr)); if (len < sizeof(struct sctphdr) + sizeof(sctp_chunkhdr_t) + sizeof(__be32) || chunkhdr->chunk_hdr.type != SCTP_CID_INIT || ntohl(chunkhdr->init_hdr.init_tag) != asoc->c.my_vtag) { goto out; } } else if (vtag != asoc->c.peer_vtag) { goto out; } sctp_bh_lock_sock(sk); /* If too many ICMPs get dropped on busy * servers this needs to be solved differently. */ if (sock_owned_by_user(sk)) NET_INC_STATS_BH(&init_net, LINUX_MIB_LOCKDROPPEDICMPS); *app = asoc; *tpp = transport; return sk; out: if (asoc) sctp_association_put(asoc); return NULL; } /* Common cleanup code for icmp/icmpv6 error handler. */ void sctp_err_finish(struct sock *sk, struct sctp_association *asoc) { sctp_bh_unlock_sock(sk); if (asoc) sctp_association_put(asoc); } /* * This routine is called by the ICMP module when it gets some * sort of error condition. If err < 0 then the socket should * be closed and the error returned to the user. If err > 0 * it's just the icmp type << 8 | icmp code. 
After adjustment * header points to the first 8 bytes of the sctp header. We need * to find the appropriate port. * * The locking strategy used here is very "optimistic". When * someone else accesses the socket the ICMP is just dropped * and for some paths there is no check at all. * A more general error queue to queue errors for later handling * is probably better. * */ void sctp_v4_err(struct sk_buff *skb, __u32 info) { struct iphdr *iph = (struct iphdr *)skb->data; const int ihlen = iph->ihl * 4; const int type = icmp_hdr(skb)->type; const int code = icmp_hdr(skb)->code; struct sock *sk; struct sctp_association *asoc = NULL; struct sctp_transport *transport; struct inet_sock *inet; sk_buff_data_t saveip, savesctp; int err; if (skb->len < ihlen + 8) { ICMP_INC_STATS_BH(&init_net, ICMP_MIB_INERRORS); return; } /* Fix up skb to look at the embedded net header. */ saveip = skb->network_header; savesctp = skb->transport_header; skb_reset_network_header(skb); skb_set_transport_header(skb, ihlen); sk = sctp_err_lookup(AF_INET, skb, sctp_hdr(skb), &asoc, &transport); /* Put back, the original values. */ skb->network_header = saveip; skb->transport_header = savesctp; if (!sk) { ICMP_INC_STATS_BH(&init_net, ICMP_MIB_INERRORS); return; } /* Warning: The sock lock is held. Remember to call * sctp_err_finish! */ switch (type) { case ICMP_PARAMETERPROB: err = EPROTO; break; case ICMP_DEST_UNREACH: if (code > NR_ICMP_UNREACH) goto out_unlock; /* PMTU discovery (RFC1191) */ if (ICMP_FRAG_NEEDED == code) { sctp_icmp_frag_needed(sk, asoc, transport, info); goto out_unlock; } else { if (ICMP_PROT_UNREACH == code) { sctp_icmp_proto_unreachable(sk, asoc, transport); goto out_unlock; } } err = icmp_err_convert[code].errno; break; case ICMP_TIME_EXCEEDED: /* Ignore any time exceeded errors due to fragment reassembly * timeouts. */ if (ICMP_EXC_FRAGTIME == code) goto out_unlock; err = EHOSTUNREACH; break; default: goto out_unlock; } inet = inet_sk(sk); if (!sock_owned_by_user(sk) && inet->recverr) { sk->sk_err = err; sk->sk_error_report(sk); } else { /* Only an error on timeout */ sk->sk_err_soft = err; } out_unlock: sctp_err_finish(sk, asoc); } /* * RFC 2960, 8.4 - Handle "Out of the blue" Packets. * * This function scans all the chunks in the OOTB packet to determine if * the packet should be discarded right away. If a response might be needed * for this packet, or, if further processing is possible, the packet will * be queued to a proper inqueue for the next phase of handling. * * Output: * Return 0 - If further processing is needed. * Return 1 - If the packet can be discarded right away. */ static int sctp_rcv_ootb(struct sk_buff *skb) { sctp_chunkhdr_t *ch; __u8 *ch_end; sctp_errhdr_t *err; ch = (sctp_chunkhdr_t *) skb->data; /* Scan through all the chunks in the packet. */ do { /* Break out if chunk length is less then minimal. */ if (ntohs(ch->length) < sizeof(sctp_chunkhdr_t)) break; ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length)); if (ch_end > skb_tail_pointer(skb)) break; /* RFC 8.4, 2) If the OOTB packet contains an ABORT chunk, the * receiver MUST silently discard the OOTB packet and take no * further action. */ if (SCTP_CID_ABORT == ch->type) goto discard; /* RFC 8.4, 6) If the packet contains a SHUTDOWN COMPLETE * chunk, the receiver should silently discard the packet * and take no further action. */ if (SCTP_CID_SHUTDOWN_COMPLETE == ch->type) goto discard; /* RFC 4460, 2.11.2 * This will discard packets with INIT chunk bundled as * subsequent chunks in the packet. 
When INIT is first, * the normal INIT processing will discard the chunk. */ if (SCTP_CID_INIT == ch->type && (void *)ch != skb->data) goto discard; /* RFC 8.4, 7) If the packet contains a "Stale cookie" ERROR * or a COOKIE ACK the SCTP Packet should be silently * discarded. */ if (SCTP_CID_COOKIE_ACK == ch->type) goto discard; if (SCTP_CID_ERROR == ch->type) { sctp_walk_errors(err, ch) { if (SCTP_ERROR_STALE_COOKIE == err->cause) goto discard; } } ch = (sctp_chunkhdr_t *) ch_end; } while (ch_end < skb_tail_pointer(skb)); return 0; discard: return 1; } /* Insert endpoint into the hash table. */ static void __sctp_hash_endpoint(struct sctp_endpoint *ep) { struct sctp_ep_common *epb; struct sctp_hashbucket *head; epb = &ep->base; epb->hashent = sctp_ep_hashfn(epb->bind_addr.port); head = &sctp_ep_hashtable[epb->hashent]; sctp_write_lock(&head->lock); hlist_add_head(&epb->node, &head->chain); sctp_write_unlock(&head->lock); } /* Add an endpoint to the hash. Local BH-safe. */ void sctp_hash_endpoint(struct sctp_endpoint *ep) { sctp_local_bh_disable(); __sctp_hash_endpoint(ep); sctp_local_bh_enable(); } /* Remove endpoint from the hash table. */ static void __sctp_unhash_endpoint(struct sctp_endpoint *ep) { struct sctp_hashbucket *head; struct sctp_ep_common *epb; epb = &ep->base; if (hlist_unhashed(&epb->node)) return; epb->hashent = sctp_ep_hashfn(epb->bind_addr.port); head = &sctp_ep_hashtable[epb->hashent]; sctp_write_lock(&head->lock); __hlist_del(&epb->node); sctp_write_unlock(&head->lock); } /* Remove endpoint from the hash. Local BH-safe. */ void sctp_unhash_endpoint(struct sctp_endpoint *ep) { sctp_local_bh_disable(); __sctp_unhash_endpoint(ep); sctp_local_bh_enable(); } /* Look up an endpoint. */ static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(const union sctp_addr *laddr) { struct sctp_hashbucket *head; struct sctp_ep_common *epb; struct sctp_endpoint *ep; struct hlist_node *node; int hash; hash = sctp_ep_hashfn(ntohs(laddr->v4.sin_port)); head = &sctp_ep_hashtable[hash]; read_lock(&head->lock); sctp_for_each_hentry(epb, node, &head->chain) { ep = sctp_ep(epb); if (sctp_endpoint_is_match(ep, laddr)) goto hit; } ep = sctp_sk((sctp_get_ctl_sock()))->ep; hit: sctp_endpoint_hold(ep); read_unlock(&head->lock); return ep; } /* Insert association into the hash table. */ static void __sctp_hash_established(struct sctp_association *asoc) { struct sctp_ep_common *epb; struct sctp_hashbucket *head; epb = &asoc->base; /* Calculate which chain this entry will belong to. */ epb->hashent = sctp_assoc_hashfn(epb->bind_addr.port, asoc->peer.port); head = &sctp_assoc_hashtable[epb->hashent]; sctp_write_lock(&head->lock); hlist_add_head(&epb->node, &head->chain); sctp_write_unlock(&head->lock); } /* Add an association to the hash. Local BH-safe. */ void sctp_hash_established(struct sctp_association *asoc) { if (asoc->temp) return; sctp_local_bh_disable(); __sctp_hash_established(asoc); sctp_local_bh_enable(); } /* Remove association from the hash table. */ static void __sctp_unhash_established(struct sctp_association *asoc) { struct sctp_hashbucket *head; struct sctp_ep_common *epb; epb = &asoc->base; epb->hashent = sctp_assoc_hashfn(epb->bind_addr.port, asoc->peer.port); head = &sctp_assoc_hashtable[epb->hashent]; sctp_write_lock(&head->lock); __hlist_del(&epb->node); sctp_write_unlock(&head->lock); } /* Remove association from the hash table. Local BH-safe. 
*/ void sctp_unhash_established(struct sctp_association *asoc) { if (asoc->temp) return; sctp_local_bh_disable(); __sctp_unhash_established(asoc); sctp_local_bh_enable(); } /* Look up an association. */ static struct sctp_association *__sctp_lookup_association( const union sctp_addr *local, const union sctp_addr *peer, struct sctp_transport **pt) { struct sctp_hashbucket *head; struct sctp_ep_common *epb; struct sctp_association *asoc; struct sctp_transport *transport; struct hlist_node *node; int hash; /* Optimize here for direct hit, only listening connections can * have wildcards anyways. */ hash = sctp_assoc_hashfn(ntohs(local->v4.sin_port), ntohs(peer->v4.sin_port)); head = &sctp_assoc_hashtable[hash]; read_lock(&head->lock); sctp_for_each_hentry(epb, node, &head->chain) { asoc = sctp_assoc(epb); transport = sctp_assoc_is_match(asoc, local, peer); if (transport) goto hit; } read_unlock(&head->lock); return NULL; hit: *pt = transport; sctp_association_hold(asoc); read_unlock(&head->lock); return asoc; } /* Look up an association. BH-safe. */ SCTP_STATIC struct sctp_association *sctp_lookup_association(const union sctp_addr *laddr, const union sctp_addr *paddr, struct sctp_transport **transportp) { struct sctp_association *asoc; sctp_local_bh_disable(); asoc = __sctp_lookup_association(laddr, paddr, transportp); sctp_local_bh_enable(); return asoc; } /* Is there an association matching the given local and peer addresses? */ int sctp_has_association(const union sctp_addr *laddr, const union sctp_addr *paddr) { struct sctp_association *asoc; struct sctp_transport *transport; if ((asoc = sctp_lookup_association(laddr, paddr, &transport))) { sctp_association_put(asoc); return 1; } return 0; } /* * SCTP Implementors Guide, 2.18 Handling of address * parameters within the INIT or INIT-ACK. * * D) When searching for a matching TCB upon reception of an INIT * or INIT-ACK chunk the receiver SHOULD use not only the * source address of the packet (containing the INIT or * INIT-ACK) but the receiver SHOULD also use all valid * address parameters contained within the chunk. * * 2.18.3 Solution description * * This new text clearly specifies to an implementor the need * to look within the INIT or INIT-ACK. Any implementation that * does not do this, may not be able to establish associations * in certain circumstances. * */ static struct sctp_association *__sctp_rcv_init_lookup(struct sk_buff *skb, const union sctp_addr *laddr, struct sctp_transport **transportp) { struct sctp_association *asoc; union sctp_addr addr; union sctp_addr *paddr = &addr; struct sctphdr *sh = sctp_hdr(skb); sctp_chunkhdr_t *ch; union sctp_params params; sctp_init_chunk_t *init; struct sctp_transport *transport; struct sctp_af *af; ch = (sctp_chunkhdr_t *) skb->data; /* * This code will NOT touch anything inside the chunk--it is * strictly READ-ONLY. * * RFC 2960 3 SCTP packet Format * * Multiple chunks can be bundled into one SCTP packet up to * the MTU size, except for the INIT, INIT ACK, and SHUTDOWN * COMPLETE chunks. These chunks MUST NOT be bundled with any * other chunk in a packet. See Section 6.10 for more details * on chunk bundling. */ /* Find the start of the TLVs and the end of the chunk. This is * the region we search for address parameters. */ init = (sctp_init_chunk_t *)skb->data; /* Walk the parameters looking for embedded addresses. */ sctp_walk_params(params, init, init_hdr.params) { /* Note: Ignoring hostname addresses. 
*/ af = sctp_get_af_specific(param_type2af(params.p->type)); if (!af) continue; af->from_addr_param(paddr, params.addr, sh->source, 0); asoc = __sctp_lookup_association(laddr, paddr, &transport); if (asoc) return asoc; } return NULL; } /* ADD-IP, Section 5.2 * When an endpoint receives an ASCONF Chunk from the remote peer * special procedures may be needed to identify the association the * ASCONF Chunk is associated with. To properly find the association * the following procedures SHOULD be followed: * * D2) If the association is not found, use the address found in the * Address Parameter TLV combined with the port number found in the * SCTP common header. If found proceed to rule D4. * * D2-ext) If more than one ASCONF Chunks are packed together, use the * address found in the ASCONF Address Parameter TLV of each of the * subsequent ASCONF Chunks. If found, proceed to rule D4. */ static struct sctp_association *__sctp_rcv_asconf_lookup( sctp_chunkhdr_t *ch, const union sctp_addr *laddr, __be16 peer_port, struct sctp_transport **transportp) { sctp_addip_chunk_t *asconf = (struct sctp_addip_chunk *)ch; struct sctp_af *af; union sctp_addr_param *param; union sctp_addr paddr; /* Skip over the ADDIP header and find the Address parameter */ param = (union sctp_addr_param *)(asconf + 1); af = sctp_get_af_specific(param_type2af(param->v4.param_hdr.type)); if (unlikely(!af)) return NULL; af->from_addr_param(&paddr, param, peer_port, 0); return __sctp_lookup_association(laddr, &paddr, transportp); } /* SCTP-AUTH, Section 6.3: * If the receiver does not find a STCB for a packet containing an AUTH * chunk as the first chunk and not a COOKIE-ECHO chunk as the second * chunk, it MUST use the chunks after the AUTH chunk to look up an existing * association. * * This means that any chunks that can help us identify the association need * to be looked at to find this assocation. */ static struct sctp_association *__sctp_rcv_walk_lookup(struct sk_buff *skb, const union sctp_addr *laddr, struct sctp_transport **transportp) { struct sctp_association *asoc = NULL; sctp_chunkhdr_t *ch; int have_auth = 0; unsigned int chunk_num = 1; __u8 *ch_end; /* Walk through the chunks looking for AUTH or ASCONF chunks * to help us find the association. */ ch = (sctp_chunkhdr_t *) skb->data; do { /* Break out if chunk length is less then minimal. */ if (ntohs(ch->length) < sizeof(sctp_chunkhdr_t)) break; ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length)); if (ch_end > skb_tail_pointer(skb)) break; switch(ch->type) { case SCTP_CID_AUTH: have_auth = chunk_num; break; case SCTP_CID_COOKIE_ECHO: /* If a packet arrives containing an AUTH chunk as * a first chunk, a COOKIE-ECHO chunk as the second * chunk, and possibly more chunks after them, and * the receiver does not have an STCB for that * packet, then authentication is based on * the contents of the COOKIE- ECHO chunk. */ if (have_auth == 1 && chunk_num == 2) return NULL; break; case SCTP_CID_ASCONF: if (have_auth || sctp_addip_noauth) asoc = __sctp_rcv_asconf_lookup(ch, laddr, sctp_hdr(skb)->source, transportp); default: break; } if (asoc) break; ch = (sctp_chunkhdr_t *) ch_end; chunk_num++; } while (ch_end < skb_tail_pointer(skb)); return asoc; } /* * There are circumstances when we need to look inside the SCTP packet * for information to help us find the association. Examples * include looking inside of INIT/INIT-ACK chunks or after the AUTH * chunks. 
*/ static struct sctp_association *__sctp_rcv_lookup_harder(struct sk_buff *skb, const union sctp_addr *laddr, struct sctp_transport **transportp) { sctp_chunkhdr_t *ch; ch = (sctp_chunkhdr_t *) skb->data; /* The code below will attempt to walk the chunk and extract * parameter information. Before we do that, we need to verify * that the chunk length doesn't cause overflow. Otherwise, we'll * walk off the end. */ if (WORD_ROUND(ntohs(ch->length)) > skb->len) return NULL; /* If this is INIT/INIT-ACK look inside the chunk too. */ switch (ch->type) { case SCTP_CID_INIT: case SCTP_CID_INIT_ACK: return __sctp_rcv_init_lookup(skb, laddr, transportp); break; default: return __sctp_rcv_walk_lookup(skb, laddr, transportp); break; } return NULL; } /* Lookup an association for an inbound skb. */ static struct sctp_association *__sctp_rcv_lookup(struct sk_buff *skb, const union sctp_addr *paddr, const union sctp_addr *laddr, struct sctp_transport **transportp) { struct sctp_association *asoc; asoc = __sctp_lookup_association(laddr, paddr, transportp); /* Further lookup for INIT/INIT-ACK packets. * SCTP Implementors Guide, 2.18 Handling of address * parameters within the INIT or INIT-ACK. */ if (!asoc) asoc = __sctp_rcv_lookup_harder(skb, laddr, transportp); return asoc; }
./CrossVul/dataset_final_sorted/CWE-362/c/bad_3536_0
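/*
 * sctp_rcv_ootb() and __sctp_rcv_walk_lookup() above both iterate over the
 * chunks of an SCTP packet: read a 4-byte chunk header, honour the 16-bit
 * length field, and advance by the length rounded up to a 4-byte boundary,
 * stopping on a short or overflowing chunk.  The stand-alone sketch below
 * illustrates that walk over a raw buffer; the struct layout follows the
 * RFC 4960 common chunk header, but the helper names and the sample packet
 * bytes are hypothetical.
 */
#include <arpa/inet.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct chunk_hdr {                  /* RFC 4960 common chunk header */
	uint8_t  type;
	uint8_t  flags;
	uint16_t length;            /* network byte order, includes header */
} __attribute__((packed));

static size_t word_round(size_t len)
{
	return (len + 3) & ~(size_t)3;  /* same rounding as WORD_ROUND */
}

/* Walk every chunk in buf[0..len) and print its type and length. */
static void walk_chunks(const uint8_t *buf, size_t len)
{
	size_t off = 0;

	while (off + sizeof(struct chunk_hdr) <= len) {
		const struct chunk_hdr *ch = (const void *)(buf + off);
		size_t clen = ntohs(ch->length);

		/* Reject chunks shorter than the header itself. */
		if (clen < sizeof(struct chunk_hdr))
			break;
		/* Reject chunks that would run past the end of the packet. */
		if (off + word_round(clen) > len)
			break;

		printf("chunk type %u, length %zu\n", ch->type, clen);
		off += word_round(clen);
	}
}

int main(void)
{
	/* Two hand-built chunks: a 6-byte chunk (padded to 8) and a 4-byte one. */
	uint8_t pkt[] = {
		0x01, 0x00, 0x00, 0x06, 0xaa, 0xbb, 0x00, 0x00,
		0x00, 0x03, 0x00, 0x04,
	};

	walk_chunks(pkt, sizeof(pkt));
	return 0;
}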